Commit
·
a9eba09
1
Parent(s):
a6e5db9
Update README.md
Browse files
README.md
CHANGED
|
@@ -132,22 +132,30 @@ test_dataset = test_dataset.map(speech_file_to_array_fn)
|
|
| 132 |
# Preprocessing the datasets.
|
| 133 |
# We need to read the audio files as arrays
|
| 134 |
def evaluate(batch):
|
| 135 |
-
|
| 136 |
|
| 137 |
-
|
| 138 |
-
|
| 139 |
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
|
| 144 |
result = test_dataset.map(evaluate, batched=True, batch_size=8)
|
| 145 |
|
| 146 |
-
|
| 147 |
-
|
|
|
|
|
|
|
|
|
|
| 148 |
```
|
| 149 |
|
| 150 |
**Test Result**:
|
| 151 |
|
| 152 |
-
-
|
| 153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
# Preprocessing the datasets.
|
| 133 |
# We need to read the audio files as arrays
|
| 134 |
def evaluate(batch):
|
| 135 |
+
\tinputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
|
| 136 |
|
| 137 |
+
\twith torch.no_grad():
|
| 138 |
+
\t\tlogits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
|
| 139 |
|
| 140 |
+
\tpred_ids = torch.argmax(logits, dim=-1)
|
| 141 |
+
\tbatch["pred_strings"] = processor.batch_decode(pred_ids)
|
| 142 |
+
\treturn batch
|
| 143 |
|
| 144 |
result = test_dataset.map(evaluate, batched=True, batch_size=8)
|
| 145 |
|
| 146 |
+
predictions = [x.upper() for x in result["pred_strings"]]
|
| 147 |
+
references = [x.upper() for x in result["sentence"]]
|
| 148 |
+
|
| 149 |
+
print(f"WER: {wer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
|
| 150 |
+
print(f"CER: {cer.compute(predictions=predictions, references=references, chunk_size=1000) * 100}")
|
| 151 |
```
|
| 152 |
|
| 153 |
**Test Result**:
|
| 154 |
|
| 155 |
+
My model may report better scores than others because of some specifics of my evaluation script, so I ran the same evaluation script on the other models (on 2021-04-22) to make a fairer comparison.
|
| 156 |
+
|
| 157 |
+
| Model | WER | CER |
|
| 158 |
+
| ------------- | ------------- | ------------- |
|
| 159 |
+
| vumichien/wav2vec2-large-xlsr-japanese | 1108.86% | **25.92%** |
|
| 160 |
+
| jonatasgrosman/wav2vec2-large-xlsr-53-japanese | **93.35%** | 29.24% |
|
| 161 |
+
| qqhann/w2v_hf_jsut_xlsr53 | 1012.18% | 69.46% |
|