Original notebook on Kaggle.

%%capture
!pip install transformers datasets jiwer
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
from datasets import Dataset
import soundfile as sf
import torch
from jiwer import wer
# Collect the utterance ids of the fairseq test split (skip the manifest's header line)
test_ids = []
with open("../input/cmu-us-awb-arctic-fairseq-files/test.tsv") as tsvf:
    for line in tsvf.readlines()[1:]:
        parts = line.split("\t")
        test_ids.append(parts[0].replace(".wav", ""))

# Map utterance id -> reference transcript, upper-cased to match the model's character vocabulary
transcripts = {}
with open("../input/cmu-us-awb-arctic-fairseq-files/text.tsv") as tsf:
    for line in tsf.readlines():
        parts = line.strip().split("\t")
        transcripts[parts[0]] = parts[1].upper()

# Pair each test wav path with its reference text
paths = []
text = []
for utt_id in test_ids:
    paths.append(f"/kaggle/input/cmu-us-awb-arctic-tts-dataset/cmu_us_awb_arctic/wav/{utt_id}.wav")
    text.append(transcripts[utt_id])

dataset = Dataset.from_dict({"file": paths, "text": text})
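Before running inference it can be worth checking that every manifest id actually resolves to a wav file on the Kaggle dataset mount; a minimal sanity check along these lines (not in the original notebook) would be:

import os

# Any path built from test.tsv that does not exist on disk
missing = [p for p in paths if not os.path.exists(p)]
print(len(dataset), "test utterances,", len(missing), "missing wav files")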

# wav2vec 2.0 base model fine-tuned for CTC on 960 h of LibriSpeech, plus its matching tokenizer
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
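Wav2Vec2Tokenizer is deprecated in recent transformers releases (the FutureWarning in the run output below points this out); a minimal sketch of the same load using Wav2Vec2Processor, which that warning recommends, would be:

from transformers import Wav2Vec2Processor

# The processor bundles the feature extractor and the CTC tokenizer; it is called the same way
# as the tokenizer in this notebook, and decoding uses processor.batch_decode(predicted_ids)
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")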

def map_to_array(batch):
    # Decode the wav into a raw waveform array (wav2vec2-base-960h expects 16 kHz mono audio)
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

dataset = dataset.map(map_to_array)
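If this notebook is reused on audio that is not already 16 kHz mono, a resampling step keeps the inputs valid for the model; a minimal sketch, assuming librosa is available (it is not used anywhere else here):

import librosa

def map_to_array_16k(batch):
    # librosa.load resamples to the requested rate and returns mono float32 audio
    speech, _ = librosa.load(batch["file"], sr=16000)
    batch["speech"] = speech
    return batch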

def map_to_pred(batch):
    # Pad to the longest utterance in the batch and convert the waveforms to model inputs
    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values).logits

    # Greedy CTC decoding: most likely token per frame, then collapse repeats and blanks
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = tokenizer.batch_decode(predicted_ids)
    batch["transcription"] = transcription
    return batch

# batch_size=1 keeps memory low; a larger batch size is faster at the cost of more padding
result = dataset.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])

print("WER:", wer(result["text"], result["transcription"]))
Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base-960h and are newly initialized: ['wav2vec2.masked_spec_embed']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. 
The tokenizer class you load from this checkpoint is 'Wav2Vec2CTCTokenizer'. 
The class this function is called from is 'Wav2Vec2Tokenizer'.
/opt/conda/lib/python3.7/site-packages/transformers/models/wav2vec2/tokenization_wav2vec2.py:748: FutureWarning: The class `Wav2Vec2Tokenizer` is deprecated and will be removed in version 5 of Transformers. Please use `Wav2Vec2Processor` or `Wav2Vec2CTCTokenizer` instead.
  FutureWarning,
WER: 0.057692307692307696
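For context, word error rate is (substitutions + deletions + insertions) divided by the number of reference words, so the result above corresponds to roughly 6 word errors per 100 reference words. A toy sanity check of the metric (example strings are made up):

from jiwer import wer

# One substituted word out of four reference words -> WER = 0.25
print(wer("the cat sat down", "the cat sat dawn"))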