from datasets import load_dataset
/home/joregan/miniconda3/envs/hf/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
  from .autonotebook import tqdm as notebook_tqdm
from pathlib import Path

# Gather every synthesized .wav under the TTS output directory and load
# them as an audiofolder dataset (load_dataset is imported at file top).
BASE = Path("/home/chreri/overflow/OverFlow-sardrag/output/nordanvinden")
filenames = list(map(str, BASE.glob("**/*.wav")))
dataset = load_dataset("audiofolder", data_files=filenames)
Resolving data files: 100%|██████████| 550/550 [00:00<00:00, 541137.04it/s]
from transformers import WhisperProcessor, WhisperForConditionalGeneration
The Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. View Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details.
# Single checkpoint name shared by processor and model.
_WHISPER_CKPT = "openai/whisper-large-v2"

processor = WhisperProcessor.from_pretrained(_WHISPER_CKPT)
model = WhisperForConditionalGeneration.from_pretrained(_WHISPER_CKPT)
# Decoder prompt ids that force Swedish transcription during generation.
forced_decoder_ids = processor.get_decoder_prompt_ids(language="swedish", task="transcribe")
import torch
# Reference transcripts ("The North Wind and the Sun" in Swedish), keyed by
# the sent-N id embedded in each wav filename.
SENTENCES = dict([
    ("sent-1", "Nordanvinden och solen tvistade en gång om vem av dem som var starkast."),
    ("sent-2", "Just då kom en vandrare vägen fram, insvept i en varm kappa."),
    ("sent-3", "De kom då överens om att den som först kunde få vandraren att ta av sig kappan, han skulle anses vara starkare än den andra."),
    ("sent-4", "Då blåste nordanvinden så hårt han någonsin kunde, men ju hårdare han blåste, desto tätare svepte vandraren kappan om sig, och till slut gav nordanvinden upp försöket."),
    ("sent-5", "Då lät solen sina strålar skina helt varmt och genast tog vandraren av sig kappan, och så var nordanvinden tvungen att erkänna att solen var den starkaste av de två."),
])
The Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. View Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details.
from datasets import Audio

# Decode audio at the 16 kHz sampling rate Whisper's processor expects.
target_audio = Audio(sampling_rate=16000)
dataset = dataset.cast_column("audio", target_audio)
def map_to_pred(batch):
    """Transcribe one dataset row with Whisper; attach reference and prediction.

    Expects ``batch["audio"]`` to be a decoded datasets Audio dict with
    ``path``, ``array`` and ``sampling_rate`` keys.

    Adds:
        batch["reference"]  -- gold transcript looked up via the "sent-N" id
                               embedded in the file path.
        batch["prediction"] -- normalized Whisper transcription.

    Raises:
        ValueError: if the path contains no "sent-" id.
    """
    audio = batch["audio"]
    path = audio["path"]
    start = path.find("sent-")
    if start == -1:
        # Fail loudly instead of slicing garbage out of the path.
        raise ValueError(f"no 'sent-N' id found in path: {path}")
    # Ids such as "sent-1" are exactly 6 characters long.
    sentence_id = path[start:start + 6]
    batch["reference"] = SENTENCES[sentence_id]
    input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features
    with torch.no_grad():
        # BUG FIX: forced_decoder_ids (the Swedish transcription prompt built
        # earlier with get_decoder_prompt_ids) was computed but never passed,
        # so generation was not actually forced to Swedish.
        predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)[0]
    transcription = processor.decode(predicted_ids)
    batch["prediction"] = processor.tokenizer._normalize(transcription)
    return batch
res = dataset.map(map_to_pred)
import whisper
from tqdm import tqdm
from pathlib import Path
# Re-scan the synthesis output directory for the openai-whisper pass.
BASE = Path("/home/chreri/overflow/OverFlow-sardrag/output/nordanvinden")
filenames = list(map(str, BASE.glob("**/*.wav")))
num_files = len(filenames)

# Original openai/whisper package this time (not transformers).
model = whisper.load_model("large-v3")
def get_probs(model, filename, v3=True):
    """Return Whisper's language-detection probability dict for an audio file.

    v3 checkpoints use 128 mel bins; earlier checkpoints use 80.
    """
    n_mels = 128 if v3 else 80
    samples = whisper.pad_or_trim(whisper.load_audio(filename))
    mel = whisper.log_mel_spectrogram(samples, n_mels=n_mels).to(model.device)
    _, probs = model.detect_language(mel)
    return probs
# Transcribe each file (forcing Swedish) and record language-ID probabilities.
results = []
for filename in tqdm(filenames):
    entry = {
        "path": filename,
        "result": model.transcribe(filename, language="sv", fp16=False, verbose=True),
        "probs": get_probs(model, filename),
    }
    results.append(entry)
import os
os.getpid()
893157
from jiwer import wer
def _strip_punct(text):
    # Lowercase and drop commas/periods before the "clean" WER comparison.
    return text.strip().lower().replace(",", "").replace(".", "")

# Write per-file WER scores (raw and punctuation-stripped) plus language-ID
# probabilities as a TSV.
with open("/tmp/results.tsv", "w") as output:
    for res in results:
        path = res["path"]
        start = path.find("sent-")
        sentence_id = path[start:start + 6]
        reference = SENTENCES[sentence_id]
        pred = res["result"]["text"]
        reference_clean = _strip_punct(reference)
        pred_clean = _strip_punct(pred)
        sv_prob = res["probs"]["sv"]
        en_prob = res["probs"]["en"]
        wer_raw = wer(reference, pred)
        wer_clean = wer(reference_clean, pred_clean)
        output.write(f"{path}\t{sentence_id}\t{reference}\t{pred}\t{wer_raw}\t{wer_clean}\t{sv_prob}\t{en_prob}\n")
from datasets import load_dataset
from pathlib import Path

# Reload the audio folder from scratch for the wav2vec2 evaluation pass
# (duplicates the earlier setup so this section is self-contained).
BASE = Path("/home/chreri/overflow/OverFlow-sardrag/output/nordanvinden")
filenames = list(map(str, BASE.glob("**/*.wav")))
dataset = load_dataset("audiofolder", data_files=filenames)

from datasets import Audio
dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
# Swedish wav2vec2 ASR checkpoint (KBLab VoxRex).
_SWE_MODEL = "KBLab/wav2vec2-large-voxrex-swedish"
from transformers import pipeline
# Task (automatic-speech-recognition) is inferred from the model itself.
pipe = pipeline(model=_SWE_MODEL)
from transformers.pipelines.pt_utils import KeyDataset

# Stream the "audio" column through the ASR pipeline, keeping per-word
# timestamps; collect every pipeline output.
res = list(pipe(KeyDataset(dataset['train'], "audio"), return_timestamps="word"))
def attach_path(batch):
    """Promote the nested audio file path to a top-level "path" column."""
    audio_info = batch["audio"]
    batch["path"] = audio_info["path"]
    return batch
# BUG FIX: Dataset.map is not in-place -- it returns a new dataset.  The
# original call discarded the result, so the "path" column was never
# actually attached; keep the mapped dataset.
dataset = dataset.map(attach_path)
# Run the pipeline once per file path as well, so every result record
# carries the path of its source wav.
for wav_path in filenames:
    recognized = pipe(wav_path, return_timestamps="word")
    recognized['path'] = wav_path
    res.append(recognized)
from jiwer import wer
# Write per-file language-ID probabilities (sv/en) as a 4-column TSV.
# NOTE(review): this overwrites the richer 8-column /tmp/results.tsv
# produced earlier in the notebook.
with open("/tmp/results.tsv", "w") as output:
    for res in results:
        path = res["path"]
        marker = path.find("sent-")
        sentence_id = path[marker:marker + 6]
        sv_prob = res["probs"]["sv"]
        en_prob = res["probs"]["en"]
        output.write(f"{path}\t{sentence_id}\t{sv_prob}\t{en_prob}\n")
# Parse the v3 results TSV (8 columns: path, id, reference, prediction,
# raw WER, cleaned WER, sv prob, en prob), keyed by path.
res_v3 = {}
with open("/Users/joregan/Downloads/results.tsv") as v3:
    for line in v3:
        parts = line.strip().split("\t")
        res_v3[parts[0]] = {
            "sv_prob_v3": parts[6],
            "en_prob_v3": parts[7],
        }
# Merge the v2 probabilities (4 columns: path, id, sv prob, en prob)
# into the same per-path records.
with open("/tmp/results.tsv") as v2:
    for line in v2:
        parts = line.strip().split("\t")
        entry = res_v3[parts[0]]
        entry["sv_prob_v2"] = parts[2]
        entry["en_prob_v2"] = parts[3]
# Emit a side-by-side comparison of v2 vs v3 language-ID probabilities.
with open("/tmp/both.tsv", "w") as both:
    both.write("Path\tID SV (v2)\tID SV (v3)\tID EN (v2)\tID EN (v3)\n")
    for comp, vals in res_v3.items():
        both.write(f'{comp}\t{vals["sv_prob_v2"]}\t{vals["sv_prob_v3"]}\t{vals["en_prob_v2"]}\t{vals["en_prob_v3"]}\n')
def get_audioseqment(audio_path):
    """Load a wav as a pydub AudioSegment normalized to 16 kHz mono 16-bit.

    NOTE(review): relies on ``AudioSegment`` (pydub) having been imported in
    an earlier notebook cell -- no import is visible in this file; confirm.
    (Function name keeps its original spelling; callers use it as-is.)
    """
    segment = AudioSegment.from_wav(audio_path)
    if segment.frame_rate != 16000:
        segment = segment.set_frame_rate(16000)
    if segment.sample_width != 2:  # 2 bytes per sample -> 16-bit PCM
        segment = segment.set_sample_width(2)
    if segment.channels != 1:
        segment = segment.set_channels(1)
    return segment
def pydub_to_whisper(audio_segment):
    """Convert a pydub AudioSegment to the float32 array Whisper expects.

    16-bit PCM samples are scaled from [-32768, 32767] into [-1.0, 1.0).
    """
    samples = audio_segment.get_array_of_samples()
    return np.array(samples).astype(np.float32) / 32768.0
import json
# Load wav2vec2 recognition results: a list of {path, chunks} records
# (Path is imported at the top of the file).
rec_data = json.loads(Path("w2v-rec2.json").read_text())
# For every recognized word chunk, slice its span out of the source audio
# and re-run Whisper language detection on just that piece.
for recognition in tqdm(rec_data):
    segment = get_audioseqment(recognition['path'])
    for chunk in recognition['chunks']:
        # Word timestamps are in seconds; pydub slices in milliseconds.
        begin_ms = int(chunk['timestamp'][0] * 1000)
        end_ms = int(chunk['timestamp'][1] * 1000)
        piece = pydub_to_whisper(segment[begin_ms:end_ms])
        probs = get_probs_audiosegment(model, piece)
        chunk['prob_sv'] = probs['sv']
        chunk['prob_en'] = probs['en']
def get_probs_audiosegment(model, audio, v3=True):
    """Language-ID probabilities for an in-memory audio array (no file I/O).

    Same as get_probs but skips whisper.load_audio, so it can score clips
    that have already been converted to float32 sample arrays.
    """
    mel_bins = 128 if v3 else 80
    padded = whisper.pad_or_trim(audio)
    mel = whisper.log_mel_spectrogram(padded, n_mels=mel_bins).to(model.device)
    _, probs = model.detect_language(mel)
    return probs
# Reference transcripts for a second test set; rebinds the SENTENCES name
# used earlier in the notebook.  Original (non-sorted) key order is kept.
SENTENCES = dict([
    ("sent-2", "Sebastian tyckte att maten kunde ha lagats med lite finess."),
    ("sent-4", "Våra beroendeterapeuter har aldrig direkt räddat någon."),
    ("sent-1", "Forskningen visade tydligt på kopplingen mellan motion och ökad levnadsstandard."),
    ("sent-3", "I kemi är molekylärgeometri central för att förklara en förenings egenskaper."),
])