Interesting links, 07/03/2023
Misc. interesting things.
Google USM: Scaling Automatic Speech Recognition Beyond 100 Languages (project website)
@misc{zhang2023usm,
  doi           = {10.48550/ARXIV.2303.01037},
  eprint        = {2303.01037},
  archiveprefix = {arXiv},
  author        = {Zhang, Yu and Han, Wei and Qin, James and Wang, Yongqiang and Bapna, Ankur and Chen, Zhehuai and Chen, Nanxin and Li, Bo and Axelrod, Vera and Wang, Gary and Meng, Zhong and Hu, Ke and Rosenberg, Andrew and Prabhavalkar, Rohit and Park, Daniel S. and Haghani, Parisa and Riesa, Jason and Perng, Ginger and Soltau, Hagen and Strohman, Trevor and Ramabhadran, Bhuvana and Sainath, Tara and Moreno, Pedro and Chiu, Chung-Cheng and Schalkwyk, Johan and Beaufays, Fran{\c{c}}oise and Wu, Yonghui},
  title         = {{Google} {USM}: Scaling Automatic Speech Recognition Beyond 100 Languages},
  year          = {2023},
}
Flamingo: a Visual Language Model for Few-Shot Learning
@misc{alayrac2022flamingo,
  doi           = {10.48550/ARXIV.2204.14198},
  eprint        = {2204.14198},
  archiveprefix = {arXiv},
  author        = {Alayrac, Jean-Baptiste and Donahue, Jeff and Luc, Pauline and Miech, Antoine and Barr, Iain and Hasson, Yana and Lenc, Karel and Mensch, Arthur and Millican, Katie and Reynolds, Malcolm and Ring, Roman and Rutherford, Eliza and Cabi, Serkan and Han, Tengda and Gong, Zhitao and Samangooei, Sina and Monteiro, Marianne and Menick, Jacob and Borgeaud, Sebastian and Brock, Andrew and Nematzadeh, Aida and Sharifzadeh, Sahand and Binkowski, Mikolaj and Barreira, Ricardo and Vinyals, Oriol and Zisserman, Andrew and Simonyan, Karen},
  title         = {{Flamingo}: a Visual Language Model for {Few-Shot} Learning},
  year          = {2022},
}
A Comparison of Speech Data Augmentation Methods Using S3PRL Toolkit
@misc{huh2023augmentation,
  doi           = {10.48550/ARXIV.2303.00510},
  eprint        = {2303.00510},
  archiveprefix = {arXiv},
  author        = {Huh, Mina and Ray, Ruchira and Karnei, Corey},
  title         = {A Comparison of Speech Data Augmentation Methods Using {S3PRL} Toolkit},
  year          = {2023},
}
revdotcom/fstalign — An efficient OpenFST-based tool for calculating WER and aligning two transcript sequences.