@techreport{oai:ipsj.ixsq.nii.ac.jp:00209762,
  author        = {Soky, Kak and Li, Sheng and Mimura, Masato and Chu, Chenhui and Kawahara, Tatsuya},
  title         = {Comparison of End-to-End Models for Joint Speaker and Speech Recognition},
  institution   = {Information Processing Society of Japan},
  number        = {24},
  month         = feb,
  year          = {2021},
  abstract      = {In this paper, we investigate the effectiveness of using speaker information on the performance of speaker-imbalanced automatic speech recognition (ASR). We identify major speakers and combine other speakers who have a small size of speech, and make a systematic comparison of three methods that use speaker information for ASR including speaker attribute augmentation (SAug), multi-task learning (MTL), and adversarial learning (AL). We conduct experiments on a large spontaneous speech corpus of the Extraordinary Chambers in the Courts of Cambodia (ECCC) and an open Khmer text-to-speech corpus. As a result, we find that the use of speaker clustering information improves ASR performance including new speakers. Moreover, AL achieves better performance and more robustness in the speaker-independent setting compared to the other methods. It reduces errors of the baseline model by 4.32\%, 5.46\%, and 16.10\% for the closed test, open test, and out-of-domain test, respectively.},
  internal-note = {Institution inferred from the IPSJ repository key; confirm SIG technical report series/number against the IPSJ record.},
}