@techreport{oai:ipsj.ixsq.nii.ac.jp:00222598,
 author = {牧島, 直輝 and 鈴木, 聡志 and 安藤, 厚志 and 増村, 亮 and Makishima, Naoki and Suzuki, Satoshi and Ando, Atsushi and Masumura, Ryo},
 number = {18},
 month = nov,
 note = {本稿では,音声合成と音声認識をテキストと音声の少規模ペアデータ及び大規模なテキストデータで学習する新しい学習手法を提案する.従来の  cycle-consistency に基づく手法では,テキストデータを音声合成し,合成音声を音声認識して得られるテキストと元のテキストが一致するように音声合成と音声認識の学習が行われる.しかし,この方法では,合成音声が音声認識モデルに過適合し,(1)合成音声の話者性の欠落や(2)合成音声が音声認識しやすい音声となってしまう問題が発生する.この問題は合成音声の品質劣化だけではなく,音声認識の性能改善を限定的にする.本稿では,この問題を解決するため,(1)話者整合性損失と(2)段階的最適化に基づく学習方法を提案する.評価実験により,提案法の有効性を示す., This paper presents a novel joint training of text to speech (TTS) and automatic speech recognition (ASR) with small amounts of speech-text paired data and large amounts of text data. In conventional cycle-consistency-based methods, the TTS model and the ASR model are trained so that the text obtained by speech synthesis of text data and speech recognition of the synthesized speech matches the original text. However, this method leads to an overfitting of the synthesized speech to the ASR model, which results in the synthesized speech that (1) lacks speaker characteristics and (2) is easily recognizable. This problem not only degrades the quality of the synthesized speech but also limits the improvement of speech recognition performance. In this paper, we propose a learning method based on (1) speaker consistency loss and (2) step-wise optimization to solve this problem. Experimental results demonstrate the efficacy of the proposed method.},
 title = {音声合成と音声認識に対するテキストデータを用いた半教師あり統合学習},
 year = {2022}
}