@techreport{oai:ipsj.ixsq.nii.ac.jp:00231271,
 author = {Lin, Yuqin and Wang, Longbiao and Dang, Jianwu and Minematsu, Nobuaki},
 issue = {13},
 month = {Nov},
 note = {This paper proposes the Accent-Activated adapter (AccentAct) approach to address the challenge of speech variations in multi-accent scenarios. By incorporating parallel accent and contextual extractors within a pre-trained model, AccentAct improves ASR performance while reducing computational cost. Experimental results show that AccentAct outperforms traditional methods with a significant reduction in computational requirements, promoting inclusivity for individuals with diverse accents or dialects.},
 title = {Enhancing Multi-Accent Automated Speech Recognition with Accent-Activated Adapters},
 year = {2023}
}