@techreport{oai:ipsj.ixsq.nii.ac.jp:00216614,
  author        = {Wang, Rui and Li, Li and Toda, Tomoki},
  title         = {Target Speaker Extraction based on Conditional Variational Autoencoder and Directional Information in Underdetermined Condition},
  institution   = {Information Processing Society of Japan},
  number        = {13},
  month         = feb,
  year          = {2022},
  abstract      = {This paper deals with a dual-channel target speaker extraction problem in underdetermined conditions. A blind source separation framework based on the demixing matrix estimation with deep source models has achieved reasonably high separation performance in determined conditions, but its performance is still limited in underdetermined conditions. For the dual-channel target speaker extraction, it is expected that the additional directional information is a useful cue, and the choice of the source model is crucial to the performance. In this report, we propose a target speaker extraction method by combining geometrical constraint-based target selection capability, more powerful source modeling, and nonlinear postprocessing. In the demixing matrix estimation, the target directional information is used as a soft constraint, and two conditional variational autoencoders are used to model a single speaker's speech and interference mixture speech, respectively. As the postprocessing, a time-frequency mask estimated from the separated interference mixture speech is used to extract the target speaker's speech. Experimental results have demonstrated that the proposed method outperforms baseline methods.},
  internal-note = {Cleaned repository auto-export: deduplicated author list, normalised names to "Last, First", deduplicated abstract (was pasted twice into note), issue -> number. NOTE(review): institution inferred from the IPSJ OAI key; confirm report series/number against the IPSJ record.},
}