{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00216614","sets":["1164:5159:10869:10870"]},"path":["10870"],"owner":"44499","recid":"216614","title":["Target Speaker Extraction based on Conditional Variational Autoencoder and Directional Information in Underdetermined Condition"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-02-22"},"_buckets":{"deposit":"2042ccfb-b8f8-4fd0-93e3-cd35409bd3e4"},"_deposit":{"id":"216614","pid":{"type":"depid","value":"216614","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Target Speaker Extraction based on Conditional Variational Autoencoder and Directional Information in Underdetermined Condition","author_link":["559270","559273","559269","559274","559271","559272"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Target Speaker Extraction based on Conditional Variational Autoencoder and Directional Information in Underdetermined Condition"},{"subitem_title":"Target Speaker Extraction based on Conditional Variational Autoencoder and Directional Information in Underdetermined Condition","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"EA ","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2022-02-22","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate school of Informatics, Nagoya University"},{"subitem_text_value":"Information Technology Center, Nagoya University"},{"subitem_text_value":"Information Technology Center, Nagoya University"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate school of Informatics, Nagoya University","subitem_text_language":"en"},{"subitem_text_value":"Information Technology Center, Nagoya University","subitem_text_language":"en"},{"subitem_text_value":"Information Technology Center, Nagoya University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/216614/files/IPSJ-SLP22140013.pdf","label":"IPSJ-SLP22140013.pdf"},"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP22140013.pdf","filesize":[{"value":"1.8 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_login","version_id":"8ed392df-2789-48f5-a394-c5a46d20f3d6","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Institute of Electronics, Information and Communication Engineers This SIG report is only available to those in membership of the SIG."}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Rui, Wang"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Li, Li"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tomoki, Toda"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Rui, Wang","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Li, Li","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tomoki, Toda","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"This paper deals with a dual-channel target speaker extraction problem in underdetermined conditions. A blind source separation framework based on the demixing matrix estimation with deep source models has achieved reasonably high separation performance in determined conditions, but its performance is still limited in underdetermined conditions. For the dual-channel target speaker extraction, it is expected that the additional directional information is a useful cue, and the choice of the source model is crucial to the performance. In this report, we propose a target speaker extraction method by combining geometrical constraint-based target selection capability, more powerful source modeling, and nonlinear postprocessing. In the demixing matrix estimation, the target directional information is used as a soft constraint, and two conditional variational autoencoders are used to model a single speaker’s speech and interference mixture speech, respectively. As the postprocessing, a time-frequency mask estimated from the separated interference mixture speech is used to extract the target speaker’s speech. Experimental results have demonstrated that the proposed method outperforms baseline methods.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"This paper deals with a dual-channel target speaker extraction problem in underdetermined conditions. A blind source separation framework based on the demixing matrix estimation with deep source models has achieved reasonably high separation performance in determined conditions, but its performance is still limited in underdetermined conditions. For the dual-channel target speaker extraction, it is expected that the additional directional information is a useful cue, and the choice of the source model is crucial to the performance. In this report, we propose a target speaker extraction method by combining geometrical constraint-based target selection capability, more powerful source modeling, and nonlinear postprocessing. In the demixing matrix estimation, the target directional information is used as a soft constraint, and two conditional variational autoencoders are used to model a single speaker’s speech and interference mixture speech, respectively. As the postprocessing, a time-frequency mask estimated from the separated interference mixture speech is used to extract the target speaker’s speech. Experimental results have demonstrated that the proposed method outperforms baseline methods.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2022-02-22","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"13","bibliographicVolumeNumber":"2022-SLP-140"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":216614,"updated":"2025-01-19T15:47:36.887422+00:00","links":{},"created":"2025-01-19T01:17:08.298728+00:00"}