{"created":"2025-01-19T01:12:46.735887+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00211635","sets":["1164:5159:10515:10608"]},"path":["10608"],"owner":"44499","recid":"211635","title":["話者特徴抽出器を加えたFaderNetVCによる未知話者声質変換"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-06-11"},"_buckets":{"deposit":"53eeef82-ab42-42f0-b436-df0741ecdec4"},"_deposit":{"id":"211635","pid":{"type":"depid","value":"211635","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"話者特徴抽出器を加えたFaderNetVCによる未知話者声質変換","author_link":["538047","538044","538043","538045","538046","538042"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"話者特徴抽出器を加えたFaderNetVCによる未知話者声質変換"},{"subitem_title":"Unseen speaker’s Voice Conversion by FaderNetVC with Speaker Feature Extractor","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"一般発表","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2021-06-11","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"電気通信大学"},{"subitem_text_value":"電気通信大学"},{"subitem_text_value":"電気通信大学"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/211635/files/IPSJ-SLP21137060.pdf","label":"IPSJ-SLP21137060.pdf"},"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP21137060.pdf","filesize":[{"value":"2.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_login","version_id":"a76cda98-92f4-403a-b22f-78b0cac41573","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Institute of Electronics, Information and Communication Engineers This SIG report is only available to those in membership of the SIG."}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"井硲, 巧"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"岸田, 拓也"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"中鹿, 亘"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Takumi, Isako","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Takuya, Kishida","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toru, Nakashima","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"近年,Deep Neural Network(以下,DNN とする)を用いた声質変換モデルが数多く提案されており,そのなかでも FaderNetVC は Encoder により発話から発話内容のみを出力し,それを one-hot な話者ラベルと共に Decoder に入力することにより発話内容を変えることなく話者のみを変えることを実現している.しかしこの FaderNetVC モデルは,既知話者の one-hot ラベルを用いての声質変換が前提となっているため,話者ラベルを持たない未知話者に対しての変換性能が低くなってしまう.本研究ではこの問題を解決するために,話者情報を出力する Encoder を追加した SplitterNetVC を提案する.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In recent years, many voice conversion models using Deep Neural Network (DNN) have been proposed, and FaderNetVC is one of them. In FaderNetVC, an encoder outputs only the speech content from an utterance, and a decoder reconstructs the speech from it with one-hot speaker labels. However, since this FaderNetVC model assumes voice conversion using the one-hot label of the known speaker, the conversion performance for unknown speakers without speaker labels is low. In order to solve this problem, we propose SplitterNetVC with an additional encoder that outputs speaker information.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2021-06-11","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"60","bibliographicVolumeNumber":"2021-SLP-137"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":211635,"updated":"2025-01-19T17:43:58.710580+00:00","links":{}}