{"updated":"2025-01-19T23:27:17.198385+00:00","links":{},"created":"2025-01-19T00:59:34.054108+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00194519","sets":["1164:5159:9712:9713"]},"path":["9713"],"owner":"44499","recid":"194519","title":["Deep Learning-Based Voice Conversion"],"pubdate":{"attribute_name":"公開日","attribute_value":"2019-02-20"},"_buckets":{"deposit":"037dd3a1-8f41-4f0a-9b4c-1d5aa2737eaf"},"_deposit":{"id":"194519","pid":{"type":"depid","value":"194519","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Deep Learning-Based Voice Conversion","author_link":["460571","460570"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Deep Learning-Based Voice Conversion"},{"subitem_title":"Deep Learning-Based Voice Conversion","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2019-02-20","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"University of Science and Technology of China"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"University of Science and Technology of China","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/194519/files/IPSJ-SLP19126004.pdf","label":"IPSJ-SLP19126004.pdf"},"date":[{"dateType":"Available","dateValue":"2021-02-20"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP19126004.pdf","filesize":[{"value":"553.6 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"daa12254-2d4e-4f18-9b5a-8eb5bbfddb65","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2019 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zhenhua, Ling"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zhenhua, Ling","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"I will introduce our recent work on applying deep learning techniques to voice conversion in this talk. Several methods have been proposed to improve different components in the pipeline of a statistical parametric voice conversion system, including deep neural networks with layer-wise generative training for acoustic modeling, deep autoencoders with binary distributed hidden units for feature representation, and WaveNet vocoder with limited training data for waveform reconstruction. Then, I will introduce our system designed for Voice Conversion Challenge 2018, which achieved the best performance under both parallel and non-parallel conditions in this evaluation. After this, I will present our recent progress on sequence-to-sequence acoustic modeling for voice conversion, which converts the acoustic features and durations of source utterances simultaneously using a unified acoustic model. Finally, some discussions on the future development of voice conversion techniques will be given.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"I will introduce our recent work on applying deep learning techniques to voice conversion in this talk. Several methods have been proposed to improve different components in the pipeline of a statistical parametric voice conversion system, including deep neural networks with layer-wise generative training for acoustic modeling, deep autoencoders with binary distributed hidden units for feature representation, and WaveNet vocoder with limited training data for waveform reconstruction. Then, I will introduce our system designed for Voice Conversion Challenge 2018, which achieved the best performance under both parallel and non-parallel conditions in this evaluation. After this, I will present our recent progress on sequence-to-sequence acoustic modeling for voice conversion, which converts the acoustic features and durations of source utterances simultaneously using a unified acoustic model. Finally, some discussions on the future development of voice conversion techniques will be given.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"1","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2019-02-20","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"4","bibliographicVolumeNumber":"2019-SLP-126"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":194519}