{"created":"2025-01-19T01:31:31.521695+00:00","updated":"2025-01-19T10:49:02.740235+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00231303","sets":["1164:5159:11151:11431"]},"path":["11431"],"owner":"44499","recid":"231303","title":["Improvement of Tacotron2 text-to-speech model based on masking operation and positional attention mechanism"],"pubdate":{"attribute_name":"公開日","attribute_value":"2023-11-25"},"_buckets":{"deposit":"6a6328fb-c6c3-4046-94d7-59f82d5b5f22"},"_deposit":{"id":"231303","pid":{"type":"depid","value":"231303","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Improvement of Tacotron2 text-to-speech model based on masking operation and positional attention mechanism","author_link":["624220","624224","624221","624219","624222","624223"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Improvement of Tacotron2 text-to-speech model based on masking operation and positional attention mechanism"},{"subitem_title":"Improvement of Tacotron2 text-to-speech model based on masking operation and positional attention mechanism","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"分野横断(1)","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2023-11-25","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo"},{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo"},{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/231303/files/IPSJ-SLP23149011.pdf","label":"IPSJ-SLP23149011.pdf"},"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP23149011.pdf","filesize":[{"value":"2.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_login","version_id":"278c9e63-f188-42cd-bed0-49c45c320867","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2023 by the Institute of Electronics, Information and Communication Engineers. This SIG report is only available to those in membership of the SIG."}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Tong, Ma"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Daisuke, Saito"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Nobuaki, Minematsu"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Tong, Ma","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Daisuke, Saito","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Nobuaki, Minematsu","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Inspired by masking operations on Self-supervised Speech Representation (SSL) models, masking operations were imported to the improvement of text-to-speech synthesis models. In experiments with traditional multi-stage text-to-speech synthesis models, it was found that frame-masking operations on the inputs can improve the performance of the models. However, in an end-to-end model like Tacotron2 [1], hiding state vector information is very complex and it is difficult to achieve accurate masking. To achieve accurate masking operations in an end-to-end model like Tacotron2, this paper introduces a position-based attention mechanism that accurately captures the contextual information of each character and performs precise deletions to achieve effective masking. Through empirical studies, it is demonstrated that judicious masking operations can improve the performance of the Tacotron2 model, while excessive masking operations lead to a significant degradation of the model performance.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Inspired by masking operations on Self-supervised Speech Representation (SSL) models, masking operations were imported to the improvement of text-to-speech synthesis models. In experiments with traditional multi-stage text-to-speech synthesis models, it was found that frame-masking operations on the inputs can improve the performance of the models. However, in an end-to-end model like Tacotron2 [1], hiding state vector information is very complex and it is difficult to achieve accurate masking. To achieve accurate masking operations in an end-to-end model like Tacotron2, this paper introduces a position-based attention mechanism that accurately captures the contextual information of each character and performs precise deletions to achieve effective masking. Through empirical studies, it is demonstrated that judicious masking operations can improve the performance of the Tacotron2 model, while excessive masking operations lead to a significant degradation of the model performance.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2023-11-25","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"11","bibliographicVolumeNumber":"2023-SLP-149"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":231303,"links":{}}