{"updated":"2025-01-20T03:09:14.059993+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00184864","sets":["1164:5159:9063:9316"]},"path":["9316"],"owner":"11","recid":"184864","title":["Analyzing the impact of including listener perception annotations in RNN-based emotional speech synthesis"],"pubdate":{"attribute_name":"公開日","attribute_value":"2017-12-14"},"_buckets":{"deposit":"15dddc46-20a0-421e-ba63-d47e35552f49"},"_deposit":{"id":"184864","pid":{"type":"depid","value":"184864","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"Analyzing the impact of including listener perception annotations in RNN-based emotional speech synthesis","author_link":["409476","409472","409469","409475","409473","409474","409470","409471"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Analyzing the impact of including listener perception annotations in RNN-based emotional speech synthesis"},{"subitem_title":"Analyzing the impact of including listener perception annotations in RNN-based emotional speech synthesis","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"ポスターセッション","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2017-12-14","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"National Institute of Informatics"},{"subitem_text_value":"National Institute of Informatics"},{"subitem_text_value":"National Institute of Informatics"},{"subitem_text_value":"National Institute of Informatics/The University of Edinburgh"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"National Institute of Informatics","subitem_text_language":"en"},{"subitem_text_value":"National Institute of Informatics","subitem_text_language":"en"},{"subitem_text_value":"National Institute of Informatics","subitem_text_language":"en"},{"subitem_text_value":"National Institute of Informatics / The University of Edinburgh","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/184864/files/IPSJ-SLP17119008.pdf","label":"IPSJ-SLP17119008.pdf"},"date":[{"dateType":"Available","dateValue":"2019-12-14"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP17119008.pdf","filesize":[{"value":"622.9 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ec01873d-8a12-43a5-b3a2-98acdbbc1669","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2017 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jaime, Lorenzo-Trueba"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Gustav, Eje Henter"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Shinji, Takaki"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Junichi, Yamagishi"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jaime, Lorenzo-Trueba","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Gustav, Eje Henter","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Shinji, Takaki","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Junichi, Yamagishi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"This paper investigates simultaneous modeling of multiple emotions in DNN-based expressive speech synthesis, and how to represent the emotional labels, such as emotional class and strength, for this task. Our goal is to answer two questions: First, what is the best way to annotate speech data with multiple emotions? Second, how should the emotional information be represented as labels for supervised DNN training? We evaluate on a large-scale corpus of emotional speech from a professional actress, additionally annotated with perceived emotional labels from crowd-sourced listeners. By comparing DNN-based speech synthesizers that utilize different emotional representations, we assess the impact of these representations and design decisions on human emotion recognition rates.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"This paper investigates simultaneous modeling of multiple emotions in DNN-based expressive speech synthesis, and how to represent the emotional labels, such as emotional class and strength, for this task. Our goal is to answer two questions: First, what is the best way to annotate speech data with multiple emotions? Second, how should the emotional information be represented as labels for supervised DNN training? We evaluate on a large-scale corpus of emotional speech from a professional actress, additionally annotated with perceived emotional labels from crowd-sourced listeners. By comparing DNN-based speech synthesizers that utilize different emotional representations, we assess the impact of these representations and design decisions on human emotion recognition rates.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"2","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2017-12-14","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"8","bibliographicVolumeNumber":"2017-SLP-119"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"created":"2025-01-19T00:52:08.701706+00:00","id":184864,"links":{}}