{"updated":"2025-02-18T06:12:31.403965+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:02000403","sets":["1164:5159:1739855613862:1739855711540"]},"path":["1739855711540"],"owner":"80578","recid":"2000403","title":["音声因子句による条件付けを用いた発話スタイルキャプショニング"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2025-02-23"},"_buckets":{"deposit":"31235bda-ebd3-4588-aeef-8ef10e5f2ebd"},"_deposit":{"id":"2000403","pid":{"type":"depid","value":"2000403","revision_id":0},"owners":[80578],"status":"published","created_by":80578},"item_title":"音声因子句による条件付けを用いた発話スタイルキャプショニング","author_link":[],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"音声因子句による条件付けを用いた発話スタイルキャプショニング","subitem_title_language":"ja"},{"subitem_title":"Speaking Style Captioning Using Speech Factor Conditioning","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"ポスター講演","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2025-02-23","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"日本電信電話株式会社"},{"subitem_text_value":"日本電信電話株式会社"},{"subitem_text_value":"日本電信電話株式会社"},{"subitem_text_value":"日本電信電話株式会社"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/2000403/files/IPSJ-SLP25155079.pdf","label":"IPSJ-SLP25155079.pdf"},"date":[{"dateType":"Available","dateValue":"2027-02-23"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP25155079.pdf","filesize":[{"value":"1.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"22"},{"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"b284a6d8-9a24-4580-b102-2275bd994ee8","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2025 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"安藤,厚志"}]},{"creatorNames":[{"creatorName":"森谷,崇史"}]},{"creatorNames":[{"creatorName":"堀口,翔太"}]},{"creatorNames":[{"creatorName":"増村,亮"}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical 
report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"本稿では,発話スタイルに関する情報(話者性別,音量,音高,...)を正確に認識しながら多様な表現を生成する,新たな発話スタイルキャプショニング手法を提案する.従来手法では,発話スタイルに関する語だけでなく構文に関する語も含むキャプションをそのまま正解文として学習するため,音声からの発話スタイル情報の学習が難しく,文法は正しいが発話スタイル情報に誤りがある文を生成しやすいという課題があった.この問題を解決するため,提案手法では発話スタイル情報を表す音声因子句を導入し,音声因子句を生成させたのちキャプションを生成させるようモデル学習を行うことで,発話スタイル情報を明示的に学習させる.さらに,発話スタイル情報を正確に認識しながら多様なキャプションを生成させるための新たなデコーディング手法も提案する.実験の結果,提案手法は従来手法に比べて発話スタイル情報をより高精度に認識しつつ,より多様なキャプションを生成できることが確認された.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"This work presents a novel speaking-style captioning method that generates diverse descriptions while accurately including speaking-style information such as gender, pitch, and volume. Conventional methods rely on original captions, which contain not only speaking-style-related terms but also syntactic words, making it difficult to learn speaking-style characteristics from speech and often resulting in incorrect captions. To address this problem, the proposed method introduces factor-conditioned captioning (FCC), which first outputs a factor phrase representing speaking-style information and then generates a caption to ensure the model explicitly learns speaking-style factors. Additionally, we propose greedy-then-sampling (GtS) decoding, which first predicts speaking-style factors deterministically to guarantee semantic accuracy and then generates a caption based on factor-conditioned sampling to ensure diversity. Experiments show that the proposed method generates more diverse captions while improving style prediction performance compared to conventional methods.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"7","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2025-02-23","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"79","bibliographicVolumeNumber":"2025-SLP-155"}]},"relation_version_is_last":true,"weko_creator_id":"80578"},"created":"2025-02-18T06:12:27.390284+00:00","id":2000403,"links":{}}