{"created":"2025-01-19T01:28:13.848503+00:00","updated":"2025-01-19T11:37:16.079576+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00229148","sets":["1164:3206:11201:11417"]},"path":["11417"],"owner":"44499","recid":"229148","title":["変分自己符号化器を用いた発話時の動作生成の研究"],"pubdate":{"attribute_name":"公開日","attribute_value":"2023-11-09"},"_buckets":{"deposit":"ccfad77a-2889-4a46-a50d-decbb0180b6d"},"_deposit":{"id":"229148","pid":{"type":"depid","value":"229148","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"変分自己符号化器を用いた発話時の動作生成の研究","author_link":["615719","615718","615720","615717"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"変分自己符号化器を用いた発話時の動作生成の研究"},{"subitem_title":"Co-speech Gesture Generation with Variational Auto Encoder","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2023-11-09","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京工業大学"},{"subitem_text_value":"東京工業大学"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"School of Computing, Tokyo Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"School of Computing, Tokyo Institute of Technology","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/229148/files/IPSJ-CG23192034.pdf","label":"IPSJ-CG23192034.pdf"},"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CG23192034.pdf","filesize":[{"value":"947.0 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"28"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_login","version_id":"649cd753-3af0-42ed-8383-d4f457e1b4aa","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2023 by the Institute of Electronics, Information and Communication Engineers. This SIG report is only available to those in membership of the SIG."}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"賈, 辰一"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"篠田, 浩一"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Shinichi, Ka","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Koichi, Shinoda","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10100541","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8949","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"発話時の動作生成は,発話音声に対応するジェスチャーを生成する研究である.従来研究の手法は大きく分けて二つある.決定論的手法は音声と動作の一対一の写像を学習させ,確率論的手法は音声から動作の生成確率をモデル化する.前者は確実に音声に忠実な動作を推論することができるが,一つの音声から一つの動作しか生成しない.後者は多様な動作生成ができるが,音声に対して忠実でない動作が生成されうる.本稿では,確率論的な手法の一つである変分自己符号化器において,話者の ID を与えた訓練で動作の個人性を学習させ,さらにランダム化の処理を施す手法を提案する.変分自己符号化器を用いて評価した FID が 52.8% 向上し,またより多様な動作生成に成功した.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Co-speech gesture generation is the study of generating gestures from speech. In prior works, deterministic methods learn a one-to-one mapping between speech and motion, and probabilistic methods model the probability of motions. While the former can infer the motion corresponding to the speech, it only generates one motion. In contrast, the latter can generate many motions, but it generates motions that don’t match the speech. In this paper, we will propose the probabilistic method by training VAE with the speaker’s information and randomizing it. Consequently, the fidelity of VAE is improved, and it can generate more diverse motions.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告コンピュータグラフィックスとビジュアル情報学(CG)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2023-11-09","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"34","bibliographicVolumeNumber":"2023-CG-192"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":229148,"links":{}}