{"id":183501,"created":"2025-01-19T00:51:01.049418+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00183501","sets":["6164:6165:7308:9246"]},"path":["9246"],"owner":"11","recid":"183501","title":["対話事例に基づく機械学習を用いた同調的表情を提示する対話エージェント"],"pubdate":{"attribute_name":"公開日","attribute_value":"2017-09-09"},"_buckets":{"deposit":"1b679e70-3647-417d-b47b-820866e4397b"},"_deposit":{"id":"183501","pid":{"type":"depid","value":"183501","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"対話事例に基づく機械学習を用いた同調的表情を提示する対話エージェント","author_link":["402908","402912","402909","402913","402911","402910"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"対話事例に基づく機械学習を用いた同調的表情を提示する対話エージェント"},{"subitem_title":"Conversational Agent Learning facial contagion of actual conversation example","subitem_title_language":"en"}]},"item_type_id":"18","publish_date":"2017-09-09","item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_18_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京工業大学"},{"subitem_text_value":"東京工業大学"},{"subitem_text_value":"東京工業大学"}]},"item_18_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Tokyo Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"Tokyo Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"Tokyo Institute of Technology","subitem_text_language":"en"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/183501/files/IPSJ-EC2017046.pdf","label":"IPSJ-EC2017046.pdf"},"date":[{"dateType":"Available","dateValue":"2017-09-09"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-EC2017046.pdf","filesize":[{"value":"1.2 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"40"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"dcfbb581-e5cd-4e69-8096-f86ccc83e8b9","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2017 by the Information Processing Society of Japan"}]},"item_18_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"薮下, 剛史"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"三武, 裕玄"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"長谷川, 晶一"}],"nameIdentifiers":[{}]}]},"item_18_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"TSUYOSHI, YABUSHITA","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"HIRONORI, MITAKE","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"SHOICHI, HASEGAWA","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_5794","resourcetype":"conference paper"}]},"item_18_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"対話エージェントが人間社会に溶け込み楽しい対話相手となるには,いかに人間のような振る舞いを自然に行えるかが重要となる.本研究ではエージェントの自然な振る舞いを決定する要素の一つである表情に注目し,人間と社会的関係を形成する上で重要となる同調的表情を自然に行う手法を提案する. 実会話における表情・音声を記録し,記録を再現するような行動決定モデルを機械学習により獲得することで,実会話事例が含む細やかな表情変化の特徴を持つ対話インタラクションを再現する.","subitem_description_type":"Other"}]},"item_18_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Conversation agents are expected to be life partners with social behaviours. It is important for Agents to give social and natural action like human. We propose a method to generate facial expression synchronized with the speaker’s facial expression. The method includes to capture facial expression and voice volume, and learning HMM with the conversation data. Using the HMM as facial expression determination model, our conversation agent shows natural facial expression based on actual conversation example.","subitem_description_type":"Other"}]},"item_18_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"265","bibliographic_titles":[{"bibliographic_title":"エンタテインメントコンピューティングシンポジウム2017論文集"}],"bibliographicPageStart":"262","bibliographicIssueDates":{"bibliographicIssueDate":"2017-09-09","bibliographicIssueDateType":"Issued"},"bibliographicVolumeNumber":"2017"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"updated":"2025-01-20T03:38:40.336823+00:00","links":{}}