{"created":"2025-01-19T01:17:13.645869+00:00","updated":"2025-01-19T15:45:43.952090+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00216709","sets":["1164:3865:10834:10836"]},"path":["10836"],"owner":"44499","recid":"216709","title":["センサ情報から分析した人の行動認識情報を用いた会話システムの提案"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-02-28"},"_buckets":{"deposit":"88bcce2a-465d-4fa1-a1fe-e94c009c1959"},"_deposit":{"id":"216709","pid":{"type":"depid","value":"216709","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"センサ情報から分析した人の行動認識情報を用いた会話システムの提案","author_link":["559890","559889","559891","559888"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"センサ情報から分析した人の行動認識情報を用いた会話システムの提案"},{"subitem_title":"Proposal of conversation system using action recognition information analyzed from sensor information","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"インタラクション","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2022-02-28","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"現在,九州工業大学"},{"subitem_text_value":"現在,九州工業大学"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Presently with Kyushu institute of technology","subitem_text_language":"en"},{"subitem_text_value":"Presently with Kyushu institute of technology","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/216709/files/IPSJ-MBL22102027.pdf","label":"IPSJ-MBL22102027.pdf"},"date":[{"dateType":"Available","dateValue":"2024-02-28"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-MBL22102027.pdf","filesize":[{"value":"2.7 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"35"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"07bdb257-4f30-4796-9baf-c9a604413d11","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"豊坂, 祐樹"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"大北, 剛"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Yuki, Toyosaka","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tsuyoshi, Okita","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11851388","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, 
etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8817","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"近年,ロボットなどを用いた AI による会話システムは多々存在し,その技術も向上している.しかし,会話の対象者と事前情報なしに場当たり的に会話するのは依然難しい.そこで,カメラなどのセンサ情報から対象者の行動などを認識し,情報として組み込めばより幅広いな会話が可能ではないかと考えた.本研究では,カメラ等のセンサから認識した人の動きや人の行動に関わった物の情報を抽出し,会話情報として組み込むシステムを提案する.今回はその足掛かりとして,カメラから得られた映像に対する情報の抽出に焦点を絞り,映像内の情報を人の動作を主とした知識グラフとして抽出できるかどうかの分析と検証を行った.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In recent years, there are many AI-based conversation systems that use robots, etc., and their technologies are improving. However, it is still difficult to have a natural conversation with a person without prior information. Therefore, we thought that a wide range of conversations would be possible if the behavior of the target person is recognized from the sensor information of the camera and incorporated into the system as information. In this research, we propose a system that extracts information on human movements and objects related to human behavior recognized from sensors such as cameras and incorporates them as conversation information. This time, as a stepping stone, we focused on extracting information from camera video and verified whether the information could be extracted as a knowledge graph mainly for human movements.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"7","bibliographic_titles":[{"bibliographic_title":"研究報告モバイルコンピューティングと新社会システム(MBL)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2022-02-28","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"27","bibliographicVolumeNumber":"2022-MBL-102"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":216709,"links":{}}