{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00218464","sets":["1164:5159:10869:10940"]},"path":["10940"],"owner":"44499","recid":"218464","title":["Human-Robot Interaction through Multi-modal Semantic Understanding"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-06-10"},"_buckets":{"deposit":"45da9d0c-6f25-411f-864f-b4d0bf112daa"},"_deposit":{"id":"218464","pid":{"type":"depid","value":"218464","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Human-Robot Interaction through Multi-modal Semantic Understanding","author_link":["568109","568108"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Human-Robot Interaction through Multi-modal Semantic Understanding"},{"subitem_title":"Human-Robot Interaction through Multi-modal Semantic Understanding","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"招待講演","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2022-06-10","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Mitsubishi Electric Research Laboratories"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Mitsubishi Electric Research Laboratories","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/218464/files/IPSJ-SLP22142005.pdf","label":"IPSJ-SLP22142005.pdf"},"date":[{"dateType":"Available","dateValue":"2024-06-10"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLP22142005.pdf","filesize":[{"value":"7.5 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"22"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ba4872e5-c689-4542-8c38-1759845e3a28","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Chiori, Hori"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Chiori, Hori","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10442647","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical 
report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8663","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Science fiction television and movies have portrayed humanoid robots with human-like capabilities to recognize their surroundings and the context of the situation. While computers have recently become much more capable at many perceptual tasks, they are not yet ready to take the place of a human in many situations. The recent artificial intelligence (AI) boom and intelligent use of data acquired from various sensors has certainly accelerated the development of technologies needed to realize these advanced human-like capabilities in machines. We have developed a new AI system, called Scene-Aware Interaction, that enables machines to translate their perception and understanding of a scene and respond to it using natural language to interact more effectively with humans. To develop such a machine, we have proposed the Audio-Visual Scene-Aware Dialog (AVSD) task, collected an AVSD dataset, developed AVSD technologies, and hosted three-time AVSD challenge track at the Dialog System Technology Challenges (DSTC). We tested the performance of answer generation and temporal reasoning by finding evidence from the video to support each answer. This paper introduces a new system that extends our AV-transformer-based system with attentional multimodal fusion, joint student-teacher learning (JSTL), and model combination techniques, achieving state-of-the-art performances on the AVSD datasets for DSTC7-8,10. We applied the Scene-aware interaction technology to a car navigation system to recognizes contextual objects and events based on multimodal sensing information, such as images and video captured with cameras, audio information recorded with microphones, and localization information measured with LiDAR.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Science fiction television and movies have portrayed humanoid robots with human-like capabilities to recognize their surroundings and the context of the situation. While computers have recently become much more capable at many perceptual tasks, they are not yet ready to take the place of a human in many situations. The recent artificial intelligence (AI) boom and intelligent use of data acquired from various sensors has certainly accelerated the development of technologies needed to realize these advanced human-like capabilities in machines. We have developed a new AI system, called Scene-Aware Interaction, that enables machines to translate their perception and understanding of a scene and respond to it using natural language to interact more effectively with humans. To develop such a machine, we have proposed the Audio-Visual Scene-Aware Dialog (AVSD) task, collected an AVSD dataset, developed AVSD technologies, and hosted three-time AVSD challenge track at the Dialog System Technology Challenges (DSTC). We tested the performance of answer generation and temporal reasoning by finding evidence from the video to support each answer. This paper introduces a new system that extends our AV-transformer-based system with attentional multimodal fusion, joint student-teacher learning (JSTL), and model combination techniques, achieving state-of-the-art performances on the AVSD datasets for DSTC7-8,10. 
We applied the Scene-aware interaction technology to a car navigation system to recognizes contextual objects and events based on multimodal sensing information, such as images and video captured with cameras, audio information recorded with microphones, and localization information measured with LiDAR.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"7","bibliographic_titles":[{"bibliographic_title":"研究報告音声言語情報処理(SLP)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2022-06-10","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"5","bibliographicVolumeNumber":"2022-SLP-142"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":218464,"updated":"2025-01-19T15:09:12.192402+00:00","links":{},"created":"2025-01-19T01:18:49.987668+00:00"}