{"created":"2025-01-18T23:05:08.212135+00:00","updated":"2025-01-22T13:53:10.222326+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00037247","sets":["1164:3027:3094:3095"]},"path":["3095"],"owner":"1","recid":"37247","title":["音声とポインティングジェスチャを利用した指示物同定"],"pubdate":{"attribute_name":"公開日","attribute_value":"1997-11-14"},"_buckets":{"deposit":"38cee7d7-af4a-410d-92b9-eac8ee5d9783"},"_deposit":{"id":"37247","pid":{"type":"depid","value":"37247","revision_id":0},"owners":[1],"status":"published","created_by":1},"item_title":"音声とポインティングジェスチャを利用した指示物同定","author_link":["0","0"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"音声とポインティングジェスチャを利用した指示物同定"},{"subitem_title":"Referent Identification by Using a Speech and Deictic Gesture","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"1997-11-14","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"山梨大学工学部電子情報工学科"},{"subitem_text_value":"山梨大学工学部電子情報工学科"},{"subitem_text_value":"山梨大学工学部電子情報工学科"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Dept. of Electrical Engineering and Computer Science, Yamanashi University","subitem_text_language":"en"},{"subitem_text_value":"Dept. of Electrical Engineering and Computer Science, Yamanashi University","subitem_text_language":"en"},{"subitem_text_value":"Dept. of Electrical Engineering and Computer Science, Yamanashi University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/37247/files/IPSJ-HI97075009.pdf"},"date":[{"dateType":"Available","dateValue":"1999-11-14"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-HI97075009.pdf","filesize":[{"value":"522.4 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"33"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"b5fd55ff-7597-4943-bc42-0a37968a3a24","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 1997 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"山田, 寛康"},{"creatorName":"福本, 文代"},{"creatorName":"今宮, 淳美"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Hiroyasu, Yamada","creatorNameLang":"en"},{"creatorName":"Fumiyo, Fukumoto","creatorNameLang":"en"},{"creatorName":"Atsumi, Imamiya","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA1221543X","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, 
etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"本稿では,音声とマウスを入力手段とするマルチモーダルインタフェースで,ディスプレイ上の図形を指示したとき,その指示物を同定する手法を提案する.本手法は時間同期性を用いた指示物候補の生成(generation of candidates)と,過去の対話情報を利用した候補中からの指示物伺定(identification)の2つの手続きにより実現される.Generation of candidatesでは,すべての発話をsegmentと呼ぶ単位に分割し,segmentと同期した時間内にマウスによって指示した図形を指示物候補として生成する.Identificationでは,過去に指示物同定で得られた結果を利用することで候補中から指示物を同定する.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In this paper, we propose a method to deal with the reference of deictic expression to visual objects on a terminal screen. Users can point to visual objects on a terminal screen by using both an indirect pointing device, a mouse and natural language. The procedure for determining the referent of the deictic expression, i.e. determining the visual objects being pointed to, consists of two procedures: Generation-of-Candidates and Identification. In Generation-of-Candidates every utterance is divided into a segment using linguistic constraints called verb phrase rank. Then, candidates of visual objects are generated from trace by a mouse, together with a segment. This segment includes Japanese deictic expressions such as 'this (これ)' or 'that(あれ)'. In the second procedure, Identification, a deictic expression is identified with visual objects using pairs of deictic expressions and the visual object candidates, which have already been obtained. The results of the pre1imimnarily experiment show the effectiveness of the method.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"50","bibliographic_titles":[{"bibliographic_title":"情報処理学会研究報告ヒューマンコンピュータインタラクション(HCI)"}],"bibliographicPageStart":"45","bibliographicIssueDates":{"bibliographicIssueDate":"1997-11-14","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"107(1997-HI-075)","bibliographicVolumeNumber":"1997"}]},"relation_version_is_last":true,"weko_creator_id":"1"},"id":37247,"links":{}}