{"id":219065,"updated":"2025-01-19T14:56:14.646743+00:00","links":{},"created":"2025-01-19T01:19:24.045807+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00219065","sets":["1164:8666:10876:10956"]},"path":["10956"],"owner":"44499","recid":"219065","title":["無音検出と動作認識に基づく沈黙推定と発話応答"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-07-22"},"_buckets":{"deposit":"930f8923-dcd0-47f8-8b2a-1b4cba10e0da"},"_deposit":{"id":"219065","pid":{"type":"depid","value":"219065","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"無音検出と動作認識に基づく沈黙推定と発話応答","author_link":["571005","571006"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"無音検出と動作認識に基づく沈黙推定と発話応答"},{"subitem_title":"Silence Estimation and Speech Response based on Speech and Motion","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"コミュニケーション(1)","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2022-07-22","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"神奈川工科大学"},{"subitem_text_value":"神奈川工科大学"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/219065/files/IPSJ-AAC22019005.pdf","label":"IPSJ-AAC22019005.pdf"},"date":[{"dateType":"Available","dateValue":"2024-07-22"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-AAC22019005.pdf","filesize":[{"value":"3.4 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"52"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ed8e9de9-8d2c-42bc-b91b-8669591ee669","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"前土佐, 勇仁"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"三枝, 亮"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA12752949","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical 
report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2432-2431","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"近年,音声対話システムの開発が進んでいるが,その多くは発話者の発話動作がない限り待機を続けるか処理を終了するシステムが多い.その一方,福祉施設内の日常的な会話の中には介助者が介護者の発話を十分に待ち,発話がない場合に次の発話を続けて開始する場面がある.この会話場面の実現には会話場面に応じた沈黙認識が必要だが,対応した会話分析実験は今までに行われていない.これは,会話場面別の沈黙尺度がないことが考えられる.そこで,本研究ではシステムに適合する沈黙尺度の作成の前段階として,発話や動作などを除いた無音区間の推定を行う音声対話システムを提案する.動作などの非言語情報を抽出することから,本研究の発展によって音声対話システムが沈黙を推測することに近づくのではないかと期待する.また,言葉を発すことが難しい認知患者との対話環境下においても使用者のジェスチャーのみで音声認識の場合と同等の沈黙認識を振る舞う事が可能になる.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In recent years, development of spoken dialogue systems has been progressing, but many of these systems continue to wait or terminate processing unless the speaker makes a speech act. On the other hand, in daily conversation in welfare facilities, there are situations in which a caregiver waits for the caregiver to speak sufficiently, and if the caregiver does not speak, the caregiver continues to speak and starts the next conversation. To realize these conversational situations, it is necessary to recognize silence according to the conversational situation, but no corresponding conversation analysis experiments have been conducted so far. This may be due to the lack of a silence scale for each conversational situation. In this study, we propose a spoken dialogue system that estimates silence intervals excluding speech and actions as a preliminary step to create a silence scale suitable for the system. Since nonverbal information such as actions are extracted, we expect that the development of this research will bring the spoken dialogue system closer to inferring silence. In addition, the system will be able to recognize silence as well as speech recognition by using only the user's gestures in a dialogue environment with a cognitive patient who has difficulty uttering words.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告アクセシビリティ(AAC)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2022-07-22","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"5","bibliographicVolumeNumber":"2022-AAC-19"}]},"relation_version_is_last":true,"weko_creator_id":"44499"}}