{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00241500","sets":["1164:2735:11468:11810"]},"path":["11810"],"owner":"44499","recid":"241500","title":["状態と行動の組み合わせの探索のための内発的報酬を導入した世界モデル"],"pubdate":{"attribute_name":"公開日","attribute_value":"2024-12-02"},"_buckets":{"deposit":"a187c013-b5b3-4ec3-a69e-60fc24c278a2"},"_deposit":{"id":"241500","pid":{"type":"depid","value":"241500","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"状態と行動の組み合わせの探索のための内発的報酬を導入した世界モデル","author_link":["664965","664964"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"状態と行動の組み合わせの探索のための内発的報酬を導入した世界モデル"}]},"item_type_id":"4","publish_date":"2024-12-02","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京都市大学大学院情報専攻"},{"subitem_text_value":"東京都市大学大学院情報専攻"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/241500/files/IPSJ-MPS24151020.pdf","label":"IPSJ-MPS24151020.pdf"},"date":[{"dateType":"Available","dateValue":"2026-12-02"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-MPS24151020.pdf","filesize":[{"value":"1.4 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"17"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"3570bfb4-d793-42a4-b5ed-fee9644e7aa4","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2024 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"深谷, 拓実"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"穴田, 一"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10505667","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8833","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"世界モデル等のモデルベースの深層強化アルゴリズムは,環境モデルを再現し活用する事によってテレビゲーム等のタスクを人間に近いサンプル効率で学習できる.しかし,ランダムな行動で報酬に到達しにくい報酬が疎な環境の場合に世界モデルは報酬を再現できず,高いサンプル効率を実現できない.そこで,環境内の探索を強化し確実に報酬に到達するために,状態と行動の組み合わせの探索を考慮した内発的報酬を世界モデルに導入することを提案する.内発的報酬によって,報酬が疎な環境でも世界モデルがこれまでのアルゴリズムより高いサンプル効率を実現できることを示す.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"5","bibliographic_titles":[{"bibliographic_title":"研究報告数理モデル化と問題解決(MPS)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2024-12-02","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"20","bibliographicVolumeNumber":"2024-MPS-151"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":241500,"updated":"2025-01-19T07:38:34.369217+00:00","links":{},"created":"2025-01-19T01:46:08.371494+00:00"}