{"updated":"2025-01-19T17:09:28.800787+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00213449","sets":["6164:6165:6210:10734"]},"path":["10734"],"owner":"44499","recid":"213449","title":["リセット機能を活用したシミュレータにおける効率的な方策学習"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-11-06"},"_buckets":{"deposit":"854ff3a3-29ea-4e1b-aee5-d9d03556ff4d"},"_deposit":{"id":"213449","pid":{"type":"depid","value":"213449","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"リセット機能を活用したシミュレータにおける効率的な方策学習","author_link":["546191","546193","546194","546192"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"リセット機能を活用したシミュレータにおける効率的な方策学習"},{"subitem_title":"Exploiting Reset Functions for Efficient Policy Learning on Simulators","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"強化学習","subitem_subject_scheme":"Other"},{"subitem_subject":"サンプル効率","subitem_subject_scheme":"Other"},{"subitem_subject":"リセット機能","subitem_subject_scheme":"Other"}]},"item_type_id":"18","publish_date":"2021-11-06","item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_18_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京大学大学院情報理工学系研究科電子情報学専攻"},{"subitem_text_value":"東京大学大学院情報理工学系研究科電子情報学専攻"}]},"item_18_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Department of Information and Communication Engineering, Graduate School of Information Science and Technology, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Department of Information and Communication Engineering, Graduate School of Information Science and Technology, The University of Tokyo","subitem_text_language":"en"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/213449/files/IPSJ-GPWS2021028.pdf","label":"IPSJ-GPWS2021028.pdf"},"date":[{"dateType":"Available","dateValue":"2021-11-06"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-GPWS2021028.pdf","filesize":[{"value":"1.8 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"18"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ab39ac93-8cc2-416e-945e-2a816944bc6a","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_18_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"橋本, 大世"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"鶴岡, 慶雅"}],"nameIdentifiers":[{}]}]},"item_18_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Taisei, Hashimoto","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshimasa, Tsuruoka","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_5794","resourcetype":"conference paper"}]},"item_18_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"強化学習ではシミュレータを使った方策学習が一般的である. これは, シミュレータでは実環境よりも速くかつ安全にデータを収集できるためである. 強化学習は試行錯誤を繰り返しながら学習するため一般的に大量のデータが必要であり, シミュレータを使っても学習に長時間かかることが多い. そのため強化学習の実応用に向けて, シミュレータにおける方策学習のサンプル効率を高めることが重要である. サンプル効率の向上を目的とする研究は数多く存在するが, シミュレータでの学習の特性を利用する研究は不十分であり改善の余地がある. そこで本研究では, シミュレータが備えるリセット機能を活用して方策学習を効率化する手法を検討する. 具体的には, 累積報酬の高い軌跡を素早く見つけることで学習効率を高める. そのために, リセットする状態を選ぶ基準や, 不必要なデータ収集を避ける方法を提案する. 実験では,CartPole という古典的なタスクと, Pong, Boxing というビデオゲームのタスクにおいて提案手法の有効性を定量的に検証した. 加えて, 提案手法の動作に関する定性的な分析も行った.","subitem_description_type":"Other"}]},"item_18_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In reinforcement learning, it is common to use a simulator for policy learning. This is because an agent can collect data faster and more safely on a simulator than in a real environment. Reinforcement learning generally requires a large amount of data because its learning process is a trial-and-error, and training often takes a long time to learn even using a simulator. Therefore, it is crucial to improve the sample efficiency of policy learning on a simulator for practical applications of reinforcement learning. Although there is a large body of work aiming to improve the sample efficiency, there are not many studies that exploit the characteristics of learning on a simulator. Therefore, in this study, we investigate an approach to improve the efficiency of policy learning by utilizing the reset function of a simulator. Specifically, we improve the learning efficiency by quickly finding trajectories with high cumulative rewards. For this purpose, we propose a criterion to select reset states and a method to avoid unnecessary data collection. In the experiments, we quantitatively verified the effectiveness of the proposed method in a classical task called CartPole and video game tasks of Pong and Boxing. In addition, we conducted a qualitative analysis of the behavior of the proposed method.","subitem_description_type":"Other"}]},"item_18_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"159","bibliographic_titles":[{"bibliographic_title":"ゲームプログラミングワークショップ2021論文集"}],"bibliographicPageStart":"152","bibliographicIssueDates":{"bibliographicIssueDate":"2021-11-06","bibliographicIssueDateType":"Issued"},"bibliographicVolumeNumber":"2021"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"created":"2025-01-19T01:14:19.602634+00:00","id":213449,"links":{}}