{"updated":"2025-01-21T09:29:36.681518+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00106503","sets":["6164:6165:6210:7725"]},"path":["7725"],"owner":"11","recid":"106503","title":["方策勾配法による探索制御の一考察"],"pubdate":{"attribute_name":"公開日","attribute_value":"2014-10-31"},"_buckets":{"deposit":"a0ec4475-ad9d-4a69-a2d2-3ae7edebab9f"},"_deposit":{"id":"106503","pid":{"type":"depid","value":"106503","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"方策勾配法による探索制御の一考察","author_link":["11963","11964","11962","11960","11961","11965"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"方策勾配法による探索制御の一考察"},{"subitem_title":"Learning Search Control by Policy Gradient Algorithm","subitem_title_language":"en"}]},"item_type_id":"18","publish_date":"2014-10-31","item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_18_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"芝浦工業大学工学部情報工学科"},{"subitem_text_value":" "},{"subitem_text_value":"(株) コスモ・ウェブ"}]},"item_18_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Shibaura Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":" ","subitem_text_language":"en"},{"subitem_text_value":"Cosmoweb Co., Ltd.","subitem_text_language":"en"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/106503/files/IPSJ-GPWS2014013.pdf"},"date":[{"dateType":"Available","dateValue":"2014-10-31"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-GPWS2014013.pdf","filesize":[{"value":"1.2 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"18"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"859c471e-7d1e-40bd-86dd-0b579484e373","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2014 by the Information Processing Society of Japan"}]},"item_18_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"五十嵐, 治一"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"森岡, 祐一"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"山本, 一将"}],"nameIdentifiers":[{}]}]},"item_18_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Harukazu, Igarashi","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yuichi, Morioka","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kazumasa, Yamamoto","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_5794","resourcetype":"conference 
paper"}]},"item_18_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"コンピュータ将棋において探索木の枝を成長させる際に,その枝までの探索経路に沿った指し手の累積的な選択確率の値を基に探索制御を行う方法を提案する.このときの指し手の選択には,将棋の指し手に関するヒューリスティクスを組み込んだシミュレーション方策を使用する.この際,枝成長を決定論的に行う場合と確率的に行う2つの場合を考えた.さらに,本手法ではこのシミュレーション方策中のパラメータを強化学習の一手法である方策勾配法により学習する.","subitem_description_type":"Other"}]},"item_18_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"This paper proposes a method based on the policy gradient learning algorithm for search control in computer shogi. In this method, whether every arc in a search tree should be extended is determined by the accumulated move-selection probability from the root node to the arc. Moves are selected by a simulation policy that includes heuristics for evaluating shogi moves. We consider two types of arc extension: deterministic and stochastic. In both cases, the parameters in the simulation policy can be learned by the policy gradient algorithm, which is a method of reinforcement learning.","subitem_description_type":"Other"}]},"item_18_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"94","bibliographic_titles":[{"bibliographic_title":"ゲームプログラミングワークショップ2014論文集"}],"bibliographicPageStart":"90","bibliographicIssueDates":{"bibliographicIssueDate":"2014-10-31","bibliographicIssueDateType":"Issued"},"bibliographicVolumeNumber":"2014"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"created":"2025-01-18T23:49:53.751789+00:00","id":106503,"links":{}}