@inproceedings{oai:ipsj.ixsq.nii.ac.jp:00091353,
  author        = {五十嵐, 治一 and 森岡, 祐一 and 山本, 一将 and Igarashi, Harukazu and Morioka, Yuichi and Yamamoto, Kazumasa},
  title         = {方策勾配法による静的局面評価関数の強化学習についての一考察},
  booktitle     = {ゲームプログラミングワークショップ2012論文集},
  volume        = {2012},
  number        = {6},
  pages         = {118--121},
  publisher     = {情報処理学会},
  month         = nov,
  year          = {2012},
  abstract      = {本論文では強化学習の一手法である方策勾配法をコンピュータ将棋に適用する際に,全leaf 局面の静的局面評価値をその局面への遷移確率値で重み付けた期待値を用いた指し手評価方式を提案する.探索木の各ノードにおける指し手の選択としてBoltzmann 分布に基づく確率的戦略を採用すると静的局面評価関数に含まれるパラメータの学習則が再帰的に計算できる.しかしながら,処理対象とするleaf 局面数が大幅に増加するのでいくつかの近似解法も考案した., This paper applies policy gradient reinforcement learning to shogi. We propose a move’s evaluation function, which is defined by the expectation of the values of all leaf nodes produced by the move in a search tree, that is weighted by the transition probabilities to the leaf nodes from the root node produced by the move. Boltzmann distribution function gives the probabilities of taking branches in a search tree instead of the minimax strategy. The learning rules of the parameters in the static evaluation function of the states can be calculated recursively. Since the number of leaf nodes for evaluation increases substantially, we also consider approximation methods to reduce the computation time.},
  internal-note = {Review notes: (1) author field lists each author twice (Japanese script + romanization, as exported by the IPSJ/NII repository) -- confirm which form the target style needs and drop the other; (2) entry contains UTF-8 CJK text, so it requires biblatex/Biber or a Unicode-aware BibTeX (e.g. upbibtex); (3) original export used the invalid field "book" (fixed to booktitle), "issue" (fixed to number), quoted month "Nov" (fixed to the nov macro), and stored the abstract in "note" (moved to "abstract").},
}