@techreport{oai:ipsj.ixsq.nii.ac.jp:00190711,
  author        = {Wang, Tianlun and Tanimura, Yusuke and Nakada, Hidemoto},
  title         = {Adaptation of {Ray}, a Distributed Framework for Machine Learning, to {MPI}-based Environment},
  institution   = {Information Processing Society of Japan},
  number        = {29},
  month         = jul,
  year          = {2018},
  note          = {Ray is a distributed framework for machine learning that targets reinforcement learning using multiple nodes. While it works well on loosely coupled nodes, it does not take into account the "high-performance computing environment" based on MPI. We modified Ray so that, 1) it works well with the MPI launch mechanism, and 2) it use MPI communication for large data transfer. We evaluated the modified version of Ray on a cluster and confirmed the preliminary performance.},
  internal-note = {Cleaned auto-export: de-duplicated authors and abstract; surname-first name form; issue->number; month macro. Institution inferred from IPSJ repository key -- verify; this is an IPSJ SIG Technical Report (HPC), report no. 29.},
}