{"updated":"2025-01-20T18:29:22.232801+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00145095","sets":["1164:2240:7894:8332"]},"path":["8332"],"owner":"11","recid":"145095","title":["GMPI:GPUクラスタにおけるGPUセルフMPIの提案"],"pubdate":{"attribute_name":"公開日","attribute_value":"2015-09-23"},"_buckets":{"deposit":"8cf8e518-a0f0-457f-95d3-8b64a3e1a986"},"_deposit":{"id":"145095","pid":{"type":"depid","value":"145095","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"GMPI:GPUクラスタにおけるGPUセルフMPIの提案","author_link":["222108","222109","222110"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"GMPI:GPUクラスタにおけるGPUセルフMPIの提案"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"GPU","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2015-09-23","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"筑波大学大学院システム情報工学研究科"},{"subitem_text_value":"東京大学情報基盤センター"},{"subitem_text_value":"筑波大学大学院システム情報工学研究科/筑波大学計算科学研究センター"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of System and Information Engineering, University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"Information Technology Center, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of System and Information Engineering, University of Tsukuba / Center for Computational Sciences, University of Tsukuba","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/145095/files/IPSJ-HPC15151012.pdf","label":"IPSJ-HPC15151012.pdf"},"date":[{"dateType":"Available","dateValue":"2017-09-23"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-HPC15151012.pdf","filesize":[{"value":"1.4 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"14"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"defe400a-c7c2-4aed-9e9c-94a4b123ed8c","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2015 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"桑原, 悠太"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"塙, 敏博"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"朴, 泰祐"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10463942","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical 
report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8841","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"近年,GPU クラスタでは,GPU プログラミング環境として CUDA (Compute Unified Device Architecture) が標準的に用いられている.GPU クラスタ上での並列アプリケーションでは,CUDA 環境おいて,ノードを跨ぐ GPU 間通信が発生し,MPI などによりホスト CPU が処理するのが一般的である.そのため,通信が発生する毎に GPU 上の CUDA カーネルからホストに一旦制御を戻す必要があり,カーネル関数の起動や同期に伴うオーバーヘッドが生じる.特に並列処理における通信粒度が細かいほど,カーネル関数の起動回数も増え,オーバーヘッドも増加する.それだけでなく,プログラミングのコストが高く,CPU 向け MPI プログラムを GPU 並列化する場合にソースコードが煩雑になりやすいといった生産性の低下も問題となっている.これらの問題を解決するために,本研究では GPU カーネル内から MPI 通信の起動を可能とする並列通信システム “GMPI” を提案・開発する.これにより,並列 GPU プログラミングを簡単化し,GPU カーネルの起動や同期に伴うオーバーヘッド削減による並列処理効率の向上を目指す.本稿では,GMPI の実装と,Ping-Pong 通信および姫野ベンチマークの性能評価を行う.現状では性能最適化やチューニングが十分でなく,Ping-Pong 通信では従来方式とほぼ同等の性能であるが,姫野ベンチマークでは従来手法の約半分の性能が得られている.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"8","bibliographic_titles":[{"bibliographic_title":"研究報告ハイパフォーマンスコンピューティング(HPC)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2015-09-23","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"12","bibliographicVolumeNumber":"2015-HPC-151"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"created":"2025-01-19T00:20:46.629695+00:00","id":145095,"links":{}}