{"created":"2025-01-19T00:44:21.721578+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00174129","sets":["1164:2240:8543:8882"]},"path":["8882"],"owner":"11","recid":"174129","title":["GPUクラスタにおけるGPUセルフMPIシステムGMPIの予備性能評価"],"pubdate":{"attribute_name":"公開日","attribute_value":"2016-08-01"},"_buckets":{"deposit":"612ca5b1-727d-49c7-9e0b-db6723901918"},"_deposit":{"id":"174129","pid":{"type":"depid","value":"174129","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"GPUクラスタにおけるGPUセルフMPIシステムGMPIの予備性能評価","author_link":["356218","356219","356220"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"GPUクラスタにおけるGPUセルフMPIシステムGMPIの予備性能評価"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"GPU","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2016-08-01","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"筑波大学大学院システム情報工学研究科"},{"subitem_text_value":"東京大学情報基盤センター"},{"subitem_text_value":"筑波大学大学院システム情報工学研究科/筑波大学計算科学研究センター"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of System and Information Engineering, University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"Information Technology Center, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of System and Information Engineering, University of Tsukuba / Center for Computational Sciences, University of Tsukuba","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/174129/files/IPSJ-HPC16155015.pdf","label":"IPSJ-HPC16155015.pdf"},"date":[{"dateType":"Available","dateValue":"2018-08-01"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-HPC16155015.pdf","filesize":[{"value":"538.3 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"14"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"d516b821-9ab0-4209-97d0-ec60a4d8e2f2","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2016 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"桑原, 悠太"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"塙, 敏博"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"朴, 泰祐"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10463942","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8841","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"今日,CUDA (Compute Unified Device Architecture) は NVIDIA の GPU のプログラミング環境として一般的に用いられている.その高性能かつ低電力な特徴から,GPU クラスタに搭載された GPU は様々なアプリケーションの実行に用いられる.CUDA はシングルノード向けに開発されたため,ノード間での通信には MPI (Message Passing Interface) などを用いる必要がある.従来手法では,通信の度に制御を CUDA カーネルから CPU に戻す必要があり,カーネル関数の起動や同期に伴うオーバーヘッドが生じる他,プログラマビリティや生産性の低下も問題となっている.これらの問題を解決するために,我々は GPU カーネル内から MPI 通信の起動を可能とする並列通信システム “GMPI” を開発している.本稿では,この GMPI システムにおける並列実行モデルを定義し,Ping-Pong 通信および姫野ベンチマークの性能評価を実 GPU クラスタ上で行う.現状,Ping-Pong 通信は従来手法とほぼ同等の性能である.しかしながら,性能最適化やチューニングが十分でなく,姫野ベンチマークでは従来手法の約 66%の性能にとどまっており,この妨げとなる要因の考察を行う.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"8","bibliographic_titles":[{"bibliographic_title":"研究報告ハイパフォーマンスコンピューティング(HPC)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2016-08-01","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"15","bibliographicVolumeNumber":"2016-HPC-155"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"id":174129,"updated":"2025-01-20T07:01:03.933622+00:00","links":{}}