{"id":214220,"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00214220","sets":["1164:2240:10556:10754"]},"path":["10754"],"owner":"44499","recid":"214220","title":["スーパーコンピュータ富岳上でのMPI集団通信性能の評価"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-11-29"},"_buckets":{"deposit":"3545abc1-4279-434e-bf0c-a4d3515a5c0b"},"_deposit":{"id":"214220","pid":{"type":"depid","value":"214220","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"スーパーコンピュータ富岳上でのMPI集団通信性能の評価","author_link":["549149","549150","549148"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"スーパーコンピュータ富岳上でのMPI集団通信性能の評価"},{"subitem_title":"Performance evaluation of MPI collective communications on the supercomputer FUGAKU","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"通信","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2021-11-29","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"神戸大学"},{"subitem_text_value":"松江工業高等専門学校"},{"subitem_text_value":"神戸大学"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Kobe University","subitem_text_language":"en"},{"subitem_text_value":"National Institute of Technology, Matsue College","subitem_text_language":"en"},{"subitem_text_value":"Kobe University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/214220/files/IPSJ-HPC21182015.pdf","label":"IPSJ-HPC21182015.pdf"},"date":[{"dateType":"Available","dateValue":"2023-11-29"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-HPC21182015.pdf","filesize":[{"value":"928.2 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"14"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"f4bf2400-f418-42d0-9461-c6ee95671061","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"細野, 七月"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"岩澤, 全規"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"牧野, 淳一郎"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10463942","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8841","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"スーパーコンピュータ富岳は,158976 ノードから構成される超大規模計算機である.故に,高い実行効率を得るためには MPI 集団通信の性能や富岳上での性質を知る事が重要である.そこで,集団通信の中から Alltoall,Alltoallv 及び Bcast の 3 つについて,富岳上でベンチマークを行った.測定にあたって,通信アルゴリズム,セグメントサイズやネットワーク・トポロジーの形状などを考慮した.本研究ではその結果と,これらの集団通信において高い性能を得るためのパラメータやネットワーク・トポロジーの設定の仕方について,報告する.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"The supercomputer “FUGAKU” is a massively parallel computer, which consists of 158976 nodes. The performance of MPI collective communication is important to achieve highly effective performance. In this article, we will report the performance evaluation of Alltoall, Alltoallv and Bcast on FUGAKU. We surveyed the effect of algorithms, segment size, and node topology. We will report the results and the optimal setting to achieve high efficiency.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"9","bibliographic_titles":[{"bibliographic_title":"研究報告ハイパフォーマンスコンピューティング(HPC)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2021-11-29","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"15","bibliographicVolumeNumber":"2021-HPC-182"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"updated":"2025-01-19T16:51:30.335888+00:00","created":"2025-01-19T01:15:02.829313+00:00","links":{}}