{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00217929","sets":["581:10784:10790"]},"path":["10790"],"owner":"44499","recid":"217929","title":["パイプライン並列分散深層学習の一実装手法の評価"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-05-15"},"_buckets":{"deposit":"6e32a53e-d5b4-4af4-b73a-fbb1ab043517"},"_deposit":{"id":"217929","pid":{"type":"depid","value":"217929","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"パイプライン並列分散深層学習の一実装手法の評価","author_link":["565536","565532","565533","565537","565534","565535"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"パイプライン並列分散深層学習の一実装手法の評価"},{"subitem_title":"Evaluation of an Implementation Method for Pipeline Parallelism Distributed Deep Learning","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[特集:情報システム論文] 分散深層学習,並列処理","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2022-05-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京工科大学"},{"subitem_text_value":"電気通信大学"},{"subitem_text_value":"東京工科大学"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Tokyo University of Technology","subitem_text_language":"en"},{"subitem_text_value":"The University of Electro-Communications","subitem_text_language":"en"},{"subitem_text_value":"Tokyo University of Technology","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/217929/files/IPSJ-JNL6305003.pdf","label":"IPSJ-JNL6305003.pdf"},"date":[{"dateType":"Available","dateValue":"2024-05-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6305003.pdf","filesize":[{"value":"1.5 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"7b2ec023-a4cd-45b3-a40f-a9261c7e8777","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"滝澤, 尚輝"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"矢崎, 俊志"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"石畑, 宏明"}],"nameIdentifiers":[{}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Naoki, Takisawa","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Syunji, Yazaki","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Hiroaki, Ishihata","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal 
article"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"本論文では,並列計算機におけるパイプライン並列分散深層学習の一実装手法の評価・分析を行う.パイプライン並列ではニューラルネットワークモデルを分割し,各プロセスに割り当てる.ハードウェア効率を向上させるため,ミニバッチを分割したマイクロバッチを用いて各プロセスの処理をオーバラップする.パイプライン並列の利点はマイクロバッチ処理のオーバラップによる高速化と,メモリ消費の分散である.本研究では,パイプライン並列におけるニューラルネットワークモデルの分割の記述方法を提案する.全結合層32層からなるシンプルなネットワークを用いてパイプライン並列の高速化の効果について分析を行う.VGG16とResNet50を用いて,複雑なモデルにおけるパイプライン並列の評価を行う.","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In this paper, we evaluate and analyze an implementation method of pipeline parallelism distributed deep learning on parallel computers. In pipeline parallelism, a neural network model is partitioned and assigned to each process. To improve hardware efficiency, we use microbatches, which are divided minibatches, to overlap the processing of each process. The advantage of pipeline parallelism is that the overlapping of microbatch processes increases the speed and distributes the memory consumtion. In this study, we propose a method for describing the partitioning of neural network models in pipeline parallelism. We analyze the speedup effect of pipeline parallelism using a simple network with 32 fully connected layers. Using VGG16 and ResNet50, we evaluate the pipeline parallelism.","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"1215","bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicPageStart":"1206","bibliographicIssueDates":{"bibliographicIssueDate":"2022-05-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"5","bibliographicVolumeNumber":"63"}]},"relation_version_is_last":true,"item_2_identifier_registration":{"attribute_name":"ID登録","attribute_value_mlt":[{"subitem_identifier_reg_text":"10.20729/00217821","subitem_identifier_reg_type":"JaLC"}]},"weko_creator_id":"44499"},"id":217929,"updated":"2025-01-19T15:15:42.739802+00:00","links":{},"created":"2025-01-19T01:18:21.223663+00:00"}