{"updated":"2025-01-20T03:08:56.645047+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00184902","sets":["1164:2240:9116:9317"]},"path":["9317"],"owner":"11","recid":"184902","title":["Less is More: Accelerating Deep Neural Networks with Micro-Batching"],"pubdate":{"attribute_name":"公開日","attribute_value":"2017-12-11"},"_buckets":{"deposit":"91291ea2-ddda-4a10-aa44-f127ae388d5d"},"_deposit":{"id":"184902","pid":{"type":"depid","value":"184902","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"Less is More: Accelerating Deep Neural Networks with Micro-Batching","author_link":["409656","409655","409654","409651","409650","409653","409652","409657"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Less is More: Accelerating Deep Neural Networks with Micro-Batching"},{"subitem_title":"Less is More: Accelerating Deep Neural Networks with Micro-Batching","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"機械学習","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2017-12-11","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Tokyo Institute of Technology"},{"subitem_text_value":"ETH Zurich"},{"subitem_text_value":"ETH Zurich"},{"subitem_text_value":"Tokyo Institute of Technology"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Tokyo Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"ETH Zurich","subitem_text_language":"en"},{"subitem_text_value":"ETH Zurich","subitem_text_language":"en"},{"subitem_text_value":"Tokyo Institute of Technology","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/184902/files/IPSJ-HPC17162022.pdf","label":"IPSJ-HPC17162022.pdf"},"date":[{"dateType":"Available","dateValue":"2019-12-11"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-HPC17162022.pdf","filesize":[{"value":"933.3 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"14"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"222a3c83-4557-4ac4-9fc6-4d397a665716","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2017 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Yosuke, Oyama"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tal, Ben-Nun"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Torsten, Hoefler"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Satoshi, Matsuoka"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Yosuke, Oyama","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tal, 
Ben-Nun","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Torsten, Hoefler","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Satoshi, Matsuoka","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10463942","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8841","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"NVIDIA cuDNN is a low-level library that provides GPU kernels frequently used in deep learning. Specifically, cuDNN implements several equivalent convolution algorithms, whose performance and memory footprint may vary considerably, depending on the layer dimensions. When an algorithm is automatically selected by cuDNN, the decision is performed on a per-layer basis, and thus it often resorts to slower algorithms that fit the workspace size constraints. We present μ-cuDNN, a transparent wrapper library for cuDNN, which divides layer computation into several micro-batches. Based on Dynamic Programming and Integer Linear Programming, μ-cuDNN enables faster algorithms by decreasing the workspace requirements. We demonstrate the effectiveness of μ-cuDNN over the Caffe framework, achieving speedups of 1.63x for AlexNet and 1.21x for ResNet-18. These results indicate that using micro-batches can seamlessly increase the performance of deep learning, while maintaining the same memory footprint.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"NVIDIA cuDNN is a low-level library that provides GPU kernels frequently used in deep learning. Specifically, cuDNN implements several equivalent convolution algorithms, whose performance and memory footprint may vary considerably, depending on the layer dimensions. When an algorithm is automatically selected by cuDNN, the decision is performed on a per-layer basis, and thus it often resorts to slower algorithms that fit the workspace size constraints. We present μ-cuDNN, a transparent wrapper library for cuDNN, which divides layer computation into several micro-batches. Based on Dynamic Programming and Integer Linear Programming, μ-cuDNN enables faster algorithms by decreasing the workspace requirements. We demonstrate the effectiveness of μ-cuDNN over the Caffe framework, achieving speedups of 1.63x for AlexNet and 1.21x for ResNet-18. 
Abstract: NVIDIA cuDNN is a low-level library that provides GPU kernels frequently used in deep learning. Specifically, cuDNN implements several equivalent convolution algorithms, whose performance and memory footprint may vary considerably, depending on the layer dimensions. When an algorithm is automatically selected by cuDNN, the decision is performed on a per-layer basis, and thus it often resorts to slower algorithms that fit the workspace size constraints. We present μ-cuDNN, a transparent wrapper library for cuDNN, which divides layer computation into several micro-batches. Based on Dynamic Programming and Integer Linear Programming, μ-cuDNN enables faster algorithms by decreasing the workspace requirements. We demonstrate the effectiveness of μ-cuDNN over the Caffe framework, achieving speedups of 1.63x for AlexNet and 1.21x for ResNet-18. These results indicate that using micro-batches can seamlessly increase the performance of deep learning, while maintaining the same memory footprint.
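The mechanism the abstract describes, shrinking the effective batch so that a faster convolution algorithm fits within a fixed workspace budget, can be illustrated directly against the cuDNN API. The sketch below is not the μ-cuDNN library itself: it assumes the cuDNN v7 interface (cudnnGetConvolutionForwardAlgorithm with CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, which was removed in v8), and it uses a fixed, caller-supplied micro-batch size instead of the Dynamic Programming / Integer Linear Programming selection the paper describes.

```cpp
// Minimal micro-batched convolution sketch on top of cuDNN v7
// (hypothetical standalone example, not the mu-cuDNN library).
// Splitting the mini-batch lets each call request a faster
// algorithm that still fits the per-call workspace limit.
#include <cudnn.h>
#include <cuda_runtime.h>
#include <cstdio>

#define CHECK_CUDNN(call)                                            \
  do {                                                               \
    cudnnStatus_t s_ = (call);                                       \
    if (s_ != CUDNN_STATUS_SUCCESS) {                                \
      fprintf(stderr, "cuDNN error: %s\n", cudnnGetErrorString(s_)); \
      return;                                                        \
    }                                                                \
  } while (0)

// Run an N-sample, stride-1, pad-1 convolution as ceil(N / micro)
// micro-batched calls. `ws` must point to at least `ws_limit` bytes.
void conv_forward_microbatched(cudnnHandle_t h,
                               const float* x, float* y, const float* w,
                               int N, int C, int H, int W,  // input dims
                               int K, int R, int S,         // filter dims
                               int micro, size_t ws_limit, void* ws) {
  cudnnTensorDescriptor_t xd, yd;
  cudnnFilterDescriptor_t wd;
  cudnnConvolutionDescriptor_t cd;
  CHECK_CUDNN(cudnnCreateTensorDescriptor(&xd));
  CHECK_CUDNN(cudnnCreateTensorDescriptor(&yd));
  CHECK_CUDNN(cudnnCreateFilterDescriptor(&wd));
  CHECK_CUDNN(cudnnCreateConvolutionDescriptor(&cd));
  CHECK_CUDNN(cudnnSetFilter4dDescriptor(wd, CUDNN_DATA_FLOAT,
                                         CUDNN_TENSOR_NCHW, K, C, R, S));
  CHECK_CUDNN(cudnnSetConvolution2dDescriptor(cd, 1, 1, 1, 1, 1, 1,
                                              CUDNN_CROSS_CORRELATION,
                                              CUDNN_DATA_FLOAT));
  const float alpha = 1.f, beta = 0.f;
  for (int n0 = 0; n0 < N; n0 += micro) {
    int n = (n0 + micro <= N) ? micro : (N - n0);
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(xd, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, n, C, H, W));
    int on, ok, oh, ow;
    CHECK_CUDNN(cudnnGetConvolution2dForwardOutputDim(cd, xd, wd,
                                                      &on, &ok, &oh, &ow));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(yd, CUDNN_TENSOR_NCHW,
                                           CUDNN_DATA_FLOAT, on, ok, oh, ow));
    // Ask cuDNN for the fastest algorithm that fits the workspace limit.
    // With a smaller n, faster algorithms (e.g. FFT-based) often qualify,
    // because their workspace scales with the batch dimension.
    cudnnConvolutionFwdAlgo_t algo;
    CHECK_CUDNN(cudnnGetConvolutionForwardAlgorithm(
        h, xd, wd, cd, yd,
        CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, ws_limit, &algo));
    size_t ws_size = 0;
    CHECK_CUDNN(cudnnGetConvolutionForwardWorkspaceSize(h, xd, wd, cd, yd,
                                                        algo, &ws_size));
    CHECK_CUDNN(cudnnConvolutionForward(
        h, &alpha, xd, x + (size_t)n0 * C * H * W, wd, w, cd, algo,
        ws, ws_size, &beta, yd, y + (size_t)n0 * ok * oh * ow));
  }
  cudnnDestroyTensorDescriptor(xd);
  cudnnDestroyTensorDescriptor(yd);
  cudnnDestroyFilterDescriptor(wd);
  cudnnDestroyConvolutionDescriptor(cd);
}
```

Since the workspace of FFT- and Winograd-style algorithms grows roughly with the batch size, halving the micro-batch roughly halves the workspace a call needs, which frequently lets a faster algorithm qualify under the same memory limit. The paper's contribution is choosing those split points optimally via DP/ILP rather than the fixed micro-batch size used in this sketch.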