{"created":"2025-01-19T01:12:20.743413+00:00","updated":"2025-01-19T17:53:29.314635+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00211157","sets":["1164:4619:10416:10591"]},"path":["10591"],"owner":"44499","recid":"211157","title":["Behavior-based DNN Compression: Pruning and Facilitation Methods"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-05-13"},"_buckets":{"deposit":"f2066210-da8d-4e86-880b-040f1879d0bc"},"_deposit":{"id":"211157","pid":{"type":"depid","value":"211157","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Behavior-based DNN Compression: Pruning and Facilitation Methods","author_link":["536013","536014","536016","536015"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Behavior-based DNN Compression: Pruning and Facilitation Methods"},{"subitem_title":"Behavior-based DNN Compression: Pruning and Facilitation Methods","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"D論セッション","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2021-05-13","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Wakayama University"},{"subitem_text_value":"Wakayama University"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Wakayama University","subitem_text_language":"en"},{"subitem_text_value":"Wakayama University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/211157/files/IPSJ-CVIM21226010.pdf","label":"IPSJ-CVIM21226010.pdf"},"date":[{"dateType":"Available","dateValue":"2023-05-13"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CVIM21226010.pdf","filesize":[{"value":"1.8 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"20"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"bcb50d54-a286-428a-8055-fe4561c819b4","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Koji, Kamma"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshikazu, Wada"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Koji, Kamma","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshikazu, Wada","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11131797","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, 
symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8701","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"In this paper, we present two pruning methods. Pruning is a technique to reduce the computational cost of Deep Neural Networks (DNNs) by removing redundant neurons. The proposed pruning methods are Neuro-Unification (NU) and Reconstruction Error Aware Pruning (REAP). These methods not only prune but also perform reconstruction to prevent accuracy degradation. In the reconstruction step, we update the weights connected to the remaining neurons so as to compensate for the error caused by pruning. Therefore, the models pruned by these methods suffer less accuracy degradation. As REAP requires a significant amount of computation for selecting the neurons to be pruned, we developed a biorthogonal system-based algorithm that reduces the computational order of neuron selection from O(n^4) to O(n^3), where n denotes the number of neurons. We also propose two methods for facilitating pruning: Pruning Ratio Optimizer (PRO) and Serialized Residual Network (SRN). As REAP performs pruning in each layer separately, it is important to tune the pruning ratio (the ratio of neurons to be pruned) in each layer properly in order to better preserve the model accuracy. PRO is a method that can be combined with REAP to tune the pruning ratios based on the error in the final layer of the pruned DNN. SRN is a method that facilitates pruning for ResNet. Due to its identity shortcuts, some layers of ResNet cannot be pruned. Therefore, we first convert ResNet into an equivalent serial DNN model, which we call SRN, so that pruning can be performed in any layer.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In this paper, we present two pruning methods. Pruning is a technique to reduce the computational cost of Deep Neural Networks (DNNs) by removing redundant neurons. The proposed pruning methods are Neuro-Unification (NU) and Reconstruction Error Aware Pruning (REAP). These methods not only prune but also perform reconstruction to prevent accuracy degradation. In the reconstruction step, we update the weights connected to the remaining neurons so as to compensate for the error caused by pruning. Therefore, the models pruned by these methods suffer less accuracy degradation. As REAP requires a significant amount of computation for selecting the neurons to be pruned, we developed a biorthogonal system-based algorithm that reduces the computational order of neuron selection from O(n^4) to O(n^3), where n denotes the number of neurons. We also propose two methods for facilitating pruning: Pruning Ratio Optimizer (PRO) and Serialized Residual Network (SRN). As REAP performs pruning in each layer separately, it is important to tune the pruning ratio (the ratio of neurons to be pruned) in each layer properly in order to better preserve the model accuracy. PRO is a method that can be combined with REAP to tune the pruning ratios based on the error in the final layer of the pruned DNN. SRN is a method that facilitates pruning for ResNet. Due to its identity shortcuts, some layers of ResNet cannot be pruned. 
Therefore, we first convert ResNet into an equivalent serial DNN model, which we call SRN, so that pruning can be performed in any layer.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"16","bibliographic_titles":[{"bibliographic_title":"研究報告コンピュータビジョンとイメージメディア(CVIM)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2021-05-13","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"10","bibliographicVolumeNumber":"2021-CVIM-226"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":211157,"links":{}}
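
The reconstruction step described in the abstract (updating the weights connected to the remaining neurons to compensate for the error caused by pruning) can be illustrated as a layer-wise least-squares fit. The following is a minimal sketch under that assumption, not the exact NU/REAP formulation; the names prune_and_reconstruct, X, W, and keep_idx are hypothetical.

# Minimal sketch of layer-wise pruning with a least-squares reconstruction
# step, assuming the "compensate the pruning error" idea from the abstract
# can be posed as min_W' || X W - X[:, keep_idx] W' ||_F.
# This is an illustrative assumption, not the paper's exact NU/REAP algorithm.
import numpy as np

def prune_and_reconstruct(X, W, keep_idx):
    """Prune input neurons of a linear layer and update the remaining weights.

    X        : (samples, n) activations feeding the layer before pruning
    W        : (n, m) weight matrix of the layer before pruning
    keep_idx : indices of the neurons that survive pruning
    Returns the (len(keep_idx), m) weight matrix that minimizes the
    Frobenius-norm error between the original and pruned layer outputs.
    """
    Y = X @ W                      # original layer output to be reproduced
    X_kept = X[:, keep_idx]        # activations of the remaining neurons
    # Least-squares solution; lstsq also handles rank-deficient X_kept.
    W_new, *_ = np.linalg.lstsq(X_kept, Y, rcond=None)
    return W_new

# Tiny usage example with random data (hypothetical sizes).
rng = np.random.default_rng(0)
X = rng.standard_normal((256, 32))
W = rng.standard_normal((32, 16))
keep_idx = np.arange(24)           # prune the last 8 neurons (25% pruning ratio)
W_new = prune_and_reconstruct(X, W, keep_idx)
err = np.linalg.norm(X @ W - X[:, keep_idx] @ W_new)
print(f"reconstruction error after pruning: {err:.4f}")

In this sketch, the per-layer pruning ratio corresponds to the fraction of indices left out of keep_idx; choosing that fraction for each layer so that the error observed at the network's final layer stays small is the role the abstract assigns to PRO.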