{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00214042","sets":["1164:2036:10484:10753"]},"path":["10753"],"owner":"44499","recid":"214042","title":["FPGA Based Accelerator for Neural Networks Computation with Flexible Pipelining"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-11-24"},"_buckets":{"deposit":"5d7e51a4-353e-47ae-a782-538eb6a726d9"},"_deposit":{"id":"214042","pid":{"type":"depid","value":"214042","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"FPGA Based Accelerator for Neural Networks Computation with Flexible Pipelining","author_link":["548391","548395","548392","548393","548394","548390"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"FPGA Based Accelerator for Neural Networks Computation with Flexible Pipelining"},{"subitem_title":"FPGA Based Accelerator for Neural Networks Computation with Flexible Pipelining","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"設計事例","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2021-11-24","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo"},{"subitem_text_value":"Waseda Research Institute for Science and Engineering, Waseda University/JST, PRESTO"},{"subitem_text_value":"System Design Research Center, University of Tokyo"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"Waseda Research Institute for Science and Engineering, Waseda University / JST, PRESTO","subitem_text_language":"en"},{"subitem_text_value":"System Design Research Center, University of Tokyo","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/214042/files/IPSJ-SLDM21196029.pdf","label":"IPSJ-SLDM21196029.pdf"},"date":[{"dateType":"Available","dateValue":"2023-11-24"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SLDM21196029.pdf","filesize":[{"value":"1.1 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"10"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"7a581321-03e0-4c39-982d-f6e7cc7f41ef","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Qingyang, Yi"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Heming, Sun"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masahiro, Fujita"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Qingyang, 
Yi","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Heming, Sun","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masahiro, Fujita","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11451459","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8639","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"FPGA is appropriate for fix-point neural networks computing due to high power efficiency and configurability. However, its design must be intensively refined to achieve high performance using limited hardware resources. We present an FPGA-based neural networks accelerator and its optimization framework, which can achieve optimal efficiency for various CNN models and FPGA resources. Targeting high throughput, we adopt layer-wise pipeline architecture for higher DSP utilization. To get the optimal performance, a flexible algorithm to allocate balanced hardware resources to each layer is also proposed, supported by activation buffer design. Through our well-balanced implementation of four CNN models on ZC706, the DSP utilization and efficiency are over 90%. For VGG16 on ZC706, the proposed accelerator achieves the performance of 2.58x, 1.53x and 1.35x better than the referenced non-pipeline architecture [1], pipeline architecture [2] and [3], respectively.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"FPGA is appropriate for fix-point neural networks computing due to high power efficiency and configurability. However, its design must be intensively refined to achieve high performance using limited hardware resources. We present an FPGA-based neural networks accelerator and its optimization framework, which can achieve optimal efficiency for various CNN models and FPGA resources. Targeting high throughput, we adopt layer-wise pipeline architecture for higher DSP utilization. To get the optimal performance, a flexible algorithm to allocate balanced hardware resources to each layer is also proposed, supported by activation buffer design. Through our well-balanced implementation of four CNN models on ZC706, the DSP utilization and efficiency are over 90%. 
For VGG16 on ZC706, the proposed accelerator achieves the performance of 2.58x, 1.53x and 1.35x better than the referenced non-pipeline architecture [1], pipeline architecture [2] and [3], respectively.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告システムとLSIの設計技術(SLDM)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2021-11-24","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"29","bibliographicVolumeNumber":"2021-SLDM-196"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":214042,"updated":"2025-01-19T16:55:34.241394+00:00","links":{},"created":"2025-01-19T01:14:52.757602+00:00"}
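The abstract above mentions a flexible algorithm that allocates balanced hardware resources to each layer of the pipeline. As a rough illustration of that idea only (not the authors' actual algorithm, which is described in the full report), the sketch below splits a fixed DSP budget across CNN layers in proportion to their multiply-accumulate (MAC) counts so that the pipeline stages have roughly equal latency. The layer shapes and the allocate_dsps helper are hypothetical; the only figure taken from the abstract's context is the ZC706 board (whose XC7Z045 device provides 900 DSP slices).

```python
# Minimal sketch (not the paper's algorithm): distribute a fixed DSP budget
# across CNN layers in proportion to each layer's MAC count, so a layer-wise
# pipeline has roughly equal per-stage latency. Layer shapes are hypothetical.

def conv_macs(cin, cout, k, h, w):
    """MACs for one convolution layer with k x k kernels on an h x w output."""
    return cin * cout * k * k * h * w

def allocate_dsps(layer_macs, dsp_budget):
    """Give each layer at least one DSP; distribute the rest proportionally to MACs."""
    total = sum(layer_macs)
    alloc = [max(1, round(dsp_budget * m / total)) for m in layer_macs]
    # Trim or pad greedily so the allocation exactly matches the budget.
    while sum(alloc) > dsp_budget:
        alloc[alloc.index(max(alloc))] -= 1
    while sum(alloc) < dsp_budget:
        # Add DSPs where the per-DSP load (stage latency) is currently worst.
        worst = max(range(len(alloc)), key=lambda i: layer_macs[i] / alloc[i])
        alloc[worst] += 1
    return alloc

if __name__ == "__main__":
    # Hypothetical 4-layer CNN; the ZC706 (XC7Z045) offers 900 DSP slices in total.
    layers = [conv_macs(3, 64, 3, 224, 224),
              conv_macs(64, 128, 3, 112, 112),
              conv_macs(128, 256, 3, 56, 56),
              conv_macs(256, 256, 3, 56, 56)]
    dsps = allocate_dsps(layers, dsp_budget=900)
    stage_latency = [m / d for m, d in zip(layers, dsps)]
    print("DSPs per layer:", dsps)
    print("Relative stage latencies:",
          [round(s / min(stage_latency), 2) for s in stage_latency])
```

Balancing per-stage latency in this way is what lets a layer-wise pipeline keep most DSPs busy, which is consistent with the over-90% DSP utilization and efficiency reported in the abstract; the actual allocation procedure and the activation buffer design are detailed in the report itself.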