{"created":"2025-01-19T01:05:50.537651+00:00","updated":"2025-01-19T20:33:17.477749+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00203414","sets":["1164:3616:10147:10148"]},"path":["10148"],"owner":"44499","recid":"203414","title":["A Study on Motion-robust Video Deblurring"],"pubdate":{"attribute_name":"公開日","attribute_value":"2020-02-20"},"_buckets":{"deposit":"c5adb912-f9ce-4127-b311-0e0db942a82a"},"_deposit":{"id":"203414","pid":{"type":"depid","value":"203414","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"A Study on Motion-robust Video Deblurring","author_link":["501362","501361","501363","501360"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"A Study on Motion-robust Video Deblurring"},{"subitem_title":"A Study on Motion-robust Video Deblurring","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2020-02-20","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"KDDI Research, Inc."},{"subitem_text_value":"KDDI Research, Inc."}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"KDDI Research, Inc.","subitem_text_language":"en"},{"subitem_text_value":"KDDI Research, Inc.","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/203414/files/IPSJ-AVM20108012.pdf","label":"IPSJ-AVM20108012.pdf"},"date":[{"dateType":"Available","dateValue":"2022-02-20"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-AVM20108012.pdf","filesize":[{"value":"6.6 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"27"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"a865ba23-6305-4f2d-9cd5-fbfa1d29542e","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2020 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jianfeng, Xu"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kazuyuki, Tasaka"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jianfeng, Xu","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kazuyuki, Tasaka","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10438399","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical 
report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8582","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Most existing video deblurring works focus on the use of temporal redundancy and lack utilization of the prior information about data itself, resulting in strong dataset dependency and limited generalization ability in handling challenging scenarios, like blur in low contrast or severe motion areas, and non-uniform blur. Therefore, we propose a PRiOr-enlightened MOTION-robust video deblurring model (PROMOTION) suitable for both global and local blurry scenarios. On the one hand, we use 3D group convolution to efficiently encode heterogeneous prior information (including illumination, structure, and motion priors), which enhances the model's blur perception while mitigating the output's artifacts. On the other hand, we design the priors representing blur distribution, which enables our model to better handle non-uniform blur in spatio-temporal domain. In addition to the classical camera shake caused blurry scenes, we also prove the generalization of the model for local blur in real scenario, resulting in better accuracy of hand pose estimation.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Most existing video deblurring works focus on the use of temporal redundancy and lack utilization of the prior information about data itself, resulting in strong dataset dependency and limited generalization ability in handling challenging scenarios, like blur in low contrast or severe motion areas, and non-uniform blur. Therefore, we propose a PRiOr-enlightened MOTION-robust video deblurring model (PROMOTION) suitable for both global and local blurry scenarios. On the one hand, we use 3D group convolution to efficiently encode heterogeneous prior information (including illumination, structure, and motion priors), which enhances the model's blur perception while mitigating the output's artifacts. On the other hand, we design the priors representing blur distribution, which enables our model to better handle non-uniform blur in spatio-temporal domain. In addition to the classical camera shake caused blurry scenes, we also prove the generalization of the model for local blur in real scenario, resulting in better accuracy of hand pose estimation.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告オーディオビジュアル複合情報処理(AVM)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2020-02-20","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"12","bibliographicVolumeNumber":"2020-AVM-108"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":203414,"links":{}}