{"id":2001754,"updated":"2025-05-01T05:02:26.426749+00:00","links":{},"created":"2025-04-09T00:41:20.725853+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:02001754","sets":["581:11839:11843"]},"path":["11843"],"owner":"80578","recid":"2001754","title":["Investigating Atrous Rate Reduction in DeepLabV3+ for Improved Image Tampering Localization: A New Module and Dataset Approach"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2025-04-15"},"_buckets":{"deposit":"28bba86e-1bca-41b7-8623-7749568c69c6"},"_deposit":{"id":"2001754","pid":{"type":"depid","value":"2001754","revision_id":0},"owners":[80578],"status":"published","created_by":80578},"item_title":"Investigating Atrous Rate Reduction in DeepLabV3+ for Improved Image Tampering Localization: A New Module and Dataset Approach","author_link":[],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Investigating Atrous Rate Reduction in DeepLabV3+ for Improved Image Tampering Localization: A New Module and Dataset Approach","subitem_title_language":"ja"},{"subitem_title":"Investigating Atrous Rate Reduction in DeepLabV3+ for Improved Image Tampering Localization: A New Module and Dataset Approach","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[一般論文] deep learning, image forgery detection, DeepLabV3+, localization, dataset","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2025-04-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Information Science and Engineering, Ritsumeikan University"},{"subitem_text_value":"Faculty of Information and Communication Technology, Mahidol University"},{"subitem_text_value":"College of Information Science and Engineering, Ritsumeikan University"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Information Science and Engineering, Ritsumeikan University","subitem_text_language":"en"},{"subitem_text_value":"Faculty of Information and Communication Technology, Mahidol University","subitem_text_language":"en"},{"subitem_text_value":"College of Information Science and Engineering, Ritsumeikan University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"control_number":"2001754","publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/2001754/files/IPSJ-JNL6604010.pdf","label":"IPSJ-JNL6604010.pdf"},"date":[{"dateType":"Available","dateValue":"2027-04-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6604010.pdf","filesize":[{"value":"8.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"514f1d47-3859-4aaa-9e78-60181c590170","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2025 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jingjing,Rao"}]},{"creatorNames":[{"creatorName":"Songpon,Teerakanok"}]},{"creatorNames":[{"creatorName":"Tetsutaro,Uehara"}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Jingjing Rao","creatorNameLang":"en"}]},{"creatorNames":[{"creatorName":"Songpon Teerakanok","creatorNameLang":"en"}]},{"creatorNames":[{"creatorName":"Tetsutaro Uehara","creatorNameLang":"en"}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_publisher_15":{"attribute_name":"公開者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"With the popularity of digital images in communications and media, image tampering detection has become an important research topic in the field of computer vision. This study uses the DeepLabV3+ model to explore the impact of dilated convolution rate changes and attention mechanisms on the accuracy of image tampering location and particularly emphasizes the application of independently created mobile image tampering datasets in experiments. First, we verified the effectiveness of DeepLabV3+ on basic image segmentation tasks and tried to apply it to more complex image tampering detection tasks. Through a series of experiments, we found that reducing the atrous convolution rate can reduce model complexity and improve training efficiency without significantly affecting accuracy. Furthermore, we integrate channel attention and spatial attention mechanisms, aiming to enhance the model's recognition accuracy of tampered areas. In particular, the mobile datasets we developed contain images shot with smartphones and then tampered with using the phone's built-in editing tools. These datasets play a key role in validating the model's ability to handle real-world tampering scenarios.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.33(2025) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.33.264\n------------------------------","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"With the popularity of digital images in communications and media, image tampering detection has become an important research topic in the field of computer vision. This study uses the DeepLabV3+ model to explore the impact of dilated convolution rate changes and attention mechanisms on the accuracy of image tampering location and particularly emphasizes the application of independently created mobile image tampering datasets in experiments. First, we verified the effectiveness of DeepLabV3+ on basic image segmentation tasks and tried to apply it to more complex image tampering detection tasks. Through a series of experiments, we found that reducing the atrous convolution rate can reduce model complexity and improve training efficiency without significantly affecting accuracy. Furthermore, we integrate channel attention and spatial attention mechanisms, aiming to enhance the model's recognition accuracy of tampered areas. In particular, the mobile datasets we developed contain images shot with smartphones and then tampered with using the phone's built-in editing tools. These datasets play a key role in validating the model's ability to handle real-world tampering scenarios.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.33(2025) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.33.264\n------------------------------","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2025-04-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"4","bibliographicVolumeNumber":"66"}]},"relation_version_is_last":true,"weko_creator_id":"80578"}}