{"created":"2025-01-19T01:15:09.465626+00:00","updated":"2025-01-19T16:39:39.159697+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00214336","sets":["581:10433:10445"]},"path":["10445"],"owner":"44499","recid":"214336","title":["Timing Attack on Random Forests: Experimental Evaluation and Detailed Analysis "],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-12-15"},"_buckets":{"deposit":"7b11f00a-9fe7-4f5b-b7ed-755c9d3c0d76"},"_deposit":{"id":"214336","pid":{"type":"depid","value":"214336","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Timing Attack on Random Forests: Experimental Evaluation and Detailed Analysis ","author_link":["549990","549988","549987","549989","549991","549992"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Timing Attack on Random Forests: Experimental Evaluation and Detailed Analysis "},{"subitem_title":"Timing Attack on Random Forests: Experimental Evaluation and Detailed Analysis ","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[特集:デジタル社会の情報セキュリティとトラスト(推薦論文)] side-channel attack, adversarial examples, black-box attack, evolution strategy","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2021-12-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"NTT Secure Platform Laboratories"},{"subitem_text_value":"NTT Social Informatics Laboratories"},{"subitem_text_value":"NTT Social Informatics Laboratories"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"NTT Secure Platform Laboratories","subitem_text_language":"en"},{"subitem_text_value":"NTT Social Informatics Laboratories","subitem_text_language":"en"},{"subitem_text_value":"NTT Social Informatics Laboratories","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/214336/files/IPSJ-JNL6212003.pdf","label":"IPSJ-JNL6212003.pdf"},"date":[{"dateType":"Available","dateValue":"2023-12-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6212003.pdf","filesize":[{"value":"4.8 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"323bc6a4-fb5b-4506-b703-4e16bf480c6a","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Yuichiro, Dan"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshiki, Shibahara"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Junko, Takahashi"}],"nameIdentifiers":[{}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Yuichiro, Dan","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshiki, 
Shibahara","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Junko, Takahashi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"This paper proposes a novel implementation attack on machine learning. The threat of such attacks has recently become an problem in machine learning. These attacks include side-channel attacks that use information acquired from implemented devices and fault attacks that inject faults into implemented devices using external tools such as lasers. Thus far, these attacks have targeted mainly deep neural networks; however, other common methods such as random forests can also be targets. In this paper, we investigate the threat of implementation attacks to random forests. Specifically, we propose a novel timing attack that generates adversarial examples. Additionally, we experimentally evaluate and analyze its attack success rate. The proposed attack exploits a fundamental property of random forests: the response time from the input to the output depends on the number of conditional branches invoked during prediction. More precisely, we generate adversarial examples by optimizing the response time. This optimization affects predictions because changes in the response time indicate changes in the results of the conditional branches. For the optimization, we use an evolution strategy that tolerates measurement error in the response time. Experiments are conducted in a black-box setting where attackers can use only prediction labels and response times. Experimental results show that the proposed attack generates adversarial examples with higher probability than a state-of-the-art attack that uses only predicted labels. Detailed analysis of these results indicates an unfortunate trade-off that restricting tree depth of random forests may mitigate this attack but decrease prediction accuracy.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.29(2021) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.29.757\n------------------------------","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"This paper proposes a novel implementation attack on machine learning. The threat of such attacks has recently become an problem in machine learning. These attacks include side-channel attacks that use information acquired from implemented devices and fault attacks that inject faults into implemented devices using external tools such as lasers. Thus far, these attacks have targeted mainly deep neural networks; however, other common methods such as random forests can also be targets. In this paper, we investigate the threat of implementation attacks to random forests. 
Specifically, we propose a novel timing attack that generates adversarial examples. Additionally, we experimentally evaluate and analyze its attack success rate. The proposed attack exploits a fundamental property of random forests: the response time from the input to the output depends on the number of conditional branches invoked during prediction. More precisely, we generate adversarial examples by optimizing the response time. This optimization affects predictions because changes in the response time indicate changes in the results of the conditional branches. For the optimization, we use an evolution strategy that tolerates measurement error in the response time. Experiments are conducted in a black-box setting where attackers can use only prediction labels and response times. Experimental results show that the proposed attack generates adversarial examples with higher probability than a state-of-the-art attack that uses only predicted labels. Detailed analysis of these results indicates an unfortunate trade-off that restricting tree depth of random forests may mitigate this attack but decrease prediction accuracy.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.29(2021) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.29.757\n------------------------------","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2021-12-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"12","bibliographicVolumeNumber":"62"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":214336,"links":{}}