@techreport{oai:ipsj.ixsq.nii.ac.jp:00234833,
  author        = {加藤, 駿一 and 熊谷, 瞭 and 竹本, 修 and 野崎, 佑典 and 吉川, 雅弥 and Kato, Shunichi and Kumagai, Ryo and Taketomo, Shu and Nozaki, Yusuke and Yoshikawa, Masaya},
  title         = {敵対的サンプルにおける転移性の定量的評価},
  institution   = {{Information Processing Society of Japan (IPSJ)}},
  number        = {6},
  month         = jun,
  year          = {2024},
  abstract      = {AI に対する脅威として敵対的サンプル(Adversarial Example:AE)が報告されている.AE は,入力画像に微小なノイズを加えることで推論結果を誤認識させる攻撃である.近年,この AE の転移性と呼ばれる性質を用いた転移攻撃も報告されている.しかし,これまでに転移攻撃への対策に関する評価はほとんど行われていない.そこで本研究では,フィルタリングや Test-Time Augmentation(TTA)を用いた対策手法を構築し,転移攻撃に対する耐性を定量的に評価する.実験結果から,これらの対策手法が転移攻撃に対する対策として有効であると示した., Adversarial Example (AE) has been reported as a threat to AI. AE is an attack that misclassify prediction results by adding small noise to the input image. Recently, Transferable Adversarial Attack based on Adversarial Example transferability has also been reported. However, there have been few evaluations of countermeasures against Transferable Adversarial Attack. Therefore, this study constructs the countermeasures against it, which are based on filtering and Test Time Augmentation (TTA) in order to evaluate the vulnerability. Experiments show the validity of these countermeasures.},
  internal-note = {Romanized surname/given order corrected (was "Shunichi, Kato" etc.); institution inferred from OAI identifier ipsj.ixsq.nii.ac.jp -- TODO confirm the IPSJ SIG report series and volume against the original record},
}