@inproceedings{oai:ipsj.ixsq.nii.ac.jp:00201484,
 author = {ダンデユイ, タン and 近藤, 大生 and 松井, 俊浩 and Dang, Duy Thang and Kondo, Taisei and Matsui, Toshihiro},
 booktitle = {コンピュータセキュリティシンポジウム2019論文集},
 month = {Oct},
 note = {Along with significant improvements in deep neural networks, image classification tasks are now solved with extremely high accuracy. However, deep neural networks have recently been found vulnerable to well-designed input samples called adversarial examples, which cause deep neural networks to misclassify inputs whose perturbations are imperceptible to humans. Distinguishing adversarial images from legitimate images is a tough challenge. To address this problem, we propose a new automatic classification system for adversarial examples. Our proposed system can distinguish adversarial samples from legitimate images in an end-to-end manner, almost entirely without human intervention. We exploit the important role of low frequencies in adversarial samples and, based on this observation, propose a label-based method for detecting malicious samples. We evaluate our method on a variety of standard benchmark datasets, including MNIST and ImageNet, achieving detection rates of more than 96% in many settings.},
 pages = {1356--1363},
 publisher = {情報処理学会},
 title = {A Label-Based System for Detecting Adversarial Examples by Using Low Pass Filters},
 volume = {2019},
 year = {2019}
}
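
The abstract describes the detection idea only at a high level. Below is a minimal illustrative sketch of how such a label-based, low-pass-filter check could look, assuming a Gaussian blur as the low-pass filter and a `classify` callable standing in for the trained network; the paper's actual filter design, parameters, and pipeline are not given in the abstract and may differ.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def flag_adversarial(image: np.ndarray, classify, sigma: float = 1.5) -> bool:
    """Label-based check in the spirit of the abstract: flag an input as
    adversarial when its predicted label changes after low-pass filtering,
    on the assumption that adversarial perturbations concentrate in high
    spatial frequencies. `classify` and `sigma` are illustrative stand-ins,
    not the paper's actual components."""
    original_label = classify(image)
    # Gaussian blur acts as the low-pass filter; sigma sets the cutoff.
    smoothed = gaussian_filter(image, sigma=sigma)
    return classify(smoothed) != original_label

# Toy usage with a dummy "classifier" that thresholds mean intensity,
# just to show the call pattern on an MNIST-sized grayscale image.
if __name__ == "__main__":
    dummy_classify = lambda img: int(img.mean() > 0.5)
    image = np.random.rand(28, 28)
    print(flag_adversarial(image, dummy_classify))
```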