@inproceedings{oai:ipsj.ixsq.nii.ac.jp:00214482,
  author    = {Yamamoto, Kyohei and Yoshino, Masayuki and Togashi, Yumiko},
  title     = {Proposal of Adversarial Detection Robust to White-Box Attacks},
  booktitle = {Proceedings of Computer Security Symposium 2021 (CSS 2021)},
  pages     = {607--614},
  month     = oct,
  year      = {2021},
  publisher = {Information Processing Society of Japan},
  note      = {Deep learning is used in a wide range of fields, and in the near future it is expected to be deployed in safety-critical domains such as automotive and medical systems. As deep learning spreads, however, research on attacks against it is also advancing, raising concerns about damage to real systems. A representative attack is the Adversarial Example Attack, which causes an inference model to make wrong decisions by adding deliberately crafted small noise to the input image. Countermeasures are also being studied; in particular, Adversarial Detection, which detects Adversarial Examples, is well known, but its detection accuracy degrades when the attacker has information about the detection method. This paper reports experimental results for an Adversarial Detection method that can be expected to achieve high detection accuracy even when the attacker has information about the detection method.}
}
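
The abstract above describes the Adversarial Example Attack as adding deliberately crafted small noise to an input image so that an inference model makes a wrong decision. As a minimal sketch of that generic attack (not the paper's detection method), the snippet below implements an FGSM-style perturbation in PyTorch; model, x, y, and epsilon are placeholder assumptions, not names from the paper.

    # Minimal FGSM-style Adversarial Example Attack sketch (assumed illustration,
    # not the paper's method). Requires a trained classifier `model`, an input
    # batch `x` in [0, 1], and true labels `y`.
    import torch
    import torch.nn.functional as F

    def fgsm_attack(model, x, y, epsilon=0.03):
        """Return x plus a small perturbation that pushes model toward misclassification."""
        x_adv = x.clone().detach().requires_grad_(True)
        loss = F.cross_entropy(model(x_adv), y)
        loss.backward()
        # Step each pixel by epsilon in the direction that increases the loss.
        x_adv = x_adv + epsilon * x_adv.grad.sign()
        return x_adv.clamp(0.0, 1.0).detach()

An Adversarial Detection defense, as the abstract uses the term, would try to flag such perturbed inputs before inference; the paper's contribution is a detector whose accuracy is expected to hold even when the attacker knows the detection method.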