@techreport{oai:ipsj.ixsq.nii.ac.jp:00209564,
  author        = {Sakamoto, Takeshi and Mori, Tatsuya},
  author-ja     = {坂本, 岳史 and 森, 達哉},
  title         = {オンライン機械翻訳システムに対するホモグリフ攻撃の脆弱性調査},
  institution   = {Information Processing Society of Japan (IPSJ)},
  type          = {IPSJ SIG Technical Report},
  number        = {27},
  month         = feb,
  year          = {2021},
  abstract      = {ニューラルネットワークを搭載するシステムには,正当な入力に微小な摂動を加えた悪意ある入力 (Adversarial Input) により,意図的に誤動作が引き起こされるという脆弱性が指摘されている.Adversarial Input は,元の入力との違いが人間には認知されないという特徴をもつ.本研究では,オンラインで利用可能な 8 つの機械翻訳システムに対し,悪意ある入力としてホモグリフや特殊文字を入力した際の出力を調査し,敵対的攻撃に対する脆弱性の評価を行う.調査の結果,各機械翻訳システムに特有の前処理を推定することが可能であるとわかった.本論文では,前処理の推定結果に基づき,各システムに固有の脆弱性や対策方法について提案する. It has been widely known that systems empowered by neural network algorithms are vulnerable against an intrinsic attack named “Adversarial Input”, which can be generated by adding small perturbations to the original inputs, aiming at fooling the systems. Adversarial Input has the characteristic that the difference from the original input is not recognized by humans. In this research, we investigate the output of eight machine translation systems that can be used online when homoglyphs and special characters are input as malicious inputs, and evaluate their vulnerability to hostile attacks. As a result of the investigation, it was found that it is possible to estimate the preprocessing to each machine translation system. In this paper, we propose vulnerabilities and countermeasures specific to each system based on the estimation results of preprocessing.},
  internal-note = {Cleaned OAI export: deduplicated authors (Japanese forms kept in author-ja), moved issue->number and note->abstract, month macro. Institution and report type inferred from the OAI identifier (ipsj.ixsq.nii.ac.jp) -- TODO confirm SIG series/volume (likely 2021-SPT) against the original record.},
}