@inproceedings{oai:ipsj.ixsq.nii.ac.jp:00212959,
  author    = {Van, Sang Tran and Phuong, Thao Tran and 山口, 利恵 and 中田, 登志之},
  booktitle = {マルチメディア,分散協調とモバイルシンポジウム2021論文集},
  issue     = {1},
  month     = {Jun},
  note      = {Adversarial image attack is a well-known attack methodology in the image recognition field in which input images are purposely modified so that they appear unchanged to human perception yet fool image recognition models into classifying them incorrectly. Recently, adversarial attacks have drawn much attention from researchers due to their ability to fool even state-of-the-art and commercial image recognition models. Researching adversarial attacks is crucial for understanding the potential risk and preparing the necessary prevention in advance. In this paper, we investigate an improvement to the Boundary Attack algorithm, chosen for its effectiveness, flexibility, and the absence of a direct protection mechanism. In its original form, the Boundary Attack samples the movement vector from the whole image space in the randomization step. In this research, we improve the algorithm by applying a square mask to this space. We evaluated the method on the CIFAR10 dataset and successfully reduced the distance between the adversarial and the original images without increasing the number of queries. Our work suggests a new attack vector that exploits prior knowledge of the model to improve the distance without affecting the query count.},
  pages     = {466--471},
  publisher = {情報処理学会},
  title     = {Improving The Decision-Based Adversarial Boundary Attack by Square Masked Movement},
  volume    = {2021},
  year      = {2021}
}