{"links":{},"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00223098","sets":["6164:6165:6462:11124"]},"path":["11124"],"owner":"44499","recid":"223098","title":["ポイズニング支援型メンバーシップ推定攻撃に対する差分プライバシーの一考察"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-10-17"},"_buckets":{"deposit":"e1bfd39b-9260-4a08-9cf0-a70d61c8dcc2"},"_deposit":{"id":"223098","pid":{"type":"depid","value":"223098","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"ポイズニング支援型メンバーシップ推定攻撃に対する差分プライバシーの一考察","author_link":["586900","586898","586901","586899"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"ポイズニング支援型メンバーシップ推定攻撃に対する差分プライバシーの一考察"},{"subitem_title":"A Study on Differential Privacy for Poisoning-Assisted Membership Inference Attacks","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"機械学習, ポイズニング攻撃, メンバーシップ推定攻撃, 差分プライバシー","subitem_subject_scheme":"Other"}]},"item_type_id":"18","publish_date":"2022-10-17","item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_18_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"NTT社会情報研究所"},{"subitem_text_value":"大阪大学"}]},"item_18_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"NTT Social Informatics Laboratories","subitem_text_language":"en"},{"subitem_text_value":"Osaka University","subitem_text_language":"en"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/223098/files/IPSJ-CSS2022043.pdf","label":"IPSJ-CSS2022043.pdf"},"date":[{"dateType":"Available","dateValue":"2024-10-17"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CSS2022043.pdf","filesize":[{"value":"452.8 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"30"},{"tax":["include_tax"],"price":"0","billingrole":"46"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ac380a38-a1f4-4f20-9a79-35ee6b07d0f5","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_18_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"芦澤, 奈実"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"矢内, 直人"}],"nameIdentifiers":[{}]}]},"item_18_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Nami, Ashizawa","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Naoto, Yanai","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_5794","resourcetype":"conference paper"}]},"item_18_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"攻撃者が悪意を持って学習データの一部(汚染データ)を提供することで,学習済みモデルから学習データを効果的に得るデータ復元攻撃が近年に示された.本稿では,学習データが差分プライバシーを満たすことで,上述したデータ復元攻撃のうち,メンバーシップ推定攻撃を防ぐことを実験的に確認する.また,差分プライバシーの敏感度ごと,および汚染データ数ごとに,メンバーシップ推定攻撃の成功率を評価する.これによって,ポイズニング支援型メンバーシップ推定攻撃を防ぐための,差分プライバシーおよびポイズニング攻撃の条件を明らかにする.またこの目的に向けて,ポイズニング支援型メンバーシップ推定攻撃のフレームワークも示す.実験の結果,50,000 枚の学習データのうち 250 枚の汚染データによって,メンバーシップ推定攻撃の成功率が 25.26% 上昇した.一方で,ε ≦ 360 の差分プライバシーを満たすことで,ポイズニング攻撃によるメンバーシップ推定攻撃の成功率の上昇を 0.94% 以内に抑えた.したがって,差分プライバシーによりポイズニング支援型メンバーシップ推定攻撃を防ぐことを確認した.","subitem_description_type":"Other"}]},"item_18_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Recently, model inversion attacks have been proposed in which an attacker maliciously provides a part of the training data (poison data) to obtain the training data from the model effectively. In this paper, we experimentally confirm that differential privacy prevents membership inference attacks. We then evaluate the success rate of membership inference attacks by differential-privacy sensitivity and the number of poison data. We determine the parameter of differential privacy and poisoning attacks to prevent membership inference attacks. To this end, we also present a framework for poisoning-assisted membership inference attacks. We experimentally found that 250 poison data out of 50,000 training data increased the success rate of membership inference attacks by 25.26%.\nOn the other hand, the differential privacy on ε ≦ 360 reduced the increase in the attack success rate of membership inference attacks to within 0.94%.\nTherefore, we found that differential privacy prevents poisoning-assisted membership inference attacks. ","subitem_description_type":"Other"}]},"item_18_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"312","bibliographic_titles":[{"bibliographic_title":"コンピュータセキュリティシンポジウム2022論文集"}],"bibliographicPageStart":"305","bibliographicIssueDates":{"bibliographicIssueDate":"2022-10-17","bibliographicIssueDateType":"Issued"}}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"created":"2025-01-19T01:22:58.311518+00:00","updated":"2025-01-19T13:31:03.118049+00:00","id":223098}