{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00241743","sets":["581:11492:11505"]},"path":["11505"],"owner":"44499","recid":"241743","title":["Development and Trial Application of an Improved MRC-EDC Method for Risk Assessment of Attacks on Humans by Generative AI"],"pubdate":{"attribute_name":"公開日","attribute_value":"2024-12-15"},"_buckets":{"deposit":"2c568f1b-518f-4a0a-b896-635352a24ac2"},"_deposit":{"id":"241743","pid":{"type":"depid","value":"241743","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Development and Trial Application of an Improved MRC-EDC Method for Risk Assessment of Attacks on Humans by Generative AI","author_link":["666021","666023","666022","666024","666020","666018","666019","666017"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Development and Trial Application of an Improved MRC-EDC Method for Risk Assessment of Attacks on Humans by Generative AI"},{"subitem_title":"Development and Trial Application of an Improved MRC-EDC Method for Risk Assessment of Attacks on Humans by Generative AI","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[特集:社会的・倫理的なオンライン活動を支援するセキュリティとトラスト] generative AI, security, risk assessment, risk communication, AI attacks","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2024-12-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Tokyo Denki University"},{"subitem_text_value":"PwC Consulting LLC"},{"subitem_text_value":"PwC Consulting LLC"},{"subitem_text_value":"Tokyo Denki University"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Tokyo Denki University","subitem_text_language":"en"},{"subitem_text_value":"PwC Consulting LLC","subitem_text_language":"en"},{"subitem_text_value":"PwC Consulting LLC","subitem_text_language":"en"},{"subitem_text_value":"Tokyo Denki University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/241743/files/IPSJ-JNL6512003.pdf","label":"IPSJ-JNL6512003.pdf"},"date":[{"dateType":"Available","dateValue":"2026-12-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6512003.pdf","filesize":[{"value":"7.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"619fa0e9-265f-4b74-9018-6ff4ae6f55ae","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2024 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Ryoichi, Sasaki"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kenta, Onishi"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihiro, Mitsui"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masato, 
Terada"}],"nameIdentifiers":[{}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Ryoichi, Sasaki","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kenta, Onishi","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihiro, Mitsui","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masato, Terada","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_publisher_15":{"attribute_name":"公開者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"The authors previously proposed classifying the relationship between AI and security into four types: attacks using AI, attacks by AI, attacks to AI, and security measures using AI. Subsequently, generative AI such as ChatGPT has become widely used. Therefore, we examined the impact of the emergence of generative AI on the relationship between AI and security and demonstrated a pressing need for countermeasures against attacks by generative AI. The authors then categorized three types of attacks from generative AI to humans: “Terminator,” “2001: A Space Odyssey,” and “Mad Scientist,” and proposed potential countermeasures against them. The MRC-EDC method developed earlier by the authors aimed to optimize the combination of countermeasures, but it was not suitable for this subject due to its full-quantitative approach, necessitating rigorous cost and risk estimation. Consequently, we developed an improved MRC-EDC method that partially incorporates a semi-quantitative approach and conducted a trial to propose countermeasures against attacks by generative AI. As a result, five cost-effective countermeasures were identified, confirming the effectiveness of this method. \n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.32(2024) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.32.1057\n------------------------------","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"The authors previously proposed classifying the relationship between AI and security into four types: attacks using AI, attacks by AI, attacks to AI, and security measures using AI. Subsequently, generative AI such as ChatGPT has become widely used. Therefore, we examined the impact of the emergence of generative AI on the relationship between AI and security and demonstrated a pressing need for countermeasures against attacks by generative AI. The authors then categorized three types of attacks from generative AI to humans: “Terminator,” “2001: A Space Odyssey,” and “Mad Scientist,” and proposed potential countermeasures against them. 
The MRC-EDC method developed earlier by the authors aimed to optimize the combination of countermeasures, but it was not suitable for this subject because its fully quantitative approach necessitates rigorous cost and risk estimation. Consequently, we developed an improved MRC-EDC method that partially incorporates a semi-quantitative approach and conducted a trial to propose countermeasures against attacks by generative AI. As a result, five cost-effective countermeasures were identified, confirming the effectiveness of this method.\n------------------------------\nThis is a preprint of an article intended for publication in the Journal of\nInformation Processing (JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.32 (2024) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.32.1057\n------------------------------","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2024-12-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"12","bibliographicVolumeNumber":"65"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"updated":"2025-01-19T07:33:41.194861+00:00","created":"2025-01-19T01:46:31.794249+00:00","links":{},"id":241743}