@article{oai:ipsj.ixsq.nii.ac.jp:00241743,
  author   = {Sasaki, Ryoichi and Onishi, Kenta and Mitsui, Yoshihiro and Terada, Masato},
  title    = {Development and Trial Application of an Improved {MRC-EDC} Method for Risk Assessment of Attacks on Humans by Generative {AI}},
  journal  = {情報処理学会論文誌},
  volume   = {65},
  number   = {12},
  month    = dec,
  year     = {2024},
  doi      = {10.2197/ipsjjip.32.1057},
  abstract = {The authors previously proposed classifying the relationship between AI and security into four types: attacks using AI, attacks by AI, attacks to AI, and security measures using AI. Subsequently, generative AI such as ChatGPT has become widely used. Therefore, we examined the impact of the emergence of generative AI on the relationship between AI and security and demonstrated a pressing need for countermeasures against attacks by generative AI. The authors then categorized three types of attacks from generative AI to humans: ``Terminator,'' ``2001: A Space Odyssey,'' and ``Mad Scientist,'' and proposed potential countermeasures against them. The MRC-EDC method developed earlier by the authors aimed to optimize the combination of countermeasures, but it was not suitable for this subject due to its full-quantitative approach, necessitating rigorous cost and risk estimation. Consequently, we developed an improved MRC-EDC method that partially incorporates a semi-quantitative approach and conducted a trial to propose countermeasures against attacks by generative AI. As a result, five cost-effective countermeasures were identified, confirming the effectiveness of this method.},
  note     = {Preprint of an article published in Journal of Information Processing, Vol.~32 (2024), online},
}