{"created":"2025-09-05T04:46:57.174864+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:02004341","sets":["581:11839:11848"]},"path":["11848"],"owner":"80578","recid":"2004341","title":["大規模言語モデルの利活用におけるインジェクション攻撃に関する脅威の体系化"],"pubdate":{"attribute_name":"PubDate","attribute_value":"2025-09-15"},"_buckets":{"deposit":"ef600b75-8a4a-452b-b328-f46bf273e609"},"_deposit":{"id":"2004341","pid":{"type":"depid","value":"2004341","revision_id":0},"owner":"80578","owners":[80578],"status":"published","created_by":80578},"item_title":"大規模言語モデルの利活用におけるインジェクション攻撃に関する脅威の体系化","author_link":[],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"大規模言語モデルの利活用におけるインジェクション攻撃に関する脅威の体系化","subitem_title_language":"ja"},{"subitem_title":"Systematizing Threats of Injection Attacks on Applications of Large Language Models","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[特集:AI社会を安全にするコンピュータセキュリティ技術(推薦論文,特選論文)] 大規模言語モデル,インジェクション攻撃,脅威の体系化","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2025-09-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"NTT社会情報研究所"},{"subitem_text_value":"NTT社会情報研究所"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"NTT Social Informatics Laboratories","subitem_text_language":"en"},{"subitem_text_value":"NTT Social Informatics Laboratories","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"control_number":"2004341","publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/2004341/files/IPSJ-JNL6609020.pdf","label":"IPSJ-JNL6609020.pdf"},"date":[{"dateType":"Available","dateValue":"2027-09-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6609020.pdf","filesize":[{"value":"713.1 KB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"b263aa2e-cc2d-4c55-8a67-1d8b69d64b55","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2025 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"青島,達大"}]},{"creatorNames":[{"creatorName":"秋山,満昭"}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Tatsuhiro Aoshima","creatorNameLang":"en"}]},{"creatorNames":[{"creatorName":"Mitsuaki Akiyama","creatorNameLang":"en"}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_publisher_15":{"attribute_name":"公開者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"ChatGPTの登場により,大規模言語モデル(LLM)を活用したアプリケーション開発が広がりつつある.LLMは,与えられた文字列に続く文字を予測する機械学習モデルである.そのため,LLMへ指示を与えるには,アプリとしての仕事に関する指示とユーザーからのクエリを1つの文字列として結合する必要がある.このとき,攻撃者による指示がLLMへ入力される可能性があり,LLMが悪意のある行為を実行してしまうインジェクション攻撃が成立する危険性がある.我々の研究目的は,LLMの安全な利活用の実現へ向けて,インジェクション攻撃対策に注力した脅威の体系化を行い,攻撃手法から対策の優先付けが可能となるフレームワークを与えることにある.この観点で,72個の攻撃事例を収集し,9個の攻撃手法へ体系的に整理した.また,攻撃手法から攻撃者の意図を推定し,9個の緩和策から優先すべき対策を検討できる構成とした.本論文では,利用体験と設計方針を検証するという目的で,サイバーセキュリティ分野の専門家らによる評価を実施した結果も報告し,今後の課題も整理する.","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"After ChatGPT emerged, more applications have been developed using Large Language Models (LLM). An LLM is a machine-learning model that predicts a character following a given string. An LLM application takes one input string, concatenating the description of the task and a user query. In this case, an attacker can inject its instructions into LLMs. Hence, LLM would execute malicious tasks, which leads to injection attacks. Our research goal is to systematize threats focusing on injection attacks to realize LLM application safety and provide a framework supporting the prioritization of possible mitigations from the attack techniques. It consists of 72 attack examples, then categorized into nine techniques. Then, users can infer the attacker's tactics from the techniques taken and consider which of the nine mitigations should be employed. In this paper, to validate user experience and design policy, we show the results of evaluations by experts in cyber security and discuss future works.","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"1246","bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicPageStart":"1235","bibliographicIssueDates":{"bibliographicIssueDate":"2025-09-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"9","bibliographicVolumeNumber":"66"}]},"relation_version_is_last":true,"item_2_identifier_registration":{"attribute_name":"ID登録","attribute_value_mlt":[{"subitem_identifier_reg_text":"10.20729/0002004341","subitem_identifier_reg_type":"JaLC"}]},"weko_creator_id":"80578"},"id":2004341,"updated":"2025-09-15T23:06:50.081240+00:00","links":{}}