{"updated":"2025-01-19T13:13:54.664123+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00224184","sets":["1164:4402:11197:11198"]},"path":["11198"],"owner":"44499","recid":"224184","title":["距離に基づく内発的報酬のためのポテンシャル場生成"],"pubdate":{"attribute_name":"公開日","attribute_value":"2023-02-13"},"_buckets":{"deposit":"2bc07a27-3a4f-4cd1-ab37-4058d30bebb3"},"_deposit":{"id":"224184","pid":{"type":"depid","value":"224184","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"距離に基づく内発的報酬のためのポテンシャル場生成","author_link":["590628","590631","590630","590629","590632"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"距離に基づく内発的報酬のためのポテンシャル場生成"},{"subitem_title":"Potential field generation for distance-based intrinsic rewards","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2023-02-13","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"名古屋工業大学"},{"subitem_text_value":"名古屋工業大学"},{"subitem_text_value":"名古屋工業大学"},{"subitem_text_value":"中部大学"},{"subitem_text_value":"名古屋工業大学"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Nagoya Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"Nagoya Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"Nagoya Institute of Technology","subitem_text_language":"en"},{"subitem_text_value":"Chubu University","subitem_text_language":"en"},{"subitem_text_value":"Nagoya Institute of Technology","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file",
"attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/224184/files/IPSJ-ICS23208001.pdf","label":"IPSJ-ICS23208001.pdf"},"date":[{"dateType":"Available","dateValue":"2025-02-13"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-ICS23208001.pdf","filesize":[{"value":"824.3 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"25"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"7d0797a1-522e-4a58-9ebd-2faf84422868","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2023 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"中田, 瑛"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"森山, 甲一"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"武藤, 敦子"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"松井, 藤五郎"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"犬塚, 信博"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11135936","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},
"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-885X","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"マルチエージェント環境下において,一般的な強化学習ではエージェントが他エージェントに対し協力行動を行うことは困難であるという問題がある.本研究では,効用利用 Q 学習と人工ポテンシャル場(APF)を組み合わせることによって,その問題の解決を図った.環境内のオブジェクトに APF を生成することで,距離に応じた内発的報酬の獲得を行う.そして,APF から獲得した内発的報酬と環境から与えられる外部報酬をもとに効用利用 Q 学習を行うことで,エージェントに対して協力行動を促すことを目的とする.その際,遺伝的アルゴリズム(GA)を用いることで,エージェントの学習に適した APF の生成を目指す.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"7","bibliographic_titles":[{"bibliographic_title":"研究報告知能システム(ICS)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2023-02-13","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"1","bibliographicVolumeNumber":"2023-ICS-208"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"created":"2025-01-19T01:23:47.116370+00:00","id":224184,"links":{}}