{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00239252","sets":["6164:6165:6522:11751"]},"path":["11751"],"owner":"44499","recid":"239252","title":["An Empirical Study on Small Language Models in Sentiment Analysis for Software Engineering"],"pubdate":{"attribute_name":"公開日","attribute_value":"2024-09-10"},"_buckets":{"deposit":"30b74170-75b1-469f-a875-155b80de6bd4"},"_deposit":{"id":"239252","pid":{"type":"depid","value":"239252","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"An Empirical Study on Small Language Models in Sentiment Analysis for Software Engineering","author_link":["655531","655527","655529","655528","655525","655526","655530","655532"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"An Empirical Study on Small Language Models in Sentiment Analysis for Software Engineering"},{"subitem_title":"An Empirical Study on Small Language Models in Sentiment Analysis for Software Engineering","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"大規模言語モデル","subitem_subject_scheme":"Other"}]},"item_type_id":"18","publish_date":"2024-09-10","item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_18_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Kyushu University"},{"subitem_text_value":"Kyushu University"},{"subitem_text_value":"Kyushu University"},{"subitem_text_value":"Kyushu University"}]},"item_18_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Kyushu University","subitem_text_language":"en"},{"subitem_text_value":"Kyushu University","subitem_text_language":"en"},{"subitem_text_value":"Kyushu University","subitem_text_language":"en"},{"subitem_text_value":"Kyushu University","subitem_text_language":"en"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/239252/files/IPSJ-SES2024024.pdf","label":"IPSJ-SES2024024.pdf"},"date":[{"dateType":"Available","dateValue":"2026-09-10"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-SES2024024.pdf","filesize":[{"value":"138.4 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"12"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"a8635eb9-a9f3-4fb9-a6d9-a6e5e4ca4499","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2024 by the Information Processing Society of Japan"}]},"item_18_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Chunrun, Tao"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Honglin, Shu"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masanari, Kondo"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yasutaka, Kamei"}],"nameIdentifiers":[{}]}]},"item_18_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Chunrun, Tao","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Honglin, Shu","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masanari, Kondo","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yasutaka, Kamei","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_5794","resourcetype":"conference paper"}]},"item_18_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Software engineering has become very important in daily life and scientific research. The ability to quickly understand developers' emotions, especially negative ones, during the software development process, as well as the reputation and user feedback of the software, is crucial in software engineering today. Over the years, many tools have been developed for Sentiment Analysis for Software Engineering (SA4SE), but capturing sentiment efficiently and accurately remains challenging. The fine-tuned model performs well but relies on a large number of high-quality labeled datasets. While Large Language Models (LLMs) are relatively easy to use and not dependent on these datasets, they generally have mediocre performance except in a few cases. Additionally, they require a large amount of computational resources. In this study, we introduce the Small Language Models (SLMs) and empirically determine its characteristics. We also compare its performance with existing models to generalize SLM's characteristics and see if it improves performance. In addition, the emergence of various chatbots provides this research with a new opportunity: Language Models (LMs) Negotiation. This study examines whether it can improve performance compared to a single LM. The experimental results show that SLMs currently performs similarly to LLMs, indicating that SLMs has good potential in this task. Additionally, LMs Negotiation slightly improves its performance compared to individual models.","subitem_description_type":"Other"}]},"item_18_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Software engineering has become very important in daily life and scientific research. The ability to quickly understand developers' emotions, especially negative ones, during the software development process, as well as the reputation and user feedback of the software, is crucial in software engineering today. Over the years, many tools have been developed for Sentiment Analysis for Software Engineering (SA4SE), but capturing sentiment efficiently and accurately remains challenging. The fine-tuned model performs well but relies on a large number of high-quality labeled datasets. While Large Language Models (LLMs) are relatively easy to use and not dependent on these datasets, they generally have mediocre performance except in a few cases. Additionally, they require a large amount of computational resources. In this study, we introduce the Small Language Models (SLMs) and empirically determine its characteristics. We also compare its performance with existing models to generalize SLM's characteristics and see if it improves performance. In addition, the emergence of various chatbots provides this research with a new opportunity: Language Models (LMs) Negotiation. This study examines whether it can improve performance compared to a single LM. The experimental results show that SLMs currently performs similarly to LLMs, indicating that SLMs has good potential in this task. Additionally, LMs Negotiation slightly improves its performance compared to individual models.","subitem_description_type":"Other"}]},"item_18_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"136","bibliographic_titles":[{"bibliographic_title":"ソフトウェアエンジニアリングシンポジウム2024論文集"}],"bibliographicPageStart":"130","bibliographicIssueDates":{"bibliographicIssueDate":"2024-09-10","bibliographicIssueDateType":"Issued"},"bibliographicVolumeNumber":"2024"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":239252,"updated":"2025-01-19T08:20:48.844994+00:00","links":{},"created":"2025-01-19T01:42:47.158251+00:00"}