{"id":231983,"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00231983","sets":["934:1022:11484:11485"]},"path":["11485"],"owner":"44499","recid":"231983","title":["Phrase-Level Topic Modeling Based on Joint Embedding Space of Words, Phrases and Documents"],"pubdate":{"attribute_name":"公開日","attribute_value":"2024-01-25"},"_buckets":{"deposit":"9201522b-0a38-41d4-94ed-38d385f689e3"},"_deposit":{"id":"231983","pid":{"type":"depid","value":"231983","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Phrase-Level Topic Modeling Based on Joint Embedding Space of Words, Phrases and Documents","author_link":["627583","627580","627581","627584","627582","627585"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Phrase-Level Topic Modeling Based on Joint Embedding Space of Words, Phrases and Documents"},{"subitem_title":"Phrase-Level Topic Modeling Based on Joint Embedding Space of Words, Phrases and Documents","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[研究論文] topic modeling, embeddings, Phrase-BERT","subitem_subject_scheme":"Other"}]},"item_type_id":"3","publish_date":"2024-01-25","item_3_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Presently with University of Tsukuba"},{"subitem_text_value":"University of Tsukuba"},{"subitem_text_value":"University of Tsukuba"}]},"item_3_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Presently with University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"University of Tsukuba","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/231983/files/IPSJ-TOD1701003.pdf","label":"IPSJ-TOD1701003.pdf"},"date":[{"dateType":"Available","dateValue":"2026-01-25"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-TOD1701003.pdf","filesize":[{"value":"2.5 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"13"},{"tax":["include_tax"],"price":"0","billingrole":"39"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ad221f2e-c0be-484b-9127-e5d394625fbd","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2024 by the Information Processing Society of Japan"}]},"item_3_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zikai, Zhou"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kei, Wakabayashi"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Hiroyoshi, Ito"}],"nameIdentifiers":[{}]}]},"item_3_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zikai, Zhou","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kei, Wakabayashi","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Hiroyoshi, Ito","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_3_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11464847","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_3_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7799","subitem_source_identifier_type":"ISSN"}]},"item_3_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"In topic modeling, phrases act as important grammatical units that help users interpret the semantics of extracted topics. Embedding-based topic modeling, which has been proposed recently, is a promising approach to extracting phrase-level topics because it does not suffer from scalability issues due to the increased vocabulary size by adding phrases. However, the quality of the phrase-level topics extracted by this approach has not been evaluated, and the effect of the choice of the embedding models used for this method has not been investigated. In this paper, we validate the performance of the phrase-level embedding-based topic modeling and evaluate the effect of the embedding models on the quality of the phrase-level topics. From the result of the evaluation, we realized that the existing pre-trained BERT models have limitations in either sentence or phrase representation; therefore, we further propose a joint fine-tuning of BERT for phrase and sentence embeddings to improve the quality of phrase-level topic modeling. The experimental results quantitatively and qualitatively demonstrate that the jointly fine-tuned BERT yields more coherent phrase-level topics compared with other methods, including popular LDA-based phrase topic modeling.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.32(2024) (online)\n------------------------------","subitem_description_type":"Other"}]},"item_3_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"In topic modeling, phrases act as important grammatical units that help users interpret the semantics of extracted topics. Embedding-based topic modeling, which has been proposed recently, is a promising approach to extracting phrase-level topics because it does not suffer from scalability issues due to the increased vocabulary size by adding phrases. However, the quality of the phrase-level topics extracted by this approach has not been evaluated, and the effect of the choice of the embedding models used for this method has not been investigated. In this paper, we validate the performance of the phrase-level embedding-based topic modeling and evaluate the effect of the embedding models on the quality of the phrase-level topics. From the result of the evaluation, we realized that the existing pre-trained BERT models have limitations in either sentence or phrase representation; therefore, we further propose a joint fine-tuning of BERT for phrase and sentence embeddings to improve the quality of phrase-level topic modeling. The experimental results quantitatively and qualitatively demonstrate that the jointly fine-tuned BERT yields more coherent phrase-level topics compared with other methods, including popular LDA-based phrase topic modeling.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.32(2024) (online)\n------------------------------","subitem_description_type":"Other"}]},"item_3_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌データベース(TOD)"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2024-01-25","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"1","bibliographicVolumeNumber":"17"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"updated":"2025-01-19T10:34:42.080908+00:00","created":"2025-01-19T01:32:34.697016+00:00","links":{}}