{"updated":"2025-01-21T18:30:31.590736+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00083598","sets":["1164:4179:6657:6851"]},"path":["6851"],"owner":"11","recid":"83598","title":["潜在トピックを考慮したBayes n-gram言語モデル"],"pubdate":{"attribute_name":"公開日","attribute_value":"2012-08-26"},"_buckets":{"deposit":"c5b27c9b-5b82-4c87-811d-b12f9f49d099"},"_deposit":{"id":"83598","pid":{"type":"depid","value":"83598","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"潜在トピックを考慮したBayes n-gram言語モデル","author_link":["0","0"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"潜在トピックを考慮したBayes n-gram言語モデル"},{"subitem_title":"Latent Topic Aware Bayesian n-gram Language Model","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"言語モデル・解析","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2012-08-26","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"東京大学大学院情報理工学系研究科"},{"subitem_text_value":"統計数理研究所"},{"subitem_text_value":"東京大学大学院情報理工学系研究科"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Information Science and Technology, University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"The Institute of Statistical Mathematics","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of Information Science and Technology, University of Tokyo","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"jpn"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/83598/files/IPSJ-NL12208004.pdf"},"date":[{"dateType":"Available","dateValue":"2014-08-26"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-NL12208004.pdf","filesize":[{"value":"477.1 kB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"23"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"c5682e19-4eaf-4545-ad50-ddb84cf2ea17","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2012 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"能地, 宏"},{"creatorName":"持橋, 大地"},{"creatorName":"石塚, 満"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Noji, Hiroshi","creatorNameLang":"en"},{"creatorName":"Mochihashi, Daichi","creatorNameLang":"en"},{"creatorName":"Ishizuka, Mitsuru","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10115061","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"文書の潜在トピックを捉え,トピックに応じた適切なnグラムを用いて予測を行うBayes的なnグラム言語モデルを提案する.文章には,単語の出現が文書のトピックに依存して決まる内容語と,文法的な関係のみで決まる機能語が存在する.我々はこれらの単語の出現が,文脈によっておおまかに決まることに着目し,適切な箇所でのみトピックを考慮した予測を行うモデルとして,2種類のモデルを提案し,比較を行う.トピック別のnグラムモデルを,通常のGibbsサンプリングで学習したのではすぐに局所解に陥ってしまうことを実験的に示し,それを回避するための新しいBlocked Gibbsサンプリングを提案する.提案法は,パープレキシティの比較において,Unigram Rescalingと同等以上の性能を示しながら,予測時間の大幅な改善を行うことを確認した.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"We study n-gram language models that can leverage latent topics of a given document, and predict words using n-gram models of these topics. Words in a sentence will appear because of short-range syntactic constraints or semantic dependencies of the document. We observe that whether next word given some context is function word or content word can roughly be predictable and propose two novel n-gram language models that predict words using topic-specific n-grams only when context has strong topicality. To avoid getting stucked in a local minima, novel Blocked Gibbs sampling based on a table in the internal Chinese Restaurant is proposed. We show that our models give superior or comparative performance than the traditional method, Unigram Rescaling in terms of test set perplexity, while drastically reduce the time for prediction.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"11","bibliographic_titles":[{"bibliographic_title":"研究報告自然言語処理(NL)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2012-08-26","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"4","bibliographicVolumeNumber":"2012-NL-208"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"created":"2025-01-18T23:37:03.000722+00:00","id":83598,"links":{}}