{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00217665","sets":["934:1022:10776:10872"]},"path":["10872"],"owner":"44499","recid":"217665","title":["Providing Interpretability of Document Classification by Deep Neural Network with Self-attention"],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-04-07"},"_buckets":{"deposit":"01e0668c-bac4-4e91-b346-abd136d3bcb4"},"_deposit":{"id":"217665","pid":{"type":"depid","value":"217665","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Providing Interpretability of Document Classification by Deep Neural Network with Self-attention","author_link":["564360","564359","564355","564358","564354","564357","564353","564356"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Providing Interpretability of Document Classification by Deep Neural Network with Self-attention"},{"subitem_title":"Providing Interpretability of Document Classification by Deep Neural Network with Self-attention","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[研究論文] deep learning, new documents classification, self-attention, smooth-grad, LSTM","subitem_subject_scheme":"Other"}]},"item_type_id":"3","publish_date":"2022-04-07","item_3_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Kogakuin University"},{"subitem_text_value":"Kogakuin University"},{"subitem_text_value":"Kogakuin University"},{"subitem_text_value":"Kogakuin University"}]},"item_3_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Kogakuin University","subitem_text_language":"en"},{"subitem_text_value":"Kogakuin University","subitem_text_language":"en"},{"subitem_text_value":"Kogakuin University","subitem_text_language":"en"},{"subitem_text_value":"Kogakuin University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/217665/files/IPSJ-TOD1502003.pdf","label":"IPSJ-TOD1502003.pdf"},"date":[{"dateType":"Available","dateValue":"2024-04-07"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-TOD1502003.pdf","filesize":[{"value":"5.9 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"13"},{"tax":["include_tax"],"price":"0","billingrole":"39"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"ef1305e2-e590-44dd-8089-4ff8d2b256e7","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_3_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Atsuki, Tamekuri"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kosuke, Nakamura"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihaya, Takahashi"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Saneyasu, Yamaguchi"}],"nameIdentifiers":[{}]}]},"item_3_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Atsuki, Tamekuri","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Kosuke, Nakamura","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihaya, Takahashi","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Saneyasu, Yamaguchi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_3_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11464847","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_3_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7799","subitem_source_identifier_type":"ISSN"}]},"item_3_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Deep learning has been widely used in natural language processing (NLP) such as document classification. For example, self-attention has achieved significant improvement in NLP. However, it has been pointed out that although deep learning accurately classifies documents, it is difficult for users to interpret the basis of the decision. In this paper, we focus on the task of classifying open-data news documents by their theme with a deep neural network with self-attention. We then propose methods for providing the interpretability for these classifications. First, we classify news documents by LSTM with a self-attention mechanism and then show that the network can classify documents highly accurately. Second, we propose five methods for providing the basis of the decision by focusing on various values, e.g., attention, the gradient between input and output values of a neural network, and classification results of a document with one word. Finally, we evaluate the performance of these methods in four evaluating ways and show that these methods can present interpretability suitably. In particular, the methods based on documents with one word can provide interpretability, which is extracting the words that have a strong influence on the classification results.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.30(2022) (online)\n------------------------------","subitem_description_type":"Other"}]},"item_3_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Deep learning has been widely used in natural language processing (NLP) such as document classification. For example, self-attention has achieved significant improvement in NLP. However, it has been pointed out that although deep learning accurately classifies documents, it is difficult for users to interpret the basis of the decision. In this paper, we focus on the task of classifying open-data news documents by their theme with a deep neural network with self-attention. We then propose methods for providing the interpretability for these classifications. First, we classify news documents by LSTM with a self-attention mechanism and then show that the network can classify documents highly accurately. Second, we propose five methods for providing the basis of the decision by focusing on various values, e.g., attention, the gradient between input and output values of a neural network, and classification results of a document with one word. Finally, we evaluate the performance of these methods in four evaluating ways and show that these methods can present interpretability suitably. In particular, the methods based on documents with one word can provide interpretability, which is extracting the words that have a strong influence on the classification results.\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.30(2022) (online)\n------------------------------","subitem_description_type":"Other"}]},"item_3_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌データベース(TOD)"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2022-04-07","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"2","bibliographicVolumeNumber":"15"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":217665,"updated":"2025-01-19T15:24:59.040477+00:00","links":{},"created":"2025-01-19T01:18:08.813880+00:00"}