{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00220346","sets":["581:10784:10795"]},"path":["10795"],"owner":"44499","recid":"220346","title":["Stetho Touch: Touch Action Recognition System by Deep Learning with Stethoscope Acoustic Sensing "],"pubdate":{"attribute_name":"公開日","attribute_value":"2022-10-15"},"_buckets":{"deposit":"4d9ee27b-674d-47e6-9163-83612f535b07"},"_deposit":{"id":"220346","pid":{"type":"depid","value":"220346","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Stetho Touch: Touch Action Recognition System by Deep Learning with Stethoscope Acoustic Sensing ","author_link":["576507","576509","576511","576506","576508","576510"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Stetho Touch: Touch Action Recognition System by Deep Learning with Stethoscope Acoustic Sensing "},{"subitem_title":"Stetho Touch: Touch Action Recognition System by Deep Learning with Stethoscope Acoustic Sensing ","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[一般論文] machine learning, deep learning, CHI, acoustic sensing","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2022-10-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University"},{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University"},{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University","subitem_text_language":"en"},{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University","subitem_text_language":"en"},{"subitem_text_value":"Graduate of Faculty of Engineering, Sophia University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/220346/files/IPSJ-JNL6310005.pdf","label":"IPSJ-JNL6310005.pdf"},"date":[{"dateType":"Available","dateValue":"2024-10-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL6310005.pdf","filesize":[{"value":"3.1 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"0","billingrole":"5"},{"tax":["include_tax"],"price":"0","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"34f4e231-1b60-4bc0-8376-9565d1b9ad94","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2022 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Nagisa, Masuda"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Koichi, Furukawa"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Ikuko, Eguchi Yairi"}],"nameIdentifiers":[{}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Nagisa, Masuda","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Koichi, 
Furukawa","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Ikuko, Eguchi Yairi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_publisher_15":{"attribute_name":"公開者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Developing a new IoT device input method that can reduce the burden on users has become an important issue. This paper proposed a system Stetho Touch that identifies touch actions using acoustic information obtained when a user's finger makes contact with a solid object. To investigate the method, we implemented a prototype of an acoustic sensing device consisting of a low-pressure melamine veneer table, a stethoscope, and an audio interface. The CNN-LSTM classification model of combining CNN and LSTM classified the five touch actions with accuracy 88.26%, f-score 87.26% in LOSO and accuracy 99.39, f-score 99.39 in 18-fold cross-validation. The contributions of this paper are the following; (1) proposed a touch action recognition method using acoustic information that is more natural and accurate than existing methods, (2) evaluated a touch action recognition method using Deep Learning that can be processed in real-time using acoustic time series raw data as input, and (3) proved the compensations for the user dependence of touch actions by providing a learning phase or performing sequential learning during use. \n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.30(2022) (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.30.718\n------------------------------","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Developing a new IoT device input method that can reduce the burden on users has become an important issue. This paper proposed a system Stetho Touch that identifies touch actions using acoustic information obtained when a user's finger makes contact with a solid object. To investigate the method, we implemented a prototype of an acoustic sensing device consisting of a low-pressure melamine veneer table, a stethoscope, and an audio interface. The CNN-LSTM classification model of combining CNN and LSTM classified the five touch actions with accuracy 88.26%, f-score 87.26% in LOSO and accuracy 99.39, f-score 99.39 in 18-fold cross-validation. 
"item_2_description_8":{"attribute_name":"Abstract (English)","attribute_value_mlt":[{"subitem_description":"Developing a new IoT device input method that can reduce the burden on users has become an important issue. This paper proposes Stetho Touch, a system that identifies touch actions using the acoustic information obtained when a user's finger makes contact with a solid object. To investigate the method, we implemented a prototype of an acoustic sensing device consisting of a low-pressure melamine veneer table, a stethoscope, and an audio interface. The CNN-LSTM classification model, which combines a CNN and an LSTM, classified the five touch actions with an accuracy of 88.26% and an f-score of 87.26% in leave-one-subject-out (LOSO) cross-validation, and with an accuracy of 99.39% and an f-score of 99.39% in 18-fold cross-validation. The contributions of this paper are as follows: (1) we proposed a touch action recognition method using acoustic information that is more natural and accurate than existing methods, (2) we evaluated a Deep Learning-based touch action recognition method that can run in real time using raw acoustic time-series data as input, and (3) we demonstrated that the user dependence of touch actions can be compensated for by providing a learning phase or by performing sequential learning during use.\n------------------------------\nThis is a preprint of an article intended for publication in the Journal of Information Processing (JIP). This preprint should not be cited. This article should be cited as: Journal of Information Processing Vol.30 (2022) (online), DOI: http://dx.doi.org/10.2197/ipsjjip.30.718\n------------------------------","subitem_description_type":"Other"}]},
"item_2_biblio_info_10":{"attribute_name":"Bibliographic information","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2022-10-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"10","bibliographicVolumeNumber":"63"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":220346,"updated":"2025-01-19T14:35:17.710910+00:00","links":{},"created":"2025-01-19T01:20:23.474358+00:00"}
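
Editorial note: the abstract describes a CNN-LSTM model that takes raw acoustic time-series data as input and classifies five touch actions. The sketch below is a hypothetical illustration of such an architecture in PyTorch; the layer sizes, sampling rate, and segment length are assumptions for illustration only and are not taken from the paper or this repository record.

# Editorial sketch (assumption): a minimal CNN-LSTM classifier over raw acoustic
# waveforms of the kind described in the abstract. Layer sizes, sampling rate,
# and segment length are illustrative guesses, not the authors' implementation.
import torch
import torch.nn as nn

class CNNLSTMTouchClassifier(nn.Module):
    def __init__(self, n_classes: int = 5, hidden: int = 64):
        super().__init__()
        # 1-D convolutions turn the raw waveform into a shorter feature sequence.
        self.cnn = nn.Sequential(
            nn.Conv1d(1, 16, kernel_size=64, stride=4), nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Conv1d(16, 32, kernel_size=32, stride=2), nn.ReLU(),
            nn.MaxPool1d(4),
        )
        # The LSTM models how those features evolve over the course of a touch action.
        self.lstm = nn.LSTM(input_size=32, hidden_size=hidden, batch_first=True)
        self.fc = nn.Linear(hidden, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, samples) raw audio segment containing one touch action
        feats = self.cnn(x.unsqueeze(1))   # (batch, channels, time)
        feats = feats.transpose(1, 2)      # (batch, time, channels) for the LSTM
        _, (h_n, _) = self.lstm(feats)     # last hidden state summarizes the segment
        return self.fc(h_n[-1])            # (batch, n_classes) logits

if __name__ == "__main__":
    # Example: a batch of eight 1-second segments at an assumed 44.1 kHz rate.
    model = CNNLSTMTouchClassifier()
    logits = model(torch.randn(8, 44100))
    print(logits.shape)  # torch.Size([8, 5])

In the LOSO evaluation mentioned in the abstract, each fold would hold out all segments from one subject for testing; that split logic is independent of the model and is omitted here.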