{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00102603","sets":["581:7397:7643"]},"path":["7643"],"owner":"11","recid":"102603","title":["A Method for Embedding Context to Sound-based Life Log"],"pubdate":{"attribute_name":"公開日","attribute_value":"2014-08-15"},"_buckets":{"deposit":"c0da3864-38cd-403b-9e55-625e93f18899"},"_deposit":{"id":"102603","pid":{"type":"depid","value":"102603","revision_id":0},"owners":[11],"status":"published","created_by":11},"item_title":"A Method for Embedding Context to Sound-based Life Log","author_link":["11302","11305","11303","11304","11306","11307"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"A Method for Embedding Context to Sound-based Life Log"},{"subitem_title":"A Method for Embedding Context to Sound-based Life Log","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"[一般論文] wearable computing, gesture recognition, environment recognition, ultrasonic sound, life log, location recognition, person recognition","subitem_subject_scheme":"Other"}]},"item_type_id":"2","publish_date":"2014-08-15","item_2_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, Kobe University"},{"subitem_text_value":"Graduate School of Engineering, Kobe University/PRESTO, Japan Science and Technology Agency"},{"subitem_text_value":"Graduate School of Engineering, Kobe University"}]},"item_2_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Graduate School of Engineering, Kobe University","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of Engineering, Kobe University / PRESTO, Japan Science and Technology Agency","subitem_text_language":"en"},{"subitem_text_value":"Graduate School of Engineering, Kobe University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"publish_status":"0","weko_shared_id":11,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/102603/files/IPSJ-JNL5508024.pdf","label":"IPSJ-JNL5508024"},"date":[{"dateType":"Available","dateValue":"2016-08-15"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-JNL5508024.pdf","filesize":[{"value":"4.0 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"8"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"a51e7fad-7d15-46c5-8828-19c1f077ac39","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2014 by the Information Processing Society of Japan"}]},"item_2_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Hiroki, Watanabe"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tsutomu, Terada"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masahiko, Tsukamoto"}],"nameIdentifiers":[{}]}]},"item_2_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Hiroki, Watanabe","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Tsutomu, 
Terada","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Masahiko, Tsukamoto","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_2_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN00116647","subitem_source_identifier_type":"NCID"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_6501","resourcetype":"journal article"}]},"item_2_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"1882-7764","subitem_source_identifier_type":"ISSN"}]},"item_2_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Wearable computing technologies are attracting a great deal of attention on context-aware systems. They recognize user context by using wearable sensors. Though conventional context-aware systems use accelerometers or microphones, the former requires wearing many sensors and a storage such as PC for data storing, and the latter cannot recognize complex user motions. In this paper, we propose an activity and context recognition method where the user carries a neck-worn receiver comprising a microphone, and small speakers on his/her wrists that generate ultrasounds. The system recognizes gestures on the basis of the volume of the received sound and the Doppler effect. The former indicates the distance between the neck and wrists, and the latter indicates the speed of motions. We combine the gesture recognition by using ultrasound and conventional MFCC-based environmental-context recognition to recognize complex contexts from the recorded sound. Thus, our approach substitutes the wired or wireless communication typically required in body area motion sensing networks by ultrasounds. Our system also recognizes the place where the user is in and the people who are near the user by ID signals generated from speakers placed in rooms and on people. The strength of the approach is that, for offline recognition, a simple audio recorder can be used for the receiver. Contexts are embedded in the recorded sound all together, and this recorded sound creates a sound-based life log with context information. We evaluate the approach on nine gestures/activities with 10 users. Evaluation results confirmed that when there was no environmental sound generated from other people, the recognition rate was 86.6% on average. When there was environmental sound generated from other people, we compare an approach that selects used feature values depending on a situation against standard approach, which uses feature value of ultrasound and environmental sound. Results for the proposed approach are 64.3%, for the standard approach are 57.3%.\n\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.22(2014) No.4 (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.22.651\n------------------------------","subitem_description_type":"Other"}]},"item_2_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Wearable computing technologies are attracting a great deal of attention on context-aware systems. They recognize user context by using wearable sensors. 
Though conventional context-aware systems use accelerometers or microphones, the former requires wearing many sensors and a storage such as PC for data storing, and the latter cannot recognize complex user motions. In this paper, we propose an activity and context recognition method where the user carries a neck-worn receiver comprising a microphone, and small speakers on his/her wrists that generate ultrasounds. The system recognizes gestures on the basis of the volume of the received sound and the Doppler effect. The former indicates the distance between the neck and wrists, and the latter indicates the speed of motions. We combine the gesture recognition by using ultrasound and conventional MFCC-based environmental-context recognition to recognize complex contexts from the recorded sound. Thus, our approach substitutes the wired or wireless communication typically required in body area motion sensing networks by ultrasounds. Our system also recognizes the place where the user is in and the people who are near the user by ID signals generated from speakers placed in rooms and on people. The strength of the approach is that, for offline recognition, a simple audio recorder can be used for the receiver. Contexts are embedded in the recorded sound all together, and this recorded sound creates a sound-based life log with context information. We evaluate the approach on nine gestures/activities with 10 users. Evaluation results confirmed that when there was no environmental sound generated from other people, the recognition rate was 86.6% on average. When there was environmental sound generated from other people, we compare an approach that selects used feature values depending on a situation against standard approach, which uses feature value of ultrasound and environmental sound. Results for the proposed approach are 64.3%, for the standard approach are 57.3%.\n\n------------------------------\nThis is a preprint of an article intended for publication Journal of\nInformation Processing(JIP). This preprint should not be cited. This\narticle should be cited as: Journal of Information Processing Vol.22(2014) No.4 (online)\nDOI http://dx.doi.org/10.2197/ipsjjip.22.651\n------------------------------","subitem_description_type":"Other"}]},"item_2_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographic_titles":[{"bibliographic_title":"情報処理学会論文誌"}],"bibliographicIssueDates":{"bibliographicIssueDate":"2014-08-15","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"8","bibliographicVolumeNumber":"55"}]},"relation_version_is_last":true,"weko_creator_id":"11"},"id":102603,"updated":"2025-01-20T06:45:28.173944+00:00","links":{},"created":"2025-01-18T23:47:51.489805+00:00"}