{"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00234133","sets":["1164:4619:11539:11642"]},"path":["11642"],"owner":"44499","recid":"234133","title":["Label-Efficient Microscopy Image Recognition with Cell Image Characteristics"],"pubdate":{"attribute_name":"公開日","attribute_value":"2024-05-08"},"_buckets":{"deposit":"40aa2961-2147-47e3-ae20-4a56d1493268"},"_deposit":{"id":"234133","pid":{"type":"depid","value":"234133","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Label-Efficient Microscopy Image Recognition with Cell Image Characteristics","author_link":["637454","637453","637456","637455"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Label-Efficient Microscopy Image Recognition with Cell Image Characteristics"},{"subitem_title":"Label-Efficient Microscopy Image Recognition with Cell Image Characteristics","subitem_title_language":"en"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"D論セッション (CVIM)","subitem_subject_scheme":"Other"}]},"item_type_id":"4","publish_date":"2024-05-08","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Kyushu University"},{"subitem_text_value":"Kyushu University"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Kyushu University","subitem_text_language":"en"},{"subitem_text_value":"Kyushu University","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/234133/files/IPSJ-CVIM24238002.pdf","label":"IPSJ-CVIM24238002.pdf"},"date":[{"dateType":"Available","dateValue":"2026-05-08"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CVIM24238002.pdf","filesize":[{"value":"13.9 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"20"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"729db2b9-d36e-428a-8fce-2ffde924478a","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2024 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Kazuya, Nishimura"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Ryoma, Bise"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Kazuya, Nishimura","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Ryoma, Bise","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11131797","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, 
etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8701","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Deep-learning-based methods have achieved promising results in microscopy image analysis. The methods learn the feature representation in a data-driven manner and can adapt to various imaging conditions and tasks of microscopy image analysis. However, these methods require a certain amount of labeled data to train the model. The labeled data are required for each imaging condition, and unlike general images, specialized knowledge is required to make labeled data. For these reasons, collecting labels is time-consuming and labor-intensive. In this thesis, I explore label-efficient learning methods using readily available cell image characteristics as clues instead of manually labeled data. Specifically, I focused on three main topics to explore the potential utility of clues. As the first topic, I propose weakly supervised cell tracking methods with nuclei positions as a clue. For cell tracking, a multi-frame cell detection network that simultaneously detects cells in successive frames is trained using cell positions. Then, the positions of the same cell in successive frames (i.e., it indicates the cell motion in the frames) are extracted from the network. Experiments demonstrated the effectiveness of the proposed methods. As the second topic, I propose a weakly supervised cell segmentation method with the nuclei positions and cell type labels as multiple clues in this topic. These clues do not contain inter-cell boundary information. Therefore, I complement inter-cell boundary information by generating the inter-cell boundary labels in a self-supervised manner. Experiments demonstrated that the proposed method achieved the best performance among the conventional methods. As the third topic, I propose cell and mitosis detection methods that utilize a few manually labeled data and capture timing as a clue. For mitosis detection, I use partially labeled sequences and capture timing. Since the partial label does not contain non-mitotic region information, I generate a non-mitotic region label using capture timing. Experiments demonstrated the proposed methods could outperform conventional methods.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Deep-learning-based methods have achieved promising results in microscopy image analysis. The methods learn the feature representation in a data-driven manner and can adapt to various imaging conditions and tasks of microscopy image analysis. However, these methods require a certain amount of labeled data to train the model. The labeled data are required for each imaging condition, and unlike general images, specialized knowledge is required to make labeled data. For these reasons, collecting labels is time-consuming and labor-intensive. In this thesis, I explore label-efficient learning methods using readily available cell image characteristics as clues instead of manually labeled data. Specifically, I focused on three main topics to explore the potential utility of clues. 
As the first topic, I propose weakly supervised cell tracking methods that use nuclei positions as a clue. For cell tracking, a multi-frame cell detection network that simultaneously detects cells in successive frames is trained using cell positions. Then, the positions of the same cell in successive frames (i.e., the cell motion across the frames) are extracted from the network. Experiments demonstrated the effectiveness of the proposed methods. As the second topic, I propose a weakly supervised cell segmentation method that uses nuclei positions and cell type labels as multiple clues. These clues do not contain inter-cell boundary information. Therefore, I complement inter-cell boundary information by generating inter-cell boundary labels in a self-supervised manner. Experiments demonstrated that the proposed method achieved the best performance among the conventional methods. As the third topic, I propose cell and mitosis detection methods that utilize a small amount of manually labeled data and capture timing as a clue. For mitosis detection, I use partially labeled sequences and capture timing. Since the partial labels do not contain non-mitotic region information, I generate non-mitotic region labels using capture timing. Experiments demonstrated that the proposed methods outperform conventional methods.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Deep-learning-based methods have achieved promising results in microscopy image analysis. These methods learn feature representations in a data-driven manner and can adapt to various imaging conditions and analysis tasks. However, they require a certain amount of labeled data to train the model. Labeled data are required for each imaging condition, and unlike general images, creating them demands specialized knowledge. For these reasons, collecting labels is time-consuming and labor-intensive. In this thesis, I explore label-efficient learning methods that use readily available cell image characteristics as clues instead of manually labeled data. Specifically, I focus on three main topics to explore the potential utility of these clues. As the first topic, I propose weakly supervised cell tracking methods that use nuclei positions as a clue. For cell tracking, a multi-frame cell detection network that simultaneously detects cells in successive frames is trained using cell positions. Then, the positions of the same cell in successive frames (i.e., the cell motion across the frames) are extracted from the network. Experiments demonstrated the effectiveness of the proposed methods. As the second topic, I propose a weakly supervised cell segmentation method that uses nuclei positions and cell type labels as multiple clues. These clues do not contain inter-cell boundary information. Therefore, I complement inter-cell boundary information by generating inter-cell boundary labels in a self-supervised manner. Experiments demonstrated that the proposed method achieved the best performance among the conventional methods. As the third topic, I propose cell and mitosis detection methods that utilize a small amount of manually labeled data and capture timing as a clue. For mitosis detection, I use partially labeled sequences and capture timing. Since the partial labels do not contain non-mitotic region information, I generate non-mitotic region labels using capture timing. Experiments demonstrated that the proposed methods outperform conventional methods.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"16","bibliographic_titles":[{"bibliographic_title":"研究報告コンピュータビジョンとイメージメディア(CVIM)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2024-05-08","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"2","bibliographicVolumeNumber":"2024-CVIM-238"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":234133,"updated":"2025-01-19T09:53:28.099853+00:00","links":{},"created":"2025-01-19T01:35:47.697146+00:00"}