{"created":"2025-01-19T01:28:22.018797+00:00","updated":"2025-01-19T11:35:31.876795+00:00","metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00229236","sets":["1164:4619:11188:11420"]},"path":["11420"],"owner":"44499","recid":"229236","title":["Generalizable Novel-view Synthesis of Full-body Human from Sparse Input"],"pubdate":{"attribute_name":"公開日","attribute_value":"2023-11-09"},"_buckets":{"deposit":"c0d0596a-fb6c-4b69-b93a-12064b13f59e"},"_deposit":{"id":"229236","pid":{"type":"depid","value":"229236","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"Generalizable Novel-view Synthesis of Full-body Human from Sparse Input","author_link":["616088","616087","616089","616086","616091","616090"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Generalizable Novel-view Synthesis of Full-body Human from Sparse Input"},{"subitem_title":"Generalizable Novel-view Synthesis of Full-body Human from Sparse Input","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2023-11-09","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"Presently with University of Tsukuba"},{"subitem_text_value":"Presently with University of Tsukuba"},{"subitem_text_value":"Presently with University of Tsukuba"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"Presently with University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"Presently with University of Tsukuba","subitem_text_language":"en"},{"subitem_text_value":"Presently with University of Tsukuba","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/229236/files/IPSJ-CVIM23235016.pdf","label":"IPSJ-CVIM23235016.pdf"},"date":[{"dateType":"Available","dateValue":"2025-11-09"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CVIM23235016.pdf","filesize":[{"value":"5.2 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"20"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"1a3b709e-495a-4643-8d22-3113aa6a79fa","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2023 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zhaorong, Wang"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihiro, Kanamori"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yuki, Endo"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zhaorong, Wang","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yoshihiro, Kanamori","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Yuki, 
Endo","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AA11131797","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8701","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"Neural Radiance Fields (NeRF) have significantly advanced the field of novel view synthesis, and its application to full-body humans (or human NeRF) has been deemed as promising to enable telepresence. Generalizable human NeRF can avoid lengthy re-training for each human target but, if only sparse views are given, suffers from blurry outputs with artifacts due to insufficient visible samples. To better handle such a sparse view setting, we enhance the quality of appearance particularly in the regions completely occluded in any input views. We first condense sampling rays by omitting empty spaces via a parametric body fitting, leading improved appearance. We then specify the completely occluded regions and inpaint them to remove artifacts. Our method demonstrates improvements in quantitative evaluations compared to the baseline method. Qualitative results also exhibit higher fidelity, fewer artifacts, and a more natural clothing appearance.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"Neural Radiance Fields (NeRF) have significantly advanced the field of novel view synthesis, and its application to full-body humans (or human NeRF) has been deemed as promising to enable telepresence. Generalizable human NeRF can avoid lengthy re-training for each human target but, if only sparse views are given, suffers from blurry outputs with artifacts due to insufficient visible samples. To better handle such a sparse view setting, we enhance the quality of appearance particularly in the regions completely occluded in any input views. We first condense sampling rays by omitting empty spaces via a parametric body fitting, leading improved appearance. We then specify the completely occluded regions and inpaint them to remove artifacts. Our method demonstrates improvements in quantitative evaluations compared to the baseline method. Qualitative results also exhibit higher fidelity, fewer artifacts, and a more natural clothing appearance.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"6","bibliographic_titles":[{"bibliographic_title":"研究報告コンピュータビジョンとイメージメディア(CVIM)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2023-11-09","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"16","bibliographicVolumeNumber":"2023-CVIM-235"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"id":229236,"links":{}}