{"id":211693,"metadata":{"_oai":{"id":"oai:ipsj.ixsq.nii.ac.jp:00211693","sets":["1164:3206:10513:10610"]},"path":["10610"],"owner":"44499","recid":"211693","title":["A GAN Based Approach to Lip-Sync 2D Cartoon Animations without Requiring Raw Cartoon Dataset"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-06-18"},"_buckets":{"deposit":"529b9f09-059b-4804-a519-a005b4ebae22"},"_deposit":{"id":"211693","pid":{"type":"depid","value":"211693","revision_id":0},"owners":[44499],"status":"published","created_by":44499},"item_title":"A GAN Based Approach to Lip-Sync 2D Cartoon Animations without Requiring Raw Cartoon Dataset","author_link":["538246","538244","538247","538248","538243","538245"],"item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"A GAN Based Approach to Lip-Sync 2D Cartoon Animations without Requiring Raw Cartoon Dataset"},{"subitem_title":"A GAN Based Approach to Lip-Sync 2D Cartoon Animations without Requiring Raw Cartoon Dataset","subitem_title_language":"en"}]},"item_type_id":"4","publish_date":"2021-06-18","item_4_text_3":{"attribute_name":"著者所属","attribute_value_mlt":[{"subitem_text_value":"The University of Tokyo"},{"subitem_text_value":"The University of Tokyo/Currently working in AI Lab at CyberAgent, Inc."},{"subitem_text_value":"The University of Tokyo"}]},"item_4_text_4":{"attribute_name":"著者所属(英)","attribute_value_mlt":[{"subitem_text_value":"The University of Tokyo","subitem_text_language":"en"},{"subitem_text_value":"The University of Tokyo / Currently working in AI Lab at CyberAgent, Inc.","subitem_text_language":"en"},{"subitem_text_value":"The University of Tokyo","subitem_text_language":"en"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_publisher":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"情報処理学会","subitem_publisher_language":"ja"}]},"publish_status":"0","weko_shared_id":-1,"item_file_price":{"attribute_name":"Billing file","attribute_type":"file","attribute_value_mlt":[{"url":{"url":"https://ipsj.ixsq.nii.ac.jp/record/211693/files/IPSJ-CG21182001.pdf","label":"IPSJ-CG21182001.pdf"},"date":[{"dateType":"Available","dateValue":"2023-06-18"}],"format":"application/pdf","billing":["billing_file"],"filename":"IPSJ-CG21182001.pdf","filesize":[{"value":"4.4 MB"}],"mimetype":"application/pdf","priceinfo":[{"tax":["include_tax"],"price":"660","billingrole":"5"},{"tax":["include_tax"],"price":"330","billingrole":"6"},{"tax":["include_tax"],"price":"0","billingrole":"28"},{"tax":["include_tax"],"price":"0","billingrole":"44"}],"accessrole":"open_date","version_id":"73dbc8b6-b5ab-41b9-b7f5-c7a3c38504e4","displaytype":"detail","licensetype":"license_note","license_note":"Copyright (c) 2021 by the Information Processing Society of Japan"}]},"item_4_creator_5":{"attribute_name":"著者名","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Mitsuhiko, Nakamoto"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Xueting, Wang"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshihiko, Yamasaki"}],"nameIdentifiers":[{}]}]},"item_4_creator_6":{"attribute_name":"著者名(英)","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Mitsuhiko, Nakamoto","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Xueting, Wang","creatorNameLang":"en"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Toshihiko, 
Yamasaki","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_4_source_id_9":{"attribute_name":"書誌レコードID","attribute_value_mlt":[{"subitem_source_identifier":"AN10100541","subitem_source_identifier_type":"NCID"}]},"item_4_textarea_12":{"attribute_name":"Notice","attribute_value_mlt":[{"subitem_textarea_value":"SIG Technical Reports are nonrefereed and hence may later appear in any journals, conferences, symposia, etc."}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourceuri":"http://purl.org/coar/resource_type/c_18gh","resourcetype":"technical report"}]},"item_4_source_id_11":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"2188-8949","subitem_source_identifier_type":"ISSN"}]},"item_4_description_7":{"attribute_name":"論文抄録","attribute_value_mlt":[{"subitem_description":"We present a generative adversarial networks (GAN) based approach to lip-sync 2D cartoon animations. Most of the previous works have worked on lip-sync for the real people talking videos. However, lip-sync for 2D cartoon animations was rarely discussed while the traditional workflow of creating 2D cartoon animations is highly time-consuming. The main problem of automatically lip-syncing a 2D cartoon animation, especially using a deep learning approach, is the lack of datasets which consist of well lip-synced cartoon animations. Therefore, In this paper we present a GAN-based approach to achieve 2D cartoon animation lip-sync with no need of collecting raw cartoon animation datasets. Alternatively, we construct a cartoon talking video dataset by applying CartoonGAN to transform real-life speaking videos into cartoon styles. The dataset after the style transfer was used to train a lip-synchronization model, Wav2Lip. Our approach can generate natural lip-synchronized cartoon animations. We also conduct a user study and the results demonstrate the effectiveness of our approach.","subitem_description_type":"Other"}]},"item_4_description_8":{"attribute_name":"論文抄録(英)","attribute_value_mlt":[{"subitem_description":"We present a generative adversarial networks (GAN) based approach to lip-sync 2D cartoon animations. Most of the previous works have worked on lip-sync for the real people talking videos. However, lip-sync for 2D cartoon animations was rarely discussed while the traditional workflow of creating 2D cartoon animations is highly time-consuming. The main problem of automatically lip-syncing a 2D cartoon animation, especially using a deep learning approach, is the lack of datasets which consist of well lip-synced cartoon animations. Therefore, In this paper we present a GAN-based approach to achieve 2D cartoon animation lip-sync with no need of collecting raw cartoon animation datasets. Alternatively, we construct a cartoon talking video dataset by applying CartoonGAN to transform real-life speaking videos into cartoon styles. The dataset after the style transfer was used to train a lip-synchronization model, Wav2Lip. Our approach can generate natural lip-synchronized cartoon animations. 
We also conduct a user study and the results demonstrate the effectiveness of our approach.","subitem_description_type":"Other"}]},"item_4_biblio_info_10":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicPageEnd":"5","bibliographic_titles":[{"bibliographic_title":"研究報告コンピュータグラフィックスとビジュアル情報学(CG)"}],"bibliographicPageStart":"1","bibliographicIssueDates":{"bibliographicIssueDate":"2021-06-18","bibliographicIssueDateType":"Issued"},"bibliographicIssueNumber":"1","bibliographicVolumeNumber":"2021-CG-182"}]},"relation_version_is_last":true,"weko_creator_id":"44499"},"updated":"2025-01-19T17:42:54.390027+00:00","created":"2025-01-19T01:12:50.029431+00:00","links":{}}