@techreport{oai:ipsj.ixsq.nii.ac.jp:00227777,
  author      = {Wang, Jiayun and Maeda, Akira and Kawagoe, Kyoji},
  title       = {{MultArtRec}: A Multimodal Neural Topic Model for Integrating Image and Textual Features in Artwork Recommendation},
  institution = {{Information Processing Society of Japan}},
  number      = {5},
  month       = sep,
  year        = {2023},
  abstract    = {Multimodal models have demonstrated remarkable success in the domains of image processing and natural language processing. Recently, their significance has also been acknowledged within recommendation systems. In many cases, the recommendation systems perform better when utilizing multimodal features to construct item embeddings, rather than utilizing individual text or image models. Consequently, research in this field has shifted its focus towards effectively combining multimodal features and accurately embedding items. Our study specifically concentrates on artwork recommendation. In artwork recommendation, the textual data such as titles and descriptions notably influence users' preferences. Our research approach involves constructing multimodal embeddings of artworks by integrating both images and titles as a fundamental step.},
  internal-note = {Cleaned from an OAI/IPSJ auto-export: deduplicated the twice-repeated author list and twice-repeated abstract, moved the abstract out of `note`, normalized name order to Last, First (verify against the published paper), changed non-standard `issue` to `number`, and used the `sep` month macro. `institution` inferred from the OAI key domain (ipsj.ixsq.nii.ac.jp) -- confirm.},
}