@techreport{oai:ipsj.ixsq.nii.ac.jp:00237130,
  author        = {Chi, Xu and Xiang, Li and Allam, Shehata and Yasushi, Yagi},
  title         = {The {OU-ISIR} Multimodal Biometric Database and Its Performance Evaluation},
  institution   = {Information Processing Society of Japan},
  number        = {9},
  month         = jul,
  year          = {2024},
  abstract      = {In this paper, we describe a new multimodal biometric database named ``OU-ISIR Multimodal Biometric Database''. An early version of this database consists of more than 100 subjects, which will increase to around 400 subjects in the future. Eleven biometric modalities are provided in this database, which, to the best of our knowledge, is the largest number of modalities among existing multimodal databases. Specifically, for each subject, we collected his/her iris, palm veins, 2D face images, signature images, gait videos, and speech data, which are typically included in existing multimodal databases. Additionally, some modalities not commonly considered in previous datasets are also included, that is, full-body images, online signature time series data, brain signals, inertial data (e.g., acceleration), and health data (e.g., heartbeat). We provide baseline results by evaluating benchmark algorithms on some individual modalities, and discuss possible future works using this database. We believe this database can facilitate future research on person authentication using unimodal, multimodal, and even cross-modal approaches, as well as research on brain signal and health status analysis.},
  internal-note = {Cleaned OAI/IPSJ auto-export: deduplicated author list and abstract, moved abstract out of note. Name order (given/family split) taken verbatim from the export -- verify against the published paper. Institution inferred from the repository key (ipsj) -- confirm the exact SIG report series.},
}