@techreport{oai:ipsj.ixsq.nii.ac.jp:00241663,
 author = {Baihaqi, Muhammad Yeza and García Contreras, Angel and Kawano, Seiya and Yoshino, Koichiro},
 issue = {43},
 month = {Dec},
 note = {Human evaluation plays a critical role in dialogue systems research, especially in non-task-oriented systems such as rapport-building dialogue systems. Current evaluations often rely on Likert scales to assess user experience, but this method introduces challenges such as inconsistent scale perception, inefficiency, and central tendency bias. Moreover, it is difficult to compare the agent's performance across multiple criteria due to the problem of uneven scoring interpretations by participants on the Likert scale. On the other hand, pairwise comparison emphasizes direct item-to-item evaluation based on defined criteria, producing scores that more closely align with participants' preferences and minimizing biases. This paper compares an evaluation framework for rapport-building dialogue systems using pairwise comparison with a conventional Likert scale system. These approaches are tested through dialogue experiments involving six participants and four dialogue systems embedded in a conversational robot: CommA, CommI, CommO, and CommE, to measure human-agent rapport. Our experimental results indicated that the pairwise comparison method better represented systems' overall performance compared to the Likert scale. It also demonstrated lower variability, higher reliability, and a shorter completion time.},
 title = {Comparing Likert Scale and Pairwise Comparison for Human Evaluation in Rapport-Building Dialogue Systems},
 year = {2024}
}