@techreport{oai:ipsj.ixsq.nii.ac.jp:00069588,
  author      = {金, 泰憲 and 深山, 覚 and 西本, 卓也 and 嵯峨山, 茂樹 and Kim, Tae Hun and Fukayama, Satoru and Nishimoto, Takuya and Sagayama, Shigeki},
  title       = {単旋律と和音の確率モデルの組み合わせによるピアノ曲演奏の自動表情付け},
  institution = {{Information Processing Society of Japan (IPSJ)}},
  issue       = {2},
  month       = may,
  year        = {2010},
  note        = {楽譜情報を基に人間らしい演奏表情を自動的に生成する問題に対して,確率モデルに基づいた機械学習手法が応用されて来た.しかし演奏楽譜に多重音が含まれる場合はモデルが複雑になるため,膨大な学習データが必要,ないし計算が困難になるといった問題があった.本稿では単旋律と和音の確率モデルの組み合わせによりデータスパースネス問題を避けながら,多重音を含むピアノ曲演奏の表情を自動的に付ける手法を提案する.評価実験の結果,多重音を含むピアノ曲に対して,人間らしい表情を持った演奏が生成されることが分かった.また心理実験による主観評価では,提案手法を用いて生成した演奏表情が人間らしく,さらには音楽的に自然に聴こえることが確認された., In this paper, we present a method to generate human-like performance expression for polyphonic piano music with a combination of probabilistic models for melody and chords to avoid data sparseness problems. Probabilistic models and machine learning have been applied to solve the problem of generating human-like expressive performance given a music score. In case of polyphonic music, however, it was difficult to make a tractable model and a huge amount of training data was necessary. The results of the experiments show that the proposed method is able to generate fluctuations of performance parameters for polyphonic piano music such like human performers do. The results of subjective evaluations are also reported which indicate that the generated performance expression sounded human-like and have certain degree of musicality.},
  internal-note = {Review: romanized names were exported inverted ("Satoru, Fukayama") and mis-split ("Tae, HunKim"); corrected to "Last, First" using the Japanese-script names as ground truth. Institution inferred from the IPSJ OAI repository key -- confirm against the original SIG report. The note field holds the bilingual abstract as exported; consider moving to an abstract field if the toolchain supports it.},
}