@techreport{oai:ipsj.ixsq.nii.ac.jp:00214101,
  author = {He, Yurun and Minematsu, Nobuaki and Saito, Daisuke},
  issue = {7},
  month = {Nov},
  note = {The performance of a speech emotion recognition (SER) system heavily relies on the deep representations learned from training samples. Recently, the transformer has exhibited outstanding properties in learning relevant representations for this task. However, experimental investigation is still needed to fuse it effectively with conventional models. In this paper, we explore several integrations of the transformer with the two most widely used deep learning models, CNN and BLSTM. Experiments on the IEMOCAP benchmark dataset demonstrate that the proposed approaches yield a promising improvement.},
  title = {Effective Integration of Transformer for Network-based Speech Emotion Recognition},
  year = {2021}
}