@techreport{oai:ipsj.ixsq.nii.ac.jp:00234665,
  author        = {Hao, Shuting and Saito, Daisuke and Minematsu, Nobuaki},
  title         = {Enhancing Feature Integration to Improve Classification Accuracy of Similar Categories in Acoustic Scene Classification},
  institution   = {Information Processing Society of Japan},
  number        = {53},
  month         = jun,
  year          = {2024},
  note          = {This study focuses on Acoustic Scene Classification (ASC), which categorizes environmental audio streams into predefined semantic labels. We introduce a novel architecture that integrates multi-layer classifiers and direct finetuning, presenting a new perspective in ASC research. The study employs the TAU Urban Acoustic Scenes 2022 Mobile dataset for fine-tuning and validation. We utilized the SSAST model, pre-trained on the AudioSet and LibriSpeech datasets, and fine-tuned it on the TAU dataset with a unique approach to enhance ASC-specific feature learning. Our layered SSAST system achieved an accuracy of 52.17\% and an AUC of 88.66\% in ASC, marking a notable improvement over the baseline with absolute increases of 0.99\% in accuracy and 0.85\% in AUC.},
  internal-note = {Cleaned from an IPSJ OAI export: de-duplicated authors/abstract, restored Family, Given name order, moved report number from the nonstandard `issue' field to `number', escaped percent signs. `institution' inferred from the repository (ipsj.ixsq.nii.ac.jp) -- verify against the published SIG report.},
}