@article{oai:ipsj.ixsq.nii.ac.jp:02001757,
  author  = {Zhang, Hongkuan and Takeda, Koichi and Sasano, Ryohei},
  title   = {Improving Behavior-aware Driving Video Captioning through Better Use of In-vehicle Sensors and References},
  journal = {情報処理学会論文誌},
  volume  = {66},
  issue   = {4},
  month   = {Apr},
  year    = {2025},
  note    = {Driving video captioning aims to automatically generate descriptions for videos from driving recorders. Driving video captions are generally required to describe first-person driving behaviors, which implicitly characterize the driving videos but are challenging to anchor to concrete visual evidence. To generate captions with better driving behavior descriptions, existing work has introduced behavior-related in-vehicle sensors into a captioning model for behavior-aware captioning. However, a better method for fusing the sensor modality with visual modalities has not been fully investigated, and the accuracy and informativeness of generated behavior-related descriptions remain unsatisfactory. In this paper, we compare three modality fusion methods using a Transformer-based video captioning model and propose two training strategies to improve both the accuracy and the informativeness of generated behavior descriptions: 1) jointly training the captioning model with multilabel behavior classification, explicitly using annotated behavior tags; and 2) weighted training, which assigns weights to reference captions (references) according to the informativeness of their behavior descriptions. Experiments on a Japanese driving video captioning dataset, City Traffic (CT), show the efficacy and positive interaction of the proposed training strategies. Moreover, larger improvements on out-of-distribution data demonstrate improved generalization ability. This is a preprint of an article intended for publication in the Journal of Information Processing (JIP). This preprint should not be cited. This article should be cited as: Journal of Information Processing Vol.33 (2025) (online), DOI: http://dx.doi.org/10.2197/ipsjjip.33.284}
}