@techreport{oai:ipsj.ixsq.nii.ac.jp:00231943,
  author        = {Miao, Lun and Ishikawa, Ryoichi and Oishi, Takeshi},
  title         = {{SWIN-RIND}: Edge Detection for Reflectance, Illumination, Normal and Depth Discontinuity with {Swin} Transformer},
  institution   = {Information Processing Society of Japan},
  number        = {21},
  month         = jan,
  year          = {2024},
  abstract      = {Edges are caused by the discontinuities in Surface-Reflectance, Illumination, Surface-Normal, and Depth (RIND). However, despite general edge detection being studied for decades, research on specific edges has not been extensively explored. In this work, we propose a transformer-based approach called SWIN-RIND that can detect the four types of edges from a single image. Recently, attention-based approaches have performed well in general edge detection and can be expected to work for RIND edges as well. Our model uses Swin Transformer as an encoder and a top-down and bottom-up multi-level feature aggregation block as a decoder. The encoder extracts cues at different levels, which the decoder integrates into shared features containing rich contextual information. We then predict each specific edge type through independent decision heads. To train and evaluate our model, we use a public benchmark called BSDS-RIND, which is based on BSDS (Berkeley Segmentation Data Set) and contains annotations of four types of edges. In our experiments, we confirmed that SWIN-RIND outperforms state-of-the-art methods.},
  internal-note = {Cleaned OAI export: deduplicated authors, fixed name order to Last-First, moved duplicated abstract out of note, issue->number, month macro. NOTE(review): institution inferred from the IPSJ OAI identifier -- confirm against the original SIG technical report.},
}