@techreport{oai:ipsj.ixsq.nii.ac.jp:00231081,
  author      = {Wu, Du and Chen, Peng and Endo, Toshio and Matsuoka, Satoshi and Wahib, Mohamed},
  title       = {Optimizing Matrix Multiplication on {Arm} Architectures},
  institution = {Information Processing Society of Japan},
  number      = {3},
  month       = nov,
  year        = {2023},
  note        = {This paper presents armGEMM, a novel approach aimed at enhancing the performance of irregular General Matrix Multiplication (GEMM) operations on popular Arm architectures. Designed to support a wide range of Arm processors, from edge devices to high-performance CPUs. armGEMM optimizes GEMM by intelligently combining fragments of auto-generated micro-kernels, incorporating hand-written optimizations to improve computational efficiency. We optimize the kernel pipeline by tuning the register reuse and the data load/store overlapping. In addition, we use a dynamic tiling scheme to generate balanced tile shapes, based on the shapes of the matrices. We build armGEMM on top of the TVM framework where our dynamic tiling scheme prunes the search space for TVM to identify the optimal combination of parameters for code optimization. Evaluations on five different classes of Arm chips demonstrate the advantages of armGEMM. In most cases involving irregular matrices, armGEMM outperforms state-of-the-art implementations like LIBXSMM, LibShalom, OpenBLAS, and Eigen.},
  internal-note = {Cleanup of an IPSJ OAI export: de-duplicated the author list and the abstract in `note`, normalised names to "Family, Given" order, replaced nonstandard `issue` with `number`, switched to the `nov` month macro, and braced {Arm} in the title. `institution` inferred from the IPSJ repository domain in the citation key -- TODO confirm against the published report.},
}