% IPSJ SIG Technical Report (repository export cleaned: de-duplicated author
% list and abstract, fixed inverted "Last, First" name order, month macro,
% issue->number, added required institution, braced title acronyms).
@techreport{oai:ipsj.ixsq.nii.ac.jp:00227113,
  author      = {Imada, Joanna and Takahashi, Keichi and Shimomura, Yoichi and Takizawa, Hiroyuki},
  title       = {Comparison of Parallel {STL} with {C/C++} {GPU} Programming Models},
  institution = {{Information Processing Society of Japan}},
  number      = {2},
  month       = jul,
  year        = {2023},
  note        = {The C++ 17 standard introduced a set of parallel algorithms, referred to as Parallel STL, that is designed to be programmer-friendly and portable across CPU and GPU. Several studies compared the performance between GPU programming models including Parallel STL. However, the reasons behind the performance differences are not well discussed yet. This study thus investigates what causes the performance differences among GPU programming models: CUDA, Kokkos, OpenACC, OpenMP, and Parallel STL. Three benchmarks are selected to compare the models: BabelStream, Himeno benchmark, and CloverLeaf. In BabelStream, Parallel STL achieves similar performance to other models. In the Himeno benchmark, it achieves 12% higher performance than CUDA for the large problem size. However, for the largest problem size, it performs 23% worse than CUDA. Profiling reveals that Parallel STL has a low cache hit ratio compared to other models in the larger problem sizes.},
}