@techreport{oai:ipsj.ixsq.nii.ac.jp:00225050,
  author        = {Koyasu, Izuho and Raymond, Rudy and Imai, Hiroshi},
  title         = {Distributed Coordinate Descent Algorithm for Variational Quantum Classification},
  institution   = {{Information Processing Society of Japan}},
  number        = {16},
  month         = mar,
  year          = {2023},
  note          = {Abstract: Quantum Machine Learning (QML) is one of the hottest areas in
                   near-term quantum computing. Two popular methods in QML are kernel methods
                   and variational methods. Variational methods, which consist of parametrized
                   quantum circuits (PQCs) to encode data and define classifiers, work faster
                   in theory (i.e., {$O(N)$} to learn from N training examples) than kernel
                   methods, which use quantum circuits to compute {$O(N^2)$} elements of kernel
                   matrices. However, in practice when dealing with large N, it is necessary to
                   devise ways to speed up variational methods due to the slow quantum gates.
                   In this work, we propose parallelization of training variational quantum
                   classifiers to utilize the availability of many quantum devices with dozens
                   of qubits. In contrast to existing parallelization of variational methods
                   with gradient-based algorithms, we develop a novel distributed mechanism of
                   coordinate descent algorithm to optimize parametrized gates of variational
                   quantum circuits. There are several gradient-free methods to optimize PQCs
                   that have been shown to converge faster. Here, by focusing on the so-called
                   Free-axis selection (Fraxis) method, we further show how the gradient-free
                   methods can be parallelized, and demonstrate their efficacies by running the
                   algorithm on both simulators and IBM Quantum devices. We confirm the
                   proposed algorithm not only achieves high classification accuracy but also
                   gains speedup that grows linearly with the degree of parallelization.},
  internal-note = {Cleaned auto-export: removed duplicated author list and duplicated abstract
                   in note; normalized month macro; renamed issue -> number. Name order
                   corrected to Family, Given (Koyasu / Raymond / Imai are the surnames) --
                   confirm against the IPSJ record. Institution inferred from the OAI key
                   (ipsj.ixsq.nii.ac.jp) -- verify.},
}