@techreport{oai:ipsj.ixsq.nii.ac.jp:00216101,
  author        = {Wu, Man and Kan, Yirong and Nguyen, Vantinh and Zhang, Renyuan and Nakashima, Yasuhiko},
  title         = {三元ディープスパイクニューラルネットワーク},
  institution   = {Information Processing Society of Japan (IPSJ)},
  number        = {14},
  month         = jan,
  year          = {2022},
  note          = {The feasibility of ternarizing spiking neural networks (SNNs) is studied in this work toward trading a slight accuracy for significantly reducing computational and memory costs. By leveraging a parametric integrate-and-fire (PIF) neuron with learnable threshold and spike-timing-dependent backpropagation (STDB) learning rule, the ternarized spiking neural networks (TSNNs) enable directly trained with low latency and negligible loss of accuracy. To this end, a paradigm for binary-ternary dot-product operation is realized during the inference; therefore, the TSNNs achieve up to 16x model compression in contrast to the full precision SNNs. Moreover, to mitigate the accuracy gap, an optimized TSNN with a spiking ResNet structure is introduced into TSNN. For proof-of-concept, we evaluate the prototype of proposed TSNN on N-MNIST, CIFAR-10, CIFAR-100, which achieve 98.43%, 89.07%, 65.24% accuracy with 4 timesteps, respectively. On the basis of this prototype, the optimized TSNN improves by 0.84% and 0.51% over CIFAR-10 and CIFAR-100 datasets, respectively.},
  internal-note = {Cleaned from OAI export: author list was duplicated (kanji + inverted romanization) and the abstract appeared twice in `note`. `institution` inferred from the repository domain (ipsj.ixsq.nii.ac.jp) -- confirm against the published report. `issue` renamed to the standard `number` field.},
}