@article{oai:kyutech.repo.nii.ac.jp:00007289,
  author   = {Matsuo, Yuki and Takemoto, Kazuhiro and 竹本, 和広},
  title    = {Backdoor Attacks to Deep Neural Network-Based System for {COVID-19} Detection from Chest {X-ray} Images},
  journal  = {Applied Sciences},
  volume   = {11},
  number   = {20},
  pages    = {9556-1--9556-10},
  month    = oct,
  year     = {2021},
  abstract = {Open-source deep neural networks (DNNs) for medical imaging are significant in emergent situations, such as during the pandemic of the 2019 novel coronavirus disease (COVID-19), since they accelerate the development of high-performance DNN-based systems. However, adversarial attacks are not negligible during open-source development. Since DNNs are used as computer-aided systems for COVID-19 screening from radiography images, we investigated the vulnerability of the COVID-Net model, a representative open-source DNN for COVID-19 detection from chest X-ray images to backdoor attacks that modify DNN models and cause their misclassification when a specific trigger input is added. The results showed that backdoors for both non-targeted attacks, for which DNNs classify inputs into incorrect labels, and targeted attacks, for which DNNs classify inputs into a specific target class, could be established in the COVID-Net model using a small trigger and small fraction of training data. Moreover, the backdoors were effective for models fine-tuned from the backdoored COVID-Net models, although the performance of non-targeted attacks was limited. This indicated that backdoored models could be spread via fine-tuning (thereby becoming a significant security threat). The findings showed that emphasis is required on open-source development and practical applications of DNNs for COVID-19 detection.},
  yomi     = {タケモト, カズヒロ},
}