@article{OMARA2024103016,
  title = {An AI-driven solution to prevent adversarial attacks on mobile Vehicle-to-Microgrid services},
  journal = {Simulation Modelling Practice and Theory},
  volume = {137},
  pages = {103016},
  year = {2024},
  issn = {1569-190X},
  doi = {10.1016/j.simpat.2024.103016},
  url = {https://www.sciencedirect.com/science/article/pii/S1569190X24001308},
  author = {Ahmed Omara and Burak Kantarci},
  keywords = {Vehicle-to-microgrid (V2M), Adversarial machine learning, Smart microgrids, Cybersecurity, Generative adversarial network (GAN), Evasion attack, Inference attack},
  abstract = {With the increasing integration of Artificial Intelligence (AI) in microgrid control systems, there is a risk that malicious actors may exploit vulnerabilities in machine learning algorithms to disrupt power generation and distribution. In this work, we study the potential impacts of adversarial attacks on Vehicle-to-Microgrid (V2M) services and discuss defensive countermeasures to mitigate these risks. Our analysis shows that the decentralized and adaptive nature of microgrids makes them particularly vulnerable to adversarial attacks, highlighting the need for robust security measures against such threats. We propose a framework to detect and prevent adversarial attacks on V2M services using a Generative Adversarial Network (GAN) model and a Machine Learning (ML) classifier. We focus on two adversarial attacks, namely inference and evasion attacks. We test our proposed framework under three attack scenarios to verify the robustness of our solution. As the adversary’s knowledge of a system determines the success of the executed attacks, we study four gray-box cases in which the adversary has access to different percentages of the victim’s training dataset. Moreover, we compare our proposed detection method against four benchmark detectors and evaluate its effectiveness in detecting three benchmark evasion attacks. Through simulations, we show that all benchmark detectors fail to reliably detect adversarial attacks, particularly when the attacks are intelligently augmented, achieving an Adversarial Detection Rate (ADR) of at most 60.4%. In contrast, our proposed framework outperforms the other detectors and achieves an ADR of 92.5%.}
}