
%Aigaion2 BibTeX export from HES SO Valais Publications
%Saturday 02 May 2026 03:14:42 PM

@inproceedings{soyarar2025llm,
     author = {Soyarar, Ege and Aydo{\u{g}}an, Reyhan and Buzcu, Berk and Calvaresi, Davide},
   keywords = {Explanation Evaluation, Large Language Models (LLMs), recommender systems, Synthetic Data Generation, Explainable AI (XAI)},
      title = {{LLM}-based Evaluation Methodology of Explanation Strategies},
  booktitle = {Proceedings of {EXTRAAMAS} 2025},
       year = {2025},
   abstract = {As data privacy regulations, such as the EU AI Act and EU Data Act, become increasingly stringent, processing real user data for AI models like movie recommendation systems has grown more challenging. Moreover, the human-centric data collection and evaluation of Explainable AI (XAI) systems are often costly and time-consuming; making it hard to sustain. Hence, this study adopts the Synthetic Behavior Generation (SBG) approach, leveraging large language models (LLMs) to evaluate AI explanations while ensuring compliance with regulations and providing cost-effective solutions for human feedback. To assess the quality of these explanations, we utilize three different LLMs, which are fed syntactically generated user behaviors to evaluate explanations of an AI system as if they were real users. The evaluation focuses on key criteria such as convincingness, clarity, accuracy, and the impact on decision-making, facilitating a thorough assessment of explanation effectiveness. The results indicated that LLMs can deliver structured and consistent evaluations based on the provided synthetic user behavior.}
}

