@inproceedings{hostetter_abdelshiheed_barnes_chi_2023a,
  place={London, UK},
  title={A Self-Organizing Neuro-Fuzzy Q-Network: Systematic Design with Offline Hybrid Learning},
  url={https://www.southampton.ac.uk/~eg/AAMAS2023/pdfs/p1248.pdf},
  booktitle={Proceedings of the 22nd International Conference on Autonomous Agents and Multiagent Systems},
  author={Hostetter, J.W. and Abdelshiheed, M. and Barnes, T. and Chi, M.},
  year={2023},
  pages={1248–1257}
}

@misc{ausin_abdelshiheed_barnes_chi_2023,
  title={A Unified Batch Hierarchical Reinforcement Learning Framework for Pedagogical Policy Induction with Deep Bisimulation Metrics},
  ISBN={9783031363351 9783031363368},
  ISSN={1865-0929 1865-0937},
  url={http://dx.doi.org/10.1007/978-3-031-36336-8_93},
  DOI={10.1007/978-3-031-36336-8_93},
  abstractNote={Intelligent Tutoring Systems (ITSs) leverage AI to adapt to individual students, and employ pedagogical policies to decide what instructional action to take next. A number of researchers applied Reinforcement Learning (RL) and Deep RL (DRL) to induce effective pedagogical policies. Most prior work, however, has been developed independently for a specific ITS and cannot directly be applied to another. In this work, we propose a Multi-Task Learning framework that combines Deep BIsimulation Metrics and DRL, named MTL-BIM, to induce a unified pedagogical policy for two different ITSs across different domains: logic and probability. Based on empirical classroom results, our unified RL policy performed significantly better than the expert-crafted policies and independently induced DQN policies on both ITSs.},
  journal={Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
  publisher={Springer Nature Switzerland},
  author={Ausin, Markel Sanz and Abdelshiheed, Mark and Barnes, Tiffany and Chi, Min},
  year={2023},
  pages={599–605}
}

@inproceedings{abdelshiheed_hostetter_barnes_chi_2023a,
  title={Bridging Declarative, Procedural, and Conditional Metacognitive Knowledge Gap Using Deep Reinforcement Learning},
  DOI={10.48550/arXiv.2304.11739},
  abstractNote={In deductive domains, three metacognitive knowledge types in ascending order are declarative, procedural, and conditional learning. This work leverages Deep Reinforcement Learning (DRL) in providing adaptive metacognitive interventions to bridge the gap between the three knowledge types and prepare students for future learning across Intelligent Tutoring Systems (ITSs). Students received these interventions that taught how and when to use a backward-chaining (BC) strategy on a logic tutor that supports a default forward-chaining strategy. Six weeks later, we trained students on a probability tutor that only supports BC without interventions. Our results show that on both ITSs, DRL bridged the metacognitive knowledge gap between students and significantly improved their learning performance over their control peers. Furthermore, the DRL policy adapted to the metacognitive development on the logic tutor across declarative, procedural, and conditional students, causing their strategic decisions to be more autonomous.},
  booktitle={Proceedings of the 45th Annual Meeting of the Cognitive Science Society (CogSci’23)},
  author={Abdelshiheed, M. and Hostetter, J.W. and Barnes, T. and Chi, M.},
  year={2023},
  pages={333–340}
}
@article{abdelshiheed_barnes_chi_2023,
  title={How and When: The Impact of Metacognitive Knowledge Instruction and Motivation on Transfer Across Intelligent Tutoring Systems},
  ISSN={1560-4292 1560-4306},
  url={http://dx.doi.org/10.1007/s40593-023-00371-0},
  DOI={10.1007/s40593-023-00371-0},
  journal={International Journal of Artificial Intelligence in Education},
  publisher={Springer Science and Business Media LLC},
  author={Abdelshiheed, Mark and Barnes, Tiffany and Chi, Min},
  year={2023},
  month={Sep}
}

@article{shabrina_mostafavi_abdelshiheed_chi_barnes_2023,
  title={Investigating the Impact of Backward Strategy Learning in a Logic Tutor: Aiding Subgoal Learning Towards Improved Problem Solving},
  ISSN={1560-4292 1560-4306},
  url={http://dx.doi.org/10.1007/s40593-023-00338-1},
  DOI={10.1007/s40593-023-00338-1},
  journal={International Journal of Artificial Intelligence in Education},
  publisher={Springer Science and Business Media LLC},
  author={Shabrina, Preya and Mostafavi, Behrooz and Abdelshiheed, Mark and Chi, Min and Barnes, Tiffany},
  year={2023},
  month={Aug}
}

@misc{abdelshiheed_hostetter_barnes_chi_2023b,
  title={Leveraging Deep Reinforcement Learning for Metacognitive Interventions Across Intelligent Tutoring Systems},
  ISBN={9783031362712 9783031362729},
  ISSN={0302-9743 1611-3349},
  url={http://dx.doi.org/10.1007/978-3-031-36272-9_24},
  DOI={10.1007/978-3-031-36272-9_24},
  abstractNote={This work compares two approaches to provide metacognitive interventions and their impact on preparing students for future learning across Intelligent Tutoring Systems (ITSs). In two consecutive semesters, we conducted two classroom experiments: Exp. 1 used a classic artificial intelligence approach to classify students into different metacognitive groups and provide static interventions based on their classified groups. In Exp. 2, we leveraged Deep Reinforcement Learning (DRL) to provide adaptive interventions that consider the dynamic changes in the student’s metacognitive levels. In both experiments, students received these interventions that taught how and when to use a backward-chaining (BC) strategy on a logic tutor that supports a default forward-chaining strategy. Six weeks later, we trained students on a probability tutor that only supports BC without interventions. Our results show that adaptive DRL-based interventions closed the metacognitive skills gap between students. In contrast, static classifier-based interventions only benefited a subset of students who knew how to use BC in advance. Additionally, our DRL agent prepared the experimental students for future learning by significantly surpassing their control peers on both ITSs.},
  journal={Lecture Notes in Computer Science},
  publisher={Springer Nature Switzerland},
  author={Abdelshiheed, Mark and Hostetter, John Wesley and Barnes, Tiffany and Chi, Min},
  year={2023},
  pages={291–303}
}

@misc{hostetter_abdelshiheed_barnes_chi_2023b,
  title={Leveraging Fuzzy Logic Towards More Explainable Reinforcement Learning-Induced Pedagogical Policies on Intelligent Tutoring Systems},
  ISSN={1544-5615},
  url={http://dx.doi.org/10.1109/FUZZ52849.2023.10309741},
  DOI={10.1109/FUZZ52849.2023.10309741},
  abstractNote={Deep Reinforcement Learning (Deep RL) has revolutionized the field of Intelligent Tutoring Systems by providing effective pedagogical policies. However, the “black box” nature of Deep RL models makes it challenging to understand these policies. This study tackles this challenge by applying fuzzy logic to distill knowledge from Deep RL-induced policies into interpretable IF-THEN Fuzzy Logic Controller (FLC) rules. Our experiments show that these FLC policies significantly outperform expert policy and student decisions, demonstrating the effectiveness of our approach. We propose a Temporal Granule Pattern (TGP) mining algorithm to increase the FLC rules' interpretability further. This work highlights the potential of fuzzy logic and TGP analysis to enhance understanding of Deep RL-induced pedagogical policies.},
  journal={2023 IEEE International Conference on Fuzzy Systems (FUZZ)},
  publisher={IEEE},
  author={Hostetter, John Wesley and Abdelshiheed, Mark and Barnes, Tiffany and Chi, Min},
  year={2023},
  month={Aug}
}
@inproceedings{hostetter_conati_yang_abdelshiheed_barnes_chi_2023,
  place={Germany},
  title={XAI to Increase the Effectiveness of an Intelligent Pedagogical Agent},
  DOI={10.1145/3570945.3607301},
  abstractNote={We explore eXplainable AI (XAI) to enhance user experience and understand the value of explanations in AI-driven pedagogical decisions within an Intelligent Pedagogical Agent (IPA). Our real-time and personalized explanations cater to students' attitudes to promote learning. In our empirical study, we evaluate the effectiveness of personalized explanations by comparing three versions of the IPA: (1) personalized explanations and suggestions, (2) suggestions but no explanations, and (3) no suggestions. Our results show the IPA with personalized explanations significantly improves students' learning outcomes compared to the other versions.},
  booktitle={Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents (IVA’23)},
  author={Hostetter, J.W. and Conati, C. and Yang, X. and Abdelshiheed, M. and Barnes, T. and Chi, M.},
  year={2023}
}

@inbook{abdelshiheed_maniktala_barnes_chi_2022,
  place={Orlando, FL},
  title={Assessing Competency Using Metacognition and Motivation: The Role of Time-Awareness in Preparation for Future Learning},
  volume={9},
  ISBN={9780997725810},
  booktitle={Design Recommendations for Intelligent Tutoring Systems: Volume 9 Competency-Based Scenario Design},
  publisher={US Army Combat Capabilities Development Command - Soldier Center},
  author={Abdelshiheed, M. and Maniktala, M. and Barnes, T. and Chi, M.},
  editor={Hampton, A.J. and Sinatra, A. and Graesser, A.C. and Goldberg, B. and Johnston, J.H. and Hu, X.},
  year={2022},
  pages={121–131}
}

@misc{abdelshiheed_hostetter_yang_barnes_chi_2022,
  title={Mixing Backward- with Forward-Chaining for Metacognitive Skill Acquisition and Transfer},
  volume={13355},
  ISBN={9783031116438 9783031116445},
  ISSN={0302-9743 1611-3349},
  url={http://dx.doi.org/10.1007/978-3-031-11644-5_47},
  DOI={10.1007/978-3-031-11644-5_47},
  abstractNote={Metacognitive skills have been commonly associated with preparation for future learning in deductive domains. Many researchers have regarded strategy- and time-awareness as two metacognitive skills that address how and when to use a problem-solving strategy, respectively. It was shown that students who are both strategy- and time-aware (StrTime) outperformed their nonStrTime peers across deductive domains. In this work, students were trained on a logic tutor that supports a default forward-chaining (FC) and a backward-chaining (BC) strategy. We investigated the impact of mixing BC with FC on teaching strategy- and time-awareness for nonStrTime students. During the logic instruction, the experimental students (Exp) were provided with two BC worked examples and some problems in BC to practice how and when to use BC. Meanwhile, their control (Ctrl) and StrTime peers received no such intervention. Six weeks later, all students went through a probability tutor that only supports BC to evaluate whether the acquired metacognitive skills are transferred from logic. Our results show that on both tutors, Exp outperformed Ctrl and caught up with StrTime.},
  journal={Lecture Notes in Computer Science},
  publisher={Springer International Publishing},
  author={Abdelshiheed, Mark and Hostetter, John Wesley and Yang, Xi and Barnes, Tiffany and Chi, Min},
  year={2022},
  pages={546–552}
}
@inproceedings{abdelshiheed_hostetter_shabrina_barnes_chi_2022,
  title={The Power of Nudging: Exploring Three Interventions for Metacognitive Skills Instruction across Intelligent Tutoring Systems},
  DOI={10.48550/arXiv.2303.11965},
  abstractNote={Deductive domains are typical of many cognitive skills in that no single problem-solving strategy is always optimal for solving all problems. It was shown that students who know how and when to use each strategy (StrTime) outperformed those who know neither and stick to the default strategy (Default). In this work, students were trained on a logic tutor that supports a default forward-chaining and a backward-chaining (BC) strategy, then a probability tutor that only supports BC. We investigated three types of interventions on teaching the Default students how and when to use which strategy on the logic tutor: Example, Nudge and Presented. Meanwhile, StrTime students received no interventions. Overall, our results show that Nudge outperformed their Default peers and caught up with StrTime on both tutors.},
  booktitle={Proceedings of the 44th Annual Meeting of the Cognitive Science Society (CogSci’22)},
  author={Abdelshiheed, M. and Hostetter, J.W. and Shabrina, P. and Barnes, T. and Chi, M.},
  year={2022},
  pages={541–548}
}

@misc{ju_zhou_abdelshiheed_barnes_chi_2021,
  title={Evaluating Critical Reinforcement Learning Framework in the Field},
  volume={12748},
  ISBN={9783030782917 9783030782924},
  ISSN={0302-9743 1611-3349},
  url={http://dx.doi.org/10.1007/978-3-030-78292-4_18},
  DOI={10.1007/978-3-030-78292-4_18},
  abstractNote={Reinforcement Learning (RL) is learning what action to take next by mapping situations to actions so as to maximize cumulative rewards. In recent years RL has achieved great success in inducing effective pedagogical policies for various interactive e-learning environments. However, it is often prohibitive to identify the critical pedagogical decisions that actually contribute to desirable learning outcomes. In this work, by utilizing the RL framework we defined critical decisions to be those states in which the agent has to take the optimal actions, and subsequently, the Critical policy as carrying out optimal actions in the critical states while acting randomly in others. We proposed a general Critical-RL framework for identifying critical decisions and inducing a Critical policy. The effectiveness of our Critical-RL framework is empirically evaluated from two perspectives: whether optimal actions must be carried out in critical states (the necessary hypothesis) and whether only carrying out optimal actions in critical states is as effective as a fully-executed RL policy (the sufficient hypothesis). Our results confirmed both hypotheses.},
  journal={Lecture Notes in Computer Science},
  publisher={Springer International Publishing},
  author={Ju, Song and Zhou, Guojing and Abdelshiheed, Mark and Barnes, Tiffany and Chi, Min},
  year={2021},
  pages={215–227}
}
@inproceedings{abdelshiheed_maniktala_ju_jain_barnes_chi_2021,
  title={Preparing Unprepared Students for Future Learning},
  booktitle={Proceedings of the 43rd Annual Meeting of the Cognitive Science Society (CogSci’21)},
  author={Abdelshiheed, M. and Maniktala, M. and Ju, S. and Jain, A. and Barnes, T. and Chi, M.},
  year={2021},
  pages={2547–2553}
}

@inproceedings{abdelshiheed_zhou_maniktala_barnes_chi_2020,
  title={Metacognition and Motivation: The Role of Time-Awareness in Preparation for Future Learning},
  DOI={10.48550/arXiv.2303.13541},
  abstractNote={In this work, we investigate how two factors, metacognitive skills and motivation, would impact student learning across domains. More specifically, our primary goal is to identify the critical, yet robust, interaction patterns of these two factors that would contribute to students' performance in learning logic first and then their performance on a subsequent new domain, probability. We are concerned with two types of metacognitive skills: strategy-awareness and time-awareness, that is, which problem-solving strategy to use and when to use it. Our data were collected from 495 participants across three consecutive semesters, and our results show that the only students who consistently outperform their peers across both domains are those who are not only highly motivated but also strategy-aware and time-aware.},
  booktitle={Proceedings of the 42nd Annual Meeting of the Cognitive Science Society (CogSci’20)},
  author={Abdelshiheed, M. and Zhou, G. and Maniktala, M. and Barnes, T. and Chi, M.},
  year={2020},
  pages={945–951}
}