@article{shabrina_mostafavi_abdelshiheed_chi_barnes_2023,
  author    = {Shabrina, Preya and Mostafavi, Behrooz and Abdelshiheed, Mark and Chi, Min and Barnes, Tiffany},
  title     = {Investigating the Impact of Backward Strategy Learning in a Logic Tutor: Aiding Subgoal Learning Towards Improved Problem Solving},
  journal   = {International Journal of Artificial Intelligence in Education},
  publisher = {Springer Science and Business Media LLC},
  volume    = {8},
  year      = {2023},
  month     = aug,
  issn      = {1560-4292, 1560-4306},
  doi       = {10.1007/s40593-023-00338-1},
  abstract  = {Learning to derive subgoals reduces the gap between experts and students and makes students prepared for future problem solving. Researchers have explored subgoal-labeled instructional materials in traditional problem solving and within tutoring systems to help novices learn to subgoal. However, only a little research is found on problem-solving strategies in relationship with subgoal learning. Also, these strategies are under-explored within computer-based tutors and learning environments. The backward problem-solving strategy is closely related to the process of subgoaling, where problem solving iteratively refines the goal into a new subgoal to reduce difficulty. In this paper, we explore a training strategy for backward strategy learning within an intelligent logic tutor that teaches logic-proof construction. The training session involved backward worked examples (BWE) and problem solving (BPS) to help students learn backward strategy towards improving their subgoaling and problem-solving skills. To evaluate the training strategy, we analyzed students' 1) experience with and engagement in learning backward strategy, 2) performance and 3) proof construction approaches in new problems that they solved independently without tutor help after each level of training and in posttest. Our results showed that, when new problems were given to solve without any tutor help, students who were trained with both BWE and BPS outperformed students who received none of the treatment or only BWE during training. Additionally, students trained with both BWE and BPS derived subgoals during proof construction with significantly higher efficiency than the other two groups.},
}

@inproceedings{shen_ausin_mostafavi_chi_2018,
  author    = {Shen, Shitian and Ausin, Markel Sanz and Mostafavi, Behrooz and Chi, Min},
  title     = {Improving Learning \& Reducing Time: A Constrained Action-Based Reinforcement Learning Approach},
  booktitle = {Proceedings of the 26th Conference on User Modeling, Adaptation and Personalization ({UMAP}'18)},
  year      = {2018},
  pages     = {43--51},
  doi       = {10.1145/3209219.3209232},
  abstract  = {Constrained action-based decision-making is one of the most challenging decision-making problems. It refers to a scenario where an agent takes action in an environment not only to maximize the expected cumulative reward but where it is subject to certain action-based constraints; for example, an upper limit on the total number of certain actions being carried out. In this work, we construct a general data-driven framework called Constrained Action-based Partially Observable Markov Decision Process (CAPOMDP) to induce effective pedagogical policies. Specifically, we induce two types of policies: CAPOMDP-LG using learning gain as reward with the goal of improving students' learning performance, and CAPOMDP-Time using time as reward for reducing students' time on task. The effectiveness of CAPOMDP-LG is compared against a random yet reasonable policy and the effectiveness of CAPOMDP-Time is compared against both a Deep Reinforcement Learning induced policy and a random policy. Empirical results show that there is an Aptitude-Treatment Interaction effect: students are split into High vs. Low based on their incoming competence; while no significant difference is found among the High incoming competence groups, for the Low groups, students following CAPOMDP-Time indeed spent significantly less time than those using the two baseline policies and students following CAPOMDP-LG significantly outperform their peers on both learning gain and learning efficiency.},
}

@inproceedings{shen_lin_mostafavi_barnes_chi_2016,
  author    = {Shen, S. T. and Lin, C. and Mostafavi, B. and Barnes, T. and Chi, M.},
  title     = {An Analysis of Feature Selection and Reward Function for Model-Based Reinforcement Learning},
  booktitle = {Intelligent Tutoring Systems, {ITS} 2016},
  volume    = {9684},
  year      = {2016},
  pages     = {504--505},
}

@inproceedings{liu_mostafavi_barnes_2016,
  author    = {Liu, Zhongxiu and Mostafavi, Behrooz and Barnes, Tiffany},
  title     = {Combining Worked Examples and Problem Solving in a Data-Driven Logic Tutor},
  booktitle = {Intelligent Tutoring Systems, {ITS} 2016},
  volume    = {9684},
  year      = {2016},
  pages     = {347--353},
  isbn      = {978-3-319-39582-1},
  issn      = {1611-3349},
  doi       = {10.1007/978-3-319-39583-8_40},
  abstract  = {Previous research has shown that worked examples can increase learning efficiency during computer-aided instruction, especially when alternatively offered with problem solving opportunities. In this study, we investigate whether these results are consistent in a complex, open-ended problem solving domain, where students are presented with randomly ordered sets of worked examples and required problem solving. Our results show that worked examples benefits students early in tutoring sessions, but are comparable to hint-based systems for scaffolding domain concepts. Later in tutoring sessions, worked examples are less beneficial, and can decrease performance for lower-proficiency students.},
}

@inproceedings{mostafavi_barnes_2016,
  author    = {Mostafavi, Behrooz and Barnes, Tiffany},
  title     = {Data-Driven Proficiency Profiling -- Proof of Concept},
  booktitle = {Proceedings of the Sixth International Learning Analytics \& Knowledge Conference ({LAK} '16)},
  year      = {2016},
  pages     = {324--328},
  doi       = {10.1145/2883851.2883935},
  abstract  = {Data-driven methods have previously been used in intelligent tutoring systems to improve student learning outcomes and predict student learning methods. We have been incorporating data-driven methods for feedback and problem selection into Deep Thought, a logic tutor where students practice constructing deductive logic proofs. In this latest study we have implemented our data-driven proficiency profiler (DDPP) into Deep Thought as a proof of concept. The DDPP determines student proficiency without expert involvement by comparing relevant student rule scores to previous students who behaved similarly in the tutor and successfully completed it. The results show that the DDPP did improve in performance with additional data and proved to be an effective proof of concept.},
}

@article{mostafavi_barnes_2017,
  author    = {Mostafavi, Behrooz and Barnes, Tiffany},
  title     = {Evolution of an Intelligent Deductive Logic Tutor Using Data-Driven Elements},
  journal   = {International Journal of Artificial Intelligence in Education},
  volume    = {27},
  number    = {1},
  year      = {2017},
  month     = mar,
  pages     = {5--36},
  issn      = {1560-4306},
  doi       = {10.1007/s40593-016-0112-1},
}

@inproceedings{mostafavi_zhou_lynch_chi_barnes_2015,
  author    = {Mostafavi, B. and Zhou, G. J. and Lynch, C. and Chi, M. and Barnes, T.},
  title     = {Data-Driven Worked Examples Improve Retention and Completion in a Logic Tutor},
  booktitle = {Artificial Intelligence in Education, {AIED} 2015},
  volume    = {9112},
  year      = {2015},
  pages     = {726--729},
}