@inproceedings{ju_zhou_abdelshiheed_barnes_chi_2021,
  title={Evaluating Critical Reinforcement Learning Framework in the Field},
  booktitle={Lecture Notes in Computer Science},
  volume={12748},
  ISBN={978-3-030-78291-7, 978-3-030-78292-4},
  ISSN={0302-9743, 1611-3349},
  DOI={10.1007/978-3-030-78292-4_18},
  abstractNote={Reinforcement Learning (RL) is learning what action to take next by mapping situations to actions so as to maximize cumulative rewards. In recent years RL has achieved great success in inducing effective pedagogical policies for various interactive e-learning environments. However, it is often prohibitive to identify the critical pedagogical decisions that actually contribute to desirable learning outcomes. In this work, by utilizing the RL framework we defined critical decisions to be those states in which the agent has to take the optimal actions, and subsequently, the Critical policy as carrying out optimal actions in the critical states while acting randomly in others. We proposed a general Critical-RL framework for identifying critical decisions and inducing a Critical policy. The effectiveness of our Critical-RL framework is empirically evaluated from two perspectives: whether optimal actions must be carried out in critical states (the necessary hypothesis) and whether only carrying out optimal actions in critical states is as effective as a fully-executed RL policy (the sufficient hypothesis). Our results confirmed both hypotheses.},
  publisher={Springer International Publishing},
  author={Ju, Song and Zhou, Guojing and Abdelshiheed, Mark and Barnes, Tiffany and Chi, Min},
  year={2021},
  pages={215–227}
}

@article{zhou_azizsoltani_ausin_barnes_chi_2021,
  title={Leveraging Granularity: Hierarchical Reinforcement Learning for Pedagogical Policy Induction},
  journal={International Journal of Artificial Intelligence in Education},
  volume={8},
  ISSN={1560-4306},
  DOI={10.1007/s40593-021-00269-9},
  author={Zhou, Guojing and Azizsoltani, Hamoon and Ausin, Markel Sanz and Barnes, Tiffany and Chi, Min},
  year={2021},
  month={Aug}
}

@inproceedings{zhou_azizsoltani_ausin_barnes_chi_2019,
  title={Hierarchical Reinforcement Learning for Pedagogical Policy Induction},
  booktitle={Artificial Intelligence in Education (AIED 2019), Part I},
  volume={11625},
  ISBN={978-3-030-23203-0},
  ISSN={1611-3349},
  DOI={10.1007/978-3-030-23204-7_45},
  abstractNote={In interactive e-learning environments such as Intelligent Tutoring Systems, there are pedagogical decisions to make at two main levels of granularity: whole problems and single steps. Recent years have seen growing interest in data-driven techniques for such pedagogical decision making, which can dynamically tailor students' learning experiences. Most existing data-driven approaches, however, treat these pedagogical decisions equally, or independently, disregarding the long-term impact that tutor decisions may have across these two levels of granularity. In this paper, we propose and apply an offline, off-policy Gaussian Processes based Hierarchical Reinforcement Learning (HRL) framework to induce a hierarchical pedagogical policy that makes decisions at both problem and step levels. In an empirical classroom study with 180 students, our results show that the HRL policy is significantly more effective than a Deep Q-Network (DQN) induced policy and a random yet reasonable baseline policy.},
  author={Zhou, Guojing and Azizsoltani, Hamoon and Ausin, Markel Sanz and Barnes, Tiffany and Chi, Min},
  year={2019},
  pages={544–556}
}

@inproceedings{mostafavi_zhou_lynch_chi_barnes_2015,
  title={Data-driven worked examples improve retention and completion in a logic tutor},
  booktitle={Artificial Intelligence in Education, AIED 2015},
  volume={9112},
  author={Mostafavi, Behrooz and Zhou, Guojing and Lynch, Collin and Chi, Min and Barnes, Tiffany},
  year={2015},
  pages={726–729}
}