@inproceedings{shen_ausin_mostafavi_chi_2018,
  title        = {Improving Learning \& Reducing Time: A Constrained Action-Based Reinforcement Learning Approach},
  doi          = {10.1145/3209219.3209232},
  abstractNote = {Constrained action-based decision-making is one of the most challenging decision-making problems. It refers to a scenario where an agent takes action in an environment not only to maximize the expected cumulative reward but where it is subject to certain action-based constraints; for example, an upper limit on the total number of certain actions being carried out. In this work, we construct a general data-driven framework called Constrained Action-based Partially Observable Markov Decision Process (CAPOMDP) to induce effective pedagogical policies. Specifically, we induce two types of policies: CAPOMDPLG using learning gain as reward with the goal of improving students' learning performance, and CAPOMDPTime using time as reward for reducing students' time on task. The effectiveness of CAPOMDPLG is compared against a random yet reasonable policy and the effectiveness of CAPOMDPTime is compared against both a Deep Reinforcement Learning induced policy and a random policy. Empirical results show that there is an Aptitude-Treatment Interaction effect: students are split into High vs. Low based on their incoming competence; while no significant difference is found among the High incoming competence groups, for the Low groups, students following CAPOMDPTime indeed spent significantly less time than those using the two baseline policies and students following CAPOMDPLG significantly outperform their peers on both learning gain and learning efficiency.},
  booktitle    = {Proceedings of the 26th Conference on User Modeling, Adaptation and Personalization ({UMAP}'18)},
  author       = {Shen, Shitian and Ausin, Markel Sanz and Mostafavi, Behrooz and Chi, Min},
  year         = {2018},
  pages        = {43--51},
}

@inproceedings{shen_lin_mostafavi_barnes_chi_2016,
  title     = {An Analysis of Feature Selection and Reward Function for Model-Based Reinforcement Learning},
  volume    = {0684},
  booktitle = {Intelligent Tutoring Systems, {ITS} 2016},
  author    = {Shen, S. T. and Lin, C. and Mostafavi, B. and Barnes, T. and Chi, M.},
  year      = {2016},
  pages     = {504--505},
}

@article{ranshous_shen_koutra_harenberg_faloutsos_samatova_2015,
  title        = {Anomaly Detection in Dynamic Networks: A Survey},
  volume       = {7},
  issn         = {1939-0068},
  doi          = {10.1002/wics.1347},
  abstractNote = {Anomaly detection is an important problem with multiple applications, and thus has been studied for decades in various research domains. In the past decade there has been a growing interest in anomaly detection in data represented as networks, or graphs, largely because of their robust expressiveness and their natural ability to represent complex relationships. Originally, techniques focused on anomaly detection in static graphs, which do not change and are capable of representing only a single snapshot of data. As real-world networks are constantly changing, there has been a shift in focus to dynamic graphs, which evolve over time.},
  number       = {3},
  journal      = {Wiley Interdisciplinary Reviews: Computational Statistics},
  author       = {Ranshous, Stephen and Shen, Shitian and Koutra, Danai and Harenberg, Steve and Faloutsos, Christos and Samatova, Nagiza F.},
  year         = {2015},
  pages        = {223--247},
}

@techreport{ranshous_shen_koutra_faloutsos_samatova,
  title       = {Anomaly Detection in Dynamic Networks: A Survey},
  author      = {Ranshous, Stephen and Shen, Shitian and Koutra, Danai and Faloutsos, Christos and Samatova, Nagiza F.},
  note        = {Technical Report -- Not held in TRLN member libraries},
}