@inproceedings{azevedo_martin_taub_mudrick_millar_grafsgaard_2016, title={Are Pedagogical Agents' External Regulation Effective in Fostering Learning with Intelligent Tutoring Systems?}, volume={9684}, ISBN={978-3-319-39582-1}, ISSN={0302-9743}, DOI={10.1007/978-3-319-39583-8_19}, abstractNote={In this study we tested whether external regulation provided by artificial pedagogical agents (PAs) was effective in facilitating learners’ self-regulated learning (SRL) and can therefore foster complex learning with a hypermedia-based intelligent tutoring system. One hundred twenty (N = 120) college students learned about the human circulatory system with MetaTutor during a 2-hour session under one of two conditions: adaptive scaffolding (AS) or a control (C) condition. The AS condition received timely prompts from four PAs to deploy various cognitive and metacognitive SRL processes, and received immediate directive feedback concerning the deployment of the processes. By contrast, the C condition learned without assistance from the PAs. Results indicated that those in the AS condition gained significantly more knowledge about the science topic than those in the C condition. In addition, log-file data provided evidence of the effectiveness of the PAs’ scaffolding and feedback in facilitating learners’ (in the AS condition) metacognitive monitoring and regulation during learning. We discuss implications for the design of external regulation by PAs necessary to accurately detect, track, model, and foster learners’ SRL by providing more accurate and intelligent prompting, scaffolding, and feedback regarding SRL processes.}, booktitle={Intelligent Tutoring Systems, ITS 2016}, author={Azevedo, Roger and Martin, Seth A. and Taub, Michelle and Mudrick, Nicholas V. and Millar, Garrett C. and Grafsgaard, Joseph F.}, year={2016}, pages={197–207} }

@article{wiggins_grafsgaard_boyer_wiebe_lester_2016, title={Do You Think You Can? The Influence of Student Self-Efficacy on the Effectiveness of Tutorial Dialogue for Computer Science}, volume={27}, ISSN={1560-4292, 1560-4306}, url={http://dx.doi.org/10.1007/s40593-015-0091-7}, DOI={10.1007/s40593-015-0091-7}, number={1}, journal={International Journal of Artificial Intelligence in Education}, publisher={Springer Science and Business Media LLC}, author={Wiggins, Joseph B. and Grafsgaard, Joseph F. and Boyer, Kristy Elizabeth and Wiebe, Eric N. and Lester, James C.}, year={2016}, month={Feb}, pages={130–153} }

@inproceedings{vail_grafsgaard_boyer_wiebe_lester_2016, title={Predicting Learning from Student Affective Response to Tutor Questions}, volume={9684}, ISBN={978-3-319-39582-1}, ISSN={1611-3349}, DOI={10.1007/978-3-319-39583-8_15}, abstractNote={Modeling student learning during tutorial interaction is a central problem in intelligent tutoring systems. While many modeling techniques have been developed to address this problem, most of them focus on cognitive models in conjunction with often-complex domain models. This paper presents an analysis suggesting that observing students’ multimodal behaviors may provide deep insight into student learning at critical moments in a tutorial session. In particular, this work examines student facial expression, electrodermal activity, posture, and gesture immediately following inference questions posed by human tutors. The findings show that for human-human task-oriented tutorial dialogue, facial expression and skin conductance response following tutor inference questions are highly predictive of student learning gains.
These findings suggest that with multimodal behavior data, intelligent tutoring systems can make more informed adaptive decisions to support students effectively.}, booktitle={Intelligent Tutoring Systems, ITS 2016}, author={Vail, Alexandria K. and Grafsgaard, Joseph F. and Boyer, Kristy Elizabeth and Wiebe, Eric N. and Lester, James C.}, year={2016}, pages={154–164} }

@inproceedings{worsley_chiluiza_grafsgaard_ochoa_2015, title={2015 multimodal learning and analytics grand challenge}, booktitle={ICMI '15: Proceedings of the 2015 ACM International Conference on Multimodal Interaction}, author={Worsley, M. and Chiluiza, K. and Grafsgaard, J. F. and Ochoa, X.}, year={2015}, pages={525–529} }

@inproceedings{grafsgaard_wiggins_boyer_wiebe_lester_2013, title={Automatically Recognizing Facial Indicators of Frustration: A Learning-Centric Analysis}, ISSN={2156-8103}, DOI={10.1109/acii.2013.33}, abstractNote={Affective and cognitive processes form a rich substrate on which learning plays out. Affective states often influence progress on learning tasks, resulting in positive or negative cycles of affect that impact learning outcomes. Developing a detailed account of the occurrence and timing of cognitive-affective states during learning can inform the design of affective tutorial interventions. In order to advance understanding of learning-centered affect, this paper reports on a study to analyze a video corpus of computer-mediated human tutoring using an automated facial expression recognition tool that detects fine-grained facial movements. The results reveal three significant relationships between facial expression, frustration, and learning: (1) Action Unit 2 (outer brow raise) was negatively correlated with learning gain, (2) Action Unit 4 (brow lowering) was positively correlated with frustration, and (3) Action Unit 14 (mouth dimpling) was positively correlated with both frustration and learning gain. Additionally, early prediction models demonstrated that facial actions during the first five minutes were significantly predictive of frustration and learning at the end of the tutoring session. The results represent a step toward a deeper understanding of learning-centered affective states, which will form the foundation for data-driven design of affective tutoring systems.}, booktitle={2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)}, author={Grafsgaard, Joseph F. and Wiggins, Joseph B. and Boyer, Kristy Elizabeth and Wiebe, Eric N. and Lester, James C.}, year={2013}, pages={159–165} }

@inproceedings{grafsgaard_fulton_boyer_wiebe_lester_2012, title={Multimodal analysis of the implicit affective channel in computer-mediated textual communication}, DOI={10.1145/2388676.2388708}, abstractNote={Computer-mediated textual communication has become ubiquitous in recent years. Compared to face-to-face interactions, there is decreased bandwidth in affective information, yet studies show that interactions in this medium still produce rich and fulfilling affective outcomes. While overt communication (e.g., emoticons or explicit discussion of emotion) can explain some aspects of affect conveyed through textual dialogue, there may also be an underlying implicit affective channel through which participants perceive additional emotional information. To investigate this phenomenon, computer-mediated tutoring sessions were recorded with Kinect video and depth images and processed with novel tracking techniques for posture and hand-to-face gestures.
Analyses demonstrated that tutors implicitly perceived students' focused attention, physical demand, and frustration. Additionally, bodily expressions of posture and gesture correlated with student cognitive-affective states that were perceived by tutors through the implicit affective channel. Finally, posture and gesture complement each other in multimodal predictive models of student cognitive-affective states, explaining greater variance than either modality alone. This approach of empirically studying the implicit affective channel may identify details of human behavior that can inform the design of future textual dialogue systems modeled on naturalistic interaction.}, booktitle={ICMI '12: Proceedings of the ACM International Conference on Multimodal Interaction}, author={Grafsgaard, J. F. and Fulton, R. M. and Boyer, K. E. and Wiebe, E. N. and Lester, J. C.}, year={2012}, pages={145–152} }

@inproceedings{grafsgaard_lee_mott_boyer_lester, title={Modeling self-efficacy across age groups with automatically tracked facial expression}, volume={9112}, booktitle={Artificial Intelligence in Education, AIED 2015}, author={Grafsgaard, J. F. and Lee, S. Y. and Mott, B. W. and Boyer, K. E. and Lester, J. C.}, year={2015}, pages={582–585} }