@article{park_mott_lee_gupta_jantaraweragul_glazewski_scribner_ottenbreit-leftwich_hmelo-silver_lester_2022,
  title={Investigating a visual interface for elementary students to formulate AI planning tasks},
  journal={Journal of Computer Languages},
  volume={73},
  ISSN={2665-9182},
  DOI={10.1016/j.cola.2022.101157},
  author={Park, Kyungjin and Mott, Bradford and Lee, Seung and Gupta, Anisha and Jantaraweragul, Katie and Glazewski, Krista and Scribner, J. Adam and Ottenbreit-Leftwich, Anne and Hmelo-Silver, Cindy E. and Lester, James},
  year={2022},
  month={Dec},
  abstractNote={Recent years have seen the rapid adoption of artificial intelligence (AI) in every facet of society. The ubiquity of AI has led to an increasing demand to integrate AI learning experiences into K-12 education. Early learning experiences incorporating AI concepts and practices are critical for students to better understand, evaluate, and utilize AI technologies. AI planning is an important class of AI technologies in which an AI-driven agent utilizes the structure of a problem to construct plans of actions to perform a task. Although a growing number of efforts have explored promoting AI education for K-12 learners, limited work has investigated effective and engaging approaches for delivering AI learning experiences to elementary students. In this article, we propose a visual interface to enable upper elementary students (grades 3-5, ages 8-11) to formulate AI planning tasks within a game-based learning environment. We present our approach to designing the visual interface as well as how the AI planning tasks are embedded within narrative-centered gameplay structured around a Use-Modify-Create scaffolding progression. Further, we present results from a study of upper elementary students using the visual interface. We discuss how the Use-Modify-Create approach supported student learning as well as discuss the misconceptions and usability issues students encountered while using the visual interface to formulate AI planning tasks.}
}

@inproceedings{park_mott_lee_glazewski_scribner_ottenbreit-leftwich_hmelo-silver_lester_2021,
  title={Designing a Visual Interface for Elementary Students to Formulate AI Planning Tasks},
  booktitle={2021 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2021)},
  ISSN={1943-6092},
  DOI={10.1109/VL/HCC51201.2021.9576163},
  author={Park, Kyungjin and Mott, Bradford and Lee, Seung and Glazewski, Krista and Scribner, J. Adam and Ottenbreit-Leftwich, Anne and Hmelo-Silver, Cindy E. and Lester, James},
  year={2021},
  abstractNote={Recent years have seen the rapid adoption of artificial intelligence (AI) in every facet of society. The ubiquity of AI has led to an increasing demand to integrate AI learning experiences into K-12 education. Early learning experiences incorporating AI concepts and practices are critical for students to better understand, evaluate, and utilize AI technologies. AI planning is an important class of AI technologies in which an AI-driven agent utilizes the structure of a problem to construct plans of actions to perform a task. Although a growing number of efforts have explored promoting AI education for K-12 learners, limited work has investigated effective and engaging approaches for delivering AI learning experiences to elementary students. In this paper, we propose a visual interface to enable upper elementary students (grades 3–5, ages 8–11) to formulate AI planning tasks within a game-based learning environment. We present our approach to designing the visual interface as well as how the AI planning tasks are embedded within narrative-centered gameplay structured around a Use-Modify-Create scaffolding progression. Further, we present results from a qualitative study of upper elementary students using the visual interface. We discuss how the Use-Modify-Create approach supported student learning as well as discuss the misconceptions and usability issues students encountered while using the visual interface to formulate AI planning tasks.}
}

@inproceedings{sohn_park_chi_2020,
  title={MuLan: Multilevel Language-based Representation Learning for Disease Progression Modeling},
  booktitle={2020 IEEE International Conference on Big Data (Big Data)},
  ISSN={2639-1589},
  DOI={10.1109/BigData50022.2020.9377829},
  author={Sohn, Hyunwoo and Park, Kyungjin and Chi, Min},
  year={2020},
  pages={1246–1255},
  abstractNote={Modeling patient disease progression using Electronic Health Records (EHRs) is crucial to assist clinical decision making. In recent years, deep learning models such as Long Short-Term Memory (LSTM) and Convolutional Neural Network (CNN) have shown great success in handling sequential multivariate data, such as EHRs. Despite their great success, it is often difficult to interpret and visualize patient disease progression learned from these models in a meaningful yet unified way. In this work, we present MuLan: a Multilevel Language-based representation learning framework that can automatically learn a hierarchical representation for EHRs at entry, event, and visit levels. We validate MuLan on modeling the progression of an extremely challenging disease, septic shock, by using real-world EHRs. Our results showed that these unified multilevel representations can be utilized not only for interpreting and visualizing the latent mechanism of patients’ septic shock progressions but also for early detection of septic shock.}
}

@inproceedings{min_park_wiggins_mott_wiebe_boyer_lester_2019,
  title={Predicting Dialogue Breakdown in Conversational Pedagogical Agents with Multimodal LSTMs},
  booktitle={Artificial Intelligence in Education, AIED 2019, Part II},
  volume={11626},
  ISBN={978-3-030-23206-1},
  ISSN={1611-3349},
  url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85068335512&partnerID=MN8TOARS},
  DOI={10.1007/978-3-030-23207-8_37},
  author={Min, Wookhee and Park, Kyungjin and Wiggins, Joseph and Mott, Bradford and Wiebe, Eric and Boyer, Kristy Elizabeth and Lester, James},
  year={2019},
  pages={195–200},
  abstractNote={Recent years have seen a growing interest in conversational pedagogical agents. However, creating robust dialogue managers for conversational pedagogical agents poses significant challenges. Agents’ misunderstandings and inappropriate responses may cause breakdowns in conversational flow, lead to breaches of trust in agent-student relationships, and negatively impact student learning. Dialogue breakdown detection (DBD) is the task of predicting whether an agent’s utterance will cause a breakdown in an ongoing conversation. A robust DBD framework can support enhanced user experiences by choosing more appropriate responses, while also offering a method to conduct error analyses and improve dialogue managers. This paper presents a multimodal deep learning-based DBD framework to predict breakdowns in student-agent conversations. We investigate this framework with dialogues between middle school students and a conversational pedagogical agent in a game-based learning environment. Results from a study with 92 middle school students demonstrate that multimodal long short-term memory network (LSTM)-based dialogue breakdown detectors incorporating eye gaze features achieve high predictive accuracies and recall rates, suggesting that multimodal detectors can play an important role in designing conversational pedagogical agents that effectively engage students in dialogue.}
}