@article{marwan_price_2023, title={iSnap: Evolution and Evaluation of a Data-Driven Hint System for Block-Based Programming}, volume={16}, ISSN={1939-1382}, url={https://doi.org/10.1109/TLT.2022.3223577}, DOI={10.1109/TLT.2022.3223577}, abstractNote={Novice programmers often struggle on assignments, and timely help, such as a hint on what to do next, can help students continue to progress and learn rather than give up. However, in large programming classrooms, it is hard for instructors to provide such real-time support for every student. Researchers have, therefore, put tremendous effort into developing algorithms to generate automated data-driven hints to help students at scale. Despite this, few controlled studies have directly evaluated the impact of such hints on students' performance and learning. It is also unclear what specific design features make hints more or less effective. In this article, we present iSnap, a block-based programming environment that provides novices with data-driven next-step hints in real time. This article describes our improvements to iSnap over four years, including its “enhanced” next-step hints with three design features: textual explanations, self-explanation prompts, and an adaptive hint display. Moreover, we conducted a controlled study in an authentic classroom setting over several weeks to evaluate the impact of iSnap's enhanced hints on students' performance and learning. We found that students who received the enhanced hints performed better on in-class assignments and had higher programming efficiency in homework assignments than those who did not receive hints, but that hints did not significantly impact students' learning. We also discuss the challenges of classroom studies and the implications of enhanced hints compared to prior evaluations in laboratory settings, which is essential for validating the efficacy of next-step hints in real classrooms.}, number={3}, journal={IEEE TRANSACTIONS ON LEARNING TECHNOLOGIES}, author={Marwan, Samiha and Price, Thomas W.}, year={2023}, month={Jun}, pages={399–413} }
@article{marwan_akram_barnes_price_2022, title={Adaptive Immediate Feedback for Block-Based Programming: Design and Evaluation}, volume={15}, ISSN={1939-1382}, url={https://doi.org/10.1109/TLT.2022.3180984}, DOI={10.1109/TLT.2022.3180984}, abstractNote={Theories on learning show that formative feedback that is immediate, specific, corrective, and positive is essential to improve novice students’ motivation and learning. However, most prior work on programming feedback focuses on highlighting students’ mistakes or detecting failed test cases after they submit a solution. In this article, we present our adaptive immediate feedback (AIF) system, which uses a hybrid data-driven feedback generation algorithm to provide students with information on their progress, code correctness, and potential errors, as well as encouragement in the middle of programming. We also present an empirical controlled study using the AIF system across several programming tasks in a CS0 classroom. Our results show that the AIF system improved students’ performance and increased the proportion of students who fully completed the programming assignments, indicating increased persistence.
Our results suggest that the AIF system has the potential to support students at scale by giving them real-time formative feedback and the encouragement they need to complete assignments.}, number={3}, journal={IEEE TRANSACTIONS ON LEARNING TECHNOLOGIES}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Marwan, Samiha and Akram, Bita and Barnes, Tiffany and Price, Thomas W.}, year={2022}, month={Jun}, pages={406–420} }
@article{shi_shah_wang_marwan_penmetsa_price_2021, title={Toward Semi-Automatic Misconception Discovery Using Code Embeddings}, url={https://doi.org/10.1145/3448139.3448205}, DOI={10.1145/3448139.3448205}, abstractNote={Understanding students’ misconceptions is important for effective teaching and assessment. However, discovering such misconceptions manually can be time-consuming and laborious. Automated misconception discovery can address these challenges by highlighting patterns in student data, which domain experts can then inspect to identify misconceptions. In this work, we present a novel method for the semi-automated discovery of problem-specific misconceptions from students’ program code in computing courses, using a state-of-the-art code classification model. We trained the model on a block-based programming dataset and used the learned embedding to cluster incorrect student submissions. We found that these clusters correspond to specific misconceptions about the problem and that they would not have been easily discovered with existing approaches. We also discuss potential applications of our approach and how these misconceptions inform domain-specific insights into students’ learning processes.}, journal={LAK21 CONFERENCE PROCEEDINGS: THE ELEVENTH INTERNATIONAL CONFERENCE ON LEARNING ANALYTICS & KNOWLEDGE}, publisher={ACM}, author={Shi, Yang and Shah, Krupal and Wang, Wengran and Marwan, Samiha and Penmetsa, Poorvaja and Price, Thomas W.}, year={2021}, pages={606–612} }
@article{dong_shabrina_marwan_barnes_2021, title={You Really Need Help: Exploring Expert Reasons for Intervention During Block-based Programming Assignments}, DOI={10.1145/3446871.3469764}, abstractNote={In recent years, research has increasingly focused on developing intelligent tutoring systems that provide data-driven support for students in need of assistance during programming assignments. One goal of such intelligent tutors is to provide students with quality interventions comparable to those human tutors would give. While most studies have focused on generating different forms of on-demand support, such as next-step hints and worked examples, at any given moment during a programming assignment, there is little research on why human tutors provide different forms of proactive interventions to students in different situations. This information is critical for allowing intelligent programming environments to select the appropriate type of student support at the right moment. In this work, we studied human tutors’ reasons for providing interventions during two introductory programming assignments in a block-based environment. Three human tutors evaluated a sample of 86 struggling moments identified from students’ log data using a data-driven model. The human tutors specified whether and why an intervention was needed (or not) for each struggling moment.
We analyzed the expert tags and their consensus discussions and extracted six main reasons that made the experts decide to intervene: “missing key components to make progress”, “using wrong or unnecessary blocks”, “misusing needed blocks”, “having critical logic errors”, “needing confirmation and next steps”, and “unclear student intention”. We use six case studies to illustrate specific student code trace examples and the tutors’ reasons for intervention. We also discuss the potential types of automatic interventions that could address these cases. Our work sheds light on when and why students might need programming interventions. These insights contribute towards improving the quality of automated, data-driven support in programming learning environments.}, journal={ICER 2021: PROCEEDINGS OF THE 17TH ACM CONFERENCE ON INTERNATIONAL COMPUTING EDUCATION RESEARCH}, author={Dong, Yihuan and Shabrina, Preya and Marwan, Samiha and Barnes, Tiffany}, year={2021}, pages={334–346} }
@article{price_williams_solyst_marwan_2020, title={Engaging Students with Instructor Solutions in Online Programming Homework}, DOI={10.1145/3313831.3376857}, abstractNote={Students working on programming homework do not receive the same level of support as in the classroom, relying primarily on automated feedback from test cases. One low-effort way to provide more support is by prompting students to compare their solution to an instructor’s solution, but it is unclear how best to design such prompts to support learning. We designed and deployed a randomized controlled trial during online programming homework, where we provided students with an instructor’s solution and randomized whether they were prompted to compare their solution to the instructor’s, to fill in the blanks for a written explanation of the instructor’s solution, to do both, or neither. Our results suggest that these prompts can effectively engage students in reflecting on instructor solutions, although the results point to design trade-offs between the amount of effort that different prompts require from students and instructors, and their relative impact on learning.}, journal={PROCEEDINGS OF THE 2020 CHI CONFERENCE ON HUMAN FACTORS IN COMPUTING SYSTEMS (CHI'20)}, author={Price, Thomas W. and Williams, Joseph Jay and Solyst, Jaemarie and Marwan, Samiha}, year={2020} }