@article{hoq_shi_leinonen_babalola_lynch_price_akram_2024, title={Detecting ChatGPT-Generated Code Submissions in a CS1 Course Using Machine Learning Models}, url={https://doi.org/10.1145/3626252.3630826}, DOI={10.1145/3626252.3630826}, abstractNote={The emergence of publicly accessible large language models (LLMs) such as ChatGPT poses unprecedented risks of new types of plagiarism and cheating where students use LLMs to solve exercises for them. Detecting this behavior will be a necessary component in introductory computer science (CS1) courses, and educators should be well-equipped with detection tools when the need arises. However, ChatGPT generates code non-deterministically, and thus, traditional similarity detectors might not suffice to detect AI-created code. In this work, we explore the affordances of Machine Learning (ML) models for the detection task. We used an openly available dataset of student programs for CS1 assignments and had ChatGPT generate code for the same assignments, and then evaluated the performance of both traditional machine learning models and Abstract Syntax Tree-based (AST-based) deep learning models in detecting ChatGPT code from student code submissions. Our results suggest that both traditional machine learning models and AST-based deep learning models are effective in identifying ChatGPT-generated code with accuracy above 90%. Since the deployment of such models requires ML knowledge and resources that are not always accessible to instructors, we also explore the patterns detected by deep learning models that indicate possible ChatGPT code signatures, which instructors could possibly use to detect LLM-based cheating manually. We also explore whether explicitly asking ChatGPT to impersonate a novice programmer affects the code produced. 
We further discuss the potential applications of our proposed models for enhancing introductory computer science instruction.}, journal={PROCEEDINGS OF THE 55TH ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION, SIGCSE 2024, VOL. 1}, author={Hoq, Muntasir and Shi, Yang and Leinonen, Juho and Babalola, Damilola and Lynch, Collin and Price, Thomas and Akram, Bita}, year={2024}, pages={526–532} } @inproceedings{reichert_sthapit_tabarsi_limke_price_barnes_2024, title={Experience Helps, but It Isn't Everything: Exploring Causes of Affective State in Novice Programmers}, url={https://doi.org/10.1145/3626253.3635508}, DOI={10.1145/3626253.3635508}, abstractNote={Affective state, referring to an individual's feeling, can impact students' confidence and retention in CS, particularly for novice programmers. However, little research has been conducted to examine how moments that occur during programming impact students' affective states in real-time. In this pilot study, seven undergraduate students in an introductory block-based programming course completed a programming assignment and were surveyed and interviewed about their experience and self-efficacy as programmers. While programming, students periodically recorded their affective states via a popup in the programming environment. We performed retrospective think-aloud interviews with students afterward, asking them to watch and reflect on recordings of their programming. We subsequently analyzed student interviews using thematic analysis to derive 206 codes. These codes were grouped into three areas that impacted affect: the environment, objective progress, and perceptions during programming. To explore why students responded as they did to moment occurrence, we further categorized students based on four dimensions: programming experience, assignment completion, confidence, and the impact of the programming session on self-efficacy. 
Our initial results suggest that while certain moments elicit similar affective states among students, the interaction of the aforementioned four dimensions may have a higher impact on novices' affective states during programming. We conclude with recommendations for educators to improve students' affective states during and after programming.}, author={Reichert, Heidi and Sthapit, Sandeep and Tabarsi, Benyamin T. and Limke, Ally and Price, Thomas and Barnes, Tiffany}, year={2024}, month={Mar} } @article{wang_limke_bobbadi_isvik_catete_barnes_price_2024, title={Idea Builder: Motivating Idea Generation and Planning for Open-Ended Programming Projects through Storyboarding}, url={https://doi.org/10.1145/3626252.3630872}, DOI={10.1145/3626252.3630872}, abstractNote={In computing classrooms, building an open-ended programming project engages students in the process of designing and implementing an idea of their own choice. An explicit planning process has been shown to help students build more complex and ambitious open-ended projects. However, novices encounter difficulties in exploring and creatively expressing ideas during planning. We present Idea Builder, a storyboarding-based planning system to help novices visually express their ideas. Idea Builder includes three features: 1) storyboards to help students express a variety of ideas that map easily to programming code, 2) animated example mechanics with example actors to help students explore the space of possible ideas supported by the programming environments, and 3) synthesized starter code to help students easily transition from planning to programming. 
Through two studies with high school coding workshops, we found that students self-reported as feeling creative and feeling easy to communicate ideas; having access to animated example mechanics of an actor help students to build those actors in their plans and projects; and that most students perceived the synthesized starter code from Idea Builder as helpful and time-saving.}, journal={PROCEEDINGS OF THE 55TH ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION, SIGCSE 2024, VOL. 1}, author={Wang, Wengran and Limke, Ally and Bobbadi, Mahesh and Isvik, Amy and Catete, Veronica and Barnes, Tiffany and Price, Thomas W.}, year={2024}, pages={1402–1408} } @inproceedings{shaffer_brusilovsky_koedinger_price_barnes_mostafavi_2024, title={Ninth SPLICE Workshop on Technology and Data Infrastructure for CS Education Research}, url={https://doi.org/10.1145/3626253.3633431}, DOI={10.1145/3626253.3633431}, abstractNote={Many SIGCSE attendees are either developing or using online educational tools, and all will benefit from better interoperability among these tools and better analysis of the clickstream data coming from those tools. New tools for analyzing big data leveraged by AI (e.g., deep learning for assessment) in turn improve both content and pedagogy, thus setting up a virtuous cycle fueling learning discoveries and leveraging innovation in AI: Online technologies → big data analysis → better online technologies. This NSF-supported workshop is the latest in a series of SPLICE workshops, and is a continuation of our event at SIGCSE 2023, where the SPLICE-Portal, a dedicated socio-technical research infrastructure for Computing Education Research, was presented. This year, we continue the work with several new SPLICE community working groups, including those on Dashboards, Large Language Models, Parsons Problems, and Smart Learning Content Protocols. 
We continue to build upon our existing collaborations developed over the course of the project to engage more members of the community in tasks that will advance the project agenda.}, author={Shaffer, Cliff and Brusilovsky, Peter and Koedinger, Ken and Price, Thomas and Barnes, Tiffany and Mostafavi, Behrooz}, year={2024}, month={Mar} } @inproceedings{skripchuk_bacher_shi_tran_price_2024, title={Novices' Perceptions of Web-Search and AI for Programming}, url={https://doi.org/10.1145/3626253.3635545}, DOI={10.1145/3626253.3635545}, author={Skripchuk, James and Bacher, John and Shi, Yang and Tran, Keith and Price, Thomas}, year={2024}, month={Mar} } @article{wang_rao_kwatra_milliken_dong_gomes_martin_catete_isvik_barnes_et_al_2023, title={A Case Study on When and How Novices Use Code Examples in Open-Ended Programming}, url={https://doi.org/10.1145/3587102.3588774}, DOI={10.1145/3587102.3588774}, abstractNote={Many students rely on examples when learning to program, but they often face barriers when incorporating these examples into their own code and learning the concepts they present. As a step towards designing effective example interfaces that can support student learning, we investigate novices' needs and strategies when using examples to write code. We conducted a study with 12 pairs of high school students working on open-ended game design projects, using a system that allows students to browse examples based on their functionality, and to view and copy the example code. We analyzed interviews, screen recordings, and log data, identifying 5 moments when novices request examples, and 4 strategies that arise when students use examples. 
We synthesize these findings into principles that can inform the design of future example systems to better support students.}, journal={PROCEEDINGS OF THE 2023 CONFERENCE ON INNOVATION AND TECHNOLOGY IN COMPUTER SCIENCE EDUCATION, ITICSE 2023, VOL 1}, author={Wang, Wengran and Rao, Yudong and Kwatra, Archit and Milliken, Alexandra and Dong, Yihuan and Gomes, Neeloy and Martin, Sarah and Catete, Veronica and Isvik, Amy and Barnes, Tiffany and others}, year={2023}, pages={82–88} } @article{bai_sthapit_heckman_price_stolee_2023, title={An Experience Report on Introducing Explicit Strategies into Testing Checklists for Advanced Beginners}, url={https://doi.org/10.1145/3587102.3588781}, DOI={10.1145/3587102.3588781}, abstractNote={Software testing is a critical skill for computing students, but learning and practicing testing can be challenging, particularly for beginners. A recent study suggests that a lightweight testing checklist that contains testing strategies and tutorial information could assist students in writing quality tests. However, students expressed a desire for more support in knowing how to test the code/scenario. Moreover, the potential costs and benefits of the testing checklist are not yet examined in a classroom setting. To that end, we improved the checklist by integrating explicit testing strategies to it (ETS Checklist), which provide step-by-step guidance on how to transfer semantic information from instructions to the possible testing scenarios. In this paper, we report our experiences in designing explicit strategies in unit testing, as well as adapting the ETS Checklist as optional tool support in a CS1.5 course. With the quantitative and qualitative analysis of the survey responses and lab assignment submissions generated by students, we discuss students' engagement with the ETS Checklists. 
Our results suggest that students who used the checklist intervention had significantly higher quality in their student-authored test code, in terms of code coverage, compared to those who did not, especially for assignments earlier in the course. We also observed students' unawareness of their need for help in writing high-quality tests.}, journal={PROCEEDINGS OF THE 2023 CONFERENCE ON INNOVATION AND TECHNOLOGY IN COMPUTER SCIENCE EDUCATION, ITICSE 2023, VOL 1}, author={Bai, Gina R. and Sthapit, Sandeep and Heckman, Sarah and Price, Thomas W. and Stolee, Kathryn T.}, year={2023}, pages={194–200} } @article{skripchuk_bennett_zheng_li_price_2023, title={Analysis of Novices' Web-Based Help-Seeking Behavior While Programming}, url={https://doi.org/10.1145/3545945.3569852}, DOI={10.1145/3545945.3569852}, abstractNote={Web-based help-seeking -- finding and utilizing websites to solve a problem -- is a critical skill during programming in both professional and academic settings. However, little work has explored how students, especially novices, engage in web-based help-seeking during programming, or what strategies they use and barriers they face. This study begins to investigate these questions through analysis of students' web-search behaviors during programming. We collected think-aloud, screen recording, and log data as students completed a challenging programming task. Students were encouraged to use the web for help when needed, as if in an internship. We then qualitatively analyzed the data to address three research questions: 1) What events motivate students to use web search? 2) What strategies do students employ to search for, select, and learn from web pages? 3) What barriers do students face in web search, and when do they arise? Our results suggest that novices use a variety of web-search strategies -- some quite unexpected -- with varying degrees of success, suggesting that web search can be a challenging skill for novice programmers. 
We discuss how these results inform future research and pedagogy focused on how to support students in effective web search.}, journal={PROCEEDINGS OF THE 54TH ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION, VOL 1, SIGCSE 2023}, author={Skripchuk, James and Bennett, Neil and Zheng, Jeffrey and Li, Eric and Price, Thomas}, year={2023}, pages={945–951} } @article{harred_barnes_fisk_akram_price_yoder_2023, title={Do Intentions to Persist Predict Short-Term Computing Course Enrollments? A Scale Development, Validation, and Reliability Analysis}, url={https://doi.org/10.1145/3545945.3569875}, DOI={10.1145/3545945.3569875}, abstractNote={A key goal of many computer science education efforts is to increase the number and diversity of students who persist in the field of computer science and into computing careers. Many interventions have been developed in computer science designed to increase students' persistence in computing. However, it is often difficult to measure the efficacy of such interventions, as measuring actual persistence by tracking student enrollments and career placements after an intervention is difficult and time-consuming, and sometimes even impossible. In the social sciences, attitudinal research is often used to solve this problem, as attitudes can be collected in survey form around the same time that interventions are introduced and are predictive of behavior. This can allow researchers to assess the potential efficacy of an intervention before devoting the time and energy to conduct a longitudinal analysis. In this paper, we develop and validate a scale to measure intentions to persist in computing, and demonstrate its use in predicting actual persistence as defined by enrolling in another computer science course within two semesters. 
We conduct two analyses to do this: First, we develop a computing persistence index and test whether our scale has high alpha reliability and whether our scale predicts actual persistence in computing using students' course enrollments. Second, we conduct analyses to reduce the number of items in the scale, to make the scale easy for others to include in their own research. This paper contributes to research on computing education by developing and validating a novel measure of intentions to persist in computing, which can be used by computer science educators to evaluate potential interventions. This paper also creates a short version of the index, to ease implementation.}, journal={PROCEEDINGS OF THE 54TH ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION, VOL 1, SIGCSE 2023}, author={Harred, Rachel and Barnes, Tiffany and Fisk, Susan R. and Akram, Bita and Price, Thomas W. and Yoder, Spencer}, year={2023}, pages={1062–1068} } @article{tabarsi_reichert_qualls_price_barnes_2023, title={Exploring Novices' Struggle and Progress during Programming through Data-Driven Detectors and Think-Aloud Protocols}, ISSN={["1943-6092"]}, DOI={10.1109/VL-HCC57772.2023.00029}, abstractNote={Many students struggle when they are first learning to program. Without help, these students can lose confidence and negatively assess their programming ability, which can ultimately lead to dropouts. However, detecting the exact moment of student struggle is still an open question in computing education. In this work, we conducted a think-aloud study with five high-school students to investigate the automatic detection of progressing and struggling moments using a detector algorithm (SPD). SPD classifies student trace logs into moments of struggle and progress based on their similarity to prior students' correct solutions. 
We explored the extent to which the SPD-identified moments of struggle aligned with expert-identified moments based on novices' verbalized thoughts and programming actions. Our analysis results suggest that SPD can catch students' struggling and progressing moments with a 72.5% F1-score, but room remains for improvement in detecting struggle. Moreover, we conducted an in-depth examination to discover why discrepancies arose between expert-identified and detector-identified struggle moments. We conclude with recommendations for future data-driven struggle detection systems.}, journal={2023 IEEE SYMPOSIUM ON VISUAL LANGUAGES AND HUMAN-CENTRIC COMPUTING, VL/HCC}, author={Tabarsi, Benyamin and Reichert, Heidi and Qualls, Rachel and Price, Thomas and Barnes, Tiffany}, year={2023}, pages={179–183} } @article{wang_bacher_isvik_limke_sthapit_shi_tabarsi_tran_catete_barnes_et_al_2023, title={Investigating the Impact of On-Demand Code Examples on Novices' Open-Ended Programming Projects}, url={https://doi.org/10.1145/3568813.3600141}, DOI={10.1145/3568813.3600141}, abstractNote={Background and Context: Open-ended programming projects encourage novice students to choose and pursue projects based on their own ideas and interests, and are widely used in many introductory programming courses. However, novice programmers encounter challenges exploring and discovering new ideas, implementing their ideas, and applying unfamiliar programming concepts and APIs. Code examples are one of the primary resources students use to apply code usage patterns and learn API knowledge, but little work has investigated the effect of having access to examples on students’ open-ended programming experience. Objectives: In this work, we evaluate the impact of code examples on open-ended programming, through a study with 46 local high school students in a full-day coding workshop. 
Method: We conducted a controlled study, where half of the students had full access to 37 code examples using an example browser system called Example Helper and the other half had 5 standard, tutorial examples. Findings: We found that students who had access to all 37 code examples used a significantly larger variety of code APIs, perceived the programming as relatively more creative, but also experienced a higher task load. We also found suggestive evidence of a better post-assignment performance from the example group, showing that some students were able to learn and apply the knowledge they learned from examples to a new programming task.}, journal={PROCEEDINGS OF THE 2023 ACM CONFERENCE ON INTERNATIONAL COMPUTING EDUCATION RESEARCH V.1, ICER 2023 V1}, author={Wang, Wengran and Bacher, John and Isvik, Amy and Limke, Ally and Sthapit, Sandeep and Shi, Yang and Tabarsi, Benyamin T. and Tran, Keith and Catete, Veronica and Barnes, Tiffany and others}, year={2023}, pages={464–475} } @article{marwan_price_2023, title={iSnap: Evolution and Evaluation of a Data-Driven Hint System for Block-Based Programming}, volume={16}, ISSN={["1939-1382"]}, url={https://doi.org/10.1109/TLT.2022.3223577}, DOI={10.1109/TLT.2022.3223577}, abstractNote={Novice programmers often struggle on assignments, and timely help, such as a hint on what to do next, can help students continue to progress and learn, rather than giving up. However, in large programming classrooms, it is hard for instructors to provide such real-time support for every student. Researchers have, therefore, put tremendous effort into developing algorithms to generate automated data-driven hints to help students at scale. Despite this, few controlled studies have directly evaluated the impact of such hints on students' performance and learning. It is also unclear what specific design features make hints more or less effective. 
In this article, we present iSnap, a block-based programming environment that provides novices with data-driven next-step hints in real time. This article describes our improvements to iSnap over four years, including its “enhanced” next-step hints with three design features: textual explanations, self-explanation prompts, and an adaptive hint display. Moreover, we conducted a controlled study in an authentic classroom setting over several weeks to evaluate the impact of iSnap's enhanced hints on students' performance and learning. We found students who received the enhanced hints perform better on in-class assignments and have higher programming efficiency in homework assignments than those who did not receive hints, but that hints did not significantly impact students' learning. We also discuss the challenges of classroom studies and the implications of enhanced hints compared to prior evaluations in laboratory settings, which is essential to validate the efficacy of next-step hints' impact in a real classroom experience.}, number={3}, journal={IEEE TRANSACTIONS ON LEARNING TECHNOLOGIES}, author={Marwan, Samiha and Price, Thomas W.}, year={2023}, month={Jun}, pages={399–413} } @article{marwan_akram_barnes_price_2022, title={Adaptive Immediate Feedback for Block-Based Programming: Design and Evaluation}, volume={15}, ISSN={["1939-1382"]}, url={https://doi.org/10.1109/TLT.2022.3180984}, DOI={10.1109/TLT.2022.3180984}, abstractNote={Theories on learning show that formative feedback that is immediate, specific, corrective, and positive is essential to improve novice students’ motivation and learning. However, most prior work on programming feedback focuses on highlighting student's mistakes, or detecting failed test cases after they submit a solution. 
In this article, we present our adaptive immediate feedback (AIF) system, which uses a hybrid data-driven feedback generation algorithm to provide students with information on their progress, code correctness, and potential errors, as well as encouragement in the middle of programming. We also present an empirical controlled study using the AIF system across several programming tasks in a CS0 classroom. Our results show that the AIF system improved students’ performance, and the proportion of students who fully completed the programming assignments, indicating increased persistence. Our results suggest that the AIF system has potential to scalably support students by giving them real-time formative feedback and the encouragement they need to complete assignments.}, number={3}, journal={IEEE TRANSACTIONS ON LEARNING TECHNOLOGIES}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Marwan, Samiha and Akram, Bita and Barnes, Tiffany and Price, Thomas W.}, year={2022}, month={Jun}, pages={406–420} } @article{limke_milliken_catete_gransbury_isvik_price_martens_barnes_2022, title={Case Studies on the use of Storyboarding by Novice Programmers}, DOI={10.1145/3502718.3524749}, abstractNote={Our researchers seek to support students in building block-based programming projects that are motivating and engaging as well as valuable practice in learning to code. A difficult part of the programming process is planning. In this research, we explore how novice programmers used a custom-built planning tool, PlanIT, contrasted against how they used storyboarding when planning games. In a three-part study, we engaged novices in planning and programming three games: a maze game, a break-out game, and a mashup of the two. In a set of five case studies, we show how five pairs of students approached the planning and programming of these three games, illustrating that students felt more creative when storyboarding rather than using PlanIT. 
We end with a discussion on the implications of this work for designing supports for novices to plan open-ended projects.}, journal={PROCEEDINGS OF THE 27TH ACM CONFERENCE ON INNOVATION AND TECHNOLOGY IN COMPUTER SCIENCE EDUCATION, ITICSE 2022, VOL 1}, author={Limke, Ally and Milliken, Alexandra and Catete, Veronica and Gransbury, Isabella and Isvik, Amy and Price, Thomas and Martens, Chris and Barnes, Tiffany}, year={2022}, pages={318–324} } @article{bai_presler-marshall_price_stolee_2022, title={Check It Off: Exploring the Impact of a Checklist Intervention on the Quality of Student-authored Unit Tests}, DOI={10.1145/3502718.3524799}, abstractNote={Software testing is an essential skill for computer science students. Prior work reports that students desire support in determining what code to test and which scenarios should be tested. In response to this, we present a lightweight testing checklist that contains both tutorial information and testing strategies to guide students in what and how to test. To assess the impact of the testing checklist, we conducted an experimental, controlled A/B study with 32 undergraduate and graduate students. The study task was writing a test suite for an existing program. Students were given either the testing checklist (the experimental group) or a tutorial on a standard coverage tool with which they were already familiar (the control group). By analyzing the combination of student-written tests and survey responses, we found students with the checklist performed as well as or better than the coverage tool group, suggesting a potential positive impact of the checklist (or at minimum, a non-negative impact). This is particularly noteworthy given the control condition of the coverage tool is the state of the practice. 
These findings suggest that the testing tool support does not need to be sophisticated to be effective.}, journal={PROCEEDINGS OF THE 27TH ACM CONFERENCE ON INNOVATION AND TECHNOLOGY IN COMPUTER SCIENCE EDUCATION, ITICSE 2022, VOL 1}, author={Bai, Gina R. and Presler-Marshall, Kai and Price, Thomas W. and Stolee, Kathryn T.}, year={2022}, pages={276–282} } @article{wang_le meur_bobbadi_akram_barnes_martens_price_2022, title={Exploring Design Choices to Support Novices' Example Use During Creative Open-Ended Programming}, DOI={10.1145/3478431.3499374}, abstractNote={Open-ended programming engages students by connecting computing with their real-world experience and personal interest. However, such open-ended programming tasks can be challenging, as they require students to implement features that they may be unfamiliar with. Code examples help students to generate ideas and implement program features, but students also encounter many learning barriers when using them. We explore how to design code examples to support novices' effective example use by presenting our experience of building and deploying Example Helper, a system that supports students with a gallery of code examples during open-ended programming. 
We deployed Example Helper in an undergraduate CS0 classroom to investigate students' example usage experience, finding that students used different strategies to browse, understand, experiment with, and integrate code examples, and that students who make more sophisticated plans also used more examples in their projects.}, journal={PROCEEDINGS OF THE 53RD ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION (SIGCSE 2022), VOL 1}, author={Wang, Wengran and Le Meur, Audrey and Bobbadi, Mahesh and Akram, Bita and Barnes, Tiffany and Martens, Chris and Price, Thomas}, year={2022}, pages={619–625} } @article{skripchuk_shi_price_2022, title={Identifying Common Errors in Open-Ended Machine Learning Projects}, volume={1}, url={http://dx.doi.org/10.1145/3478431.3499397}, DOI={10.1145/3478431.3499397}, abstractNote={Machine learning (ML) is one of the fastest growing subfields in Computer Science, and it is important to identify ways to improve ML education. A key way to do so is by understanding the common errors that students make when writing ML programs, so they can be addressed. Prior work investigating ML errors has focused on an instructor perspective, but has not looked at student programming artifacts, such as projects and code submissions to understand how these errors occur and which are most common. To address this, we qualitatively coded over 2,500 cells of code from 19 final team projects (63 students) in an upper-division machine learning course. By isolating and codifying common errors and misconceptions across projects, we can identify what ML errors students struggle with. In our results, we found that library usage, hyperparameter tuning, and misusing test data were among the most common errors, and we give examples of how and when they occur. 
We then provide suggestions on why these misconceptions may occur, and how instructors and software designers can possibly mitigate these errors.}, journal={PROCEEDINGS OF THE 53RD ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION (SIGCSE 2022), VOL 1}, publisher={ACM}, author={Skripchuk, James and Shi, Yang and Price, Thomas}, year={2022}, pages={216–222} } @article{akram_fisk_yoder_hunt_price_battestilli_barnes_2022, title={Increasing Students' Persistence in Computer Science through a Lightweight Scalable Intervention}, url={http://dx.doi.org/10.1145/3502718.3524815}, DOI={10.1145/3502718.3524815}, abstractNote={Research has shown that high self-assessment of ability, sense of belonging, and professional role confidence are crucial for students' persistence in computing. As grades in introductory computer science courses tend to be lower than other courses, it is essential to provide students with contextualized feedback about their performance in these courses. Giving students unambiguous and contextualized feedback is especially important during COVID when many classes have moved online and instructors and students have fewer opportunities to interact. In this study, we investigate the effect of a lightweight, scalable intervention where students received personalized, contextualized feedback from their instructors after two major assignments during the semester. After each intervention, we collected survey data to assess students' self-assessment of computing ability, sense of belonging, intentions to persist in computing, professional role confidence, and the likelihood of stating intention to pursue a major in computer science. To analyze the effectiveness of our intervention, we conducted linear regression and mediation analysis on student survey responses. 
Our results have shown that providing students with personalized feedback can significantly improve their self-assessment of computing ability, which will significantly improve their intentions to persist in computing. Furthermore, our results have demonstrated that our intervention can significantly improve students' sense of belonging, professional role confidence, and the likelihood of stating an intention to pursue a major in computer science.}, journal={PROCEEDINGS OF THE 27TH ACM CONFERENCE ON INNOVATION AND TECHNOLOGY IN COMPUTER SCIENCE EDUCATION, ITICSE 2022, VOL 1}, publisher={ACM}, author={Akram, Bita and Fisk, Susan and Yoder, Spencer and Hunt, Cynthia and Price, Thomas and Battestilli, Lina and Barnes, Tiffany}, year={2022}, pages={526–532} } @inproceedings{milliken_wang_cateté_martin_gomes_dong_harred_isvik_barnes_price_et_al_2021, title={PlanIT! A New Integrated Tool to Help Novices Design for Open-ended Projects}, url={http://dx.doi.org/10.1145/3408877.3432552}, DOI={10.1145/3408877.3432552}, abstractNote={Project-based learning can encourage and motivate students to learn through exploring their own interests, but introduces special challenges for novice programmers. Recent research has shown that novice students perceive themselves to be "bad at programming," especially when they do not know how to start writing a program, or need to create a plan before getting started. In this paper, we present PlanIT, a guided planning tool integrated with the Snap! programming environment designed to help novices plan and program their open-ended projects. Within PlanIT, students can add a description for their project, use a to do list to help break down the steps of implementation, plan important elements of their program including actors, variables, and events, and view related example projects. 
We report findings from a pilot study of high school students using PlanIT, showing that students who used the tool learned to make more specific and actionable plans. Results from student interviews show they appreciate the guidance that PlanIT provides, as well as the affordances it offers to more quickly create program elements.}, booktitle={Proceedings of the 52nd ACM Technical Symposium on Computer Science Education}, publisher={ACM}, author={Milliken, Alexandra and Wang, Wengran and Cateté, Veronica and Martin, Sarah and Gomes, Neeloy and Dong, Yihuan and Harred, Rachel and Isvik, Amy and Barnes, Tiffany and Price, Thomas and others}, year={2021}, month={Mar}, pages={232–238} } @article{card_wang_martens_price_2021, title={Scaffolding Game Design: Towards Tool Support for Planning Open-Ended Projects in an Introductory Game Design Class}, ISSN={1943-6092}, DOI={10.1109/VL/HCC51201.2021.9576209}, abstractNote={One approach to teaching game design to students with a wide variety of disciplinary backgrounds is through team game projects that span multiple weeks, up to an entire term. However, open-ended, creative projects introduce a gamut of challenges to novice programmers. Our goal is to assist game design students with the planning stage of their projects. This paper describes our data collection process through three course interventions and student interviews, and subsequent analysis in which we learned students had difficulty expressing their creative vision and connecting the game mechanics to the intended player experience. 
We present these results as a step towards the goal of scaffolding the planning process for student game projects, supporting more creative ideas, clearer communication among team members, and a stronger understanding of human-centered design in software development.}, journal={2021 IEEE SYMPOSIUM ON VISUAL LANGUAGES AND HUMAN-CENTRIC COMPUTING (VL/HCC 2021)}, author={Card, Alexander and Wang, Wengran and Martens, Chris and Price, Thomas}, year={2021} } @article{shi_shah_wang_marwan_penmetsa_price_2021, title={Toward Semi-Automatic Misconception Discovery Using Code Embeddings}, url={http://dx.doi.org/10.1145/3448139.3448205}, DOI={10.1145/3448139.3448205}, abstractNote={Understanding students’ misconceptions is important for effective teaching and assessment. However, discovering such misconceptions manually can be time-consuming and laborious. Automated misconception discovery can address these challenges by highlighting patterns in student data, which domain experts can then inspect to identify misconceptions. In this work, we present a novel method for the semi-automated discovery of problem-specific misconceptions from students’ program code in computing courses, using a state-of-the-art code classification model. We trained the model on a block-based programming dataset and used the learned embedding to cluster incorrect student submissions. We found these clusters correspond to specific misconceptions about the problem and would not have been easily discovered with existing approaches. 
We also discuss potential applications of our approach and how these misconceptions inform domain-specific insights into students’ learning processes.}, journal={LAK21 CONFERENCE PROCEEDINGS: THE ELEVENTH INTERNATIONAL CONFERENCE ON LEARNING ANALYTICS & KNOWLEDGE}, publisher={ACM}, author={Shi, Yang and Shah, Krupal and Wang, Wengran and Marwan, Samiha and Penmetsa, Poorvaja and Price, Thomas W.}, year={2021}, pages={606–612} } @inproceedings{marwan_gao_fisk_price_barnes_2020, title={Adaptive Immediate Feedback Can Improve Novice Programming Engagement and Intention to Persist in Computer Science}, booktitle={Proceedings of the International Computing Education Research Conference}, author={Marwan, Samiha and Gao, Ge and Fisk, Susan and Price, Thomas W. and Barnes, Tiffany}, year={2020}, pages={1–10} } @inbook{price_marwan_winters_williams_2020, title={An Evaluation of Data-Driven Programming Hints in a Classroom Setting}, url={https://doi.org/10.1007/978-3-030-52240-7_45}, DOI={10.1007/978-3-030-52240-7_45}, abstractNote={Data-driven programming hints are a scalable way to support students when they are stuck by automatically offering suggestions and identifying errors. However, few classroom studies have investigated data-driven hints’ impact on students’ performance and learning. In this work, we ran a controlled experiment with 241 students in an authentic classroom setting, comparing students who learned with and without hints. We found no evidence that hints improved student performance or learning overall, and we discuss possible reasons why.}, booktitle={Lecture Notes in Computer Science}, publisher={Springer International Publishing}, author={Price, Thomas W. 
and Marwan, Samiha and Winters, Michael and Williams, Joseph Jay}, year={2020}, pages={246–251} } @inproceedings{wang_zhi_milliken_lytle_price_2020, title={Crescendo : Engaging Students to Self-Paced Programming Practices}, booktitle={Proceedings of the ACM Technical Symposium on Computer Science Education}, author={Wang, Wengran and Zhi, Rui and Milliken, Alexandra and Lytle, Nicholas and Price, Thomas W}, year={2020} } @article{price_williams_solyst_marwan_2020, title={Engaging Students with Instructor Solutions in Online Programming Homework}, DOI={10.1145/3313831.3376857}, abstractNote={Students working on programming homework do not receive the same level of support as in the classroom, relying primarily on automated feedback from test cases. One low-effort way to provide more support is by prompting students to compare their solution to an instructor's solution, but it is unclear the best way to design such prompts to support learning. We designed and deployed a randomized controlled trial during online programming homework, where we provided students with an instructor's solution, and randomized whether they were prompted to compare their solution to the instructor's, to fill in the blanks for a written explanation of the instructor's solution, to do both, or neither. Our results suggest that these prompts can effectively engage students in reflecting on instructor solutions, although the results point to design trade-offs between the amount of effort that different prompts require from students and instructors, and their relative impact on learning.}, journal={PROCEEDINGS OF THE 2020 CHI CONFERENCE ON HUMAN FACTORS IN COMPUTING SYSTEMS (CHI'20)}, author={Price, Thomas W. 
and Williams, Joseph Jay and Solyst, Jaemarie and Marwan, Samiha}, year={2020} } @inproceedings{price_williams_solyst_marwan_2020a, place={Honolulu, HI, USA}, title={Engaging Students with Instructor Solutions in Online Programming Homework}, booktitle={ACM CHI Conference on Human Factors in Computing Systems}, author={Price, Thomas W. and Williams, Joseph Jay and Solyst, Jaemarie and Marwan, Samiha}, year={2020} } @inproceedings{wang_rao_zhi_marwan_gao_price_2020, title={Step Tutor: Supporting Students through Step-by-Step Example-Based Feedback}, booktitle={Proceedings of the International Conference on Innovation and Technology in Computer Science Education}, author={Wang, Wengran and Rao, Yudong and Zhi, Rui and Marwan, Samiha and Gao, Ge and Price, Thomas W.}, year={2020} } @inproceedings{unproductive_help_seeking_2020, title={Unproductive Help-Seeking in Programming: What It Is and How to Address It}, url={http://dx.doi.org/10.1145/3341525.3387394}, DOI={10.1145/3341525.3387394}, abstractNote={While programming, novices often lack the ability to effectively seek help, such as when to ask for a hint or feedback. Students may avoid help when they need it, or abuse help to avoid putting in effort, and both behaviors can impede learning. In this paper we present two main contributions. First, we investigated log data from students working in a programming environment that offers automated hints, and we propose a taxonomy of unproductive help-seeking behaviors in programming. Second, we used these findings to design a novel user interface for hints that subtly encourages students to seek help with the right frequency, estimated with a data-driven algorithm. We conducted a pilot study to evaluate our data-driven (DD) hint display, compared to a traditional interface, where students request hints on-demand as desired. 
We found students with the DD display were less than half as likely to engage in unproductive help-seeking, and we found suggestive evidence that this may improve their learning.}, booktitle={Proceedings of the 2020 ACM Conference on Innovation and Technology in Computer Science Education}, year={2020}, month={Jun} } @article{price_dong_zhi_paaßen_lytle_cateté_barnes_2019, title={A Comparison of the Quality of Data-Driven Programming Hint Generation Algorithms}, volume={29}, ISSN={1560-4292, 1560-4306}, url={http://dx.doi.org/10.1007/s40593-019-00177-z}, DOI={10.1007/s40593-019-00177-z}, number={3}, journal={International Journal of Artificial Intelligence in Education}, publisher={Springer Science and Business Media LLC}, author={Price, Thomas W. and Dong, Yihuan and Zhi, Rui and Paaßen, Benjamin and Lytle, Nicholas and Cateté, Veronica and Barnes, Tiffany}, year={2019}, month={May}, pages={368–395} } @inproceedings{marwan_jay_williams_price_2019, place={New York, NY, USA}, title={An Evaluation of the Impact of Automated Programming Hints on Performance and Learning}, url={https://doi.org/10.1145/3291279.3339420}, DOI={10.1145/3291279.3339420}, abstractNote={A growing body of work has explored how to automatically generate hints for novice programmers, and many programming environments now employ these hints. However, few studies have investigated the efficacy of automated programming hints for improving performance and learning, how and when novices find these hints beneficial, and the tradeoffs that exist between different types of hints. In this work, we explored the efficacy of next-step code hints with 2 complementary features: textual explanations and self-explanation prompts. We conducted two studies in which novices completed two programming tasks in a block-based programming environment with automated hints. 
In Study 1, 10 undergraduate students completed 2 programming tasks with a variety of hint types, and we interviewed them to understand their perceptions of the affordances of each hint type. For Study 2, we recruited a convenience sample of participants without programming experience from Amazon Mechanical Turk. We conducted a randomized experiment comparing the effects of hints' types on learners' performance and performance on a subsequent task without hints. We found that code hints with textual explanations significantly improved immediate programming performance. However, these hints only improved performance in a subsequent post-test task with similar objectives, when they were combined with self-explanation prompts. These results provide design insights into how automatically generated code hints can be improved with textual explanations and prompts to self-explain, and provide evidence about when and how these hints can improve programming performance and learning.}, booktitle={Proceedings of the 2019 ACM Conference on International Computing Education Research}, publisher={Association for Computing Machinery}, author={Marwan, Samiha and Jay Williams, Joseph and Price, Thomas}, year={2019}, pages={61–70} } @inproceedings{dong_marwan_catete_price_barnes_2019, place={New York, NY, USA}, title={Defining Tinkering Behavior in Open-ended Block-based Programming Assignments}, ISBN={9781450358903}, url={http://dx.doi.org/10.1145/3287324.3287437}, DOI={10.1145/3287324.3287437}, abstractNote={Tinkering has been shown to have a positive influence on students in open-ended making activities. Open-ended programming assignments in block-based programming resemble making activities in that both of them encourage students to tinker with tools to create their own solutions to achieve a goal. However, previous studies of tinkering in programming discussed tinkering as a broad, ambiguous term, and investigated only self-reported data. 
To our knowledge, no research has studied student tinkering behaviors while solving problems in block-based programming environments. In this position paper, we propose a definition for tinkering in block-based programming environments as a kind of behavior that students exhibit when testing, exploring, and struggling during problem-solving. We introduce three general categories of tinkering behaviors (test-based, prototype-based, and construction-based tinkering) derived from student data, and use case studies to demonstrate how students exhibited these behaviors in problem-solving. We created the definitions using a mixed-methods research design combining a literature review with data-driven insights from submissions of two open-ended programming assignments in iSnap, a block-based programming environment. We discuss the implication of each type of tinkering behavior for learning. Our study and results are the first in this domain to define tinkering based on student behaviors in a block-based programming environment.}, booktitle={Proceedings of the 50th ACM Technical Symposium on Computer Science Education - SIGCSE '19}, publisher={ACM Press}, author={Dong, Yihuan and Marwan, Samiha and Catete, Veronica and Price, Thomas and Barnes, Tiffany}, year={2019}, pages={1204–1210} } @inproceedings{zhi_chi_barnes_price_2019, place={New York, NY, USA}, title={Evaluating the Effectiveness of Parsons Problems for Block-based Programming}, ISBN={9781450361859}, url={http://dx.doi.org/10.1145/3291279.3339419}, DOI={10.1145/3291279.3339419}, abstractNote={Parsons problems are program puzzles, where students piece together code fragments to construct a program. Similar to block-based programming environments, Parsons problems eliminate the need to learn syntax. Parsons problems have been shown to improve learning efficiency when compared to writing code or fixing incorrect code in lab studies, or as part of a larger curriculum. 
In this study, we directly compared Parsons problems with block-based programming assignments in classroom settings. We hypothesized that Parsons problems would improve students' programming efficiency on the lab assignments where they were used, without impacting performance on the subsequent, related homework or the later programming project. Our results confirmed our hypothesis, showing that on average Parsons problems took students about half as much time to complete compared to equivalent programming problems. At the same time, we found no evidence to suggest that students performed worse on subsequent assignments, as measured by performance and time on task. The results indicate that the effectiveness of Parsons problems is not simply based on helping students avoid syntax errors. We believe this is because Parsons problems dramatically reduce the programming solution space, letting students focus on solving the problem rather than having to solve the combined problem of devising a solution, searching for needed components, and composing them together.}, booktitle={Proceedings of the 2019 ACM Conference on International Computing Education Research - ICER '19}, publisher={ACM Press}, author={Zhi, Rui and Chi, Min and Barnes, Tiffany and Price, Thomas W.}, year={2019}, pages={51–59} } @inproceedings{zhi_price_marwan_milliken_barnes_chi_2019, place={New York, NY, USA}, title={Exploring the Impact of Worked Examples in a Novice Programming Environment}, ISBN={9781450358903}, url={http://dx.doi.org/10.1145/3287324.3287385}, DOI={10.1145/3287324.3287385}, abstractNote={Research in a variety of domains has shown that viewing worked examples (WEs) can be a more efficient way to learn than solving equivalent problems. We designed a Peer Code Helper system to display WEs, along with scaffolded self-explanation prompts, in a block-based, novice programming environment called \snap. We evaluated our system during a high school summer camp with 22 students. 
Participants completed three programming problems with access to WEs on either the first or second problem. We found that WEs did not significantly impact students' learning, but may have impacted students' intrinsic cognitive load, suggesting that our WEs with scaffolded prompts may be an inherently different learning task. Our results show that WEs saved students time on initial tasks compared to writing code, but some of the time saved was lost in subsequent programming tasks. Overall, students with WEs completed more tasks within a fixed time period, but not significantly more. WEs may improve students' learning efficiency when programming, but these effects are nuanced and merit further study.}, booktitle={Proceedings of the 50th ACM Technical Symposium on Computer Science Education - SIGCSE '19}, publisher={ACM Press}, author={Zhi, Rui and Price, Thomas W. and Marwan, Samiha and Milliken, Alexandra and Barnes, Tiffany and Chi, Min}, year={2019}, pages={98–104} } @inproceedings{marwan_lytle_williams_price_2019, place={New York, NY, USA}, title={The Impact of Adding Textual Explanations to Next-Step Hints in a Novice Programming Environment}, url={https://doi.org/10.1145/3304221.3319759}, DOI={10.1145/3304221.3319759}, abstractNote={Automated hints, a powerful feature of many programming environments, have been shown to improve students' performance and learning. New methods for generating these hints use historical data, allowing them to scale easily to new classrooms and contexts. These scalable methods often generate next-step, code hints that suggest a single edit for the student to make to their code. However, while these code hints tell the student what to do, they do not explain why, which can make these hints hard to interpret and decrease students' trust in their helpfulness. In this work, we augmented code hints by adding adaptive, textual explanations in a block-based, novice programming environment. 
We evaluated their impact in two controlled studies with novice learners to investigate how our results generalize to different populations. We measured the impact of textual explanations on novices' programming performance. We also used quantitative analysis of log data, self-explanation prompts, and frequent feedback surveys to evaluate novices' understanding and perception of the hints throughout the learning process. Our results showed that novices perceived hints with explanations as significantly more relevant and interpretable than those without explanations, and were also better able to connect these hints to their code and the assignment. However, we found little difference in novices' performance. Our results suggest that explanations have the potential to make code hints more useful, but it is unclear whether this translates into better overall performance and learning.}, booktitle={Proceedings of the 2019 ACM Conference on Innovation and Technology in Computer Science Education}, publisher={Association for Computing Machinery}, author={Marwan, Samiha and Lytle, Nicholas and Williams, Joseph Jay and Price, Thomas}, year={2019}, pages={520–526} } @inproceedings{zhi_marwan_dong_lytle_price_barnes_2019, title={Toward Data-Driven Example Feedback for Novice Programming}, booktitle={Proceedings of the International Conference on Educational Data Mining}, author={Zhi, Rui and Marwan, Samiha and Dong, Yihuan and Lytle, Nicholas and Price, Thomas W and Barnes, Tiffany}, year={2019}, pages={218–227} } @inproceedings{zhi_lytle_price_2018, place={New York, NY, USA}, title={Exploring Instructional Support Design in an Educational Game for K-12 Computing Education}, url={https://doi.org/10.1145/3159450.3159519}, DOI={10.1145/3159450.3159519}, abstractNote={Instructional supports (Supports) help students learn more effectively in intelligent tutoring systems and gamified educational environments. However, the implementation and success of Supports vary by environment. 
We explored Support design in an educational programming game, BOTS, implementing three different strategies: instructional text (Text), worked examples (Examples) and buggy code (Bugs). These strategies are adapted from promising Supports in other domains and motivated by established educational theory. We evaluated our Supports through a pilot study with middle school students. Our results suggest Bugs may be a promising strategy, as demonstrated by the lower completion time and solution code length in assessment puzzles. We end reflecting on our design decisions providing recommendations for future iterations. Our motivations, design process, and study's results provide insight into the design of Supports for programming games.}, booktitle={Proceedings of the 49th ACM Technical Symposium on Computer Science Education}, publisher={Association for Computing Machinery}, author={Zhi, Rui and Lytle, Nicholas and Price, Thomas W.}, year={2018}, pages={747–752} } @article{paassen_hammer_price_barnes_gross_pinkwart_2018, title={The Continuous Hint Factory - Providing Hints in Vast and Sparsely Populated Edit Distance Spaces}, volume={10}, DOI={10.5281/zenodo.3554697}, abstractNote={Intelligent tutoring systems can support students in solving multi-step tasks by providing hints regarding what to do next. However, engineering such next-step hints manually or via an expert model becomes infeasible if the space of possible states is too large. Therefore, several approaches have emerged to infer next-step hints automatically, relying on past students' data. In particular, the Hint Factory (Barnes \& Stamper, 2008) recommends edits that are most likely to guide students from their current state towards a correct solution, based on what successful students in the past have done in the same situation. 
Still, the Hint Factory relies on student data being available for any state a student might visit while solving the task, which is not the case for some learning tasks, such as open-ended programming tasks. In this contribution we provide a mathematical framework for edit-based hint policies and, based on this theory, propose a novel hint policy to provide edit hints in vast and sparsely populated state spaces. In particular, we extend the Hint Factory by considering data of past students in all states which are similar to the student's current state and creating hints approximating the weighted average of all these reference states. Because the space of possible weighted averages is continuous, we call this approach the Continuous Hint Factory. In our experimental evaluation, we demonstrate that the Continuous Hint Factory can predict more accurately what capable students would do compared to existing prediction schemes on two learning tasks, especially in an open-ended programming task, and that the Continuous Hint Factory is comparable to existing hint policies at reproducing tutor hints on a simple UML diagram task.}, number={1}, journal={Journal of Educational Data Mining}, author={Paassen, B. and Hammer, B. and Price, T.W. and Barnes, T. and Gross, S. and Pinkwart, N.}, year={2018}, month={Jun}, pages={1–35} } @inbook{price_zhi_dong_lytle_barnes_2018, title={The Impact of Data Quantity and Source on the Quality of Data-Driven Hints for Programming}, ISBN={9783319938424 9783319938431}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-93843-1_35}, DOI={10.1007/978-3-319-93843-1_35}, abstractNote={In the domain of programming, intelligent tutoring systems increasingly employ data-driven methods to automate hint generation. Evaluations of these systems have largely focused on whether they can reliably provide hints for most students, and how much data is needed to do so, rather than how useful the resulting hints are to students. 
We present a method for evaluating the quality of data-driven hints and how their quality is impacted by the data used to generate them. Using two datasets, we investigate how the quantity of data and the source of data (whether it comes from students or experts) impact one hint generation algorithm. We find that with student training data, hint quality stops improving after 15–20 training solutions and can decrease with additional data. We also find that student data outperforms a single expert solution but that a comprehensive set of expert solutions generally performs best.}, booktitle={Lecture Notes in Computer Science}, publisher={Springer International Publishing}, author={Price, Thomas W. and Zhi, Rui and Dong, Yihuan and Lytle, Nicholas and Barnes, Tiffany}, year={2018}, pages={476–490} } @article{price_2018, place={New York, NY, USA}, title={iSnap: Automatic Hints and Feedback for Block-based Programming}, url={https://doi.org/10.1145/3159450.3162202}, DOI={10.1145/3159450.3162202}, abstractNote={iSnap is a block-based programming environment that supports struggling students with on-demand hints and error-checking feedback. iSnap is an extension of Snap!, a creative and novice-friendly programming environment, used in the Beauty and Joy of Computing (BJC) AP CS Principles curriculum. iSnap is designed to support the open-ended, exploratory programming problems of BJC, while adapting to many possible student solutions. When students ask iSnap for help, it highlights possible errors in their code and suggests next steps they can make. Hints are presented visually, right alongside students' code, making them easy to interpret and implement. iSnap's hints are generated automatically from student data, so no teacher input is required to create them, making iSnap appropriate for both new and experienced instructors. 
The demonstration will showcase iSnap's hints on a variety of assignments and explain how the algorithm is working behind the scenes to generate data-driven hints. It will also include an overview of the results from two years of research with iSnap on how students seek and use programming help. A key objective of this demonstration is to solicit feedback from SIGCSE attendees on the design of iSnap as we work to make the system ready for deployment in classrooms. More information on iSnap can be found at http://go.ncsu.edu/isnap.}, journal={SIGCSE'18: PROCEEDINGS OF THE 49TH ACM TECHNICAL SYMPOSIUM ON COMPUTER SCIENCE EDUCATION}, publisher={Association for Computing Machinery}, author={Price, Thomas W.}, year={2018}, pages={1113–1113} } @inproceedings{price_zhi_barnes_2017, title={Evaluation of a Data-driven Feedback Algorithm for Open-ended Programming}, booktitle={Proceedings of the International Conference on Educational Data Mining}, author={Price, Thomas W. and Zhi, Rui and Barnes, Tiffany}, year={2017} } @article{price_liu_catete_barnes_2017, place={New York, NY, USA}, title={Factors Influencing Students' Help-Seeking Behavior while Programming with Human and Computer Tutors}, url={https://doi.org/10.1145/3105726.3106179}, DOI={10.1145/3105726.3106179}, abstractNote={When novice students encounter difficulty when learning to program, some can seek help from instructors or teaching assistants. This one-on-one tutoring is highly effective at fostering learning, but busy instructors and large class sizes can make expert help a scarce resource. Increasingly, programming environments attempt to imitate this human support by providing students with hints and feedback. In order to design effective, computer-based help, it is important to understand how and why students seek and avoid help when programming, and how this process differs when the help is provided by a human or a computer. 
We explore these questions through a qualitative analysis of 15 students' interviews, in which they reflect on solving two programming problems with human and computer help. We discuss implications for help design and present hypotheses on students' help-seeking behavior.}, journal={PROCEEDINGS OF THE 2017 ACM CONFERENCE ON INTERNATIONAL COMPUTING EDUCATION RESEARCH (ICER 17)}, publisher={Association for Computing Machinery}, author={Price, Thomas W. and Liu, Zhongxiu and Catete, Veronica and Barnes, Tiffany}, year={2017}, pages={127–135} } @article{price_zhi_barnes_2017a, title={Hint Generation Under Uncertainty: The Effect of Hint Quality on Help-Seeking Behavior}, volume={10331}, ISBN={978-3-319-61424-3}, ISSN={1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-61425-0_26}, DOI={10.1007/978-3-319-61425-0_26}, abstractNote={Much research in Intelligent Tutoring Systems has explored how to provide on-demand hints, how they should be used, and what effect they have on student learning and performance. Most of this work relies on hints created by experts and assumes that all help provided by the tutor is correct and of high quality. However, hints may not all be of equal value, especially in open-ended problem solving domains, where context is important. This work argues that hint quality, especially when using data-driven hint generation techniques, is inherently uncertain. We investigate the impact of hint quality on students’ help-seeking behavior in an open-ended programming environment with on-demand hints. Our results suggest that the quality of the first few hints on an assignment is positively associated with future hint use on the same assignment. Initial hint quality also correlates with possible help abuse. These results have important implications for hint design and generation.}, journal={ARTIFICIAL INTELLIGENCE IN EDUCATION, AIED 2017}, author={Price, Thomas W. 
and Zhi, Rui and Barnes, Tiffany}, year={2017}, pages={311–322} } @inproceedings{price_barnes_2017, title={Position paper: Block-based programming should offer intelligent support for learners}, DOI={10.1109/blocks.2017.8120414}, abstractNote={Block-based programming environments make learning to program easier by allowing learners to focus on concepts rather than syntax. However, these environments offer little support when learners encounter difficulty with programming concepts themselves, especially in the absence of instructors. Textual programming environments increasingly use AI and data mining to provide intelligent, adaptive support for students, similar to human tutoring, which has been shown to improve performance and learning outcomes. In this position paper, we argue that block-based programming environments should also include these features. This paper gives an overview of promising research in intelligent support for programming and highlights the challenges and opportunities for applying this work to block-based programming.}, booktitle={2017 IEEE Blocks and Beyond Workshop (B&B)}, author={Price, Thomas and Barnes, T.}, year={2017}, pages={65–68} } @inproceedings{price_brown_piech_rivers_2017, place={New York, NY, USA}, title={Sharing and Using Programming Log Data (Abstract Only)}, url={https://doi.org/10.1145/3017680.3022366}, DOI={10.1145/3017680.3022366}, abstractNote={As more programming environments add logging features and programming data becomes more accessible, it is important to have a conversation about how we share and use this data. Uses of programming log data range from big-picture analyses to dashboards for instant teacher feedback, to intelligent, data-driven learning environments. The goal of this BOF is to talk about what data is important to collect, where it can be gathered and shared, what general data formats make sense, how to handle privacy and anonymization, and what ultimately we want to see the data used for. 
The BOF welcomes both producers of programming log data and current or potential consumers, interested in how it could be applied in their classrooms or research. One hopeful outcome of this BOF is a commitment to documenting and sharing existing programming data in an accessible location and format.}, booktitle={Proceedings of the 2017 ACM SIGCSE Technical Symposium on Computer Science Education}, publisher={Association for Computing Machinery}, author={Price, Thomas W. and Brown, Neil C.C. and Piech, Chris and Rivers, Kelly}, year={2017}, pages={729} } @inproceedings{price_dong_lipovac_2017, place={New York}, title={iSnap: Towards Intelligent Tutoring in Novice Programming Environments.}, url={http://dx.doi.org/10.1145/3017680.3017762}, DOI={10.1145/3017680.3017762}, abstractNote={Programming environments intentionally designed to support novices have become increasingly popular, and growing research supports their efficacy. While these environments offer features to engage students and reduce the burden of syntax errors, they currently offer little support to students who get stuck and need expert assistance. Intelligent Tutoring Systems (ITSs) are computer systems designed to play this role, helping and guiding students to achieve better learning outcomes. We present iSnap, an extension to the Snap programming environment which adds some key features of ITSs, including detailed logging and automatically generated hints. We share results from a pilot study of iSnap, indicating that students are generally willing to use hints and that hints can create positive outcomes. 
We also highlight some key challenges encountered in the pilot study and discuss their implications for future work.}, booktitle={SIGCSE '17: Proceedings of the 2017 ACM SIGCSE Technical Symposium on Computer Science Education}, publisher={Association for Computing Machinery}, author={Price, Thomas and Dong, Yihuan and Lipovac, Dragan}, year={2017}, month=mar, pages={483--488} } @inproceedings{duvall_eagle_narcisse_price_2016, place={New York, NY, USA}, title={Clashroom: A Game to Enhance the Classroom Experience (Abstract Only)}, url={https://doi.org/10.1145/2839509.2850556}, DOI={10.1145/2839509.2850556}, abstractNote={Clashroom is a web-based fantasy game that was created to enhance the motivation and learning of a traditional classroom. It was created to address several drawbacks of other educational games by being broadly applicable to any course, requiring no class time, and allowing for optional rather than mandatory game play. Each player in the game is a dragon trainer, hoping to train a champion pet dragon. To gain skills and magical items for the dragon to use, players complete educational quests set up by the teacher. Players compete in weekly tournaments, where the quest rewards are vital to success. Because the gameplay and the learning material are decoupled, the game can be used for any subject at any level. After initial testing, students report that the game is fun and that the intrinsic rewards of a fun game are motivation enough for extra learning.}, booktitle={Proceedings of the 47th ACM Technical Symposium on Computing Science Education}, publisher={Association for Computing Machinery}, author={Duvall, Shannon and Eagle, Daniel R. and Narcisse, Riese P. 
and Price, Thomas W.}, year={2016}, pages={692} } @article{price_brown_lipovac_barnes_kolling_2016, place={New York, NY, USA}, title={Evaluation of a Frame-based Programming Editor}, url={https://doi.org/10.1145/2960310.2960319}, DOI={10.1145/2960310.2960319}, abstractNote={Frame-based editing is a novel way to edit programs, which claims to combine the benefits of textual and block-based programming. It combines structured `frames' of preformatted code, designed to reduce the burden of syntax, with `slots' that allow for efficient textual entry of expressions. We present an empirical evaluation of Stride, a frame-based language used in the Greenfoot IDE. We compare two groups of middle school students who worked on a short programming activity in Greenfoot, one using the original Java editor, and one using the Stride editor. We found that the two groups reported similarly low levels of frustration and high levels of satisfaction, but students using Stride progressed through the activity more quickly and completed more objectives. The Stride group also spent significantly less time making purely syntactic edits to their code and significantly less time with non-compilable code.}, journal={Proceedings of the 2016 ACM Conference on International Computing Education Research (ICER'16)}, publisher={Association for Computing Machinery}, author={Price, Thomas W. and Brown, Neil C. C. 
and Lipovac, Dragan and Barnes, Tiffany and K{\"o}lling, Michael}, year={2016}, pages={33--42} } @inproceedings{price_dong_barnes_2016, title={Generating data-driven hints for open-ended programming}, booktitle={Proceedings of the 9th International Conference on Educational Data Mining, International Educational Data Mining Society}, author={Price, Thomas W and Dong, Yihuan and Barnes, Tiffany}, year={2016}, pages={191--198} } @inproceedings{price_catete_albert_barnes_garcia_2016, place={New York, NY, USA}, title={Lessons Learned from ``{BJC}'' {CS} Principles Professional Development}, ISBN={9781450336857}, url={http://dx.doi.org/10.1145/2839509.2844625}, DOI={10.1145/2839509.2844625}, abstractNote={Computer Science Principles (CSP) will become an Advanced Placement course during the 2016-17 school year, and there is an immediate need to train new teachers to be leaders in computing classrooms. From 2012-2015, the Beauty and Joy of Computing team offered professional development (PD) to 133 teachers, resulting in 89 BJC CSP courses taught in high schools. Our data show that the PD improved teachers' confidence in our four core content categories and met its primary goal of training teachers in equitable, inquiry-based instruction. In this paper, we present the evolution of the BJC PD, its challenges and lessons that we learned while continually adapting to teachers' needs and contexts.}, booktitle={Proceedings of the 47th ACM Technical Symposium on Computing Science Education - SIGCSE '16}, publisher={ACM Press}, author={Price, Thomas W. and Cat{\'e}t{\'e}, Veronica and Albert, Jennifer and Barnes, Tiffany and Garcia, Daniel D.}, year={2016}, pages={467--472} } @article{cardona-rivera_price_winer_young_2016, title={Question Answering in the Context of Stories Generated by Computers}, volume={4}, journal={Advances in Cognitive Systems}, author={Cardona-Rivera, R. and Price, T. W. and Winer, D. 
and Young, R. M.}, year={2016}, month=jun, pages={227--245} } @inproceedings{price_barnes_2015a, title={An Exploration of Data-Driven Hint Generation in an Open-Ended Programming Problem}, booktitle={International Workshop on Graph-Based Educational Data Mining}, author={Price, Thomas W and Barnes, Tiffany}, year={2015} } @inproceedings{price_lynch_barnes_chi_2015, title={An Improved Data-Driven Hint Selection Algorithm for Probability Tutors}, booktitle={The 8th International Conference on Education Data Mining}, author={Price, Thomas W and Lynch, Collin F and Barnes, Tiffany and Chi, Min}, year={2015} } @inproceedings{price_albert_catete_barnes_2015, title={{BJC} in action: Comparison of student perceptions of a computer science principles course}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84959906858&partnerID=MN8TOARS}, DOI={10.1109/respect.2015.7296506}, abstractNote={The Beauty and Joy of Computing (BJC) is a curriculum for the new AP Computer Science Principles course. Over the past 2 years, we have collected post-course surveys from 399 students participating in the BJC course. This paper investigates how the responses of females and students from underrepresented racial minority groups (URMs) differed from those of their counterparts. We found that female students had taken fewer CS courses prior to BJC but that students from URMs had taken more prior CS courses. Both groups were nearly equally likely to recommend the course to a friend, with about 80\% recommending. We found no evidence to suggest that female students showed more or less interest in specific CS topics, such as learning how computing has changed the world or making mobile apps/games. Despite having taken more CS courses prior to BJC, we found that students from URMs were overall less likely to intend to take additional CS courses. 
Overall, our findings are fairly consistent with the literature, and suggest that BJC makes some progress towards broadening participation in computing.}, booktitle={2015 Research in Equity and Sustained Participation in Engineering, Computing, and Technology (RESPECT)}, publisher={IEEE}, author={Price, Thomas W. and Albert, Jennifer and Catete, Veronica and Barnes, Tiffany}, year={2015}, pages={1--4} } @inproceedings{price_barnes_2015b, place={New York, NY, USA}, title={Comparing Textual and Block Interfaces in a Novice Programming Environment}, ISBN={9781450336307}, url={http://dx.doi.org/10.1145/2787622.2787712}, DOI={10.1145/2787622.2787712}, abstractNote={Visual, block-based programming environments present an alternative way of teaching programming to novices and have proven successful in classrooms and informal learning settings. However, few studies have been able to attribute this success to specific features of the environment. In this study, we isolate the most fundamental feature of these environments, the block interface, and compare it directly to its textual counterpart. We present analysis from a study of two groups of novice programmers, one assigned to each interface, as they completed a simple programming activity. We found that while the interface did not seem to affect users' attitudes or perceived difficulty, students using the block interface spent less time off task and completed more of the activity's goals in less time.}, booktitle={Proceedings of the eleventh annual International Conference on International Computing Education Research - ICER '15}, publisher={ACM Press}, author={Price, Thomas W. 
and Barnes, Tiffany}, year={2015}, pages={91--99} } @article{price_barnes_2015c, title={Creating Data-Driven Feedback for Novices in Goal-Driven Programming Projects}, volume={9112}, ISBN={978-3-319-19772-2}, ISSN={1611-3349}, DOI={10.1007/978-3-319-19773-9_132}, abstractNote={Programming environments that afford the creation of media-rich, goal-driven projects, such as games, stories and simulations, are effective at engaging novice users. However, the open-ended nature of these projects makes it difficult to generate ITS-style guidance for students in need of help. In domains where students produce similar, overlapping solutions, data-driven techniques can leverage the work of previous students to provide feedback. However, our data suggest that solutions to these projects have insufficient overlap to apply current data-driven methods. We propose a novel subtree-based state matching technique that will find partially overlapping solutions to generate feedback across diverse student programs. We will build a system to generate this feedback, test the technique on historical data, and evaluate the generated feedback in a study of goal-driven programming projects. If successful, this approach will provide insight into how to leverage structural similarities across complex, creative problem solutions to provide data-driven feedback for intelligent tutoring.}, journal={Artificial Intelligence in Education, AIED 2015}, author={Price, Thomas W. 
and Barnes, Tiffany}, year={2015}, pages={856--859} } @inproceedings{price_barnes_2015d, title={Creating data-driven feedback for novices in goal-driven programming projects}, booktitle={International Conference on Artificial Intelligence in Education}, author={Price, Thomas W and Barnes, Tiffany}, year={2015}, pages={856--859} } @inproceedings{price_2015, place={New York, NY, USA}, title={Integrating Intelligent Feedback into Block Programming Environments}, url={https://doi.org/10.1145/2787622.2787748}, DOI={10.1145/2787622.2787748}, abstractNote={Block Programming Environments (BPEs) are becoming popular tools for introducing novices to programming, due in part to their connection with students' interests in games, apps and stories. This has led to increasing use of BPEs outside of classroom settings, where knowledgeable instructors are not always available. Intelligent Tutoring Systems (ITSs) can keep students on track in the absence of instructors by providing hints and warnings to students in need of help. Further, data-driven techniques can generate this feedback automatically from previous students' attempts at a problem. 
This research focuses on the integration of this data-driven, ITS-style feedback into a modern BPE and the evaluation of its impact.}, booktitle={Proceedings of the Eleventh Annual International Conference on International Computing Education Research}, publisher={Association for Computing Machinery}, author={Price, Thomas W.}, year={2015}, pages={275--276} } @inproceedings{zhou_price_lynch_barnes_chi_2015, title={The Impact of Granularity on Worked Examples and Problem Solving}, booktitle={Annual Meeting of the Cognitive Science Society (CogSci)}, author={Zhou, Guojing and Price, Thomas W and Lynch, Collin and Barnes, Tiffany and Chi, Min}, year={2015} } @inproceedings{lynch_price_chi_barnes_2015, title={Using the Hint Factory to Compare Model-based Tutoring Systems}, booktitle={International Workshop on Graph-Based Educational Data Mining}, author={Lynch, Collin and Price, Thomas W and Chi, Min and Barnes, Tiffany}, year={2015} } @inproceedings{price_young_2014, title={Towards an Extended Declarative Representation for Camera Planning}, booktitle={Workshops at the Twenty-Eighth AAAI Conference on Artificial Intelligence}, author={Price, Thomas William and Young, R Michael}, year={2014} } @article{zhou_lynch_price_barnes_chi, title={The Impact of Granularity on the Effectiveness of Students' Pedagogical Decision}, author={Zhou, Guojing and Lynch, Collin F and Price, Thomas W and Barnes, Tiffany and Chi, Min} }