@inproceedings{zhi_price_marwan_milliken_barnes_chi_2019,
  address      = {New York, NY, USA},
  title        = {Exploring the Impact of Worked Examples in a Novice Programming Environment},
  ISBN         = {9781450358903},
  DOI          = {10.1145/3287324.3287385},
  abstractNote = {Research in a variety of domains has shown that viewing worked examples (WEs) can be a more efficient way to learn than solving equivalent problems. We designed a Peer Code Helper system to display WEs, along with scaffolded self-explanation prompts, in a block-based, novice programming environment called {Snap!}. We evaluated our system during a high school summer camp with 22 students. Participants completed three programming problems with access to WEs on either the first or second problem. We found that WEs did not significantly impact students' learning, but may have impacted students' intrinsic cognitive load, suggesting that our WEs with scaffolded prompts may be an inherently different learning task. Our results show that WEs saved students time on initial tasks compared to writing code, but some of the time saved was lost in subsequent programming tasks. Overall, students with WEs completed more tasks within a fixed time period, but not significantly more. WEs may improve students' learning efficiency when programming, but these effects are nuanced and merit further study.},
  booktitle    = {Proceedings of the 50th {ACM} Technical Symposium on Computer Science Education - {SIGCSE} '19},
  publisher    = {ACM Press},
  author       = {Zhi, Rui and Price, Thomas W. and Marwan, Samiha and Milliken, Alexandra and Barnes, Tiffany and Chi, Min},
  year         = {2019},
  pages        = {98--104},
}

@inproceedings{zhi_2018_exploring,
  title         = {Exploring Data-Driven Worked Examples for Block-Based Programming},
  author        = {Zhi, Rui},
  booktitle     = {Proceedings of the 2018 {ACM} Conference on International Computing Education Research ({ICER} '18)},
  DOI           = {10.1145/3230977.3231018},
  abstractNote  = {Empirical studies show that worked examples (WEs) are effective in improving students' learning efficiency in a variety of domains. I aim to create and evaluate data-driven intelligent WEs for novices using the Snap! block-based programming environment. First, I will design and evaluate WEs with self-explanation prompts in Snap!. Then I will develop a data-driven method to generate WEs based on student solutions and compare it with manually-curated WEs.},
  year          = {2018},
  month         = aug,
  internal-note = {Original citation key contained spaces and held the title; key, title, author, and booktitle reconstructed here -- verify against DOI 10.1145/3230977.3231018.},
}

@inproceedings{zhi_lytle_price_2018,
  address      = {New York, NY, USA},
  title        = {Exploring Instructional Support Design in an Educational Game for {K-12} Computing Education},
  DOI          = {10.1145/3159450.3159519},
  abstractNote = {Instructional supports (Supports) help students learn more effectively in intelligent tutoring systems and gamified educational environments. However, the implementation and success of Supports vary by environment. We explored Support design in an educational programming game, BOTS, implementing three different strategies: instructional text (Text), worked examples (Examples) and buggy code (Bugs). These strategies are adapted from promising Supports in other domains and motivated by established educational theory. We evaluated our Supports through a pilot study with middle school students. Our results suggest Bugs may be a promising strategy, as demonstrated by the lower completion time and solution code length in assessment puzzles. We end reflecting on our design decisions providing recommendations for future iterations. Our motivations, design process, and study's results provide insight into the design of Supports for programming games.},
  booktitle    = {Proceedings of the 49th {ACM} Technical Symposium on Computer Science Education},
  publisher    = {Association for Computing Machinery},
  author       = {Zhi, Rui and Lytle, Nicholas and Price, Thomas W.},
  year         = {2018},
  month        = feb,
  pages        = {747--752},
}

@inbook{price_zhi_dong_lytle_barnes_2018,
  title        = {The Impact of Data Quantity and Source on the Quality of Data-Driven Hints for Programming},
  ISBN         = {9783319938424, 9783319938431},
  ISSN         = {0302-9743, 1611-3349},
  DOI          = {10.1007/978-3-319-93843-1_35},
  abstractNote = {In the domain of programming, intelligent tutoring systems increasingly employ data-driven methods to automate hint generation. Evaluations of these systems have largely focused on whether they can reliably provide hints for most students, and how much data is needed to do so, rather than how useful the resulting hints are to students. We present a method for evaluating the quality of data-driven hints and how their quality is impacted by the data used to generate them. Using two datasets, we investigate how the quantity of data and the source of data (whether it comes from students or experts) impact one hint generation algorithm. We find that with student training data, hint quality stops improving after 15–20 training solutions and can decrease with additional data. We also find that student data outperforms a single expert solution but that a comprehensive set of expert solutions generally performs best.},
  booktitle    = {Lecture Notes in Computer Science},
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer International Publishing},
  author       = {Price, Thomas W. and Zhi, Rui and Dong, Yihuan and Lytle, Nicholas and Barnes, Tiffany},
  year         = {2018},
  pages        = {476--490},
}

@article{price_zhi_barnes_2017,
  title        = {Hint Generation Under Uncertainty: The Effect of Hint Quality on Help-Seeking Behavior},
  volume       = {10331},
  ISBN         = {978-3-319-61424-3},
  ISSN         = {1611-3349},
  DOI          = {10.1007/978-3-319-61425-0_26},
  abstractNote = {Much research in Intelligent Tutoring Systems has explored how to provide on-demand hints, how they should be used, and what effect they have on student learning and performance. Most of this work relies on hints created by experts and assumes that all help provided by the tutor is correct and of high quality. However, hints may not all be of equal value, especially in open-ended problem solving domains, where context is important. This work argues that hint quality, especially when using data-driven hint generation techniques, is inherently uncertain. We investigate the impact of hint quality on students’ help-seeking behavior in an open-ended programming environment with on-demand hints. Our results suggest that the quality of the first few hints on an assignment is positively associated with future hint use on the same assignment. Initial hint quality also correlates with possible help abuse. These results have important implications for hint design and generation.},
  journal      = {Artificial Intelligence in Education, {AIED} 2017},
  author       = {Price, Thomas W. and Zhi, Rui and Barnes, Tiffany},
  year         = {2017},
  pages        = {311--322},
}

@article{liu_zhi_hicks_barnes_2017,
  title        = {Understanding problem solving behavior of 6--8 graders in a debugging game},
  volume       = {27},
  ISSN         = {1744-5175},
  DOI          = {10.1080/08993408.2017.1308651},
  abstractNote = {Abstract Debugging is an over-looked component in K-12 computational thinking education. Few K-12 programming environments are designed to teach debugging, and most debugging research were conducted on college-aged students. In this paper, we presented debugging exercises to 6th–8th grade students and analyzed their problem solving behaviors in a programming game – BOTS. Apart from the perspective of prior literature, we identified student behaviors in relation to problem solving stages, and correlated these behaviors with student prior programming experience and performance. We found that in our programming game, debugging required deeper understanding than writing new codes. We also found that problem solving behaviors were significantly correlated with students’ self-explanation quality, number of code edits, and prior programming experience. This study increased our understanding of younger students’ problem solving behavior, and provided actionable suggestions to the future design of debugging exercises in BOTS and similar environments.},
  number       = {1},
  journal      = {Computer Science Education},
  author       = {Liu, Zhongxiu and Zhi, Rui and Hicks, Andrew and Barnes, Tiffany},
  year         = {2017},
  pages        = {1--29},
}