@inproceedings{skripchuk_bennett_zheng_li_price_2023,
  author       = {Skripchuk, James and Bennett, Neil and Zheng, Jeffrey and Li, Eric and Price, Thomas},
  title        = {Analysis of Novices' Web-Based Help-Seeking Behavior While Programming},
  booktitle    = {Proceedings of the 54th {ACM} Technical Symposium on Computer Science Education ({SIGCSE} 2023)},
  volume       = {1},
  publisher    = {ACM},
  year         = {2023},
  pages        = {945--951},
  doi          = {10.1145/3545945.3569852},
  abstractNote = {Web-based help-seeking -- finding and utilizing websites to solve a problem -- is a critical skill during programming in both professional and academic settings. However, little work has explored how students, especially novices, engage in web-based help-seeking during programming, or what strategies they use and barriers they face. This study begins to investigate these questions through analysis of students' web-search behaviors during programming. We collected think-aloud, screen recording, and log data as students completed a challenging programming task. Students were encouraged to use the web for help when needed, as if in an internship. We then qualitatively analyzed the data to address three research questions: 1) What events motivate students to use web search? 2) What strategies do students employ to search for, select, and learn from web pages? 3) What barriers do students face in web search, and when do they arise? Our results suggest that that novices use a variety of web-search strategies -- some quite unexpected -- with varying degrees of success, suggesting that web search can be a challenging skill for novice programmers. We discuss how these results inform future research and pedagogy focused on how to support students in effective web search.},
}

@inproceedings{skripchuk_shi_price_2022,
  author       = {Skripchuk, James and Shi, Yang and Price, Thomas},
  title        = {Identifying Common Errors in Open-Ended Machine Learning Projects},
  booktitle    = {Proceedings of the 53rd {ACM} Technical Symposium on Computer Science Education ({SIGCSE} 2022)},
  volume       = {1},
  publisher    = {ACM},
  year         = {2022},
  pages        = {216--222},
  doi          = {10.1145/3478431.3499397},
  abstractNote = {Machine learning (ML) is one of the fastest growing subfields in Computer Science, and it is important to identify ways to improve ML education. A key way to do so is by understanding the common errors that students make when writing ML programs, so they can be addressed. Prior work investigating ML errors has focused on an instructor perspective, but has not looked at student programming artifacts, such as projects and code submissions to understand how these errors occur and which are most common. To address this, we qualitatively coded over 2,500 cells of code from 19 final team projects (63 students) in an upper-division machine learning course. By isolating and codifying common errors and misconceptions across projects, we can identify what ML errors students struggle with. In our results, we found that library usage, hyperparameter tuning, and misusing test data were among the most common errors, and we give examples of how and when they occur. We then provide suggestions on why these misconceptions may occur, and how instructors and software designers can possibly mitigate these errors.},
}