@article{ford_behroozi_serebrenik_parnin_2019, title={Beyond the Code Itself: How Programmers Really Look at Pull Requests}, DOI={10.1109/ICSE-SEIS.2019.00014}, abstractNote={Developers in open source projects must make decisions on contributions from other community members, such as whether or not to accept a pull request. However, secondary factors, beyond the code itself, can influence those decisions. For example, signals from GitHub profiles, such as the number of followers, activity, names, or gender, can also be considered when developers make decisions. In this paper, we examine how developers use these signals (or not) when making decisions about code contributions. To answer this question, we evaluated how signals related to perceived gender identity and code quality influenced decisions on accepting pull requests. Unlike previous work, we analyze this decision process with data collected from an eye-tracker. We analyzed differences between the signals developers said were important for themselves and the signals they actually used to make decisions about others. We found that after the code snippet (x̄=57%), programmers spent the next-largest share of their fixation time on supplemental technical signals (x̄=32%), such as previous contributions and popular repositories. Diverging from what participants reported about themselves, we also found that programmers fixated on social signals more than they recalled.}, journal={2019 IEEE/ACM 41st International Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS 2019)}, author={Ford, Denae and Behroozi, Mahnaz and Serebrenik, Alexander and Parnin, Chris}, year={2019}, pages={51–60} }

@article{sholler_steinmacher_ford_averick_hoye_wilson_2019, title={Ten Simple Rules for Helping Newcomers Become Contributors to Open Projects}, volume={15}, ISSN={["1553-7358"]}, DOI={10.1371/journal.pcbi.1007296}, abstractNote={To survive and thrive, a community must attract new members, retain them, and help them be productive [1]. As openness becomes the norm in research, software development, and education, knowing how to do this has become an essential skill for principal investigators and community managers alike. A growing body of knowledge in sociology, anthropology, education, and software engineering can guide decisions about how to facilitate this. What exactly do we mean by "community"? In the case of open source and open science, the most usual meaning is a "community of practice." As defined by Lave and Wenger [2, 3], groups as diverse as knitting circles, oncology researchers, and web designers share three key characteristics: Participants have a common product or purpose that they work on or toward. They are mutually engaged, i.e., they assist and mentor each other. They develop shared resources and domain knowledge. Brown [4] specializes this to define a "community of effort" as …a community formed in pursuit of a common goal. The goal can be definite or indefinite in time, and may not be clearly defined, but it is something that (generally speaking) the community is aligned on. People working to preserve coral reefs in the face of global climate change are an example of such a community. No central organization coordinates their work, but the scientists who study coral reefs, the environmentalists who work to protect them, and the citizens who support them financially and politically are aware of each other’s efforts, collaborate in ad hoc ways, and are conscious of contributing toward a shared purpose.
Open-source software projects are also communities of effort. For example, the Mozilla Firefox [5] community includes a mix of paid professionals, highly involved volunteers, and occasional contributors who not only create software, documentation, and tutorials but also organize events, answer questions in online forums, mentor newcomers, and advocate for open standards. Every community of effort has unique features, but they have enough in common to profit from one another’s experience. The 10 rules laid out below are based on studies of such communities and on the authors’ experience as members, leaders, and observers. Our focus is on small and medium-sized projects, i.e., ones that have a handful to a few hundred participants and are a few months to a few years old but may not (yet) have any formal legal standing, such as incorporation as a nonprofit.}, number={9}, journal={PLOS Computational Biology}, author={Sholler, Dan and Steinmacher, Igor and Ford, Denae and Averick, Mara and Hoye, Mike and Wilson, Greg}, year={2019}, month={Sep} }

@article{barik_ford_murphy-hill_parnin_2018, title={How Should Compilers Explain Problems to Developers?}, DOI={10.1145/3236024.3236040}, abstractNote={Compilers primarily give feedback about problems to developers through the use of error messages. Unfortunately, developers routinely find these messages to be confusing and unhelpful. In this paper, we postulate that because error messages present poor explanations, theories of explanation, such as Toulmin's model of argument, can be applied to improve their quality. To understand how compilers should present explanations to developers, we conducted a comparative evaluation with 68 professional software developers and an empirical study of compiler error messages found in Stack Overflow questions across seven different programming languages. Our findings suggest that, given a pair of error messages, developers significantly prefer the error message that employs proper argument structure over a deficient argument structure when neither offers a resolution, but will accept a deficient argument structure if it provides a resolution to the problem. Human-authored explanations on Stack Overflow converge to one of three argument structures: those that provide a resolution to the error, simple arguments, and extended arguments that provide additional evidence for the problem. Finally, we contribute three practical design principles to inform the design and evaluation of compiler error messages.}, journal={ESEC/FSE '18: Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering}, author={Barik, Titus and Ford, Denae and Murphy-Hill, Emerson and Parnin, Chris}, year={2018}, pages={633–643} }

@article{ford_barik_rand-pickett_parnin_2017, title={The Tech-Talk Balance: What Technical Interviewers Expect from Technical Candidates}, DOI={10.1109/chase.2017.8}, abstractNote={Software engineer job candidates are not succeeding at technical interviews. Although candidates are able to answer technical questions, there is a mismatch between what candidates think interviewers assess and the criteria actually used in practice. This mismatch in expectations can cost candidates a job opportunity. To determine what criteria interviewers value, we conducted mock technical interviews with software engineer candidates at a university and collected evaluations from interviewers. We analyzed 70 interview evaluations from 9 software companies.
Using a grounded theory approach, we compared interviewer interpretations of criteria, including performing a problem-solving walkthrough, applying previous experience to problem solving, and the ability to engage in conversation beyond writing code. From these findings, we describe what candidates can expect to be evaluated on during technical interviews, expectations that can vary significantly across companies.}, journal={2017 IEEE/ACM 10th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE 2017)}, author={Ford, Denae and Barik, Titus and Rand-Pickett, Leslie and Parnin, Chris}, year={2017}, pages={43–48} }

@inproceedings{ford_2016, title={Recognizing Gender Differences in Stack Overflow Usage: Applying the Bechdel Test}, booktitle={2016 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC)}, author={Ford, Denae}, year={2016}, pages={264–265} }