@article{valone_meade_2024, title={Can Forced-Choice Response Format Reduce Faking of Socially Aversive Personality Traits?}, volume={3}, ISSN={["1532-7752"]}, url={https://doi.org/10.1080/00223891.2024.2326893}, DOI={10.1080/00223891.2024.2326893}, abstractNote={Self-report assessments are the standard for personality measurement, but motivated respondents are able to manipulate or fake their responses to typical Likert scale self-report. Although progress has been made in research seeking to reduce faking, most of it has focused on normative personality traits such as those measured by the five factor model. The measurement of socially aversive personality (e.g., the Dark Triad) is less well-researched. The negative aspects of socially aversive traits increase the opportunity and motivation of respondents to fake typical single-stimulus self-report assessments underscoring the need for faking resistant response formats. A possible way to reduce faking that has been explored in basic personality research is the use of the forced-choice response format. This study applied this method to socially aversive traits and illustrated best practices to create new multidimensional forced-choice and single-stimulus measures of socially aversive personality traits. Results indicated that participants were able to artificially alter their scores when asked to respond like an ideal job applicant, and counter to expectations, the forced-choice format did not decrease faking. Our results indicate that even when best practices are followed, forced-choice format is not a panacea for respondent faking.}, journal={JOURNAL OF PERSONALITY ASSESSMENT}, author={Valone, Amanda L. Y. and Meade, Adam W.}, year={2024}, month={Mar} } @misc{ward_meade_2023, title={Dealing with Careless Responding in Survey Data: Prevention, Identification, and Recommended Best Practices}, volume={74}, ISSN={["1545-2085"]}, DOI={10.1146/annurev-psych-040422-045007}, abstractNote={ Surveys administered online have several benefits, but they are particularly prone to careless responding, which occurs when respondents fail to read item content or give sufficient attention, resulting in raw data that may not accurately reflect respondents’ true levels of the constructs being measured. Careless responding can lead to various psychometric issues, potentially impacting any area of psychology that uses self-reported surveys and assessments. This review synthesizes the careless responding literature to provide a comprehensive understanding of careless responding and ways to prevent, identify, report, and clean careless responding from data sets. Further, we include recommendations for different levels of screening for careless responses. Finally, we highlight some of the most promising areas for future work on careless responding. }, journal={ANNUAL REVIEW OF PSYCHOLOGY}, author={Ward, M. K. and Meade, Adam W.}, year={2023}, pages={577–596} } @article{robie_meade_risavy_rasheed_2022, title={Effects of Response Option Order on Likert-Type Psychometric Properties and Reactions}, volume={1}, ISSN={["1552-3888"]}, DOI={10.1177/00131644211069406}, abstractNote={ The effects of different response option orders on survey responses have been studied extensively. 
The typical research design involves examining the differences in response characteristics between conditions with the same item stems and response option orders that differ in valence—either incrementally arranged (e.g., strongly disagree to strongly agree) or decrementally arranged (e.g., strongly agree to strongly disagree). The present study added two additional experimental conditions—randomly incremental or decremental and completely randomized. All items were presented in an item-by-item format. We also extended previous studies by including an examination of response option order effects on: careless responding, correlations between focal predictors and criteria, and participant reactions, all the while controlling for false discovery rate and focusing on the size of effects. In a sample of 1,198 university students, we found little to no response option order effects on a recognized personality assessment vis-à-vis measurement equivalence, scale mean differences, item-level distributions, or participant reactions. However, the completely randomized response option order condition differed on several careless responding indices suggesting avenues for future research. }, journal={EDUCATIONAL AND PSYCHOLOGICAL MEASUREMENT}, author={Robie, Chet and Meade, Adam W. and Risavy, Stephen D. and Rasheed, Sabah}, year={2022}, month={Jan} } @article{cartwright_desmarais_grimm_meade_van_dorn_2020, title={Psychometric Properties of the MacArthur Community Violence Screening Instrument}, volume={19}, ISSN={1499-9013 1932-9903}, url={http://dx.doi.org/10.1080/14999013.2020.1718246}, DOI={10.1080/14999013.2020.1718246}, abstractNote={This study examined the psychometric properties of the MacArthur Community Violence Screening Instrument (MCVSI) in a heterogeneous and integrated sample of adults with mental illness (n = 4,480), including its factor structure, model fit, and psychometric properties as a function of patient sex, race, and primary diagnosis. Factor structure results indicate a unidimensional construct. Item-level analyses revealed that the MCVSI’s difficulty, including the easiest and most difficult items to endorse, sometimes differed across sex, race, and primary diagnosis. However, differential item functioning was minimal across these patient characteristics, with only those without a primary diagnosis of schizophrenia indicating an increased likelihood of having “hit anyone with a fist, object or beaten anyone” compared to those with a primary diagnosis of schizophrenia. Overall, these findings support using the MCVSI as a measure of violence in studies of U.S. adults with mental illness. They also highlight the importance of using more methodologically rigorous approaches to measuring violence, including the ongoing study of the MCVSI across samples and settings.}, number={3}, journal={International Journal of Forensic Mental Health}, publisher={Informa UK Limited}, author={Cartwright, Joel K. and Desmarais, Sarah L. and Grimm, Kevin J. and Meade, Adam W. and Van Dorn, Richard A.}, year={2020}, month={Feb}, pages={253–268} } @article{meade_pappalardo_braddy_fleenor_2020, title={Rapid Response Measurement: Development of a Faking-Resistant Assessment Method for Personality}, volume={23}, ISSN={["1552-7425"]}, DOI={10.1177/1094428118795295}, abstractNote={ While rating-scale-based assessments have been shown to be useful for measuring a variety of workplace-relevant constructs, assessment length and response distortion present practical limitations on their use.
We describe a new type of measurement method termed rapid response measurement (RRM) in which stimuli are presented on a computer screen one at a time in rapid succession and respondents are asked to quickly provide a dichotomous response. Two personality assessments using RRM were developed and reliability and validity evidence across four independent samples were evaluated. Both RRM assessments showed adequate reliability, even at short test lengths, with acceptable levels of convergent and discriminant validity with traditional survey-based measures. Analyses based on a within-participants design indicated that the RRM was significantly more difficult to fake when instructed than was a survey-based measure of personality. The second RRM was related to several aspects of job performance. While initial results show promise, further research is needed to establish the validity and viability of the RRM for organizational and psychological measurement. }, number={1}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W. and Pappalardo, Gabriel and Braddy, Phillip W. and Fleenor, John W.}, year={2020}, month={Jan}, pages={181–207} } @article{francavilla_meade_young_2019, title={Social Interaction and Internet-Based Surveys: Examining the Effects of Virtual and In-Person Proctors on Careless Response}, volume={68}, ISSN={["1464-0597"]}, DOI={10.1111/apps.12159}, abstractNote={A lack of human interaction and environmental control in Internet‐based data collection have been suggested as possible antecedents of careless responding, which occurs when participants respond to survey items without regard for item content. To address these possible antecedents, this study investigated whether survey proctoring deterred careless response in an undergraduate sample by reducing environmental distractions. The study randomly assigned respondents to one of three proctoring conditions: remote online un‐proctored, remote online virtually proctored, and in‐person classroom proctored. Data quality was examined via nine careless response indicators. Analyses indicated that proctor presence had effects on a small number of careless response indicators. Virtually proctored participants performed better than un‐proctored participants on one of nine careless response indicators, and in‐person proctored participants performed better on two careless response indicators compared to un‐proctored participants. Environmental distraction fully mediated the relationship between in‐person proctor presence and self‐reported diligence. Implications for survey administration are discussed.}, number={2}, journal={APPLIED PSYCHOLOGY-AN INTERNATIONAL REVIEW-PSYCHOLOGIE APPLIQUEE-REVUE INTERNATIONALE}, author={Francavilla, Nicole M. and Meade, Adam W. and Young, Amanda L.}, year={2019}, month={Apr}, pages={223–249} } @article{middleton_murphy-hill_green_meade_mayer_white_mcdonald_2018, title={Which Contributions Predict Whether Developers Are Accepted Into GitHub Teams}, ISSN={["2160-1852"]}, DOI={10.1145/3196398.3196429}, abstractNote={Open-source software (OSS) often evolves from volunteer contributions, so OSS development teams must cooperate with their communities to attract new developers. However, in view of the myriad ways that developers interact over platforms for OSS development, observers of these communities may have trouble discerning, and thus learning from, the successful patterns of developer-to-team interactions that lead to eventual team acceptance. 
In this work, we study project communities on GitHub to discover which forms of software contribution characterize developers who begin as development team outsiders and eventually join the team, in contrast to developers who remain team outsiders. From this, we identify and compare the forms of contribution, such as pull requests and several forms of discussion comments, that influence whether new developers join OSS teams, and we discuss the implications that these behavioral patterns have for the focus of designers and educators.}, journal={2018 IEEE/ACM 15TH INTERNATIONAL CONFERENCE ON MINING SOFTWARE REPOSITORIES (MSR)}, author={Middleton, Justin and Murphy-Hill, Emerson and Green, Demetrius and Meade, Adam and Mayer, Roger and White, David and McDonald, Steve}, year={2018}, pages={403–413} } @article{ward_meade_2018, title={Applying Social Psychology to Prevent Careless Responding during Online Surveys}, volume={67}, ISSN={["1464-0597"]}, DOI={10.1111/apps.12118}, abstractNote={A major threat to data quality in online surveys is careless responding (CR; Meade & Craig, 2012) or insufficient effort responding (e.g. Bowling, Huang, Bragg, Khazon, Liu, & Blackmore, 2016). In three studies, we use social psychological theories to develop and test three prevention strategies (Ward & Pond III, 2015) related to increasing respondent motivation to respond carefully. Study 1 presented control, scripted, or video‐recorded instructions designed to increase the social influence of survey administrators on survey participants. Participants in the control group were significantly more likely to admit to CR than the script and video groups. Compared with the control, scripted instructions decreased interest, and had no effect on objective indicators of CR. Study 2 found that instructions designed to induce cognitive dissonance increased logical consistency of responses and survey interest. Instructions to create a sense of hypocrisy increased accuracy on instructed‐response items. Study 3 showed that leveraging social exchange theory in survey instructions generally had no effect on CR. Similar results were found for both continuous and dichotomous scoring of indicators of CR across the three studies. Results demonstrate that facets of CR can be influenced via survey design. Future studies are needed to develop a more thorough understanding of best practices in survey design with respect to preventing CR.}, number={2}, journal={APPLIED PSYCHOLOGY-AN INTERNATIONAL REVIEW-PSYCHOLOGIE APPLIQUEE-REVUE INTERNATIONALE}, author={Ward, M. K. and Meade, Adam W.}, year={2018}, month={Apr}, pages={231–263} } @article{ward_meade_allred_pappalardo_stoughton_2017, title={Careless response and attrition as sources of bias in online survey assessments of personality traits and performance}, volume={76}, ISSN={["1873-7692"]}, DOI={10.1016/j.chb.2017.06.032}, abstractNote={The online survey is critical to organizations to efficiently collect large amounts of data about variables like performance. However, this data collection method comes with challenges. Careless responding (CR) and attrition in online surveys introduce measurement error, and can lead to several psychometric issues. Despite the common co-occurrence of CR and attrition, previous studies have looked at CR and attrition separately, which may miss their additive impact in online survey measurement. Additionally, research has focused on CR and attrition without attending to their relationships with other variables.
The aims of our studies are to build the nomological network around CR and attrition, and to determine the effects of the co-occurrence of CR and attrition. We investigated (a) the extent to which personality traits relate to both CR and attrition, and (b) the extent to which CR and attrition affect estimates of the relationships between personality traits and performance. We found a relatively high base rate of carelessness (23%) and mid-study attrition (11%). Conscientiousness, extraversion, neuroticism, and agreeableness were related to survey attrition and carelessness resulting in significant differences between respondents who carefully completed the survey and those lost via attrition or careless response screening. A simulation study provided estimates of the extent of bias resulting from: 1) various amounts of attrition and carelessness, and 2) correlations between attrition and carelessness with personality traits. Although the magnitude of bias is modest in most cases, there was substantial bias (.15) in correlation estimates in some situations when screening for CR and attrition. Based on findings from the current studies and extant literature, we suggest ways researchers and practitioners can address CR and attrition to improve the accuracy of the measured relationships between variables like personality and performance, and enhance the defensibility of conclusions.}, journal={COMPUTERS IN HUMAN BEHAVIOR}, author={Ward, M. K. and Meade, Adam W. and Allred, Christopher M. and Pappalardo, Gabriel and Stoughton, J. William}, year={2017}, month={Nov}, pages={417–430} } @article{tay_meade_cao_2015, title={An Overview and Practical Guide to IRT Measurement Equivalence Analysis}, volume={18}, ISSN={["1552-7425"]}, DOI={10.1177/1094428114553062}, abstractNote={ This article provides an overview and guide to implementing item response theory (IRT) measurement equivalence (ME) or differential item functioning (DIF) analysis. We (a) present the need for establishing IRT ME/DIF analysis, (b) discuss the similarities and differences between IRT and factor-analytic ME/DIF analysis, (c) review commonly used IRT ME/DIF indices and procedures, (d) provide three illustrations of two recommended IRT procedures, and (e) furnish recommendations for conducting IRT ME/DIF. We conclude by discussing future directions for IRT ME/DIF research. }, number={1}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Tay, Louis and Meade, Adam W. and Cao, Mengyang}, year={2015}, month={Jan}, pages={3–46} } @article{halberstadt_beale_meade_craig_parker_2015, title={Anger in families: Individual and dyadic contributions}, volume={32}, ISSN={["1460-3608"]}, DOI={10.1177/0265407514552617}, abstractNote={ We addressed three questions about anger in the family, including the derivation of anger, the presence of anger contagion, and the degree to which family members share perceptions about anger in the family. Seventh-grade children, mothers, and fathers independently reported on the frequency and intensity of anger in six family relationships (child to mother, mother to child, child to father, father to child, mother to father, and father to mother). Analyses based on the social relations model revealed that family members share the belief that anger in the family is the result of individuals’ own styles of anger and, to a lesser degree, is created within unique relationships. Family members also recognized emotion contagion effects across all familial relationships.
Overall, children, mothers, and fathers seemed to share perceptions about anger in the family with one exception. Implications for further research and family relationships are discussed. }, number={6}, journal={JOURNAL OF SOCIAL AND PERSONAL RELATIONSHIPS}, author={Halberstadt, Amy G. and Beale, Karen S. and Meade, Adam W. and Craig, Ashley B. and Parker, Alison E.}, year={2015}, month={Sep}, pages={810–828} } @article{duvernet_wright_meade_coughlin_kantrowitz_2014, title={General Mental Ability as a Source of Differential Functioning in Personality Scales}, volume={17}, ISSN={["1552-7425"]}, DOI={10.1177/1094428114525996}, abstractNote={ Despite pervasive evidence that general mental ability and personality are unrelated, we investigated whether general mental ability may affect the response process associated with personality measurement. Study 1 examined a large sample of job applicant responses to four personality scales for differential functioning across groups of differing general mental ability. While results indicated that personality items differentially function across highly disparate general mental ability groups, there was little evidence of differential functioning across groups with similar levels of general mental ability. Study 2 replicated these findings in a different sample, using a different measure of general mental ability. We posit that observed differences in the psychometric properties of these personality scales are likely due to the information processing capabilities of the respondents. Additionally, we describe how differential functioning analyses can be used during scale development as a method of identifying items that are not appropriate for all intended respondents. In so doing, we demonstrate procedures for examining other construct-measurement interactions in which respondents’ standings on a specific construct could influence their interpretation of and response to items assessing other constructs. }, number={3}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={DuVernet, Amy M. and Wright, Natalie A. and Meade, Adam W. and Coughlin, Chris and Kantrowitz, Tracy M.}, year={2014}, month={Jul}, pages={299–323} } @article{lobene_meade_pond_2014, title={Perceived Overqualification: A Multi-Source Investigation of Psychological Predisposition and Contextual Triggers}, volume={149}, ISSN={0022-3980 1940-1019}, url={http://dx.doi.org/10.1080/00223980.2014.967654}, DOI={10.1080/00223980.2014.967654}, abstractNote={Although employee (subjective) perceived overqualification (POQ) has recently been explored as a meaningful organizational construct, further work is needed to fully understand it. We extend the theoretical psychological underpinnings of employee POQ and examine both its determinants and outcomes based on established and newly proposed theoretical developments. Four-hundred and fifteen employees completed an online questionnaire and 208 of their supervisors completed corresponding surveys about the employees’ withdrawal behaviors and job-related attitudes, in order to explore potential predictors and outcomes of subjectively experienced POQ. Among the predictors, work conditions (uniform requirements and repetitive tasks) were most strongly associated with POQ. In terms of individual differences, narcissism predicted higher POQ while general mental ability only did when holding other variables constant.
In addition, among the outcomes, higher POQ was related to lower job satisfaction and organizational commitment, but was not related to withdrawal behaviors such as truancy, absenteeism, and turnover intentions.}, number={7}, journal={The Journal of Psychology}, publisher={Informa UK Limited}, author={Lobene, Eleni V. and Meade, Adam W. and Pond, Samuel B., III}, year={2014}, month={Oct}, pages={684–710} } @article{wright_meade_gutierrez_2014, title={Using Invariance to Examine Cheating in Unproctored Ability Tests}, volume={22}, ISSN={["1468-2389"]}, DOI={10.1111/ijsa.12053}, abstractNote={Despite their widespread use in personnel selection, there is concern that cheating could undermine the validity of unproctored Internet‐based tests. This study examined the presence of cheating in a speeded ability test used for personnel selection. The same test was administered to applicants in either proctored or unproctored conditions. Item response theory differential functioning analyses were used to evaluate the equivalence of the psychometric properties of test items across proctored and unproctored conditions. A few items displayed different psychometric properties, and the nature of these differences was not uniform. Theta scores were not reflective of widespread cheating among unproctored examinees. Thus, results were not consistent with what would be expected if cheating on unproctored tests was pervasive.}, number={1}, journal={INTERNATIONAL JOURNAL OF SELECTION AND ASSESSMENT}, author={Wright, Natalie A. and Meade, Adam W. and Gutierrez, Sara L.}, year={2014}, month={Mar}, pages={12–22} } @article{stoughton_thompson_meade_2013, title={Big Five Personality Traits Reflected in Job Applicants' Social Media Postings}, volume={16}, ISSN={["2152-2723"]}, DOI={10.1089/cyber.2012.0163}, abstractNote={Job applicants and incumbents often use social media for personal communications allowing for direct observation of their social communications "unfiltered" for employer consumption. As such, these data offer a glimpse of employees in settings free from the impression management pressures present during evaluations conducted for applicant screening and research purposes. This study investigated whether job applicants' (N=175) personality characteristics are reflected in the content of their social media postings. Participant self-reported social media content related to (a) photos and text-based references to alcohol and drug use and (b) criticisms of superiors and peers (so-called "badmouthing" behavior) were compared to traditional personality assessments. Results indicated that extraverted candidates were prone to postings related to alcohol and drugs. Those low in agreeableness were particularly likely to engage in online badmouthing behaviors. Evidence concerning the relationships between conscientiousness and the outcomes of interest was mixed.}, number={11}, journal={CYBERPSYCHOLOGY BEHAVIOR AND SOCIAL NETWORKING}, author={Stoughton, J. William and Thompson, Lori Foster and Meade, Adam W.}, year={2013}, month={Nov}, pages={800–805} } @article{stoughton_thompson_meade_2015, title={Examining Applicant Reactions to the Use of Social Networking Websites in Pre-Employment Screening}, volume={30}, ISSN={["1573-353X"]}, DOI={10.1007/s10869-013-9333-6}, abstractNote={Social networking websites such as Facebook allow employers to gain information about applicants which job seekers may not otherwise share during the hiring process. 
This multi-study investigation examined how job seekers react to this screening practice. Study 1 (N = 175) employed a realistic selection scenario examining applicant reactions to prospective employers reviewing their social networking website. Study 2 (N = 208) employed a simulated selection scenario where participants rated their experience with a proposed selection process. In Study 1, social networking website screening caused applicants to feel their privacy had been invaded, which ultimately resulted in lower organizational attraction. Applicants low in agreeableness had the most adverse reactions to social networking website screening. In Study 2, screening again caused applicants to feel their privacy had been invaded, resulting in lower organizational attraction and increased intentions to litigate. The organization’s positive/negative hiring decision did not moderate the relationship between screening and justice. The results suggest organizations should consider the costs and benefits of social media screening which could reduce the attractiveness of the organization. Additionally, applicants may need to change their conceptualization of social networking websites, viewing them through the eyes of a prospective employer. This investigation proposed and tested an explanatory model of the effects of screening practices on organizational outcomes demonstrating how electronic monitoring, privacy, and applicant reactions can be integrated to better understand responses to technological innovations in the workplace.}, number={1}, journal={JOURNAL OF BUSINESS AND PSYCHOLOGY}, author={Stoughton, J. William and Thompson, Lori Foster and Meade, Adam W.}, year={2015}, month={Mar}, pages={73–88} } @article{meade_2013, title={Statistical Approaches to Measurement Invariance}, volume={20}, ISSN={["1532-8007"]}, DOI={10.1080/10705511.2013.742405}, abstractNote={Roger E. Millsap. New York, NY: Routledge, 2011, 355 pages, $44.95 (Softcover). In Statistical Approaches to Measurement Invariance, Roger Millsap has provided a detailed summary of a comprehensive...}, number={1}, journal={STRUCTURAL EQUATION MODELING-A MULTIDISCIPLINARY JOURNAL}, author={Meade, Adam W.}, year={2013}, month={Jan}, pages={168–174} } @article{lobene_meade_2013, title={The Effects of Career Calling and Perceived Overqualification on Work Outcomes for Primary and Secondary School Teachers}, volume={40}, ISSN={["1556-0856"]}, DOI={10.1177/0894845313495512}, abstractNote={ While perceived overqualification (POQ) has received increased research attention in recent years, the identification of variables that moderate POQ-outcome relationships is critical to our understanding of how the construct affects career outcomes. This study, involving 170 full-time primary and secondary school educators in a suburban mid-Atlantic school system, found that POQ is negatively related to job satisfaction and affective commitment while positively related to turnover intentions and desire to turnover. While POQ was not significantly related to performance or continuance organizational commitment, the relationships between POQ and both performance and continuance organizational commitment were significantly moderated by the experience of career calling orientation. Generally, the relationship between POQ and performance was stronger, and the relationship between POQ and continuance organizational commitment was weaker, for those with high calling. 
Additionally, the effects of career calling were considerably stronger than those of POQ for all criteria. The implications surrounding these results, and opportunities for future investigation, are discussed. }, number={6}, journal={JOURNAL OF CAREER DEVELOPMENT}, author={Lobene, Eleni V. and Meade, Adam W.}, year={2013}, month={Dec}, pages={508–530} } @article{gregory_meade_thompson_2013, title={Understanding internet recruitment via signaling theory and the elaboration likelihood model}, volume={29}, ISSN={["1873-7692"]}, DOI={10.1016/j.chb.2013.04.013}, abstractNote={A detailed model specifying the linkages between Internet recruitment websites and organizational attraction was examined. Participants (N = 581) viewed Fortune 500 company websites and responded to questions about the content and design of these websites and their resulting attitudes, fit perceptions, and organizational attraction. Results showed that recruitment website content and design influence attitudes toward the recruitment websites, organizational attitudes, and subsequently organizational attraction. The moderating effects of person-organization (P-O) and person-job (P-J) fit were examined. Two sets of hypotheses based on signaling theory (Spence, 1973, Spence, 1974) and the elaboration likelihood model (Petty & Cacioppo, 1981) were largely supported. Consistent with signaling theory, the amount of job and organizational information on a recruitment website interacted with website usability, such that when less job information was presented, website usability played a greater role in predicting favorable attitudes towards the organization. Consistent with the elaboration likelihood model, when P-J fit was high, website aesthetics were less important in predicting attitudes towards the organization.}, number={5}, journal={COMPUTERS IN HUMAN BEHAVIOR}, author={Gregory, Christina K. and Meade, Adam W. and Thompson, Lori Foster}, year={2013}, month={Sep}, pages={1949–1959} } @article{meade_craig_2012, title={Identifying Careless Responses in Survey Data}, volume={17}, ISSN={["1082-989X"]}, DOI={10.1037/a0028085}, abstractNote={When data are collected via anonymous Internet surveys, particularly under conditions of obligatory participation (such as with student samples), data quality can be a concern. However, little guidance exists in the published literature regarding techniques for detecting careless responses. Previously several potential approaches have been suggested for identifying careless respondents via indices computed from the data, yet almost no prior work has examined the relationships among these indicators or the types of data patterns identified by each. In 2 studies, we examined several methods for identifying careless responses, including (a) special items designed to detect careless response, (b) response consistency indices formed from responses to typical survey items, (c) multivariate outlier analysis, (d) response time, and (e) self-reported diligence. Results indicated that there are two distinct patterns of careless response (random and nonrandom) and that different indices are needed to identify these different response patterns. We also found that approximately 10%-12% of undergraduates completing a lengthy survey for course credit were identified as careless responders. In Study 2, we simulated data with known random response patterns to determine the efficacy of several indicators of careless response. 
We found that the nature of the data strongly influenced the efficacy of the indices to identify careless responses. Recommendations include using identified rather than anonymous responses, incorporating instructed response items before data collection, as well as computing consistency indices and multivariate outlier analysis to ensure high-quality data.}, number={3}, journal={PSYCHOLOGICAL METHODS}, author={Meade, Adam W. and Craig, S. Bartholomew}, year={2012}, month={Sep}, pages={437–455} } @misc{scherbaum_meade_2013, title={New Directions for Measurement in Management Research}, volume={15}, ISSN={["1468-2370"]}, DOI={10.1111/ijmr.12003}, abstractNote={Despite its importance, measurement has received less attention in the management sciences than it deserves. Currently, there is an over‐reliance on a narrow set of methods of measuring cognitive, affective, motivational, attitudinal and individual difference constructs that are often of interest in behavioural management research. The authors argue that there is a need to expand the scope of the measurement methods commonly employed by management researchers and that a greater diversity of measurement methods would benefit the field by contributing to theory development and the pursuit of new areas of research. The goals of this review are twofold: (1) to increase awareness among management researchers of the alternative measurement methods that can capture many of the cognitive, affective, motivational, attitudinal and individual difference constructs of interest; (2) to critically evaluate how these methods can and should be used, with a focus on both the strengths and limitations of each method. This review focuses on three classes of measures: physiological and biological measures; experience‐sampling measures; and implicit measures. These measures have had a tremendous impact on the research and theories of other fields such as marketing and economics, despite still being in their infancy. The authors believe that these three classes of measures have the potential to impact the nature and scope of management research and theory as well.}, number={2}, journal={INTERNATIONAL JOURNAL OF MANAGEMENT REVIEWS}, author={Scherbaum, Charles A. and Meade, Adam W.}, year={2013}, month={Apr}, pages={132–148} } @article{bynum_hoffman_meade_gentry_2013, title={Reconsidering the Equivalence of Multisource Performance Ratings: Evidence for the Importance and Meaning of Rater Factors}, volume={28}, ISSN={["1573-353X"]}, DOI={10.1007/s10869-012-9272-7}, number={2}, journal={JOURNAL OF BUSINESS AND PSYCHOLOGY}, author={Bynum, Bethany H. and Hoffman, Brian J. and Meade, Adam W. and Gentry, William A.}, year={2013}, month={Jun}, pages={203–219} } @article{meade_wright_2012, title={Solving the Measurement Invariance Anchor Item Problem in Item Response Theory}, volume={97}, ISSN={["0021-9010"]}, DOI={10.1037/a0027934}, abstractNote={The efficacy of tests of differential item functioning (measurement invariance) has been well established. It is clear that when properly implemented, these tests can successfully identify differentially functioning (DF) items when they exist. However, an assumption of these analyses is that the metric for different groups is linked using anchor items that are invariant. In practice, however, it is impossible to be certain which items are DF and which are invariant. This problem of anchor items, or referent indicators, has long plagued invariance research, and a multitude of suggested approaches have been put forth. 
Unfortunately, the relative efficacy of these approaches has not been tested. This study compares 11 variations on 5 qualitatively different approaches from recent literature for selecting optimal anchor items. A large-scale simulation study indicates that for nearly all conditions, an easily implemented 2-stage procedure recently put forth by Lopez Rivas, Stark, and Chernyshenko (2009) provided optimal power while maintaining nominal Type I error. With this approach, appropriate anchor items can be easily and quickly located, resulting in more efficacious invariance tests. Recommendations for invariance testing are illustrated using a pedagogical example of employee responses to an organizational culture measure.}, number={5}, journal={JOURNAL OF APPLIED PSYCHOLOGY}, author={Meade, Adam W. and Wright, Natalie A.}, year={2012}, month={Sep}, pages={1016–1031} } @article{wright_meade_2012, title={An exploration of cognitive ability contamination in the Implicit Association Test methodology}, volume={28}, ISSN={["1873-7692"]}, DOI={10.1016/j.chb.2011.10.009}, abstractNote={The purpose of this study was to explore the relationship between scores on the Implicit Association Test (IAT) and cognitive ability. This relationship was investigated by examining the relationship between two different IATs, a cognitive ability test, and learning outcomes following a short training module. Results demonstrated that IATs scored with the D scoring algorithm were not significantly related to cognitive ability test scores, and were not related to post-training learning outcomes. However, IATs scored with the conventional scoring algorithm were significantly negatively related to cognitive ability, and the two IATs used in the study were significantly correlated with one another regardless of which scoring method was used.}, number={2}, journal={COMPUTERS IN HUMAN BEHAVIOR}, author={Wright, Natalie A. and Meade, Adam W.}, year={2012}, month={Mar}, pages={393–399} } @article{behrend_sharek_meade_wiebe_2011, title={The viability of crowdsourcing for survey research}, volume={43}, ISSN={1554-3528}, url={http://dx.doi.org/10.3758/s13428-011-0081-0}, DOI={10.3758/s13428-011-0081-0}, abstractNote={Online contract labor portals (i.e., crowdsourcing) have recently emerged as attractive alternatives to university participant pools for the purposes of collecting survey data for behavioral research. However, prior research has not provided a thorough examination of crowdsourced data for organizational psychology research. We found that, as compared with a traditional university participant pool, crowdsourcing respondents were older, were more ethnically diverse, and had more work experience. Additionally, the reliability of the data from the crowdsourcing sample was as good as or better than the corresponding university sample. Moreover, measurement invariance generally held across these groups. We conclude that the use of these labor portals is an efficient and appropriate alternative to a university participant pool, despite small differences in personality and socially desirable responding across the samples. The risks and advantages of crowdsourcing are outlined, and an overview of practical and ethical guidelines is provided.}, number={3}, journal={Behavior Research Methods}, publisher={Springer Science and Business Media LLC}, author={Behrend, Tara S. and Sharek, David J. and Meade, Adam W. 
and Wiebe, Eric N.}, year={2011}, month={Mar}, pages={800–813} } @article{meade_2010a, title={"A taxonomy of measurement invariance effect size indices": Correction to Meade (2010).}, volume={95}, ISSN={1939-1854 0021-9010}, url={http://dx.doi.org/10.1037/a0020897}, DOI={10.1037/a0020897}, number={5}, journal={Journal of Applied Psychology}, publisher={American Psychological Association (APA)}, author={Meade, Adam W.}, year={2010}, month={Sep}, pages={943–943} } @article{meade_2010b, title={A Taxonomy of Effect Size Measures for the Differential Functioning of Items and Scales}, volume={95}, ISSN={["1939-1854"]}, DOI={10.1037/a0018966}, abstractNote={Much progress has been made in the past 2 decades with respect to methods of identifying measurement invariance or a lack thereof. Until now, the focus of these efforts has been to establish criteria for statistical significance in items and scales that function differently across samples. The power associated with tests of differential functioning, as with all significance tests, is affected by sample size and other considerations. Additionally, statistical significance need not imply practical importance. There is a strong need as such for meaningful effect size indicators to describe the extent to which items and scales function differently. Recently developed effect size measures show promise for providing a metric to describe the amount of differential functioning present between groups. Expanding upon recent developments, this article presents a taxonomy of potential differential functioning effect sizes; several new indices of item and scale differential functioning effect size are proposed and illustrated with 2 data samples. Software created for computing these indices and graphing item- and scale-level differential functioning is described.}, number={4}, journal={JOURNAL OF APPLIED PSYCHOLOGY}, author={Meade, Adam W.}, year={2010}, month={Jul}, pages={728–743} } @article{meade_tonidandel_2010a, title={Final Thoughts on Measurement Bias and Differential Prediction}, volume={3}, ISSN={["1754-9434"]}, DOI={10.1111/j.1754-9434.2010.01230.x}, abstractNote={In the focal article, we suggested that more thought be given to the concepts of test bias, measurement bias, and differential prediction and the implicit framework of fairness underlying the Cleary model. In this response, we clarify the nature and scope of our recommendations and address some of the more critical comments of our work.}, number={2}, journal={INDUSTRIAL AND ORGANIZATIONAL PSYCHOLOGY-PERSPECTIVES ON SCIENCE AND PRACTICE}, author={Meade, Adam W. and Tonidandel, Scott}, year={2010}, month={Jun}, pages={232–237} } @article{meade_tonidandel_2010b, title={Not Seeing Clearly With Cleary: What Test Bias Analyses Do and Do Not Tell Us}, volume={3}, ISSN={["1754-9434"]}, DOI={10.1111/j.1754-9434.2010.01223.x}, abstractNote={In recent decades, the Cleary (1968) approach for testing for differences in regression lines among demographic groups has been codified as a central approach to evaluate a test for bias. However, this approach is fraught with numerous shortcomings, a preponderance of implicit assumptions, and outcomes that are not sufficient to conclude that there is a problem with a test. We believe these shortcomings are poorly understood by many industrial–organizational (I–O) psychologists, that this method for evaluating test bias is overrelied on by our profession, and that it is interpreted improperly by those wishing to evaluate tests for bias in applied settings.
Moreover, eliminating differential prediction may be impossible in some cases, undesirable in others, and places an undue burden on organizational researchers.}, number={2}, journal={INDUSTRIAL AND ORGANIZATIONAL PSYCHOLOGY-PERSPECTIVES ON SCIENCE AND PRACTICE}, author={Meade, Adam W. and Tonidandel, Scott}, year={2010}, month={Jun}, pages={192–205} } @article{goldsworthy_mayhorn_meade_2010, title={Warnings in Manufacturing: Improving Hazard-Mitigation Messaging through Audience Analysis}, volume={20}, ISSN={["1520-6564"]}, DOI={10.1002/hfm.20163}, abstractNote={Hazard mitigation, including warning development, validation, and dissemination, is an important aspect of product safety and workplace and consumer protection. Understanding our audiences—workers and consumers—is an especially important, often overlooked, aspect of risk and harm reduction efforts. In this article, particular attention is paid to audience analysis in hazard communication and warning messaging, with a focus on the potential role of latent class analysis (LCA). We provide an example of using LCA to analyze a hazardous behavior: prescription medicine sharing and borrowing. Four distinct groups of people—ranging from abstainers to at‐risk sharers—are identified and discussed. Building better warnings and risk communication techniques is essential to promoting occupational and consumer safety. Audience analysis is a vital component of these efforts. LCA appears to be a worthwhile addition to our analytical toolbox by allowing risk reduction and hazard‐mitigation efforts to tailor interventions to a diverse target audience. © 2010 Wiley Periodicals, Inc.}, number={6}, journal={HUMAN FACTORS AND ERGONOMICS IN MANUFACTURING & SERVICE INDUSTRIES}, publisher={Wiley}, author={Goldsworthy, Richard C. and Mayhorn, Christopher B. and Meade, Adam W.}, year={2010}, pages={484–499} } @article{meade_2009, title={FreeIAT: An Open-Source Program to Administer the Implicit Association Test}, volume={33}, ISSN={["0146-6216"]}, DOI={10.1177/0146621608327803}, abstractNote={The FreeIAT was created to provide a free and open-source alternative to commercial software used to administer the Implicit Association Test (IAT). The IAT is a commonly used reaction–time based measure of association between sets of stimuli. Although there has been a large increase in the amount of research related to the IAT since its inception, limitations due to software cost and ease of use limit the accessibility of IAT as a measurement method.}, number={8}, journal={APPLIED PSYCHOLOGICAL MEASUREMENT}, author={Meade, Adam W.}, year={2009}, month={Nov}, pages={643–643} } @article{braddy_meade_michael_fleenor_2009, title={Internet Recruiting: Effects of website content features on viewers' perceptions of organizational culture}, volume={17}, ISSN={["0965-075X"]}, DOI={10.1111/j.1468-2389.2009.00448.x}, abstractNote={This study examined the effects of four ‘careers’ website content features (pictures, testimonials, organizational policies, and awards won) on viewers' perceptions of nine organizational culture attributes. Eight of these culture attributes were more strongly conveyed by culture‐specific website content features than by culture‐neutral website content features. This study also found support for a partial mediated‐moderation model.
This model illustrated that participants with weak culture preferences formed less favorable person–organization (P–O) fit perceptions as they perceived an organization to more strongly convey the culture attribute under investigation. Conversely, participants with strong culture preferences formed more favorable P–O fit perceptions as they perceived an organization to more strongly portray the culture attribute in question. Respondents with stronger P–O fit perceptions in turn reported stronger organizational attraction.}, number={1}, journal={INTERNATIONAL JOURNAL OF SELECTION AND ASSESSMENT}, author={Braddy, Phillip W. and Meade, Adam W. and Michael, Joan J. and Fleenor, John W.}, year={2009}, month={Mar}, pages={19–34} } @article{meade_fetzer_2009, title={Test Bias, Differential Prediction, and a Revised Approach for Determining the Suitability of a Predictor in a Selection Context}, volume={12}, ISSN={["1552-7425"]}, DOI={10.1177/1094428109331487}, abstractNote={ The most commonly used and accepted model of assessing bias in a selection context is that proposed by Cleary in which predictor-criterion regression lines are tested for both slope and intercept equality. With this approach, any difference in intercepts or slopes is considered an indication of bias. We argue that differing regression line intercepts are indicative of differential prediction but not test bias. We describe several fundamentally different potential causes of differences in groups’ regression line intercepts, many of which are unrelated to test properties. We argue that differential prediction because of such sources should not preclude the use of the test in selection contexts. We propose a new procedure to potentially identify the source of regression line differences and illustrate this framework using a job incumbent sample. }, number={4}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W. and Fetzer, Michael}, year={2009}, month={Oct}, pages={738–761} } @article{johnson_meade_duvernet_2009, title={The Role of Referent Indicators in Tests of Measurement Invariance}, volume={16}, ISSN={["1532-8007"]}, DOI={10.1080/10705510903206014}, abstractNote={Confirmatory factor analytic tests of measurement invariance (MI) require a referent indicator (RI) for model identification. Although the assumption that the RI is perfectly invariant across groups is acknowledged as problematic, the literature provides relatively little guidance for researchers to identify the conditions under which the practice is appropriate. Using simulated data, this study examined the effects of RI selection on both scale- and item-level MI tests. Results indicated that while inappropriate RI selection has little effect on the accuracy of conclusions drawn from scale-level tests of metric invariance, poor RI choice can produce very misleading results for item-level tests. As a result, group comparisons under conditions of partial invariance are highly susceptible to problems associated with poor RI choice.}, number={4}, journal={STRUCTURAL EQUATION MODELING-A MULTIDISCIPLINARY JOURNAL}, author={Johnson, Emily C. and Meade, Adam W. and DuVernet, Amy M.}, year={2009}, pages={642–657} } @article{lautenschlager_meade_2008, title={AlphaTest: A Windows program for tests of hypotheses about coefficient alpha}, volume={32}, ISSN={["1552-3497"]}, DOI={10.1177/0146621607312307}, number={6}, journal={APPLIED PSYCHOLOGICAL MEASUREMENT}, author={Lautenschlager, Gary J.
and Meade, Adam W.}, year={2008}, month={Sep}, pages={502–503} } @article{rivers_meade_fuller_2009, title={Examining Question and Context Effects in Organization Survey Data Using Item Response Theory}, volume={12}, ISSN={["1552-7425"]}, DOI={10.1177/1094428108315864}, abstractNote={ Organizational researchers routinely use attitudinal surveys to track organizational development and identify areas for intervention. However, seemingly trivial changes to the survey instrument, such as question wording or question order, can introduce measurement artifacts leading to differences in observed responses that are not due to actual employee attitudinal change. Traditional methods for assessing the presence of artifacts because of survey changes require additional survey administration using multiple survey forms and random assignment. However, the item response theory method illustrated in this study eliminates the need for additional data collection, offers a more rigorous design, and requires fewer organizational resources. }, number={3}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Rivers, Drew C. and Meade, Adam W. and Fuller, W. Lou}, year={2009}, month={Jul}, pages={529–553} } @article{baranik_meade_lakey_lance_hu_hua_michalos_2008, title={Examining the differential item functioning of the rosenberg self-esteem scale across eight countries}, volume={38}, ISSN={["0021-9029"]}, DOI={10.1111/j.1559-1816.2008.00372.x}, abstractNote={We examined the differential item functioning (DIF) of Rosenberg's (1965) Self‐Esteem Scale (RSES) and compared scores from U.S. participants with those from 7 other countries: Canada, Germany, New Zealand, Kenya, South Africa, Singapore, and Taiwan. Results indicate that DIF was present in all comparisons. Moreover, controlling for latent self‐esteem, participants from individualistic countries had an easier time reporting high self‐esteem on self‐competence‐related items, whereas participants from communal countries had an easier time reporting high self‐esteem on self‐liking items (Tafarodi & Milne, 2002). After adjusting for DIF, we found larger mean self‐esteem differences between the countries than observed scores initially indicated. The suitability of the RSES, and the importance of examining DIF, for cross‐cultural research are discussed.}, number={7}, journal={JOURNAL OF APPLIED SOCIAL PSYCHOLOGY}, author={Baranik, Lisa E. and Meade, Adam W. and Lakey, Chad E. and Lance, Charles E. and Hu, Changya and Hua, Wei and Michalos, Alex}, year={2008}, month={Jul}, pages={1867–1904} } @article{behrend_thompson_meade_newton_grayson_2008, title={Measurement invariance in careers research: Using IRT to study gender differences in medical students' specialization decisions}, volume={35}, DOI={10.1177/0894945308317936}, number={1}, journal={Journal of Career Development}, author={Behrend, T. S. and Thompson, L. F. and Meade, Adam and Newton, D. A. and Grayson, M. S.}, year={2008}, pages={60–83} } @article{braddy_meade_kroustalis_2008, title={Online recruiting: The effects of organizational familiarity, website usability, and website attractiveness on viewers' impressions of organizations}, volume={24}, ISSN={["0747-5632"]}, DOI={10.1016/j.chb.2008.05.005}, abstractNote={Previous research on Internet recruitment has made the implicit assumption that recruitment websites influence viewers' opinions of recruiting organizations. This study tested this assumption using a pretest/posttest design. 
Findings revealed that participants' organizational favorability, image as employer, and organizational attractiveness perceptions were affected by their viewing of organizational recruitment websites. Greater increases in favorable organizational evaluations from the pretest measures to the posttest measures occurred with organizations maintaining websites that were easy to navigate and/or that were appealing. Contrary to predictions made by signaling theory, recruitment websites had similar effects on the organizational impressions of all individuals, regardless of their familiarity with the organizations maintaining the recruitment websites that they viewed.}, number={6}, journal={COMPUTERS IN HUMAN BEHAVIOR}, author={Braddy, Phillip W. and Meade, Adam W. and Kroustalis, Christina M.}, year={2008}, month={Sep}, pages={2992–3001} } @article{meade_johnson_braddy_2008, title={Power and sensitivity of alternative fit indices in tests of measurement invariance}, volume={93}, ISSN={["1939-1854"]}, DOI={10.1037/0021-9010.93.3.568}, abstractNote={Confirmatory factor analytic tests of measurement invariance (MI) based on the chi-square statistic are known to be highly sensitive to sample size. For this reason, G. W. Cheung and R. B. Rensvold (2002) recommended using alternative fit indices (AFIs) in MI investigations. In this article, the authors investigated the performance of AFIs with simulated data known to not be invariant. The results indicate that AFIs are much less sensitive to sample size and are more sensitive to a lack of invariance than chi-square-based tests of MI. The authors suggest reporting differences in comparative fit index (CFI) and R. P. McDonald's (1989) noncentrality index (NCI) to evaluate whether MI exists. Although a general value of change in CFI (.002) seemed to perform well in the analyses, condition specific change in McDonald's NCI values exhibited better performance than a single change in McDonald's NCI value. Tables of these values are provided as are recommendations for best practices in MI testing.}, number={3}, journal={JOURNAL OF APPLIED PSYCHOLOGY}, author={Meade, Adam W. and Johnson, Emily C. and Braddy, Phillip W.}, year={2008}, month={May}, pages={568–592} } @article{meade_lautenschlager_johnson_2007, title={A Monte Carlo examination of the sensitivity of the differential functioning of items and tests framework for tests of measurement invariance with likert data}, volume={31}, ISSN={["1552-3497"]}, DOI={10.1177/0146621606297316}, abstractNote={ This article highlights issues associated with the use of the differential functioning of items and tests (DFIT) methodology for assessing measurement invariance (or differential functioning) with Likert-type data. Monte Carlo analyses indicate relatively low sensitivity of the DFIT methodology for identifying differential item functioning (DIF) under some conditions of differential functioning with previously recommended significance values. The differential test functioning index was extremely insensitive to differential functioning under all study conditions. The authors recommend alternative noncompensatory DIF cutoff values used to evaluate the significance of DIF for different DIF effect sizes. Additionally, contrasts between polytomous and dichotomous data are drawn, and problems with determining measurement invariance at the scale, rather than item, level for Likert scale data are highlighted. }, number={5}, journal={APPLIED PSYCHOLOGICAL MEASUREMENT}, author={Meade, Adam W. and Lautenschlager, Gary J. 
and Johnson, Emily C.}, year={2007}, month={Sep}, pages={430–455} } @article{lance_woehr_meade_2007, title={A Monte Carlo investigation of assessment center construct validity models}, volume={10}, ISSN={["1552-7425"]}, DOI={10.1177/1094428106289395}, abstractNote={ Three major reviews of the assessment center (AC) construct-validity literature have disagreed as to the most appropriate analytic model for AC postexercise dimension ratings. We report a Monte Carlo study addressing the following questions: (a) To what extent does the “true” model (i.e., the model that generated the data) actually appear to fit the data well? (b) To what extent can a model appear to fit the data well even though it is the wrong model? and (c) Is model fit actually a useful empirical criterion for judging which model is most likely the population model? Results suggest that “true” models may not always appear as the best fitting models, whereas “false” models sometimes appear to offer better fit than the true models. Implications for the study of AC construct validity are discussed. }, number={3}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Lance, Charles E. and Woehr, David J. and Meade, Adam W.}, year={2007}, month={Jul}, pages={430–448} } @article{meade_michels_lautenschlager_2007, title={Are Internet and paper-and-pencil personality tests truly comparable? An experimental design measurement invariance study}, volume={10}, ISSN={["1552-7425"]}, DOI={10.1177/1094428106289393}, abstractNote={ Recently, the use of technology in assessment for personnel selection has increased dramatically. An important consideration is whether test scores obtained via Internet administration are psychometrically equivalent to those obtained by the more traditional paper-and-pencil format. Our results suggest that there is comparability of scores for many personality constructs, including conscientiousness. However, invariance was not found for some scales between persons allowed to choose formats and those not given a choice of formats. As testing-format preference may be related to membership in federally protected demographic groups, this latter finding was somewhat troubling. Additionally, we illustrate the use of an experimental laboratory design to investigate possible causes of a lack of measurement invariance in Internet and paper-and-pencil comparisons. }, number={2}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W. and Michels, Lawrence C. and Lautenschlager, Gary J.}, year={2007}, month={Apr}, pages={322–345} } @article{baker_caison_meade_2007, title={Assessing gender-related differential item functioning and predictive validity with the Institutional Integration Scale}, volume={67}, ISSN={["1552-3888"]}, DOI={10.1177/0013164406292088}, abstractNote={ This study examined the gender-related differential predictive validity of five subscales of the Institutional Integration Scale (IIS) with regard to college student withdrawal. Differential functioning of the IIS across genders was assessed using an item response theory (IRT)—based framework of differential item and test functioning. The results confirmed the absence of differential functioning and supported the predictive validity of two of the five subscales for student withdrawal. IRT analyses revealed that a number of the items did not adequately reflect the construct and should be revised or removed from the measure. 
A discussion of these results and the implications for higher education institutions focused on preventing student withdrawal are presented. }, number={3}, journal={EDUCATIONAL AND PSYCHOLOGICAL MEASUREMENT}, author={Baker, Becca A. and Caison, Amy L. and Meade, Adam W.}, year={2007}, month={Jun}, pages={545–559} } @article{meade_bauer_2007, title={Power and precision in confirmatory factor analytic tests of measurement invariance}, volume={14}, ISSN={["1532-8007"]}, DOI={10.1080/10705510701575461}, abstractNote={This study investigates the effects of sample size, factor overdetermination, and communality on the precision of factor loading estimates and the power of the likelihood ratio test of factorial invariance in multigroup confirmatory factor analysis. Although sample sizes are typically thought to be the primary determinant of precision and power, the degree of factor overdetermination and the level of indicator communalities also play important roles. Based on these findings, no single rule of thumb regarding the ratio of sample size to number of indicators can ensure adequate power to detect a lack of measurement invariance.}, number={4}, journal={STRUCTURAL EQUATION MODELING-A MULTIDISCIPLINARY JOURNAL}, author={Meade, Adam W. and Bauer, Daniel J.}, year={2007}, pages={611–635} } @article{meade_eby_2007, title={Using indices of group agreement in multilevel construct validation}, volume={10}, ISSN={["1552-7425"]}, DOI={10.1177/1094428106289390}, abstractNote={This article illustrates the role of group-agreement indices in the process of multilevel construct validation. A two-step process is outlined that includes multitrait-multimethod confirmatory factor analysis (CFA) and the use of a dispersion model to examine correlates of consensus of multilevel constructs. The role of dispersion models for providing discriminant validity evidence for similar constructs, as well as providing insight as to why some teams share perceptions more strongly than others, is illustrated using three efficacy-related constructs.}, number={1}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W. and Eby, Lillian T.}, year={2007}, month={Jan}, pages={75–96} } @article{meade_2006, title={A beginner's guide to structural equation modeling, 2nd edition}, volume={9}, ISSN={["1094-4281"]}, DOI={10.1177/1094428106289194}, number={4}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W.}, year={2006}, month={Oct}, pages={568–571} } @article{dierdorff_surface_meade_thompson_martin_2006, title={Group differences and measurement equivalence: Implications for command climate survey research and practice}, volume={18}, ISSN={["1532-7876"]}, DOI={10.1207/s15327876mp1801_2}, abstractNote={Military organizations use survey methodology to assess attitudes related to command climate. Many commands are staffed with both military and civilian personnel. However, no previous research has examined the equivalence of a command climate survey's measurement properties across these types of personnel. Differences in the personnel systems and organizational socialization could lead to different views of various facets of a command climate survey, making direct comparisons or aggregations of group-level data inappropriate. Furthermore, men and women may also view aspects of command climate surveys differently. Using two administrations of a command climate survey in a U.S. Major Army Command, our findings reveal only small differences between male and female samples. 
More notable differences existed between military and civilian personnel. Nevertheless, the measurement equivalence demonstrated for both male–female and military–civilian comparisons was adequate to justify cross-group comparisons and aggregation of survey responses.}, number={1}, journal={MILITARY PSYCHOLOGY}, author={Dierdorff, Erich C. and Surface, Eric A. and Meade, Adam and Thompson, Lori Foster and Martin, Don L.}, year={2006}, pages={19–37} } @article{meade_kroustalis_2006, title={Problems with item parceling for confirmatory factor analytic tests of measurement invariance}, volume={9}, ISSN={["1552-7425"]}, DOI={10.1177/1094428105283384}, abstractNote={ Combining items into parcels in confirmatory factor analysis (CFA) can improve model estimation and fit. Because adequate model fit is imperative for CFA tests of measurement invariance, parcels have frequently been used. However, the use of parcels as indicators in a CFA model can have serious detrimental effects on tests of measurement invariance. Using simulated data with a known lack of invariance, the authors illustrate how models using parcels as indicator variables erroneously indicate that measurement invariance exists much more often than do models using items as indicators. Moreover, item-by-item tests of measurement invariance were often more informative than were tests of the entire parameter matrices. }, number={3}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, Adam W. and Kroustalis, Christina M.}, year={2006}, month={Jul}, pages={369–403} } @article{braddy_meade_kroustalis_2006, title={Organizational recruitment website effects on viewers' perceptions of organizational culture}, volume={20}, ISSN={["1573-353X"]}, DOI={10.1007/s10869-005-9003-4}, number={4}, journal={JOURNAL OF BUSINESS AND PSYCHOLOGY}, author={Braddy, Phillip W. and Meade, Adam W. and Kroustalis, Christina M.}, year={2006}, pages={525–543} } @article{meade_lautenschlager_2004a, title={A Monte-Carlo study of confirmatory factor analytic tests of measurement equivalence/invariance}, volume={11}, ISSN={["1532-8007"]}, DOI={10.1207/S15328007SEM1101_5}, abstractNote={In recent years, confirmatory factor analytic (CFA) techniques have become the most common method of testing for measurement equivalence/invariance (ME/I). However, no study has simulated data with known differences to determine how well these CFA techniques perform. This study utilizes data with a variety of known simulated differences in factor loadings to determine how well traditional tests of ME/I can detect these specific simulated differences. Results show that traditional CFA tests of ME/I perform well under ideal situations but that large sample sizes, a sufficient number of manifest indicators, and at least moderate communalities are crucial for assurance that ME/I conditions exist.}, number={1}, journal={STRUCTURAL EQUATION MODELING-A MULTIDISCIPLINARY JOURNAL}, author={Meade, AW and Lautenschlager, GJ}, year={2004}, pages={60–72} } @article{meade_lautenschlager_2004b, title={A comparison of item response theory and confirmatory factor analytic methodologies for establishing measurement equivalence/invariance}, volume={7}, ISSN={["1094-4281"]}, DOI={10.1177/1094428104268027}, abstractNote={ Recently, there has been increased interest in tests of measurement equivalence/invariance (ME/I).
This study uses simulated data with known properties to assess the appropriateness, similarities, and differences between confirmatory factor analysis and item response theory methods of assessing ME/I. Results indicate that although neither approach is without flaw, the item response theory–based approach seems to be better suited for some types of ME/I analyses. }, number={4}, journal={ORGANIZATIONAL RESEARCH METHODS}, author={Meade, AW and Lautenschlager, GJ}, year={2004}, month={Oct}, pages={361–388} } @article{meade_2004, title={Psychometric problems and issues involved with creating and using ipsative measures for selection}, volume={77}, ISSN={["0963-1798"]}, DOI={10.1348/0963179042596504}, abstractNote={Data are described as ipsative if a given set of responses always sum to the same total. However, there are many properties of data collection that can give rise to different types of ipsative data. In this study, the most common type of ipsative data used in employee selection (forced‐choice ipsative data; FCID) is discussed as a special case of other types of ipsative data. Although all ipsative data contains constraints on covariance matrices (covariance‐level interdependence), FCID contains additional item‐level interdependencies as well. The psychological processes that give rise to FCID and the resultant psychometric properties are discussed. In addition, data from which both normative and ipsative responses were provided by job applicants illustrate very different patterns of correlations as well as very different selection decisions between normative, FCID and ipsatized measures.}, journal={JOURNAL OF OCCUPATIONAL AND ORGANIZATIONAL PSYCHOLOGY}, author={Meade, AW}, year={2004}, month={Dec}, pages={531–551} }