@article{behrend_wiebe_london_johnson_2011,
  title={Cloud computing adoption and usage in community colleges},
  volume={30},
  ISSN={0144-929X, 1362-3001},
  url={http://dx.doi.org/10.1080/0144929X.2010.489118},
  DOI={10.1080/0144929X.2010.489118},
  abstractNote={Cloud computing is gaining popularity in higher education settings, but the costs and benefits of this tool have gone largely unexplored. The purpose of this study was to examine the factors that lead to technology adoption in a higher education setting. Specifically, we examined a range of predictors and outcomes relating to the acceptance of a cloud computing platform in rural and urban community colleges. Drawing from the Technology Acceptance Model 3 (TAM3) (Venkatesh, V. and Bala, H., 2008. Technology Acceptance Model 3 and a research agenda on interventions. Decision Sciences, 39 (2), 273–315), we build on the literature by examining both actual usage and future intentions; further, we test the direct and indirect effects of a range of predictors on these outcomes. Approximately 750 community college students enrolled in basic computing skills courses participated in this study; findings demonstrated that background characteristics such as the student's ability to travel to campus influenced usefulness perceptions, while ease of use was largely determined by first-hand experiences with the platform and instructor support. We offer recommendations for community college administrators and others who seek to incorporate cloud computing in higher education settings.},
  number={2},
  journal={Behaviour \& Information Technology},
  publisher={Informa UK Limited},
  author={Behrend, Tara S. and Wiebe, Eric N. and London, Jennifer E. and Johnson, Emily C.},
  year={2011},
  month={Mar},
  pages={231--240}
}

@article{johnson_meade_duvernet_2009,
  title={The role of referent indicators in tests of measurement invariance},
  volume={16},
  ISSN={1532-8007},
  DOI={10.1080/10705510903206014},
  abstractNote={Confirmatory factor analytic tests of measurement invariance (MI) require a referent indicator (RI) for model identification. Although the assumption that the RI is perfectly invariant across groups is acknowledged as problematic, the literature provides relatively little guidance for researchers to identify the conditions under which the practice is appropriate. Using simulated data, this study examined the effects of RI selection on both scale- and item-level MI tests. Results indicated that while inappropriate RI selection has little effect on the accuracy of conclusions drawn from scale-level tests of metric invariance, poor RI choice can produce very misleading results for item-level tests. As a result, group comparisons under conditions of partial invariance are highly susceptible to problems associated with poor RI choice.},
  number={4},
  journal={Structural Equation Modeling: A Multidisciplinary Journal},
  author={Johnson, Emily C. and Meade, Adam W. and DuVernet, Amy M.},
  year={2009},
  pages={642--657}
}

@article{meade_johnson_braddy_2008,
  title={Power and sensitivity of alternative fit indices in tests of measurement invariance},
  volume={93},
  ISSN={1939-1854},
  DOI={10.1037/0021-9010.93.3.568},
  abstractNote={Confirmatory factor analytic tests of measurement invariance (MI) based on the chi-square statistic are known to be highly sensitive to sample size. For this reason, G. W. Cheung and R. B. Rensvold (2002) recommended using alternative fit indices (AFIs) in MI investigations. In this article, the authors investigated the performance of AFIs with simulated data known not to be invariant. The results indicate that AFIs are much less sensitive to sample size and more sensitive to a lack of invariance than chi-square-based tests of MI. The authors suggest reporting differences in the comparative fit index (CFI) and R. P. McDonald's (1989) noncentrality index (NCI) to evaluate whether MI exists. Although a general change in CFI value (.002) seemed to perform well in the analyses, condition-specific change in McDonald's NCI values exhibited better performance than a single change in McDonald's NCI value. Tables of these values are provided, as are recommendations for best practices in MI testing.},
  number={3},
  journal={Journal of Applied Psychology},
  author={Meade, Adam W. and Johnson, Emily C. and Braddy, Phillip W.},
  year={2008},
  month={May},
  pages={568--592}
}

@article{meade_lautenschlager_johnson_2007,
  title={A Monte Carlo examination of the sensitivity of the differential functioning of items and tests framework for tests of measurement invariance with Likert data},
  volume={31},
  ISSN={1552-3497},
  DOI={10.1177/0146621606297316},
  abstractNote={This article highlights issues associated with the use of the differential functioning of items and tests (DFIT) methodology for assessing measurement invariance (or differential functioning) with Likert-type data. Monte Carlo analyses indicate relatively low sensitivity of the DFIT methodology for identifying differential item functioning (DIF) under some conditions of differential functioning with previously recommended significance values. The differential test functioning index was extremely insensitive to differential functioning under all study conditions. The authors recommend alternative noncompensatory DIF cutoff values for evaluating the significance of DIF at different DIF effect sizes. Additionally, contrasts between polytomous and dichotomous data are drawn, and problems with determining measurement invariance at the scale, rather than item, level for Likert scale data are highlighted.},
  number={5},
  journal={Applied Psychological Measurement},
  author={Meade, Adam W. and Lautenschlager, Gary J. and Johnson, Emily C.},
  year={2007},
  month={Sep},
  pages={430--455}
}