@article{lolos_boone_alexopoulos_goldsman_dingec_mokashi_wilson_2022, title={A SEQUENTIAL METHOD FOR ESTIMATING STEADY-STATE QUANTILES USING STANDARDIZED TIME SERIES}, ISSN={["0891-7736"]}, DOI={10.1109/WSC57314.2022.10015283}, abstractNote={We propose SQSTS, an automated sequential procedure for computing confidence intervals (CIs) for steady-state quantiles based on Standardized Time Series (STS) processes computed from sample quantiles. We estimate the variance parameter associated with a given quantile estimator using the order statistics of the full sample and a combination of variance-parameter estimators based on the theoretical framework developed by Alexopoulos et al. in 2022. SQSTS is structurally less complicated than its main competitors, the Sequest and Sequem methods developed by Alexopoulos et al. in 2019 and 2017. Preliminary experimentation with the customer delay process prior to service in a congested M/M/1 queueing system revealed that SQSTS performed favorably compared with Sequest and Sequem in terms of the estimated CI coverage probability, and it significantly outperformed the latter methods with regard to average sample-size requirements.}, journal={2022 WINTER SIMULATION CONFERENCE (WSC)}, author={Lolos, Athanasios and Boone, J. Haden and Alexopoulos, Christos and Goldsman, David and Dingec, Kemal Dincer and Mokashi, Anup C. and Wilson, James R.}, year={2022}, pages={73–84} } @article{lei_alexopoulos_peng_wilson_2022, title={ESTIMATING CONFIDENCE REGIONS FOR DISTORTION RISK MEASURES AND THEIR GRADIENTS}, ISSN={["0891-7736"]}, DOI={10.1109/WSC57314.2022.10015404}, abstractNote={This article constructs confidence regions (CRs) of distortion risk measures and their gradients at different risk levels based on replicate samples obtained from finite-horizon simulations. The CRs are constructed by batching and sectioning methods which partition the sample into nonoverlapping batches. Preliminary numerical results show that the estimated coverage rates of the CRs constructed are close to the nominal values.}, journal={2022 WINTER SIMULATION CONFERENCE (WSC)}, author={Lei, Lei and Alexopoulos, Christos and Peng, Yijie and Wilson, James R.}, year={2022}, pages={13–24} } @article{lei_alexopoulos_peng_wilson_2020, title={CONFIDENCE INTERVALS AND REGIONS FOR QUANTILES USING CONDITIONAL MONTE CARLO AND GENERALIZED LIKELIHOOD RATIOS}, ISSN={["0891-7736"]}, DOI={10.1109/WSC48552.2020.9383910}, abstractNote={This article develops confidence intervals (CIs) and confidence regions (CRs) for quantiles based on independent realizations of a simulation response. The methodology uses a combination of conditional Monte Carlo (CMC) and the generalized likelihood ratio (GLR) method. While batching and sectioning methods partition the sample into nonoverlapping batches, and construct CIs and CRs by estimating the asymptotic variance using sample quantiles from each batch, the proposed techniques directly estimate the underlying probability density function of the response. 
Numerical results show that the CIs constructed by applying CMC, GLR, and sectioning lead to comparable coverage results, which are closer to the targets compared with batching alone for relatively small samples; and the coverage rates of the CRs constructed by applying CMC and GLR are closer to the targets than both sectioning and batching when the sample size is relatively small and the number of probability levels is relatively large.}, journal={2020 WINTER SIMULATION CONFERENCE (WSC)}, author={Lei, Lei and Alexopoulos, Christos and Peng, Yijie and Wilson, James R.}, year={2020}, pages={2071–2082} } @article{slocum_jones_fletcher_mcconnell_hodgson_taheri_wilson_2020, title={Improving chemotherapy infusion operations through the simulation of scheduling heuristics: a case study}, volume={2}, ISSN={2047-6965 2047-6973}, url={http://dx.doi.org/10.1080/20476965.2019.1709908}, DOI={10.1080/20476965.2019.1709908}, abstractNote={ABSTRACT Over the last decade, chemotherapy treatments have dramatically shifted to outpatient services such that nearly 90% of all infusions are now administered outpatient. This shift has challenged oncology clinics to make chemotherapy treatment as widely available as possible while attempting to treat all patients within a fixed period of time. Historical data from a Veterans Affairs chemotherapy clinic in the United States and staff input informed a discrete event simulation model of the clinic. The case study examines the impact of altering the current schedule, where all patients arrive at 8:00 AM, to a schedule that assigns patients to two or three different appointment times based on the expected length of their chemotherapy infusion. The results identify multiple scheduling policies that could be easily implemented with the best solutions reducing both average patient waiting time and average nurse overtime requirements.}, journal={Health Systems}, publisher={Informa UK Limited}, author={Slocum, Ryan F. and Jones, Herbert L. and Fletcher, Matthew T. and McConnell, Brandon M. and Hodgson, Thom J. and Taheri, Javad and Wilson, James R.}, year={2020}, month={Feb}, pages={1–16} } @article{alexopoulos_boone_goldsman_lobos_dingec_wilson_2020, title={STEADY-STATE QUANTILE ESTIMATION USING STANDARDIZED TIME SERIES}, ISSN={["0891-7736"]}, DOI={10.1109/WSC48552.2020.9384130}, abstractNote={Extending developments of Calvin and Nakayama in 2013 and Alexopoulos et al. in 2019, we formulate point and confidence-interval (CI) estimators for given quantiles of a steady-state simulation output process based on the method of standardized time series (STS). Under mild, empirically verifiable conditions, including a geometric-moment contraction (GMC) condition and a functional central limit theorem for an associated indicator process, we establish basic asymptotic properties of the STS quantile-estimation process. The GMC condition has also been proved for many widely used time-series models and a few queueing processes such as M/M/1 waiting times. We derive STS estimators for the associated variance parameter that are computed from nonoverlapping batches of outputs, and we combine those estimators to build asymptotically valid CIs. Simulated experimentation shows that our STS-based CI estimators have the potential to compare favorably with their conventional counterparts computed from nonoverlapping batches.}, journal={2020 WINTER SIMULATION CONFERENCE (WSC)}, author={Alexopoulos, Christos and Boone, Joseph H. 
and Goldsman, David and Lobos, Athanasios and Dingec, Kemal Dincer and Wilson, James R.}, year={2020}, pages={289–300} } @article{liu_kuhl_liu_wilson_2019, title={Modeling and Simulation of Nonstationary Non-Poisson Arrival Processes}, volume={31}, ISSN={["1526-5528"]}, DOI={10.1287/ijoc.2018.0828}, abstractNote={ We develop CIATA, a combined inversion-and-thinning approach for modeling a nonstationary non-Poisson process (NNPP), where the target arrival process is described by a given rate function and its associated mean-value function together with a given asymptotic variance-to-mean (dispersion) ratio. CIATA is based on the following: (i) a piecewise-constant majorizing rate function that closely approximates the given rate function from above; (ii) the associated piecewise-linear majorizing mean-value function; and (iii) an equilibrium renewal process (ERP) whose noninitial interrenewal times have mean 1 and variance equal to the given dispersion ratio. Transforming the ERP by the inverse of the majorizing mean-value function yields a majorizing NNPP whose arrival epochs are then thinned to deliver an NNPP having the specified properties. CIATA-Ph is a simulation algorithm that implements this approach based on an ERP whose noninitial interrenewal times have a phase-type distribution. Supporting theorems establish that CIATA-Ph can generate an NNPP having the desired mean-value function and asymptotic dispersion ratio. Extensive simulation experiments substantiated the effectiveness of CIATA-Ph with various rate functions and dispersion ratios. In all cases, we found approximate convergence of the dispersion ratio to its asymptotic value beyond a relatively short warm-up period. }, number={2}, journal={INFORMS JOURNAL ON COMPUTING}, author={Liu, Ran and Kuhl, Michael E. and Liu, Yunan and Wilson, James R.}, year={2019}, pages={347–366} } @article{zhang_wu_denton_wilson_lobo_2019, title={Probabilistic sensitivity analysis on Markov models with uncertain transition probabilities: an application in evaluating treatment decisions for type 2 diabetes}, volume={22}, ISSN={1386-9620 1572-9389}, url={http://dx.doi.org/10.1007/S10729-017-9420-8}, DOI={10.1007/S10729-017-9420-8}, abstractNote={Markov models are commonly used for decision-making studies in many application domains; however, there are no widely adopted methods for performing sensitivity analysis on such models with uncertain transition probability matrices (TPMs). This article describes two simulation-based approaches for conducting probabilistic sensitivity analysis on a given discrete-time, finite-horizon, finite-state Markov model using TPMs that are sampled over a specified uncertainty set according to a relevant probability distribution. The first approach assumes no prior knowledge of the probability distribution, and each row of a TPM is independently sampled from the uniform distribution on the row's uncertainty set. The second approach involves random sampling from the (truncated) multivariate normal distribution of the TPM's maximum likelihood estimators for its rows subject to the condition that each row has nonnegative elements and sums to one. The two sampling methods are easily implemented and have reasonable computation times. 
A case study illustrates the application of these methods to a medical decision-making problem involving the evaluation of treatment guidelines for glycemic control of patients with type 2 diabetes, where natural variation in a patient's glycated hemoglobin (HbA1c) is modeled as a Markov chain, and the associated TPMs are subject to uncertainty.}, number={1}, journal={Health Care Management Science}, publisher={Springer Nature}, author={Zhang, Yuanhui and Wu, Haipeng and Denton, Brian T. and Wilson, James R. and Lobo, Jennifer M.}, year={2019}, month={Mar}, pages={34–52} } @article{alexopoulos_goldsman_mokashi_tien_wilson_2019, title={Sequest: A Sequential Procedure for Estimating Quantiles in Steady-State Simulations}, volume={67}, ISSN={0030-364X}, DOI={10.1287/opre.2018.1829}, number={4}, journal={OPERATIONS RESEARCH}, author={Alexopoulos, Christos and Goldsman, David and Mokashi, Anup C. and Tien, Kal-Wen and Wilson, James R.}, year={2019}, pages={1162–1183} } @article{hicklin_ivy_wilson_cobb_payton_viswanathan_myers_2019, title={Simulation model of the relationship between cesarean section rates and labor duration}, volume={22}, ISSN={1386-9620 1572-9389}, url={http://dx.doi.org/10.1007/S10729-018-9449-3}, DOI={10.1007/s10729-018-9449-3}, abstractNote={Cesarean delivery is the most common major abdominal surgery in many parts of the world, and it accounts for nearly one-third of births in the United States. For a patient who requires a C-section, allowing prolonged labor is not recommended because of the increased risk of infection. However, for a patient who is capable of a successful vaginal delivery, performing an unnecessary C-section can have a substantial adverse impact on the patient's future health. We develop two stochastic simulation models of the delivery process for women in labor; and our objectives are (i) to represent the natural progression of labor and thereby gain insights concerning the duration of labor as it depends on the dilation state for induced, augmented, and spontaneous labors; and (ii) to evaluate the Friedman curve and other labor-progression rules, including their impact on the C-section rate and on the rates of maternal and fetal complications. To use a shifted lognormal distribution for modeling the duration of labor in each dilation state and for each type of labor, we formulate a percentile-matching procedure that requires three estimated quantiles of each distribution as reported in the literature. Based on results generated by both simulation models, we concluded that for singleton births by nulliparous women with no prior complications, labor duration longer than two hours (i.e., the time limit for labor arrest based on the Friedman curve) should be allowed in each dilation state; furthermore, the allowed labor duration should be a function of dilation state.}, number={4}, journal={Health Care Management Science}, publisher={Springer Science and Business Media LLC}, author={Hicklin, Karen T. and Ivy, Julie S. and Wilson, James R.
and Cobb Payton, Fay and Viswanathan, Meera and Myers, Evan R.}, year={2019}, month={Dec}, pages={635–657} } @inproceedings{moore_mcconnell_wilson_2018, title={Simulation-based Evaluation On Integrating Additive Manufacturing Capability In A Deployed Military Environment}, url={http://www.lib.ncsu.edu/resolver/1840.20/36302}, DOI={10.1109/wsc.2018.8632474}, abstractNote={This article develops a data-driven forecast of repair parts for the M109A6 Paladin self-propelled 155 mm howitzer, and this forecast drives a discrete-event simulation to assess requirements for Additive Manufacturing (AM) to be a feasible part of the U.S. Army’s expeditionary supply chain. Actual part demand from the initial invasion of Iraq in 2003 during Operation Iraqi Freedom (OIF) feeds a sample-path-based forecasting method to obtain part demand for each scenario. A simulation of a conceptualized deployed Army 3D-printing facility integrated into the supply chain evaluates the performance and feasibility of the different operational policies. Results indicate current technology could support one battery (or smaller unit) for parts below 100 cubic inches while keeping performance comparable with OIF. These results are incorporated in realistic recommendations for how the Army can potentially improve its supply chain practices with this progressive technology.}, note={annote: Moore, T. A., McConnell, B. M., & Wilson, J. R. (2018). Simulation-based Evaluation On Integrating Additive Manufacturing Capability In A Deployed Military Environment. Proceedings of the 2018 Winter Simulation Conference, 3721–3729.}, booktitle={Proceedings of the 2018 Winter Simulation Conference}, publisher={IEEE}, author={Moore, T.A. and McConnell, B.M. and Wilson, J.R.}, year={2018}, pages={3721–3729} } @article{capan_ivy_wilson_huddleston_2017, title={A stochastic model of acute-care decisions based on patient and provider heterogeneity}, volume={20}, ISSN={["1572-9389"]}, DOI={10.1007/s10729-015-9347-x}, abstractNote={The primary cause of preventable death in many hospitals is the failure to recognize and/or rescue patients from acute physiologic deterioration (APD). APD affects all hospitalized patients, potentially causing cardiac arrest and death. Identifying APD is difficult, and response timing is critical - delays in response represent a significant and modifiable patient safety issue. Hospitals have instituted rapid response systems or teams (RRT) to provide timely critical care for APD, with thresholds that trigger the involvement of critical care expertise. The National Early Warning Score (NEWS) was developed to define these thresholds. However, current triggers are inconsistent and ignore patient-specific factors. Further, acute care is delivered by providers with different clinical experience, resulting in quality-of-care variation. This article documents a semi-Markov decision process model of APD that incorporates patient and provider heterogeneity. The model allows for stochastically changing health states, while determining patient subpopulation-specific RRT-activation thresholds. The objective function minimizes the total time associated with patient deterioration and stabilization; and the relative values of nursing and RRT times can be modified. A case study from January 2011 to December 2012 identified six subpopulations. 
RRT activation was optimal for patients in "slightly concerning" health states (NEWS > 0) for all subpopulations, except surgical patients with low risk of deterioration for whom RRT was activated in "concerning" states (NEWS > 4). Clustering methods identified provider clusters considering RRT-activation preferences and estimation of stabilization-related resource needs. Providers with conservative resource estimates preferred waiting over activating RRT. This study provides simple practical rules for personalized acute care delivery.}, number={2}, journal={HEALTH CARE MANAGEMENT SCIENCE}, author={Capan, Muge and Ivy, Julie S. and Wilson, James R. and Huddleston, Jeanne M.}, year={2017}, month={Jun}, pages={187–206} } @article{alexopoulos_goldsman_mokashi_wilson_2017, title={Automated Estimation of Extreme Steady-State Quantiles via the Maximum Transformation}, volume={27}, ISSN={["1558-1195"]}, DOI={10.1145/3122864}, abstractNote={We present Sequem, a sequential procedure that delivers point and confidence-interval (CI) estimators for extreme steady-state quantiles of a simulation-generated process. Because it is specified completely, Sequem can be implemented directly and applied automatically. The method is an extension of the Sequest procedure developed by Alexopoulos et al. in 2014 to estimate nonextreme steady-state quantiles. Sequem exploits a combination of batching, sectioning, and the maximum transformation technique to achieve the following: (i) reduction in point-estimator bias arising from the simulation’s initial condition or from inadequate simulation run length; and (ii) adjustment of the CI half-length to compensate for the effects of skewness or autocorrelation on intermediate quantile point estimators computed from nonoverlapping batches of observations. Sequem’s CIs are designed to satisfy user-specified requirements concerning coverage probability and absolute or relative precision. In an experimental evaluation based on seven processes selected to stress-test the procedure, Sequem exhibited uniformly good performance.}, number={4}, journal={ACM TRANSACTIONS ON MODELING AND COMPUTER SIMULATION}, author={Alexopoulos, Christos and Goldsman, David and Mokashi, Anup C. and Wilson, James R.}, year={2017}, month={Dec} } @inproceedings{sargent_wilson_2017, title={Creation of the computer simulation archive}, DOI={10.1109/wsc.2017.8247796}, abstractNote={This paper discusses the founding of the Computer Simulation Archive at the North Carolina State University Libraries, obtaining the initial contributions to the archive, establishing endowments to support the archive, and forming the Archive Advisory Committee.}, booktitle={2017 winter simulation conference (wsc)}, author={Sargent, R. G. and Wilson, J. R.}, year={2017}, pages={324–329} } @inproceedings{goldsman_costa_goldsman_wilson_2017, title={History of the Winter Simulation Conference: Overview and notable facts and figures}, DOI={10.1109/wsc.2017.8247318}, abstractNote={The Winter Simulation Conference (WSC) is the leading international forum for disseminating recent advances in computer simulation. WSC also provides an unmatched occasion for interactions between simulation practitioners, researchers, and vendors working in all disciplines and in the academic, governmental, industrial, and military sectors. In this paper we discuss key aspects of WSC's evolution over the past fifty years. 
The discussion is based on our examination of all WSC Proceedings papers published between 1968 and 2016, which collectively document much of the history of simulation and WSC. We gather and summarize interesting facts and figures about WSC authors and their Proceedings papers so as to gain insights into conference dynamics and the interconnections between notable authors and between highly cited papers. We extract relevant information from the Web of Science, Scopus, and Google Scholar databases; and we present network visualizations of the interconnections between authors and between papers.}, booktitle={2017 winter simulation conference (wsc)}, author={Goldsman, D. and Costa, M. D. and Goldsman, P. and Wilson, J. R.}, year={2017}, pages={16–39} } @inproceedings{seminelli_wilson_mcconnell_2017, title={Implementing discrete event simulation to improve optometry clinic operations}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85014203113&partnerID=MN8TOARS}, DOI={10.1109/wsc.2016.7822258}, abstractNote={As the tempo of military operations slows, Army Medical Facilities are faced with a need to improve the efficiency of their clinics to provide timely service to the growing population of Soldiers who are spending more time at home station. Discrete event simulation was used to examine six scheduling and staffing policies for the Womack Army Medical Center's Optometry Clinic with a goal of increasing the daily patient throughput of the clinic with consideration to patient waiting times. The best policy increased clinic throughput by eight patients a day, generating an additional $314,000 in Relative Value Units (RVUs) annually, while only increasing patient wait times by 26%. As a minimum, increasing the walk-in provider's scheduled patient load by two enables the provider to optimally treat both scheduled and walk-in patients, with a $94,000 annual RVU increase. Implementation of these results will improve clinic performance, revenue, and increase Soldiers' access to care.}, booktitle={Proceedings - Winter Simulation Conference}, author={Seminelli, M.D. and Wilson, J.R. and McConnell, Brandon M.}, year={2017}, pages={2157–2168} } @article{lobo_denton_wilson_shah_smith_2017, title={Using claims data linked with electronic health records to monitor and improve adherence to medication}, volume={7}, ISSN={2472-5579 2472-5587}, url={http://dx.doi.org/10.1080/24725579.2017.1346728}, DOI={10.1080/24725579.2017.1346728}, abstractNote={ABSTRACT Poor adherence to medication is a serious problem in the United States, leading to complications and preventable hospitalizations, particularly for patients with chronic diseases. Interventions have been proposed as a means to improve adherence to medication, but the optimal time to perform an intervention has not been well studied. We provide a use case for how claims data linked with electronic health records (EHRs) can be used to monitor patient adherence to medication and provide a source of information to help decide when to perform an intervention. We propose a Markov decision process (MDP) model to determine when to perform adherence-improving interventions based on a patient's EHR. We consider the societal perspective where we trade off maximization of time to first adverse health event and minimization of cost of interventions, medication, and adverse events.
We use our model to evaluate the costs and benefits of implementing an EHR-based active surveillance system for adherence-improving interventions in the context of cardiovascular disease management for patients with type 2 diabetes. We also provide some theoretical insights into the structure of the optimal intervention policy and the influence of health risks and costs on intervention decisions.}, number={4}, journal={IISE Transactions on Healthcare Systems Engineering}, publisher={Informa UK Limited}, author={Lobo, J. M. and Denton, B. T. and Wilson, J. R. and Shah, N. D. and Smith, S. A.}, year={2017}, month={Jun}, pages={194–214} } @article{orgut_ivy_uzsoy_wilson_2016, title={Modeling for the equitable and effective distribution of donated food under capacity constraints}, volume={48}, ISSN={["1545-8830"]}, DOI={10.1080/0740817x.2015.1063792}, abstractNote={Abstract Mathematical models are presented and analyzed to facilitate a food bank's equitable and effective distribution of donated food among a population at risk for hunger. Typically exceeding the donated supply, demand is proportional to the poverty population within the food bank's service area. The food bank seeks to ensure a perfectly equitable distribution of food; i.e., each county in the service area should receive a food allocation that is exactly proportional to the county's demand such that no county is at a disadvantage compared to any other county. This objective often conflicts with the goal of maximizing effectiveness by minimizing the amount of undistributed food. Deterministic network-flow models are developed to minimize the amount of undistributed food while maintaining a user-specified upper bound on the absolute deviation of each county from a perfectly equitable distribution. An extension of this model identifies optimal policies for the allocation of additional receiving capacity to counties in the service area. A numerical study using data from a large North Carolina food bank illustrates the uses of the models. A probabilistic sensitivity analysis reveals the effect on the models' optimal solutions arising from uncertainty in the receiving capacities of the counties in the service area.}, number={3}, journal={IIE TRANSACTIONS}, author={Orgut, Irem Sengul and Ivy, Julie and Uzsoy, Reha and Wilson, James R.}, year={2016}, pages={252–266} } @article{thompson_wilson_2016, title={Multifractal detrended fluctuation analysis: Practical applications to financial time series}, volume={126}, ISSN={["1872-7166"]}, DOI={10.1016/j.matcom.2016.03.003}, abstractNote={To analyze financial time series exhibiting volatility clustering or other highly irregular behavior, we exploit multifractal detrended fluctuation analysis (MF-DFA). We summarize the use of local Hölder exponents, generalized Hurst exponents, and the multifractal spectrum in characterizing the way that the sample paths of a multifractal stochastic process exhibit light- or heavy-tailed fluctuations as well as short- or long-range dependence on different time scales. We detail the development of a robust, computationally efficient software tool for estimating the multifractal spectrum from a time series using MF-DFA, with special emphasis on selecting the algorithm’s parameters. The software is tested on simulated sample paths of Brownian motion, fractional Brownian motion, and the binomial multiplicative process to verify the accuracy of the resulting multifractal spectrum estimates. 
We also perform an in-depth analysis of General Electric’s stock price using conventional time series models, and we contrast the results with those obtained using MF-DFA.}, journal={MATHEMATICS AND COMPUTERS IN SIMULATION}, author={Thompson, James R. and Wilson, James R.}, year={2016}, month={Aug}, pages={63–88} } @article{alexopoulos_goldsman_tang_wilson_2016, title={SPSTS: A sequential procedure for estimating the steady-state mean using standardized time series}, volume={48}, ISSN={["1545-8830"]}, DOI={10.1080/0740817x.2016.1163443}, abstractNote={ABSTRACT This article presents SPSTS, an automated sequential procedure for computing point and Confidence-Interval (CI) estimators for the steady-state mean of a simulation-generated process subject to user-specified requirements for the CI coverage probability and relative half-length. SPSTS is the first sequential method based on Standardized Time Series (STS) area estimators of the steady-state variance parameter (i.e., the sum of covariances at all lags). Whereas its leading competitors rely on the method of batch means to remove bias due to the initial transient, estimate the variance parameter, and compute the CI, SPSTS relies on the signed areas corresponding to two orthonormal STS area variance estimators for these tasks. In successive stages of SPSTS, standard tests for normality and independence are applied to the signed areas to determine (i) the length of the warm-up period, and (ii) a batch size sufficient to ensure adequate convergence of the associated STS area variance estimators to their limiting chi-squared distributions. SPSTS's performance is compared experimentally with that of recent batch-means methods using selected test problems of varying degrees of difficulty. SPSTS performed comparatively well in terms of its average required sample size as well as the coverage and average half-length of the final CIs.}, number={9}, journal={IIE TRANSACTIONS}, author={Alexopoulos, Christos and Goldsman, David and Tang, Peng and Wilson, James R.}, year={2016}, month={Sep}, pages={864–880} } @article{thompson_wilson_2015, title={Agent-based simulations of financial markets: zero- and positive-intelligence models}, volume={91}, ISSN={["1741-3133"]}, DOI={10.1177/0037549715582252}, abstractNote={To analyze the impact of intelligent traders with differing fundamental motivations on agent-based simulations of financial markets, we construct both zero-intelligence and positive-intelligence models of those markets using the MASON agent-based modeling framework. We exploit our software implementation of multifractal detrended fluctuation analysis (MF-DFA) to analyze the price paths generated by both simulation models as well as the price paths of selected stocks traded on the New York Stock Exchange. We study the changes in the models’ macrolevel price paths when altering some of the microlevel agent behaviors; and we compare and contrast the multifractal properties of the zero- and positive-intelligence price paths with those properties of the selected real price paths. For the positive-intelligence and real price paths, we generally observed long-range dependence in the small-magnitude fluctuations and short-range dependence in the large-magnitude fluctuations. On the other hand, the zero-intelligence price paths failed to exhibit the multifractal properties seen in the selected real price paths.}, number={6}, journal={SIMULATION-TRANSACTIONS OF THE SOCIETY FOR MODELING AND SIMULATION INTERNATIONAL}, author={Thompson, James R. 
and Wilson, James R.}, year={2015}, month={Jun}, pages={527–552} } @article{tejada_ivy_wilson_ballan_diehl_yankaskas_2015, title={Combined DES/SD model of breast cancer screening for older women, I: Natural-history simulation}, volume={47}, ISSN={["1545-8830"]}, DOI={10.1080/0740817x.2014.959671}, abstractNote={Two companion articles develop and exploit a simulation modeling framework to evaluate the effectiveness of breast cancer screening policies for U.S. women who are at least 65 years old. This first article examines the main components in the breast cancer screening-and-treatment process for older women; then it introduces a two-phase simulation approach to defining and modeling those components. Finally this article discusses the first-phase simulation, a natural-history model of the incidence and progression of untreated breast cancer for randomly sampled individuals from the designated population of older U.S. women. The companion article details the second-phase simulation, an integrated screening-and-treatment model that uses information about the genesis of breast cancer in the sampled individuals as generated by the natural-history model to estimate the benefits of different policies for screening the designated population and treating the women afflicted with the disease. Both simulation models are composed of interacting sub-models that represent key aspects of the incidence, progression, screening, treatment, survival, and cost of breast cancer in the population of older U.S. women as well as the overall structure of the system for detecting and treating the disease.}, number={6}, journal={IIE TRANSACTIONS}, author={Tejada, Jeremy J. and Ivy, Julie S. and Wilson, James R. and Ballan, Matthew J. and Diehl, Kathleen M. and Yankaskas, Bonnie C.}, year={2015}, month={Jun}, pages={600–619} } @article{wang_kim_huo_hur_wilson_2015, title={Monitoring nonlinear profiles adaptively with a wavelet-based distribution-free CUSUM chart}, volume={53}, ISSN={["1366-588X"]}, DOI={10.1080/00207543.2015.1029085}, abstractNote={A wavelet-based distribution-free tabular CUSUM chart based on adaptive thresholding is designed for rapidly detecting shifts in the mean of a high-dimensional profile whose noise components have a continuous nonsingular multivariate distribution. After computing a discrete wavelet transform of the noise vectors for randomly sampled Phase I (in-control) profiles, the proposed chart uses a matrix-regularization method to estimate the covariance matrix of the wavelet-transformed noise vectors; then, those vectors are aggregated (batched) so that the non-overlapping batch means of the wavelet-transformed noise vectors have manageable covariances. Lower and upper in-control thresholds are computed for the resulting batch means of the wavelet-transformed noise vectors using the associated marginal Cornish–Fisher expansions that have been suitably adjusted for between-component correlations. From the thresholded batch means of the wavelet-transformed noise vectors, Hotelling's T²-type statistics are computed to set the parameters of a CUSUM procedure. To monitor shifts in the mean profile during Phase II (regular) operation, the chart computes a similar Hotelling's T²-type statistic from successive thresholded batch means of the wavelet-transformed noise vectors using the in-control thresholds; then it applies the CUSUM procedure to the resulting T²-type statistics.
Experimentation with several normal and non-normal test processes revealed that the proposed chart outperformed existing non-adaptive profile-monitoring schemes.}, number={15}, journal={INTERNATIONAL JOURNAL OF PRODUCTION RESEARCH}, author={Wang, Huizhu and Kim, Seong-Hee and Huo, Xiaoming and Hur, Youngmi and Wilson, James R.}, year={2015}, month={Aug}, pages={4648–4667} } @article{lobo_wilson_thoney_hodgson_king_2014, title={A practical method for evaluating worker allocations in large-scale dual resource constrained job shops}, volume={46}, ISSN={0740-817X 1545-8830}, url={http://dx.doi.org/10.1080/0740817X.2014.892231}, DOI={10.1080/0740817X.2014.892231}, abstractNote={In two recent articles, Lobo et al. present algorithms for allocating workers to machine groups in a Dual Resource Constrained (DRC) job shop so as to minimize Lmax, the maximum job lateness. Procedure LBSA delivers an effective lower bound on Lmax, while the heuristic HSP delivers an allocation whose associated schedule has a (usually) near-optimal Lmax value. To evaluate an HSP-based allocation's quality in a given DRC job shop, the authors first compute the gap between HSP's associated Lmax value and LBSA's lower bound. Next they refer this gap to the distribution of a “quasi-optimality” gap that is generated as follows: (i) independent simulation replications of the given job shop are obtained by randomly sampling each job's characteristics; and (ii) for each replication, the associated quasi-optimality gap is computed by enumerating all feasible allocations. Because step (ii) is computationally intractable in large-scale problems, this follow-up article formulates a revised step (ii) wherein each simulation invokes an improved version of HSP to yield an approximation to the quasi-optimality gap. Based on comprehensive experimentation, it is concluded that the distribution based on the improved heuristic did not differ significantly from its enumeration-based counterpart; and the revised evaluation method was computationally tractable in practice. Two examples illustrate the use of the revised method.}, number={11}, journal={IIE Transactions}, publisher={Informa UK Limited}, author={Lobo, Benjamin J. and Wilson, James R. and Thoney, Kristin A. and Hodgson, Thom J. and King, Russell E.}, year={2014}, month={Jul}, pages={1209–1226} } @article{tejada_ivy_king_wilson_ballan_kay_diehl_yankaskas_2014, title={Combined DES/SD model of breast cancer screening for older women, II: screening-and-treatment simulation}, volume={46}, ISSN={0740-817X 1545-8830}, url={http://dx.doi.org/10.1080/0740817X.2013.851436}, DOI={10.1080/0740817x.2013.851436}, abstractNote={In the second article of a two-article sequence, the focus is on a simulation model for screening and treatment of breast cancer in U.S. women of age 65+. The first article details a natural-history simulation model of the incidence and progression of untreated breast cancer in a representative simulated population of older U.S. women, which ultimately generates a database of untreated breast cancer histories for individuals in the simulated population. Driven by the resulting database, the screening-and-treatment simulation model is composed of discrete-event simulation (DES) and system dynamics (SD) submodels. For each individual in the simulated population, the DES submodel simulates screening policies and treatment procedures to estimate the resulting survival rates and the costs of screening and treatment. The SD submodel represents the overall structure and operation of the U.S.
system for detecting and treating breast cancer. The main results and conclusions are summarized, including a final recommendation for annual screening between ages 65 and 80. A discussion is also presented on how both the natural-history and screening-and-treatment simulations can be used for performance comparisons of proposed screening policies based on overall cost-effectiveness, the numbers of life-years and quality-adjusted life-years saved, and the main components of the total cost incurred by each policy.}, number={7}, journal={IIE Transactions}, publisher={Informa UK Limited}, author={Tejada, Jeremy J. and Ivy, Julie S. and King, Russell E. and Wilson, James R. and Ballan, Matthew J. and Kay, Michael G. and Diehl, Kathleen M. and Yankaskas, Bonnie C.}, year={2014}, month={Mar}, pages={707–727} } @article{zhang_ivy_wilson_diehl_yankaskas_2014, title={Competing risks analysis in mortality estimation for breast cancer patients from independent risk groups}, volume={17}, ISSN={["1572-9389"]}, DOI={10.1007/s10729-013-9255-x}, abstractNote={This study quantifies breast cancer mortality in the presence of competing risks for complex patients. Breast cancer behaves differently in different patient populations, which can have significant implications for patient survival; hence these differences must be considered when making screening and treatment decisions. Mortality estimation for breast cancer patients has been a significant research question. Accurate estimation is critical for clinical decision making, including recommendations. In this study, a competing risks framework is built to analyze the effect of patient risk factors and cancer characteristics on breast cancer and other cause mortality. To estimate mortality probabilities from breast cancer and other causes as a function of not only the patient's age or race but also biomarkers for estrogen and progesterone receptor status, a nonparametric cumulative incidence function is formulated using data from the community-based Carolina Mammography Registry. Based on the log(-log) transformation, confidence intervals are constructed for mortality estimates over time. To compare mortality probabilities in two independent risk groups at a given time, a method with improved power is formulated using the log(-log) transformation.}, number={3}, journal={HEALTH CARE MANAGEMENT SCIENCE}, author={Zhang, Shengfan and Ivy, Julie S. and Wilson, James R. and Diehl, Kathleen M. and Yankaskas, Bonnie C.}, year={2014}, month={Sep}, pages={259–269} } @article{lobo_hodgson_king_thoney_wilson_2013, title={Allocating job-shop manpower to minimize L-max : Optimality criteria, search heuristics, and probabilistic quality metrics}, volume={40}, ISSN={0305-0548}, url={http://dx.doi.org/10.1016/J.COR.2013.02.008}, DOI={10.1016/J.COR.2013.02.008}, abstractNote={We address questions raised by Lobo et al. in 2012 regarding the NP-hard problem of finding an optimal allocation of workers to machine groups in a job shop so as to minimize Lmax, the maximum job lateness. Lobo et al. formulated a lower bound on Lmax given a worker allocation, and an algorithm to find an allocation yielding the smallest such lower bound. In this article we establish optimality criteria to verify that a given allocation corresponds to a schedule that yields the minimum value of Lmax. 
For situations in which the optimality criteria are not satisfied, we present the Heuristic Search Procedure (HSP), which sequentially invokes three distinct search heuristics, the Local Neighborhood Search Strategy (LNSS), Queuing Time Search Strategy 1 (QSS1), and Queuing Time Search Strategy 2 (QSS2), before delivering the best allocation encountered by LNSS, QSS1, and QSS2. HSP is designed to find allocations allowing a heuristic scheduler to generate schedules with a smaller value of Lmax than that achieved via the allocation yielding the final lower bound of Lobo et al. Comprehensive experimentation indicated that HSP delivered significant reductions in Lmax. We also estimate a probability distribution for evaluating the quality (closeness to optimality) of an allocation delivered by a heuristic search procedure such as HSP. This distribution permits assessing the user's confidence that a given allocation will enable the heuristic scheduler to generate its best possible schedule—i.e., the schedule with the heuristic scheduler's smallest achievable Lmax value.}, number={10}, journal={Computers & Operations Research}, publisher={Elsevier BV}, author={Lobo, Benjamin J. and Hodgson, Thom J. and King, Russell E. and Thoney, Kristin A. and Wilson, James R.}, year={2013}, month={Oct}, pages={2569–2584} } @article{lobo_hodgson_king_thoney_wilson_2013, title={An effective lower bound on Lmax in a worker-constrained job shop}, volume={40}, ISSN={0305-0548}, url={http://dx.doi.org/10.1016/j.cor.2012.07.003}, DOI={10.1016/j.cor.2012.07.003}, abstractNote={A common industrial operation is a dual resource constrained job shop where: (a) the objective is to minimize L max , the maximum job lateness; (b) machines are organized into groups; and (c) each worker is assigned to a specific machine group. Because this problem is NP-hard, finding optimal solutions by enumeration is impractical. This paper details a procedure to compute a lower bound on L max that will be used in follow-up work to effectively evaluate the absolute performance of heuristic solutions. Given an allocation of workers to machine groups, a lower bound on L max is first computed for each machine group using a network-flow formulation. The lower bound on L max for the job shop is the largest of the lower bounds for the machine groups. A search algorithm then finds a worker allocation yielding the smallest such lower bound on L max for the job shop; and the latter quantity is our proposed lower bound on L max . Given a worker allocation, we use the Virtual Factory (a heuristic scheduler developed by Hodgson et al. in 1998) to generate a schedule. Experiments with a wide variety of job shops indicated that the proposed lower bound on L max could often be achieved by a Virtual Factory schedule based on the worker allocation yielding this lower bound. However, there were problem instances for which other worker allocations enabled the Virtual Factory to generate better schedules. Follow-up work provides optimality criteria, and heuristics to find improved allocations if these criteria are not satisfied.}, number={1}, journal={Computers & Operations Research}, publisher={Elsevier BV}, author={Lobo, Benjamin J. and Hodgson, Thom J. and King, Russell E. and Thoney, Kristin A. 
and Wilson, James R.}, year={2013}, month={Jan}, pages={328–343} } @article{lee_hur_kim_wilson_2012, title={Monitoring nonlinear profiles using a wavelet-based distribution-free CUSUM chart}, volume={50}, ISSN={["1366-588X"]}, DOI={10.1080/00207543.2012.655865}, abstractNote={WDFTC is a wavelet-based distribution-free CUSUM chart for detecting shifts in the mean of a profile with noisy components. Exploiting a discrete wavelet transform (DWT) of the mean in-control profile, WDFTC selects a reduced-dimension vector of the associated DWT components from which the mean in-control profile can be approximated with minimal weighted relative reconstruction error. Based on randomly sampled Phase I (in-control) profiles, the covariance matrix of the corresponding reduced-dimension DWT vectors is estimated using a matrix-regularisation method; then the DWT vectors are aggregated (batched) so that the non-overlapping batch means of the reduced-dimension DWT vectors have manageable covariances. To monitor shifts in the mean profile during Phase II operation, WDFTC computes a Hotelling's T 2-type statistic from successive non-overlapping batch means and applies a CUSUM procedure to those statistics, where the associated control limits are evaluated analytically from the Phase I data. Experimentation with several normal and non-normal test processes revealed that WDFTC was competitive with existing profile-monitoring schemes.}, number={22}, journal={INTERNATIONAL JOURNAL OF PRODUCTION RESEARCH}, author={Lee, Joongsup and Hur, Youngmi and Kim, Seong-Hee and Wilson, James R.}, year={2012}, pages={6574–6594} } @article{tafazzoli_steiger_wilson_2011, title={N-Skart: A Nonsequential Skewness- and Autoregression-Adjusted Batch-Means Procedure for Simulation Analysis}, volume={56}, ISSN={["1558-2523"]}, DOI={10.1109/tac.2010.2052137}, abstractNote={We discuss N-Skart, a nonsequential procedure designed to deliver a confidence interval (CI) for the steady-state mean of a simulation output process when the user supplies a single simulation-generated time series of arbitrary size and specifies the required coverage probability for a CI based on that data set. N-Skart is a variant of the method of batch means that exploits separate adjustments to the half-length of the CI so as to account for the effects on the distribution of the underlying Student's t -statistic that arise from skewness (nonnormality) and autocorrelation of the batch means. If the sample size is sufficiently large, then N-Skart delivers not only a CI but also a point estimator for the steady-state mean that is approximately free of initialization bias. In an experimental performance evaluation involving a wide range of test processes and sample sizes, N-Skart exhibited close conformance to the user-specified CI coverage probabilities.}, number={2}, journal={IEEE TRANSACTIONS ON AUTOMATIC CONTROL}, author={Tafazzoli, Ali and Steiger, Natalie M. 
and Wilson, James R.}, year={2011}, month={Feb}, pages={254–264} } @article{wilson_2011, title={Note on 'Influences of Resource Limitations and Transmission Costs on Epidemic Simulations and Critical Thresholds in Scale-Free Networks'}, volume={87}, ISSN={["0037-5497"]}, DOI={10.1177/0037549710366018}, abstractNote={In a recent paper entitled ‘Influences of Resource Limitations and Transmission Costs on Epidemic Simulations and Critical Thresholds in Scale-Free Networks’ by Huang et al., the authors attempted to establish a key characteristic of epidemic dynamics in a scale-free network when properly accounting for the cost of transmitting the infection at each node and the resources available for transmission to that node. The main input parameter is the effective rate of spreading the infection, i.e. the instantaneous rate at which the infection is spread to an uninfected node via a single link to an infected node. The primary result is the existence of a positive critical threshold for the infection-spreading rate at or below which the epidemic dies out and above which the epidemic is spread through the network and ultimately reaches a steady-state non-vanishing condition. Some flaws in the authors’ proof of this result are discussed, and an alternative derivation is provided that sheds additional light on the transient and steady-state behavior of the system. The alternative derivation may be adapted to the analysis of other scale-free networks with different features.}, number={3}, journal={SIMULATION-TRANSACTIONS OF THE SOCIETY FOR MODELING AND SIMULATION INTERNATIONAL}, author={Wilson, James R.}, year={2011}, month={Mar}, pages={262–266} } @article{tafazzoli_wilson_lada_steiger_2011, title={Performance of Skart: A Skewness- and Autoregression-Adjusted Batch Means Procedure for Simulation Analysis}, volume={23}, ISSN={["1526-5528"]}, DOI={10.1287/ijoc.1100.0401}, abstractNote={ An analysis is given for an extensive experimental performance evaluation of Skart, an automated sequential batch means procedure for constructing an asymptotically valid confidence interval (CI) on the steady-state mean of a simulation output process. Skart is designed to deliver a CI satisfying user-specified requirements on absolute or relative precision as well as coverage probability. Skart exploits separate adjustments to the half-length of the classical batch means CI so as to account for the effects on the distribution of the underlying Student's t-statistic that arise from skewness (nonnormality) and autocorrelation of the batch means. Skart also delivers a point estimator for the steady-state mean that is approximately free of initialization bias. In an experimental performance evaluation involving a wide range of test processes, Skart compared favorably with other steady-state simulation analysis methods—namely, its predecessors ASAP3, WASSP, and SBatch, as well as ABATCH, LBATCH, the Heidelberger–Welch procedure, and the Law–Carson procedure. Specifically, Skart exhibited competitive sampling efficiency and closer conformance to the given CI coverage probabilities than the other procedures, especially in the most difficult test processes. }, number={2}, journal={INFORMS JOURNAL ON COMPUTING}, author={Tafazzoli, Ali and Wilson, James R. and Lada, Emily K. 
and Steiger, Natalie M.}, year={2011}, pages={297–314} } @article{tafazzoli_wilson_2011, title={Skart: A skewness- and autoregression-adjusted batch-means procedure for simulation analysis}, volume={43}, ISSN={["0740-817X"]}, DOI={10.1080/0740817x.2010.504688}, abstractNote={We discuss Skart, an automated batch-means procedure for constructing a skewness- and autoregression-adjusted confidence interval for the steady-state mean of a simulation output process. Skart is a sequential procedure designed to deliver a confidence interval that satisfies user-specified requirements concerning not only coverage probability but also the absolute or relative precision provided by the half-length. Skart exploits separate adjustments to the half-length of the classical batch-means confidence interval so as to account for the effects on the distribution of the underlying Student's t-statistic that arise from nonnormality and autocorrelation of the batch means. Skart also delivers a point estimator for the steady-state mean that is approximately free of initialization bias. In an experimental performance evaluation involving a wide range of test processes, Skart compared favorably with other simulation analysis methods-namely, its predecessors ASAP3, WASSP, and SBatch as well as ABATCH, LBATCH, the Heidelberger-Welch procedure, and the Law-Carson procedure.}, number={2}, journal={IIE TRANSACTIONS}, author={Tafazzoli, Ali and Wilson, James R.}, year={2011}, pages={110–128} } @article{mokashi_tejada_yousefi_xu_wilson_tafazzoli_steiger_2010, title={PERFORMANCE COMPARISON OF MSER-5 AND N-SKART ON THE SIMULATION START-UP PROBLEM}, ISSN={["0891-7736"]}, DOI={10.1109/wsc.2010.5679094}, abstractNote={We summarize some results from an extensive performance comparison of the procedures MSER-5 and N-Skart for handling the simulation start-up problem. We assume a fixed-length simulation-generated time series from which point and confidence-interval (CI) estimators of the steady-state mean are sought. MSER-5 uses the data-truncation point that minimizes the half-length of the usual batch-means CI computed from the truncated data set. N-Skart uses a randomness test to determine the data-truncation point beyond which spaced batch means are approximately independent of each other and the simulation's initial condition; then using truncated nonspaced batch means, N-Skart exploits separate adjustments to the CI half-length that account for the effects on the distribution of the underlying Student's t-statistic arising from skewness and autocorrelation of the batch means. In most of the test problems, N-Skart's point estimator had smaller bias than that of MSER-5; moreover in all cases, N-Skart's CI estimator outperformed that of MSER-5.}, journal={PROCEEDINGS OF THE 2010 WINTER SIMULATION CONFERENCE}, author={Mokashi, Anup C. and Tejada, Jeremy J. and Yousefi, Saeideh and Xu, Tianxiang and Wilson, James R. and Tafazzoli, Ali and Steiger, Natalie M.}, year={2010}, pages={971–982} } @article{kuhl_ivy_lada_steiger_wagner_wilson_2010, title={Univariate input models for stochastic simulation}, volume={4}, ISSN={1747-7778 1747-7786}, url={http://dx.doi.org/10.1057/jos.2009.31}, DOI={10.1057/jos.2009.31}, abstractNote={Techniques are presented for modelling and then randomly sampling many of the continuous univariate probabilistic input processes that drive discrete-event simulation experiments.
Emphasis is given to the generalized beta distribution family, the Johnson translation system of distributions, and the Bézier distribution family because of the flexibility of these families to model a wide range of distributional shapes that arise in practical applications. Methods are described for rapidly fitting these distributions to data or to subjective information (expert opinion) and for randomly sampling from the fitted distributions. Also discussed are applications ranging from pharmaceutical manufacturing and medical decision analysis to smart-materials research and health-care systems analysis.}, number={2}, journal={Journal of Simulation}, publisher={Informa UK Limited}, author={Kuhl, M E and Ivy, J S and Lada, E K and Steiger, N M and Wagner, M A and Wilson, J R}, year={2010}, month={Jun}, pages={81–97} } @book{alexopoulos_goldsman_wilson_2009, title={Advancing the Frontiers of Simulation: A festschrift in honor of George Samuel Fishman}, ISBN={9781441908162}, publisher={New York: Springer Verlag}, author={Alexopoulos, C. and Goldsman, D. and Wilson, J. R.}, editor={Alexopoulos, C. and Goldsman, D. and Wilson, J. R.Editors}, year={2009} } @article{antonini_alexopoulos_goldsman_wilson_2009, title={Area variance estimators for simulation using folded standardized time series}, volume={41}, ISSN={["1545-8830"]}, DOI={10.1080/07408170802331268}, abstractNote={We estimate the variance parameter of a stationary simulation-generated process using “folded” versions of standardized time series area estimators. Asymptotically as the sample size increases, different folding levels yield unbiased estimators that are independent scaled chi-squared variates, each with one degree of freedom. This result is exploited to formulate improved variance estimators based on the combination of multiple levels as well as the use of batching. The improved estimators preserve the asymptotic bias properties of their predecessors, but have substantially lower asymptotic variances. The performance of the new variance estimators is demonstrated in a first-order autoregressive process with autoregressive parameter 0.9 and in the queue-waiting-time process for an M/M/1 queue with server utilization 0.8. [Supplementary materials are available for this article. Go to the publisher's online edition of IIE Transactions for the following free supplemental resource: Appendix]}, number={2}, journal={IIE TRANSACTIONS}, author={Antonini, Claudia and Alexopoulos, Christos and Goldsman, David and Wilson, James R.}, year={2009}, pages={134–144} } @article{lee_alexopoulos_goldsman_kim_tsui_wilson_2009, title={Monitoring autocorrelated processes using a distribution-free tabular CUSUM chart with automated variance estimation}, volume={41}, ISSN={["1545-8830"]}, DOI={10.1080/07408170902906035}, abstractNote={We formulate and evaluate distribution-free statistical process control (SPC) charts for monitoring shifts in the mean of an autocorrelated process when a training data set is used to estimate the marginal variance of the process and the variance parameter (i.e., the sum of covariances at all lags). Two alternative variance estimators are adapted for automated use in DFTC-VE, a distribution-free tabular CUSUM chart, based on the simulation-analysis methods of standardized time series and a simplified combination of autoregressive representation and non-overlapping batch means. 
Extensive experimentation revealed that these variance estimators did not seriously degrade DFTC-VE's performance compared with its performance using the exact values of the marginal variance and the variance parameter. Moreover, DFTC-VE's performance compared favorably with that of other competing distribution-free SPC charts. [Supplementary materials are available for this article. Go to the publisher's online edition of IIE Transactions for the following free supplementary resource: Appendix]}, number={11}, journal={IIE TRANSACTIONS}, author={Lee, Joongsup and Alexopoulos, Christos and Goldsman, David and Kim, Seong-Hee and Tsui, Kwok-Leung and Wilson, James R.}, year={2009}, month={Nov}, pages={979–994} } @article{alexopoulos_goldsman_fontanesi_kopald_wilson_2008, title={Modeling patient arrivals in community clinics}, volume={36}, ISSN={["0305-0483"]}, DOI={10.1016/j.omega.2005.07.013}, abstractNote={We develop improved methods for modeling and simulating the streams of patients arriving at a community clinic. In previous practice, random (unscheduled) patient arrivals were often assumed to follow an ordinary Poisson process (so the corresponding patient interarrival times were randomly sampled from an exponential distribution); and for scheduled arrivals, each patient's tardiness (i.e., the deviation from the scheduled appointment time) was often assumed to be randomly sampled from a normal distribution. A thorough analysis of patient arrival times, obtained from detailed workflow observations in nine community clinics, indicates these assumptions are not generally valid, and the tardiness data sets for this study are best modeled by unbounded Johnson distributions. We also propose a nonhomogeneous Poisson process to model the random patient arrivals; we review a nonparametric approach to estimating the associated mean-value function; and we describe an algorithm for generating random patient arrivals from the estimated model. The adequacy of this model of random patient arrivals can be assessed by standard goodness-of-fit tests. These findings are important since testable scheduling optimization strategies must be based upon accurate models for both random and scheduled patient arrivals. The impacts on modeling, as well as implications for practice management, are discussed.}, number={1}, journal={OMEGA-INTERNATIONAL JOURNAL OF MANAGEMENT SCIENCE}, author={Alexopoulos, Christos and Goldsman, David and Fontanesi, John and Kopald, David and Wilson, James R.}, year={2008}, month={Feb}, pages={33–43} } @article{lada_steiger_wilson_2008, title={SBatch: A spaced batch means procedure for steady-state simulation analysis}, volume={2}, ISSN={1747-7778 1747-7786}, url={http://dx.doi.org/10.1057/jos.2008.11}, DOI={10.1057/jos.2008.11}, abstractNote={We discuss SBatch, a simplified procedure for steady-state simulation analysis that is based on spaced batch means, incorporating many advantages of its predecessors ASAP3 and WASSP while avoiding many of their disadvantages. SBatch is a sequential procedure designed to produce a confidence-interval (CI) estimator for the steady-state mean response that satisfies user-specified precision and coverage-probability requirements. First SBatch determines a batch size and an interbatch spacer size such that beyond the initial spacer, the spaced batch means approximately form a stationary first-order autoregressive process whose lag-one correlation does not significantly exceed 0.8. 
Next SBatch delivers a correlation-adjusted CI based on the sample variance and lag-one correlation of the spaced batch means as well as the grand mean of all the individual observations beyond the initial spacer. In an experimental evaluation on a broad range of test problems, SBatch compared favourably with ASAP3 and WASSP.}, number={3}, journal={Journal of Simulation}, publisher={Informa UK Limited}, author={Lada, E K and Steiger, N M and Wilson, J R}, year={2008}, month={Nov}, pages={170–185} } @article{tafazzoli_wilson_lada_steiger_2008, title={SKART: A SKEWNESS- AND AUTOREGRESSION-ADJUSTED BATCH-MEANS PROCEDURE FOR SIMULATION ANALYSIS}, ISBN={["978-1-4244-2707-9"]}, DOI={10.1109/wsc.2008.4736092}, abstractNote={We discuss Skart, an automated batch-means procedure for constructing a skewness- and autoregression-adjusted confidence interval for the steady-state mean of a simulation output process. Skart is a sequential procedure designed to deliver a confidence interval that satisfies user-specified requirements concerning not only coverage probability but also the absolute or relative precision provided by the half-length. Skart exploits separate adjustments to the half-length of the classical batch-means confidence interval so as to account for the effects on the distribution of the underlying Student's t -statistic that arise from nonnormality and autocorrelation of the batch means. Skart also delivers a point estimator for the steady-state mean that is approximately free of initialization bias. In an experimental performance evaluation involving a wide range of test processes, Skart compared favorably with other simulation analysis methods-namely, its predecessors ASAP3, WASSP, and SBatch as well as ABATCH, LBATCH, the Heidelberger-Welch procedure, and the Law-Carson procedure.}, journal={2008 WINTER SIMULATION CONFERENCE, VOLS 1-5}, author={Tafazzoli, Ali and Wilson, James R. and Lada, Emily K. and Steiger, Natalie M.}, year={2008}, pages={387-+} } @article{kim_alexopoulos_tsui_wilson_2007, title={A distribution-free tabular CUSUM chart for autocorrelated data}, volume={39}, ISSN={["0740-817X"]}, DOI={10.1080/07408170600743946}, abstractNote={A distribution-free tabular CUSUM chart called DFTC is designed to detect shifts in the mean of an autocorrelated process. The chart's Average Run Length (ARL) is approximated by generalizing Siegmund's ARL approximation for the conventional tabular CUSUM chart based on independent and identically distributed normal observations. Control limits for DFTC are computed from the generalized ARL approximation. Also discussed are the choice of reference value and the use of batch means to handle highly correlated processes. 
The performance of DFTC compared favorably with that of other distribution-free procedures in stationary test processes having various types of autocorrelation functions as well as normal or nonnormal marginals.}, number={3}, journal={IIE TRANSACTIONS}, author={Kim, Seong-Hee and Alexopoulos, Christos and Tsui, Kwok-Leung and Wilson, James R.}, year={2007}, month={Mar}, pages={317–330} } @article{wilson_2007, title={Editor's introduction: Special issue honoring Perwez Shahabuddin}, volume={17}, ISSN={["1049-3301"]}, DOI={10.1145/1225275.1225276}, abstractNote={This special issue of ACM Transactions on Modeling and Computer Simulation (TOMACS) honors Perwez Shahabuddin, a major contributor to the simulation literature on rare event simulation—in particular, the associated theory and methodology of importance sampling (including splitting and heavy-tailed importance sampling) and applications of these techniques to highly reliable systems, finance, and risk management. Two of Perwez’s last archival journal articles were under review for publication in TOMACS at the time he passed away, and we are proud to include both of those articles in this special issue. Also considered for inclusion in this issue were several other articles on rare event simulation and importance sampling by some of Perwez’s numerous colleagues and collaborators around the world. Because they required more extensive revision than could be completed in time for publication of the special issue, some of these articles should ultimately appear in future regular issues of TOMACS. The lead article is appropriately titled “Perwez Shahabuddin, 1962–2005: A Professional Appreciation.” In this article, Andradóttir, Glasserman, Glynn, Heidelberger, and Juneja survey Perwez’s numerous professional achievements. The authors discuss Perwez’s groundbreaking work on the technique of balanced failure biasing for simulation-based analysis of highly reliable systems that he did while he was a graduate student at Stanford University (1985–1990) and a staff member at IBM Research (1990–1995). In surveying Perwez’s work while he served on the faculty of the Department of Industrial Engineering and Operations Research at Columbia University (1995–2005), the authors discuss Perwez’s accomplishments as a teacher, doctoral-student adviser, and editor of archival journals as well as his research contributions on rare event simulation (including splitting and methods for handling heavy-tailed random variables), finance (including options pricing and interest rate modeling), and risk management (including portfolio risk assessment and project management). The second article is titled “Asymptotics and Fast Simulation for Tail Probabilities of Maximum of Sums of Few Random Variables.” In this article, Juneja, Karandikar, and Shahabuddin address the problem of estimating the probability that an increasing threshold will be exceeded by the maximum of several (finite) sums of light- or heavy-tailed random variables.
The authors develop asymptotically optimal importance sampling techniques for efficient estimation of such exceedance probabilities, including the method of asymptotic hazard rate twisting for handling heavy-tailed random variables in general applications; and they also discuss applications of these techniques to activity network-based project management.}, number={2}, journal={ACM TRANSACTIONS ON MODELING AND COMPUTER SIMULATION}, author={Wilson, James R.}, year={2007}, month={Apr} } @article{alexopoulos_argon_goldsman_steiger_tokol_wilson_2007, title={Efficient computation of overlapping variance estimators for simulation}, volume={19}, ISSN={["1526-5528"]}, DOI={10.1287/ijoc.1060.0198}, abstractNote={ For a steady-state simulation output process, we formulate efficient algorithms to compute certain estimators of the process variance parameter (i.e., the sum of covariances at all lags), where the estimators are derived in principle from overlapping batches separately and then averaged over all such batches. The algorithms require order-of-sample-size work to evaluate overlapping versions of the area and Cramér–von Mises estimators arising in the method of standardized time series. Recently, Alexopoulos et al. showed that, compared with estimators based on nonoverlapping batches, the estimators based on overlapping batches achieve reduced variance while maintaining similar bias asymptotically as the batch size increases. We provide illustrative analytical and Monte Carlo results for M/M/1 queue waiting times and for a first-order autoregressive process. We also present evidence that the asymptotic distribution of each overlapping variance estimator can be closely approximated using an appropriately rescaled chi-squared random variable with matching mean and variance. }, number={3}, journal={INFORMS JOURNAL ON COMPUTING}, author={Alexopoulos, Christos and Argon, Nilay Tanik and Goldsman, David and Steiger, Natalie M. and Tokol, Gamze and Wilson, James R.}, year={2007}, pages={314–327} } @article{aktaran-kalaycı_alexopoulos_argon_goldsman_wilson_2007, title={Exact expected values of variance estimators for simulation}, volume={54}, ISSN={0894-069X 1520-6750}, url={http://dx.doi.org/10.1002/nav.20215}, DOI={10.1002/nav.20215}, abstractNote={Abstract}, number={4}, journal={Naval Research Logistics}, publisher={Wiley}, author={Aktaran-Kalaycı, Tûba and Alexopoulos, Christos and Argon, Nilay Tanık and Goldsman, David and Wilson, James R.}, year={2007}, pages={397–410} } @article{aktaran-kalayci_goldsman_wilson_2007, title={Linear combinations of overlapping variance estimators for simulation}, volume={35}, ISSN={["1872-7468"]}, DOI={10.1016/j.orl.2006.08.007}, abstractNote={We estimate the variance parameter of a stationary simulation-generated process using a linear combination of overlapping standardized time series (STS) area variance estimators based on different batch sizes. 
We establish the linear-combination estimator's asymptotic distribution, presenting analytical and simulation-based results exemplifying its potential for improvements in accuracy and computational efficiency.}, number={4}, journal={OPERATIONS RESEARCH LETTERS}, author={Aktaran-Kalayci, Tuba and Goldsman, David and Wilson, James R.}, year={2007}, month={Jul}, pages={439–447} } @article{alexopoulos_argon_goldsman_tokol_wilson_2007, title={Overlapping variance estimators for simulation}, volume={55}, ISSN={["0030-364X"]}, DOI={10.1287/opre.1070.0475}, abstractNote={ To estimate the variance parameter (i.e., the sum of covariances at all lags) for a steady-state simulation output process, we formulate certain statistics that are computed from overlapping batches separately and then averaged over all such batches. We form overlapping versions of the area and Cramér–von Mises estimators using the method of standardized time series. For these estimators, we establish (i) their limiting distributions as the sample size increases while the ratio of the sample size to the batch size remains fixed; and (ii) their mean-square convergence to the variance parameter as both the batch size and the ratio of the sample size to the batch size increase. Compared with their counterparts computed from nonoverlapping batches, the estimators computed from overlapping batches asymptotically achieve reduced variance while maintaining the same bias as the sample size increases; moreover, the new variance estimators usually achieve similar improvements compared with the conventional variance estimators based on nonoverlapping or overlapping batch means. In follow-up work, we present several analytical and Monte Carlo examples, and we formulate efficient procedures for computing the overlapping estimators with only order-of-sample-size effort. }, number={6}, journal={OPERATIONS RESEARCH}, author={Alexopoulos, Christos and Argon, Nilay Tanık and Goldsman, David and Tokol, Gamze and Wilson, James R.}, year={2007}, pages={1090–1103} } @article{lada_wilson_steiger_joines_2007, title={Performance of a wavelet-based spectral procedure for steady-state simulation analysis}, volume={19}, DOI={10.1287/ijoc.1050.0161}, abstractNote={ A summary and an analysis are given for an experimental performance evaluation of WASSP, an automated wavelet-based spectral method for constructing an approximate confidence interval on the steady-state mean of a simulation output process such that the delivered confidence interval satisfies user-specified requirements on absolute or relative precision as well as coverage probability. The experimentation involved three difficult test problems, each with an output process exhibiting some combination of the following characteristics: a long warm-up period, a persistent autocorrelation structure, or a highly nonnormal marginal distribution. These problems were used to compare the performance of WASSP with that of the Heidelberger-Welch algorithm and ASAP3, two sequential procedures based respectively on the methods of spectral analysis and nonoverlapping batch means. Concerning efficiency (required sample sizes) and robustness against the statistical anomalies commonly encountered in simulation studies, WASSP outperformed the Heidelberger-Welch procedure and compared favorably with ASAP3. }, number={2}, journal={INFORMS Journal on Computing}, publisher={Institute for Operations Research and the Management Sciences (INFORMS)}, author={Lada, E. K. and Wilson, J. R. and Steiger, N. M. and Joines, J.
A.}, year={2007}, pages={150–160} } @article{lada_wilson_2006, title={A wavelet-based spectral procedure for steady-state simulation analysis}, volume={174}, ISSN={["0377-2217"]}, DOI={10.1016/j.ejor.2005.04.025}, abstractNote={We develop WASSP, a wavelet-based spectral method for steady-state simulation analysis. First WASSP determines a batch size and a warm-up period beyond which the computed batch means form an approximately stationary Gaussian process. Next WASSP computes the discrete wavelet transform of the bias-corrected log-smoothed-periodogram of the batch means, using a soft-thresholding scheme to denoise the estimated wavelet coefficients. Then taking the inverse discrete wavelet transform of the thresholded wavelet coefficients, WASSP computes estimators of the batch means log-spectrum and the steady-state variance parameter (i.e., the sum of covariances at all lags) for the original (unbatched) process. Finally by combining the latter estimator with the batch means grand average, WASSP provides a sequential procedure for constructing a confidence interval on the steady-state mean that satisfies user-specified requirements concerning absolute or relative precision as well as coverage probability. An experimental performance evaluation demonstrates WASSP’s effectiveness compared with other simulation analysis methods.}, number={3}, journal={EUROPEAN JOURNAL OF OPERATIONAL RESEARCH}, author={Lada, Emily K. and Wilson, James R.}, year={2006}, month={Nov}, pages={1769–1801} } @article{kuhl_sumant_wilson_2006, title={An automated multiresolution procedure for modeling complex arrival processes}, volume={18}, ISSN={["1526-5528"]}, DOI={10.1287/ijoc.1040.0113}, abstractNote={ To automate the multiresolution procedure of Kuhl et al. for modeling and simulating arrival processes that may exhibit a long-term trend, nested periodic phenomena (such as daily and weekly cycles), or both types of effects, we formulate a statistical-estimation method that involves the following steps at each resolution level corresponding to a basic cycle: (a) transforming the cumulative relative frequency of arrivals within the cycle (for example, the percentage of all arrivals as a function of the time of day within the daily cycle) to obtain a statistical model with approximately normal, constant-variance responses; (b) fitting a specially formulated polynomial to the transformed responses; (c) performing a likelihood ratio test to determine the degree of the fitted polynomial; and (d) fitting to the original (untransformed) responses a polynomial of the same form as in (b) with the degree determined in (c). A comprehensive experimental performance evaluation involving 100 independent replications of eight selected test processes demonstrates the accuracy and flexibility of the automated multiresolution procedure. }, number={1}, journal={INFORMS JOURNAL ON COMPUTING}, author={Kuhl, ME and Sumant, SG and Wilson, JR}, year={2006}, pages={3–18} } @article{joines_sommerich_mirka_wilson_moon_2006, title={Low-level exertions of the neck musculature: A study of research methods}, volume={16}, ISSN={1050-6411}, url={http://dx.doi.org/10.1016/j.jelekin.2005.09.007}, DOI={10.1016/j.jelekin.2005.09.007}, abstractNote={Musculoskeletal neck discomfort is prevalent in many occupations and has been the focus of much research employing surface electromyography (sEMG). Significant differences in experimental methods among researchers make comparisons across studies difficult. 
The goal of the current research was to use empirical methods to answer specific methodological questions concerning use of sEMG in evaluation of the neck extensor system. This was accomplished in two studies. In Experiment 1, ultrasound technology was used to: (a) determine accessibility of m. splenius and semispinalis capitis with surface electrodes, (b) identify appropriate electrode locations for these muscles/muscle groups, and (c) illustrate potential benefits of using ultrasound in locating muscles/placing electrodes. Experiment 2 sought to assess effects of posture when normalizing sEMG data. Results from Experiment 1 showed no direct access to semispinalis capitis for surface electrodes; their activity can only be sampled as part of a group of muscles. In most subjects, m. splenius was found to be accessible to surface electrodes. Electrode placement recommendations are provided. Results of Experiment 2 showed significant differences in normalized EMG data between a posture-specific technique and a reference posture technique. Posture-specific normalization is recommended for accurately assessing the relative intensity of contractions of these muscles.}, number={5}, journal={Journal of Electromyography and Kinesiology}, publisher={Elsevier BV}, author={Joines, Sharon M.B. and Sommerich, Carolyn M. and Mirka, Gary A. and Wilson, James R. and Moon, Samuel D.}, year={2006}, month={Oct}, pages={485–497} } @article{lada_steiger_wilson_2006, title={Performance evaluation of recent procedures for steady-state simulation analysis}, volume={38}, ISSN={["1545-8830"]}, DOI={10.1080/07408170600735520}, abstractNote={The performance of the batch-means procedure ASAP3 and the spectral procedure WASSP is evaluated on test problems with characteristics typical of practical applications of steady-state simulation analysis procedures. ASAP3 and WASSP are sequential procedures designed to produce a confidence-interval estimator for the mean response that satisfies user-specified half-length and coverage-probability requirements. ASAP3 is based on an inverse Cornish-Fisher expansion for the classical batch-means t-ratio, whereas WASSP is based on a wavelet estimator of the batch-means power spectrum. Regarding closeness of the empirical coverage probability and average half-length of the delivered confidence intervals to their respective nominal levels, both procedures compared favorably with the Law-Carson procedure and the original ASAP algorithm. Regarding the average sample sizes required for decreasing levels of maximum confidence-interval half-length, ASAP3 and WASSP exhibited reasonable efficiency in the test problems.}, number={9}, journal={IIE TRANSACTIONS}, author={Lada, Emily K. and Steiger, Natalie M. and Wilson, James R.}, year={2006}, month={Sep}, pages={711–727} } @article{kim_nelson_wilson_2005, title={Some Almost-Sure Convergence Properties Useful in Sequential Analysis}, volume={24}, ISSN={0747-4946 1532-4176}, url={http://dx.doi.org/10.1080/07474940500311021}, DOI={10.1080/07474940500311021}, abstractNote={Abstract Kim and Nelson propose sequential procedures for selecting the simulated system with the largest steady-state mean from a set of alternatives that yield stationary output processes. Each procedure uses a triangular continuation region so that sampling stops when the relevant test statistic first reaches the region's boundary. 
In applying the generalized continuous mapping theorem to prove the asymptotic validity of these procedures as the indifference-zone parameter tends to zero, we are given (i) a sequence of functions on the unit interval (which are right-continuous with left-hand limits) converging to a realization of a certain Brownian motion process with drift; and (ii) a sequence of triangular continuation regions corresponding to the functions in sequence (i) and converging to the triangular continuation region for the Brownian motion process. From each function in sequence (i) and its corresponding continuation region in sequence (ii), we obtain the associated boundary-hitting point; and we prove that the resulting sequence of such points converges almost surely to the boundary-hitting point for the Brownian motion process. We also discuss the application of this result to a statistical process-control scheme for autocorrelated data and to other selection procedures for steady-state simulation experiments. Recommended by Adam Martinsek}, number={4}, journal={Sequential Analysis}, publisher={Informa UK Limited}, author={Kim, Seong-Hee and Nelson, Barry L. and Wilson, James R.}, year={2005}, month={Oct}, pages={411–419} } @article{zouaoui_wilson_2004, title={Accounting for input-model and input-parameter uncertainties in simulation}, volume={36}, ISSN={["1545-8830"]}, DOI={10.1080/07408170490500708}, abstractNote={To account for the input-model and input-parameter uncertainties inherent in many simulations as well as the usual stochastic uncertainty, we present a Bayesian input-modeling technique that yields improved point and confidence-interval estimators for a selected posterior mean response. Exploiting prior information to specify the prior probabilities of the postulated input models and the associated prior input-parameter distributions, we use sample data to compute the posterior input-model and input-parameter distributions. Our Bayesian simulation replication algorithm involves: (i) estimating parameter uncertainty by randomly sampling the posterior input-parameter distributions; (ii) estimating stochastic uncertainty by running independent replications of the simulation using each set of input-model parameters sampled in (i); and (iii) estimating input-model uncertainty by weighting the responses generated in (ii) using the corresponding posterior input-model probabilities. Sampling effort is allocated among input models to minimize final point-estimator variance subject to a computing-budget constraint. A queueing simulation demonstrates the advantages of this approach.}, number={11}, journal={IIE TRANSACTIONS}, author={Zouaoui, F and Wilson, JR}, year={2004}, month={Nov}, pages={1135–1151} } @article{wilson_king_wilson_2004, title={Case study on statistically estimating minimum makespan for flow line scheduling problems}, volume={155}, ISSN={0377-2217}, url={http://dx.doi.org/10.1016/S0377-2217(02)00910-4}, DOI={10.1016/S0377-2217(02)00910-4}, abstractNote={Lower bounds are typically used to evaluate the performance of heuristics for solving combinatorial minimization problems. In the absence of tight analytical lower bounds, optimal objective-function values may be estimated statistically. In this paper, extreme value theory is used to construct confidence-interval estimates of the minimum makespan achievable when scheduling nonsimilar groups of jobs on a two-stage flow line. 
Experimental results based on randomly sampled solutions to each of 180 randomly generated test problems revealed that (i) least-squares parameter estimators outperformed standard analytical estimators for the Weibull approximation to the distribution of the sample minimum makespan; (ii) to evaluate each Weibull fit reliably, both the Anderson–Darling and Kolmogorov–Smirnov goodness-of-fit tests should be used; and (iii) applying a local improvement procedure to a large sample of randomly generated initial solutions improved the probability that the resulting Weibull fit yielded a confidence interval covering the minimum makespan.}, number={2}, journal={European Journal of Operational Research}, publisher={Elsevier BV}, author={Wilson, Amy D and King, Russell E and Wilson, James R}, year={2004}, month={Jun}, pages={439–454} } @article{stanfield_wilson_king_2004, title={Flexible modelling of correlated operation times with application in product-reuse facilities}, volume={42}, ISSN={0020-7543 1366-588X}, url={http://dx.doi.org/10.1080/0020754042000203903}, DOI={10.1080/0020754042000203903}, abstractNote={For each job (product instance) to be serviced in a product-reuse production system, multiple operations are often scheduled. Typically, there is high variability in the same operation times required by different jobs; and for each individual job, there is often significant probabilistic dependence (correlation) between many of the job's required operation times. Well-conditioned jobs require fewer operations with shorter durations. Poorly conditioned jobs require more operations with longer durations. Accurate and rapid methods for representing the uncertainty of operation necessity and duration are required to use simulation effectively as a schedule evaluation tool. This paper develops such methods using an alternative to the conventional multivariate extension of the Johnson system of univariate probability distributions. The alternative methods match the first three, and often four, marginal moments of the random vector of operation times for a given job as well as all pairwise correlations between those operation times. A logistic regression model is used to estimate the distribution of the binary random variable indicating the necessity of an operation conditioned on the indicators for the job's preceding operations. The proposed overall mixed-distribution modelling technique is computationally efficient, useful in product-reuse system practice, and easily integrated into existing simulation software platforms.}, number={11}, journal={International Journal of Production Research}, publisher={Informa UK Limited}, author={Stanfield, P. M. and Wilson, J. R. and King, R. E.}, year={2004}, month={Jun}, pages={2179–2196} } @article{zouaoui_wilson_2003, title={Accounting for parameter uncertainty in simulation input modeling}, volume={35}, DOI={10.1080/0740817039QN548}, number={9}, journal={IIE Transactions}, author={Zouaoui, F. and Wilson, J. R.}, year={2003}, pages={781–792} } @article{irizarry_kuhl_lada_subramanian_wilson_2003, title={Analyzing transformation-based simulation metamodels}, volume={35}, DOI={10.1080/07408170390175495}, number={3}, journal={IIE Transactions}, author={Irizarry, M. D. A. and Kuhl, M. E. and Lada, E. K. and Subramanian, S. and Wilson, J. 
R.}, year={2003}, pages={271–283} } @article{maingi_bell_bell_bialek_bourdelle_bush_darrow_fredrickson_gates_gilmore_et al._2003, title={Recent results from the national spherical torus experiment}, volume={45}, ISSN={["1361-6587"]}, DOI={10.1088/0741-3335/45/5/310}, abstractNote={The National Spherical Torus Experiment (NSTX) is a low aspect-ratio fusion research facility whose research goal is to make a determination of the attractiveness of the spherical torus concept in the areas of high-β stability, confinement, current drive, and divertor physics. Remarkable progress was made in extending the operational regime of the device in FY 2002. In brief, βt of 34% and βN of 6.5 were achieved. H-mode became the main operational regime, and energy confinement exceeded conventional aspect-ratio tokamak scalings. Heating was demonstrated with the radiofrequency antenna, and signatures of current drive were observed. Current initiation with coaxial helicity injection produced discharges of 400 kA, and first measurements of divertor heat flux profiles in H-mode were made.}, number={5}, journal={PLASMA PHYSICS AND CONTROLLED FUSION}, author={Maingi, R and Bell, MG and Bell, RE and Bialek, J and Bourdelle, C and Bush, CE and Darrow, DS and Fredrickson, ED and Gates, DA and Gilmore, M and et al.}, year={2003}, month={May}, pages={657–669} } @article{lada_lu_wilson_2002, title={A wavelet-based procedure for process fault detection}, volume={15}, ISSN={["0894-6507"]}, DOI={10.1109/66.983447}, abstractNote={To detect faults in a time-dependent process, we apply a discrete wavelet transform (DWT) to several independently replicated data sets generated by that process. The DWT can capture irregular data patterns such as sharp "jumps" better than the Fourier transform and standard statistical procedures without adding much computational complexity. Our wavelet coefficient selection method effectively balances model parsimony against data reconstruction error. The few selected wavelet coefficients serve as the "reduced-size" data set to facilitate an efficient decision-making method in situations with potentially large-volume data sets. We develop a general procedure to detect process faults based on differences between the reduced-size data sets obtained from the nominal (in-control) process and from a new instance of the target process that must be tested for an out-of-control condition. The distribution of the test statistic is constructed first using normal distribution theory and then with a new resampling procedure called "reversed jackknifing" that does not require any restrictive distributional assumptions. A Monte Carlo study demonstrates the effectiveness of these procedures. Our methods successfully detect process faults for quadrupole mass spectrometry samples collected from a rapid thermal chemical vapor deposition process.}, number={1}, journal={IEEE TRANSACTIONS ON SEMICONDUCTOR MANUFACTURING}, author={Lada, EK and Lu, JC and Wilson, JR}, year={2002}, month={Feb}, pages={79–90} } @article{medaglia_fang_nuttle_wilson_2002, title={An efficient and flexible mechanism for constructing membership functions}, volume={139}, ISSN={["0377-2217"]}, DOI={10.1016/S0377-2217(01)00157-6}, abstractNote={This paper introduces a Bézier curve-based mechanism for constructing membership functions of convex normal fuzzy sets. The mechanism can fit any given data set with a minimum level of discrepancy. 
In the absence of data, the mechanism can be intuitively manipulated by the user to construct membership functions with the desired shape. Some numerical experiments are included to compare the performance of the proposed mechanism with conventional methods.}, number={1}, journal={EUROPEAN JOURNAL OF OPERATIONAL RESEARCH}, author={Medaglia, AL and Fang, SC and Nuttle, HLW and Wilson, JR}, year={2002}, month={May}, pages={84–95} } @article{steiger_wilson_2002, title={An improved batch means procedure for simulation output analysis}, volume={48}, ISSN={["0025-1909"]}, DOI={10.1287/mnsc.48.12.1569.438}, abstractNote={ We formulate and evaluate the Automated Simulation Analysis Procedure (ASAP), an algorithm for steady-state simulation output analysis based on the method of nonoverlapping batch means (NOBM). ASAP delivers a confidence interval for an expected response that is centered on the sample mean of a portion of a simulation-generated time series and satisfies a user-specified absolute or relative precision requirement. ASAP operates as follows: The batch size is progressively increased until either (a) the batch means pass the von Neumann test for independence, and then ASAP delivers a classical NOBM confidence interval; or (b) the batch means pass the Shapiro-Wilk test for multivariate normality, and then ASAP delivers a correlation-adjusted confidence interval. The latter adjustment is based on an inverted Cornish-Fisher expansion for the classical NOBM t-ratio, where the terms of the expansion are estimated via an autoregressive-moving average time series model of the batch means. After determining the batch size and confidence-interval type, ASAP sequentially increases the number of batches until the precision requirement is satisfied. An extensive experimental study demonstrates the performance improvements achieved by ASAP versus well-known batch means procedures, especially in confidence-interval coverage probability. }, number={12}, journal={MANAGEMENT SCIENCE}, author={Steiger, NM and Wilson, JR}, year={2002}, month={Dec}, pages={1569–1586} } @article{wilson_2002, title={Responsible authorship and peer review}, volume={8}, ISSN={["1471-5546"]}, DOI={10.1007/s11948-002-0016-3}, abstractNote={In this article the basic principles of responsible authorship and peer review are surveyed, with special emphasis on (a) guidelines for refereeing archival journal articles and proposals; and (b) how these guidelines should be taken into account at all stages of writing.}, number={2}, journal={SCIENCE AND ENGINEERING ETHICS}, author={Wilson, JR}, year={2002}, month={Apr}, pages={155–174} } @article{irizarry_wilson_trevino_2001, title={A flexible simulation tool for manufacturing-cell design, I: model structure, operation, and case study}, volume={33}, ISSN={["0740-817X"]}, DOI={10.1023/A:1010966420792}, abstractNote={We present a general manufacturing-cell simulation model for evaluating the effects of world-class manufacturing practices on expected cell performance. The modular structure of the simulation provides the flexibility to analyze a wide variety of manufacturing cells. We formulate a comprehensive annualized cost function for evaluation and comparison of alternative cell configurations. A case study involving assembly of printed circuit boards illustrates the potential benefits of using this tool for cell design and analysis.
The simulation model is intended for use in a two-phase approach to cell design that is based on simulated experimentation and response surface analysis as detailed in a companion paper.}, number={10}, journal={IIE TRANSACTIONS}, author={Irizarry, MDA and Wilson, JR and Trevino, J}, year={2001}, month={Oct}, pages={827–836} } @article{irizarry_wilson_trevino_2001, title={A flexible simulation tool for manufacturing-cell design, II: response surface analysis and case study}, volume={33}, ISSN={["0740-817X"]}, DOI={10.1023/A:1010970504862}, abstractNote={We present a two-phase approach to design and analysis of manufacturing cells based on simulated experimentation and response surface methodology using a general manufacturing-cell simulation model. The first phase involves factor-screening simulation experiments to identify design and operational factors that have a significant effect on cell performance as measured by a comprehensive annual cost function. In the second phase of experimentation, we construct simulation (response surface) metamodels to describe the relationship between the significant cell design and operational factors (the controllable input parameters) and the resulting simulation-based estimate of expected annual cell cost (the output response). We use canonical and ridge analyses of the estimated response surface to estimate the levels of the quantitative input factors that minimize the cell's expected annual cost. We apply this methodology to an assembly cell for printed circuit boards. Compared to the current cell operating policy, the simulation metamodel-based estimate of the optimum operating policy is predicted to yield average annual savings of approximately $425 000, which is a 20% reduction in annual cost. In a companion paper, we detail the structure and operation of the manufacturing-cell simulation model.}, number={10}, journal={IIE TRANSACTIONS}, author={Irizarry, MDA and Wilson, JR and Trevino, J}, year={2001}, month={Oct}, pages={837–846} } @article{wilson_2001, title={A multiplicative decomposition property of the screening-and-selection procedures of Nelson et al.}, volume={49}, ISSN={["0030-364X"]}, DOI={10.1287/opre.49.6.964.10013}, abstractNote={ Recently, Nelson et al. (2001a, b) formulated a class of combined screening-and-selection procedures for identifying the simulated system with optimal expected response when the number of alternatives is finite, but large enough to render conventional ranking-and-selection procedures impractical. Under a certain key assumption, they derived an additive decomposition lemma that provides a lower bound on the correct-selection probability when either the original or group-screening version of their combined screening-and-selection procedure is applied to randomly sampled normal populations with unknown and unequal variances. For both these procedures, we establish an improved lower bound on the correct-selection probability that is the product of (a) the probability that the best alternative will survive the first-stage screening procedure, and (b) the probability that the second-stage sampling-and-selection procedure will correctly identify the best alternative starting from the full set of alternatives. This multiplicative decomposition property offers a different perspective on the probabilistic structure of the entire class of combined screening-and-selection procedures developed by Nelson et al., and it does not require the key assumption of their additive decomposition lemma. 
}, number={6}, journal={OPERATIONS RESEARCH}, author={Wilson, JR}, year={2001}, pages={964–966} } @article{wilson_goldsman_2001, title={Alan Pritsker's multifaceted career: theory, practice, education, entrepreneurship, and service}, volume={33}, DOI={10.1080/07408170108936818}, abstractNote={In this lead article for the special issue of IIE Transactions honoring Alan Pritsker, we summarize Alan's most significant contributions to the fields of industrial engineering and operations research, with special emphasis on his contributions to computer simulation.}, number={3}, journal={IIE Transactions}, author={Wilson, J. R. and Goldsman, D.}, year={2001}, pages={139–147} } @article{steiger_wilson_2001, title={Convergence properties of the batch means method for simulation output analysis}, volume={13}, ISSN={["1091-9856"]}, DOI={10.1287/ijoc.13.4.277.9737}, abstractNote={ We examine key convergence properties of the steady-state simulation analysis method of nonoverlapping batch means (NOBM) when it is applied to a stationary, phi-mixing process. For an increasing batch size and a fixed batch count, we show that the standardized vector of batch means converges in distribution to a vector of independent standard normal variates—a well-known result underlying the NOBM method for which there appears to be no direct, readily accessible justification. To characterize the asymptotic behavior of the classical NOBM confidence interval for the mean response, we formulate certain moment conditions on the components (numerator and denominator) of the associated Student's t-ratio that are necessary to ensure the validity of the confidence interval. For six selected stochastic systems, we summarize an extensive numerical analysis of the convergence to steady-state limits of these moment conditions; and for two systems we present a simulation-based analysis exemplifying the corresponding convergence in distribution of the components of the NOBM t-ratio. These results suggest that in many simulation output processes, approximate joint normality of the batch means is achieved at a substantially smaller batch size than is required to achieve approximate independence; and an improved batch means method should exploit this property whenever possible. }, number={4}, journal={INFORMS JOURNAL ON COMPUTING}, author={Steiger, NM and Wilson, JR}, year={2001}, pages={277–293} } @article{kuhl_wilson_2001, title={Modeling and simulating Poisson processes having trends or nontrigonometric cyclic effects}, volume={133}, ISSN={["0377-2217"]}, DOI={10.1016/S0377-2217(00)00203-4}, abstractNote={We formulate a nonparametric technique for estimating the (cumulative) mean-value function of a nonhomogeneous Poisson process having a long-term trend or some cyclic effect(s) that may lack familiar trigonometric characteristics such as symmetry over the corresponding cycle(s). This multiresolution procedure begins at the lowest level of resolution by estimating any long-term trend in the target counting process; then at progressively higher levels of resolution, the procedure yields estimates of the cyclic behavior associated with progressively smaller cycle lengths.
We also formulate an efficient algorithm for generating realizations of such counting processes.}, number={3}, journal={EUROPEAN JOURNAL OF OPERATIONAL RESEARCH}, author={Kuhl, ME and Wilson, JR}, year={2001}, month={Sep}, pages={566–582} } @article{humphrey_wilson_2000, title={A revised simplex search procedure for stochastic simulation response surface optimization}, volume={12}, ISSN={["1526-5528"]}, DOI={10.1287/ijoc.12.4.272.11879}, abstractNote={ We develop a variant of the Nelder-Mead (NM) simplex search procedure for stochastic simulation optimization that is designed to avoid many of the weaknesses encumbering similar direct-search methods—in particular, excessive sensitivity to starting values, premature termination at a local optimum, lack of robustness against noisy responses, and computational inefficiency. The Revised Simplex Search (RSS) procedure consists of a three-phase application of the NM method in which: (a) the ending values for one phase become the starting values for the next phase; (b) the step size for the initial simplex (respectively, the shrink coefficient) decreases geometrically (respectively, increases linearly) over successive phases; and (c) the final estimated optimum is the best of the ending values for the three phases. To compare RSS versus NM and procedure RS+S9 due to Barton and Ivey, we summarize a simulation study based on four selected performance measures computed for six test problems that include additive white-noise error, with three levels of problem dimensionality and noise variability used in each problem. In the selected test problems, RSS yielded significantly more accurate estimates of the optimum than NM or RS+S9, and both RSS and RS+S9 required roughly four times as many function evaluations as NM. }, number={4}, journal={INFORMS JOURNAL ON COMPUTING}, author={Humphrey, DG and Wilson, JR}, year={2000}, pages={272–283} } @article{mirka_glasscock_stanfield_wilson_2000, title={An empirical approach to characterizing trunk muscle coactivation using simulation input modeling techniques}, volume={33}, ISSN={["0021-9290"]}, DOI={10.1016/s0021-9290(00)00151-2}, abstractNote={Accurately describing trunk muscle coactivation is fundamental to quantifying the spine reaction forces that occur during lifting tasks and has been the focus of a great deal of research in the spine biomechanics literature. One limitation of previous approaches has been a lack of consideration given to the variability in these coactivation strategies. The research presented in this paper is an empirical approach to quantifying and modeling trunk muscle coactivation using simulation input modeling techniques. Electromyographic (EMG) data were collected from 28 human subjects as they performed controlled trunk extension exertions. These exertions included isokinetic (10 and 45°/s) and constant acceleration (50°/s/s) trunk extensions in symmetric and asymmetric (30°) postures at two levels of trunk extension moment (30 and 80 Nm). The EMG data were collected from the right and left pairs of the erector spinae, latissimus dorsi, rectus abdominis, external obliques and internal obliques. Each subject performed nine repetitions of each combination of independent variables. The data collected during these trials were used to develop marginal distributions of trunk muscle activity as well as a 10×10 correlation matrix that described how the muscles cooperated to produce these extension torques. 
These elements were then combined to generate multivariate distributions describing the coactivation of the trunk musculature. An analysis of these distributions revealed that increases in extension moment, extension velocity and sagittal flexion angle created increases in both the mean and the variance of the distributions of the muscular response, while increases in the rate of trunk extension acceleration decreased both the mean and variance of the distributions of activity across all muscles considered. Increases in trunk asymmetry created a decrease in mean of the ipsi-lateral erector spinae and an increase in the mean of all other muscles considered, but there was little change in the variance of these distributions as a function of asymmetry.}, number={12}, journal={JOURNAL OF BIOMECHANICS}, author={Mirka, GA and Glasscock, NF and Stanfield, PM and Wilson, JR}, year={2000}, month={Dec}, pages={1701–1704} } @article{kuhl_wilson_2000, title={Least squares estimation of nonhomogeneous Poisson processes}, volume={67}, number={1}, journal={Journal of Statistical Computation and Simulation}, author={Kuhl, M. E. and Wilson, J. R.}, year={2000}, pages={75–108} } @article{nuttle_king_hunter_wilson_fang_2000, title={Simulation Modeling of the textile supply chain - Part 1: The textile-plant models}, volume={91}, ISSN={["0040-5000"]}, DOI={10.1080/00405000008659526}, abstractNote={In Part I of the series, we describe stochastic computer models that simulate operations in the spinning, knitting, weaving, dyeing and finishing, and cut/sew sectors of the textile industry. The models are scaled to represent a supply chain designed to feed a garment-manufacturing operation involving four or five plants, i.e. part of each plant's output is ‘dedicated’ while simultaneously providing yarns and fabrics to the industry at large. Each of the sector models is unique because of the very different types of processing technology employed. The models are linked by means of streams of fabric orders from the manufacturing plants that make a range of garment types requiring many different fabrics for Basic (year-round sales), Seasonal (two or three seasons per year), and Fashion (shelf lives of 8–12 weeks) goods in a broad range of colors. In addition to each plant's product ranges and order sizes and frequencies, particular attention is paid to the machine-scheduling algorithms, although the models are deliberately kept at a ‘high’ as opposed to a ‘shop-floor’ level. The purpose of this modeling is to allow senior management to answer broad questions about the plants' ability to operate in a Quick Response environment. The various model outputs reflect this, having a heavy emphasis on on-time shipments, back-order levels, and service levels. In Part II of the series, we shall present the QR-related operating results to date, a description of a master-scheduling procedure to orchestrate the operations of the supply chain, ideas on an improved scheduling method, and an account of the construction of neural-network decision surface models as a decision support tool. We also overview ongoing efforts in technology transfer and in using ‘fuzzy’ mathematics to model the vagueness and uncertainty inherent in the supply-chain decision-making environment. The research effort of which this is a part is ongoing.
We present these results in the hope of encouraging others to help carry the investigations forward.}, number={1}, journal={JOURNAL OF THE TEXTILE INSTITUTE}, author={Nuttle, HLW and King, RE and Hunter, NA and Wilson, JR and Fang, SC}, year={2000}, pages={35–50} } @article{nuttle_king_fang_wilson_hunter_2000, title={Simulation Modeling of the textile supply chain - Part II: Results and research directions}, volume={91}, ISSN={["0040-5000"]}, DOI={10.1080/00405000008659527}, abstractNote={In Part I of this series we described a set of computer-simulation models of the various components of the apparel-supply complex. Here we summarize the results obtained by exercising the models, with the main emphasis placed on whether or not the upstream operations of spinning, fabric production, and dyeing and finishing were capable of meeting Quick Response (QR) requirements for response time and service levels. In carrying out this work, questions arose about the kind of information systems required to link the various entities in the supply chain, alternative scheduling procedures, the possibility of interactive management-information systems that would allow rapid responses to the concerns of senior managers, and user-friendly means for technology transfer. We have also begun to explore the use of fuzzy mathematics to model the uncertainty and vagueness inherent in most supply-chain decision-making. Work on these topics is discussed here in Part II.}, number={1}, journal={JOURNAL OF THE TEXTILE INSTITUTE}, author={Nuttle, HLW and King, RE and Fang, SC and Wilson, JR and Hunter, NA}, year={2000}, pages={51–64} } @article{wilson_2000, title={Simulation world loses key founder, A. Alan B. Pritsker (1933-2000) - In memoriam}, volume={75}, ISSN={["0037-5497"]}, DOI={10.1177/003754970007500209}, number={2}, journal={SIMULATION}, author={Wilson, JR}, year={2000}, month={Aug}, pages={123–124} } @article{weintraub_cormier_hodgson_king_wilson_zozom_1999, title={Scheduling with alternatives: a link between process planning and scheduling}, volume={31}, DOI={10.1080/07408179908969910}, abstractNote={The objective of this research is to develop and evaluate effective, computationally efficient procedures for scheduling jobs in a large-scale manufacturing system involving, for example, over 1000 jobs and over 100 machines. The main performance measure is maximum lateness; and a useful lower bound on maximum lateness is derived from a relaxed scheduling problem in which preemption of jobs is based on the latest finish time of each job at each machine. To construct a production schedule that minimizes maximum lateness, an iterative simulation-based scheduling algorithm operates as follows: (a) job queuing times observed at each machine in the previous simulation iteration are used to compute a refined estimate of the effective due date (slack) for each job at each machine; and (b) in the current simulation iteration, jobs are dispatched at each machine in order of increasing slack. Iterations of the scheduling algorithm terminate when the lower bound on maximum lateness is achieved or the iteration limit is reached. This scheduling algorithm is implemented in Virtual Factory, a Windows-based software package. The performance of Virtual Factory is demonstrated in a suite of randomly generated test problems as well as in a large furniture manufacturing facility. 
To further reduce maximum lateness, a second scheduling algorithm also incorporates a tabu search procedure that identifies process plans with alternative operations and routings for jobs. This enhancement yields improved schedules that minimize manufacturing costs while satisfying job due dates. An extensive experimental performance evaluation indicates that in a broad range of industrial settings, the second scheduling algorithm can rapidly identify optimal or nearly optimal schedules.}, number={11}, journal={IIE Transactions}, author={Weintraub, A. and Cormier, D. and Hodgson, Thom and King, R. and Wilson, J. and Zozom, A.}, year={1999}, pages={1093–1102} } @inproceedings{humphrey_wilson_1998, title={A revised simplex search procedure for stochastic simulation response-surface optimization}, DOI={10.1109/wsc.1998.745060}, abstractNote={We develop a variant of the Nelder-Mead (NM) simplex search procedure for stochastic simulation optimization that is designed to avoid many of the weaknesses encumbering such direct-search methods-in particular, excessive sensitivity to starting values, premature termination at a local optimum, lack of robustness against noisy responses, and lack of computational efficiency. The revised simplex search (RSS) procedure consists of a three-phase application of the NM method in which: (a) the ending values for one phase become the starting values for the next phase; (b) the size of the initial simplex (respectively, the shrink coefficient) decreases geometrically (respectively, increases linearly) over successive phases; and (c) the final estimated optimum is the best of the ending values for the three phases. To compare RSS versus the NM procedure and RS9 (a simplex search procedure recently proposed by Barton and Ivey (1996)), we summarize a simulation study based on separate factorial experiments and follow-up multiple comparisons tests for four selected performance measures computed on each of six test problems, with three levels of problem dimensionality and noise variability used in each problem. The experimental results provide substantial evidence of RSS's improved performance with only marginally higher computational effort.}, booktitle={1998 Winter Simulation Conference: Proceedings: Grand Hotel, Washington, D.C., 13-16 December, 1998}, publisher={Piscataway, New Jersey: IEEE ; New York, New York: Association for Computing Machinery ; San Diego, California: Society for Computer Simulation International}, author={Humphrey, D. G. and Wilson, J. R.}, year={1998}, pages={751–760} } @article{avramidis_wilson_1998, title={Correlation-induction techniques for estimating quantiles in simulation experiments}, volume={46}, ISSN={["0030-364X"]}, DOI={10.1287/opre.46.4.574}, abstractNote={ A simulation-based quantile estimator measures the level of system performance that can be delivered with a prespecified probability. To estimate selected quantiles of the response of a finite-horizon simulation, we develop procedures based on correlation induction techniques for variance reduction, with emphasis on antithetic variates and Latin hypercube sampling. These procedures achieve improved precision by controlling the simulation's random-number inputs as an integral part of the experimental design. 
The proposed multiple-sample quantile estimator is the average of negatively correlated quantile estimators computed from disjoint samples of the simulation response, where negative correlation is induced between corresponding responses in different samples while mutual independence of responses is maintained within each sample. The proposed single-sample quantile estimator is computed from negatively correlated simulation responses within one all-inclusive sample. The single-sample estimator based on Latin hypercube sampling is shown to be asymptotically normal and unbiased with smaller variance than the comparable direct-simulation estimator based on independent replications. Similar asymptotic comparisons of the multiple-sample and direct-simulation estimators focus on bias and mean square error. Monte Carlo results suggest that the proposed procedures can yield significant reductions in bias, variance, and mean square error when estimating quantiles of the completion time of a stochastic activity network. }, number={4}, journal={OPERATIONS RESEARCH}, author={Avramidis, AN and Wilson, JR}, year={1998}, pages={574–591} } @article{seifert_kay_wilson_1998, title={Evaluation of AGV routeing strategies using hierarchical simulation}, volume={36}, ISSN={["0020-7543"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0032121916&partnerID=MN8TOARS}, DOI={10.1080/002075498193057}, abstractNote={To analyse an automated guided vehicle (AGV) system operating under selected vehicle routeing strategies, we present a simulation model that can handle an arbitrary system layout as well as arbitrary numbers of AGVs and pedestrians causing congestion in the system. We introduce a dynamic vehicle routeing strategy based on hierarchical simulation that operates as follows: at the time of each AGV routeing decision in the main simulation, subordinate simulations are performed to evaluate a limited set of alternative routes in succession until the current routeing decision can be finalized and the main simulation resumed. A case study involving a prototype AGV system operating under the control of a global vision system illustrates the advantages not only of this strategy but also of global-vision-based control.}, number={7}, journal={INTERNATIONAL JOURNAL OF PRODUCTION RESEARCH}, author={Seifert, RW and Kay, MG and Wilson, JR}, year={1998}, month={Jul}, pages={1961–1976} } @inproceedings{kuhl_damerdji_wilson_1998, title={Least squares estimation of nonhomogeneous Poisson processes}, DOI={10.1109/wsc.1998.745045}, abstractNote={We formulate and evaluate weighted least squares (WLS) and ordinary least squares (OLS) procedures for estimating the parametric mean-value function of a nonhomogeneous Poisson process. We focus the development on processes having an exponential rate function, where the exponent may include a polynomial component or some trigonometric components. Unanticipated problems with the WLS procedure are explained by an analysis of the associated residuals. The OLS procedure is based on a square root transformation of the detrended event (arrival) times - that is, the fitted mean-value function evaluated at the observed event times; and under appropriate conditions, the corresponding residuals are proved to converge weakly to a normal distribution with mean 0 and variance 0.25. 
The results of a Monte Carlo study indicate the advantages of the OLS procedure with respect to estimation accuracy and computational efficiency.}, booktitle={1998 Winter Simulation Conference: Proceedings: Grand Hotel, Washington, D.C., 13-16 December, 1998}, publisher={Piscataway, New Jersey: IEEE ; New York, New York: Association for Computing Machinery ; San Diego, California: Society for Computer Simulation International}, author={Kuhl, M. E. and Damerdji, H. and Wilson, J. R.}, year={1998}, pages={637–646} } @inproceedings{fang_donovan_nuttle_wilson_1998, title={Multi-customer due-date bargaining with soft computing}, volume={2}, number={1998}, booktitle={Proceedings of the Fourth Joint Conference of Information Sciences. 1998}, author={Fang, S.-C. and Donovan, M. and Nuttle, H. and Wilson, J.}, year={1998}, pages={84–87} } @article{wilson_carson_manivannan_1998, title={The Winter Simulation Conference: The premier forum on simulation practice and theory}, volume={25}, number={4}, journal={OR/MS Today}, author={Wilson, J. R. and Carson, J. S. and Manivannan, M.}, year={1998}, pages={29–31} } @article{lavelle_wilson_gold_canada_1997, title={A method for the incorporation of parametric uncertainty in the weighted evaluation multi-attribute decision analysis model}, volume={32}, ISSN={["0360-8352"]}, DOI={10.1016/S0360-8352(97)00012-0}, abstractNote={We develop an extension of the classic Weighted Evaluation (WE) Multi-Attribute Decision Analysis (MADA) model that allows for uncertainty in the parameters of the model. Uncertainties in attribute importance weights and alternative evaluation ratings are represented by independent uniform, triangular or beta random variables; and an iterative multi-variate integration scheme is used to evaluate the mean, variance and skewness of the resulting Probabilistic Weighted Evaluation (PWE). These moments are used to compute two-term Edgeworth and normal approximations to the distribution of: (a) the PWE for each of several alternatives that are to be analysed separately; or (b) the difference between PWEs for selected alternatives that are to be analysed on a pairwise basis. The proposed methodology is used to compare probabilistically three alternative solutions to the Mexico City Airport Siting Problem of Keeney and Raiffa (Keeney, R. L. and Raiffa, H., Decisions with Multiple Objectives. Wiley, New York, 1976).}, number={4}, journal={COMPUTERS & INDUSTRIAL ENGINEERING}, author={Lavelle, JP and Wilson, JR and Gold, HJ and Canada, JR}, year={1997}, month={Sep}, pages={769–786} } @article{wilson_1997, title={Conduct, misconduct, and cargo cult science}, ISBN={["0-7803-4278-X"]}, DOI={10.1145/268437.268790}, abstractNote={I will elaborate some principles of ethical conduct in science that correspond to Richard Feynman’s well-known precepts of “utter honesty” and “leaning over backwards” in all aspects of scientific work.
These principles have recently been called into question by certain individuals who allege that such rules are based on a misunderstanding of “how science actually works” and are therefore potentially “damaging to the scientific enterprise.” In addition to examining critically the general basis for these allegations, I will discuss the particular relevance of Feynman’s ideals to the field of computer simulation; and I will emphasize the need for meticulous validation of simulation models together with exact reproducibility and unimpeachable analysis of experiments performed with those models. Finally I will discuss the ethical dilemmas inherent in the peer review system, and I will offer some concrete suggestions for improving the process of refereeing primary journal articles.}, journal={PROCEEDINGS OF THE 1997 WINTER SIMULATION CONFERENCE}, author={Wilson, JR}, year={1997}, pages={1405–1413} } @article{houck_joines_kay_wilson_1997, title={Empirical Investigation of the Benefits of Partial Lamarckianism}, volume={5}, ISSN={["1530-9304"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0031082512&partnerID=MN8TOARS}, DOI={10.1162/evco.1997.5.1.31}, abstractNote={ Genetic algorithms (GAs) are very efficient at exploring the entire search space; however, they are relatively poor at finding the precise local optimal solution in the region in which the algorithm converges. Hybrid GAs are the combination of improvement procedures, which are good at finding local optima, and GAs. There are two basic strategies for using hybrid GAs. In the first, Lamarckian learning, the genetic representation is updated to match the solution found by the improvement procedure. In the second, Baldwinian learning, improvement procedures are used to change the fitness landscape, but the solution that is found is not encoded back into the genetic string. This paper examines the issue of using partial Lamarckianism (i.e., the updating of the genetic representation for only a percentage of the individuals), as compared to pure Lamarckian and pure Baldwinian learning in hybrid GAs. Multiple instances of five bounded nonlinear problems, the location-allocation problem, and the cell formation problem were used as test problems in an empirical investigation. Neither a pure Lamarckian nor a pure Baldwinian search strategy was found to consistently lead to quicker convergence of the GA to the best known solution for the series of test problems. Based on a minimax criterion (i.e., minimizing the worst case performance across all test problem instances), the 20% and 40% partial Lamarckianism search strategies yielded the best mixture of solution quality and computational efficiency. }, number={1}, journal={EVOLUTIONARY COMPUTATION}, publisher={MIT Press - Journals}, author={Houck, Christopher R. and Joines, Jeffery A. and Kay, Michael G. and Wilson, James R.}, year={1997}, pages={31–60} } @article{kuhl_wilson_johnson_1997, title={Estimating and simulating Poisson processes having trends or multiple periodicities}, volume={29}, ISSN={["0740-817X"]}, DOI={10.1080/07408179708966327}, abstractNote={We develop and evaluate procedures for estimating and simulating nonhomogeneous Poisson processes having an exponential rate function, where the exponent may include a polynomial component or some trigonometric components or both. Maximum likelihood estimates of the unknown continuous parameters of the rate function are obtained numerically, and the degree of the polynomial rate component is determined by a likelihood ratio test.
The experimental performance evaluation for this estimation procedure involves applying the procedure to 100 independent replications of nine selected point processes that possess up to four trigonometric rate components together with a polynomial rate component whose degree ranges from zero to three. On each replication of each process, the fitting procedure is applied to estimate the parameters of the process; and then the corresponding estimates of the rate and mean-value functions are computed over the observation interval. Evaluation of the fitting procedure is based on plotted tolerance bands for the rate and mean-value functions together with summary statistics for the maximum and average absolute estimation errors in these functions computed over the observation interval. The experimental results provide substantial evidence of the numerical stability and usefulness of the fitting procedure in simulation applications.}, number={3}, journal={IIE TRANSACTIONS}, author={Kuhl, ME and Wilson, JR and Johnson, MA}, year={1997}, month={Mar}, pages={201–211} } @article{schriber_wilson_1997, title={In memoriam: Harold Highland (1917-1997)}, volume={24}, number={6}, journal={OR/MS Today}, author={Schriber, T. J. and Wilson, J. R.}, year={1997}, pages={55} }