@article{mokhtari_frey_zheng_2006, title={Evaluation and recommendation of sensitivity analysis methods for application to Stochastic Human Exposure and Dose Simulation models}, volume={16}, ISSN={["1559-064X"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33751086727&partnerID=MN8TOARS}, DOI={10.1038/sj.jes.7500472}, abstractNote={Sensitivity analyses of exposure or risk models can help identify the most significant factors to aid in risk management or to prioritize additional research to reduce uncertainty in the estimates. However, sensitivity analysis is challenged by non-linearity, interactions between inputs, and multiple days or time scales. Selected sensitivity analysis methods are evaluated with respect to their applicability to human exposure models with such features using a testbed. The testbed is a simplified version of the US Environmental Protection Agency's Stochastic Human Exposure and Dose Simulation (SHEDS) model. The methods evaluated include the Pearson and Spearman correlation, sample and rank regression, analysis of variance, Fourier amplitude sensitivity test (FAST), and Sobol's method. The first five methods are known as "sampling-based" techniques, whereas the latter two methods are known as "variance-based" techniques. The main objective of the test cases was to identify the main and total contributions of individual inputs to the output variance. Sobol's method and FAST directly quantified these measures of sensitivity. Results show that sensitivity of an input typically changed when evaluated under different time scales (e.g., daily versus monthly). All methods provided similar insights regarding less important inputs; however, Sobol's method and FAST provided more robust insights with respect to sensitivity of important inputs compared to the sampling-based techniques. Thus, the sampling-based methods can be used in a screening step to identify unimportant inputs, followed by application of more computationally intensive refined methods to a smaller set of inputs. The implications of time variation in sensitivity results for risk management are briefly discussed.}, number={6}, journal={JOURNAL OF EXPOSURE SCIENCE AND ENVIRONMENTAL EPIDEMIOLOGY}, author={Mokhtari, Amirhossein and Frey, H. Christopher and Zheng, Junyu}, year={2006}, month={Nov}, pages={491–506} }

@article{zheng_frey_2005, title={Quantitative analysis of variability and uncertainty with known measurement error: Methodology and case study}, volume={25}, ISSN={["1539-6924"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-23644460458&partnerID=MN8TOARS}, DOI={10.1111/j.1539-6924.2005.00620.x}, abstractNote={The appearance of measurement error in exposure and risk factor data potentially affects any inferences regarding variability and uncertainty because the distribution representing the observed data set deviates from the distribution that represents an error‐free data set. A methodology for improving the characterization of variability and uncertainty with known measurement errors in data is demonstrated in this article based on an observed data set, known measurement error, and a measurement‐error model. A practical method for constructing an error‐free data set is presented and a numerical method based upon bootstrap pairs, incorporating two‐dimensional Monte Carlo simulation, is introduced to address uncertainty arising from measurement error in selected statistics.
When measurement error is a large source of uncertainty, substantial differences between the distribution representing variability of the observed data set and the distribution representing variability of the error‐free data set will occur. Furthermore, the shape and range of the probability bands for uncertainty differ between the observed and error‐free data set. Failure to separately characterize contributions from random sampling error and measurement error will lead to bias in the variability and uncertainty estimates. However, a key finding is that total uncertainty in the mean can be properly quantified even if measurement and random sampling errors cannot be separated. An empirical case study is used to illustrate the application of the methodology.}, number={3}, journal={RISK ANALYSIS}, author={Zheng, JY and Frey, HC}, year={2005}, month={Jun}, pages={663–675} }

@article{zheng_frey_2004, title={Quantification of variability and uncertainty using mixture distributions: Evaluation of sample size, mixing weights, and separation between components}, volume={24}, ISSN={["1539-6924"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-3042716474&partnerID=MN8TOARS}, DOI={10.1111/j.0272-4332.2004.00459.x}, abstractNote={Variability is the heterogeneity of values within a population. Uncertainty refers to lack of knowledge regarding the true value of a quantity. Mixture distributions have the potential to improve the goodness of fit to data sets not adequately described by a single parametric distribution. Uncertainty due to random sampling error in statistics of interest can be estimated based upon bootstrap simulation. In order to evaluate the robustness of using mixture distributions as a basis for estimating both variability and uncertainty, 108 synthetic data sets generated from selected population mixture log‐normal distributions were investigated, and properties of variability and uncertainty estimates were evaluated with respect to variation in sample size, mixing weight, and separation between components of mixtures. Furthermore, mixture distributions were compared with single‐component distributions. Findings include: (1) mixing weight influences the stability of variability and uncertainty estimates; (2) bootstrap simulation results tend to be more stable for larger sample sizes; (3) when two components are well separated, the stability of bootstrap simulation is improved; however, a larger degree of uncertainty arises regarding the percentiles coinciding with the separated region; (4) when two components are not well separated, a single distribution may often be a better choice because it has fewer parameters and better numerical stability; and (5) dependencies exist in sampling distributions of parameters of mixtures and are influenced by the amount of separation between the components. An emission factor case study based upon NOx emissions from coal‐fired tangential boilers is used to illustrate the application of the approach.}, number={3}, journal={RISK ANALYSIS}, author={Zheng, JY and Frey, HC}, year={2004}, month={Jun}, pages={553–571} }

@article{frey_zheng_2002, title={Probabilistic analysis of driving cycle-based highway vehicle emission factors}, volume={36}, ISSN={["0013-936X"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036882670&partnerID=MN8TOARS}, DOI={10.1021/es0114308}, abstractNote={A probabilistic methodology for quantifying intervehicle variability and fleet average uncertainty in highway vehicle emission factors is developed.
The methodology features the use of empirical distributions of emissions measurement data to characterize variability and the use of bootstrap simulation to characterize uncertainty. For the base emission rate as a function of mileage accumulation under standard conditions, a regression-based approach was employed in which the residual error terms were included in the probabilistic analysis. Probabilistic correction factors for different driving cycles, ambient temperature, and fuel Reid vapor pressure (RVP) were developed without interpolation or extrapolation of available data. The method was demonstrated for tailpipe carbon monoxide, hydrocarbon, and nitrogen oxides emissions for a selected light-duty gasoline vehicle technology. Intervehicle variability in emissions was found to span typically 2 or 3 orders of magnitude. The uncertainty in the fleet average emission factor ranged from as low as +/- 10% for a 95% probability range under standard conditions to as much as -90% to +280% when correction factors for alternative driving cycles, temperature, and RVP were applied. The implications of the results for method selection and for decision making are addressed.}, number={23}, journal={ENVIRONMENTAL SCIENCE & TECHNOLOGY}, author={Frey, HC and Zheng, JY}, year={2002}, month={Dec}, pages={5184–5191} }

@article{frey_zheng_2002a, title={Quantification of variability and uncertainty in air pollutant emission inventories: Method and case study for utility NOx emissions}, volume={52}, ISSN={["1047-3289"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036715372&partnerID=MN8TOARS}, DOI={10.1080/10473289.2002.10470837}, abstractNote={The quality of stationary source emission factors is typically described using data quality ratings, which provide no quantification of the precision of the emission factor for an average source, nor of the variability from one source to another within a category. Variability refers to actual differences caused by differences in feedstock composition, design, maintenance, and operation. Uncertainty refers to lack of knowledge regarding the true emissions. A general methodology for the quantification of variability and uncertainty in emission factors, activity factors, and emission inventories (EIs) is described, featuring the use of bootstrap simulation and related techniques. The methodology is demonstrated via a case study for a selected example of NOx emissions from coal-fired power plants. A prototype software tool was developed to implement the methodology. The range of interunit variability in selected activity and emission factors was shown to be as much as a factor of 4, and the range of uncertainty in mean emissions was shown to depend on the interunit variability and sample size. The uncertainty in the total inventory of −16 to +19% was attributed primarily to one technology group, suggesting priorities for collecting data and improving the inventory. The implications for decision-making are discussed.}, number={9}, journal={JOURNAL OF THE AIR & WASTE MANAGEMENT ASSOCIATION}, author={Frey, HC and Zheng, JY}, year={2002}, month={Sep}, pages={1083–1095} }