@article{arbanas_williams_leal_dunn_khuwaileh_wang_abdel-khalik_2015,
  title        = {Advancing Inverse Sensitivity/Uncertainty Methods for Nuclear Fuel Cycle Applications},
  volume       = {123},
  ISSN         = {1095-9904},
  DOI          = {10.1016/j.nds.2014.12.009},
  abstractNote = {The inverse sensitivity/uncertainty quantification (IS/UQ) method has recently been implemented in the Inverse Sensitivity/UnceRtainty Estimator (INSURE) module of the AMPX cross section processing system [M.E. Dunn and N.M. Greene, “AMPX-2000: A Cross-Section Processing System for Generating Nuclear Data for Criticality Safety Applications,” Trans. Am. Nucl. Soc. 86, 118–119 (2002)]. The IS/UQ method aims to quantify and prioritize the cross section measurements along with uncertainties needed to yield a given nuclear application(s) target response uncertainty, and doing this at a minimum cost. Since in some cases the extant uncertainties of the differential cross section data are already near the limits of the present-day state-of-the-art measurements, requiring significantly smaller uncertainties may be unrealistic. Therefore, we have incorporated integral benchmark experiments (IBEs) data into the IS/UQ method using the generalized linear least-squares method, and have implemented it in the INSURE module. We show how the IS/UQ method could be applied to systematic and statistical uncertainties in a self-consistent way and how it could be used to optimize uncertainties of IBEs and differential cross section data simultaneously. We itemize contributions to the cost of differential data measurements needed to define a realistic cost function.},
  journal      = {Nuclear Data Sheets},
  author       = {Arbanas, G. and Williams, M. L. and Leal, L. C. and Dunn, M. E. and Khuwaileh, B. A. and Wang, C. and Abdel-Khalik, H.},
  year         = {2015},
  month        = jan,
  pages        = {51--56},
}

@article{khuwaileh_abdel-khalik_2015,
  title        = {Subspace-based Inverse Uncertainty Quantification for Nuclear Data Assessment},
  volume       = {123},
  ISSN         = {1095-9904},
  DOI          = {10.1016/j.nds.2014.12.010},
  abstractNote = {Safety analysis and design optimization depend on the accurate prediction of various reactor attributes. Predictions can be enhanced by reducing the uncertainty associated with the attributes of interest. An inverse problem can be defined and solved to assess the sources of uncertainty, and experimental effort can be subsequently directed to further improve the uncertainty associated with these sources. In this work a subspace-based algorithm for inverse sensitivity/uncertainty quantification (IS/UQ) has been developed to enable analysts account for all sources of nuclear data uncertainties in support of target accuracy assessment-type analysis. An approximate analytical solution of the optimization problem is used to guide the search for the dominant uncertainty subspace. By limiting the search to a subspace, the degrees of freedom available for the optimization search are significantly reduced. A quarter PWR fuel assembly is modeled and the accuracy of the multiplication factor and the fission reaction rate are used as reactor attributes whose uncertainties are to be reduced. Numerical experiments are used to demonstrate the computational efficiency of the proposed algorithm. Our ongoing work is focusing on extending the proposed algorithm to account for various forms of feedback, e.g., thermal-hydraulics and depletion effects.},
  journal      = {Nuclear Data Sheets},
  author       = {Khuwaileh, B. A. and Abdel-Khalik, H. S.},
  year         = {2015},
  month        = jan,
  pages        = {57--61},
}

@inproceedings{zhang_abdel-khalik_2014a,
  title        = {Development of subspace-based hybrid {Monte Carlo}-deterministic algorithms for reactor physics calculations},
  booktitle    = {Proceedings of the 21st International Conference on Nuclear Engineering - 2013, vol 6},
  author       = {Zhang, Q. and Abdel-Khalik, H.},
  year         = {2014},
}

@article{zhang_abdel-khalik_2014b,
  title        = {Global variance reduction for {Monte Carlo} reactor physics calculations},
  volume       = {280},
  ISSN         = {1872-759X},
  DOI          = {10.1016/j.nucengdes.2014.08.027},
  abstractNote = {The development of hybrid Monte-Carlo-Deterministic (MC-DT) approaches, taking place over the past few decades, have primarily focused on shielding and detection applications where the analysis requires a small number of responses, i.e. at the detector location(s). This work further develops a recently introduced global variance reduction approach, denoted by the SUBSPACE approach, and extends its application to reactor analysis problems, where responses are required everywhere in the phase space. In this proof-of-principle study, the SUBSPACE approach is shown to reduce the excessively long execution time of Monte-Carlo reactor physics calculations for simplified reactor geometries significantly. By way of demonstration, the SUBSPACE approach is applied to assembly level calculations used to generate the few-group homogenized cross sections. These models are typically expensive and need to be executed in the order of 103–105 times to properly characterize the few-group cross sections for downstream core-wide calculations. Applicability to k-eigenvalue core-wide models is also demonstrated in this work. Given the favorable results obtained in this work, we believe that the SUBSPACE method significantly enhances the state of the art of Monte-Carlo reactor physics analysis with particular focus on reducing the necessary runtime for achieving accurate results.},
  journal      = {Nuclear Engineering and Design},
  author       = {Zhang, Qiong and Abdel-Khalik, Hany S.},
  year         = {2014},
  month        = dec,
  pages        = {76--85},
}

@inproceedings{wang_abdel-khalik_2014,
  title        = {Stochastic higher-order generalized perturbation theory for neutron diffusion and transport calculations},
  DOI          = {10.1115/icone21-16572},
  abstractNote = {The role of scientific computing has been heavily promoted in many fields to improve understanding the physics of complex engineering systems in recent years while conduct the experiments can be time-consuming, inflexible, expensive and difficult to repeat, e.g. nuclear reactor systems. The ultimate goal of scientific computing is to provide more reliable predictions for engineering systems within certain acceptable tolerance. To realize the benefits of scientific computing, extensive effort has been devoted to the development of efficient algorithms for Sensitivity Analysis (SA) and Uncertainty Quantification (UQ) whose numerical errors is under control and understood. However, the repeated execution of the simulations with different samples is computationally intractable for large-scale system with large number of Degrees of Freedom (DOF). The object of this manuscript will be focus on presenting our own developments of stochastic higher-order generalized perturbation theory to address the explosion in the computational load burden. Additionally, an overview of the current state-of-the-art of SA/UQ will also be provided.},
  booktitle    = {Proceedings of the 21st International Conference on Nuclear Engineering - 2013, vol 6},
  author       = {Wang, C. J. and Abdel-Khalik, H. S.},
  year         = {2014},
}

@article{wang_abdel-khalik_2013,
  title        = {Exact-to-precision generalized perturbation theory for eigenvalue problems},
  volume       = {256},
  ISSN         = {1872-759X},
  DOI          = {10.1016/j.nucengdes.2012.11.006},
  abstractNote = {This manuscript extends the exact-to-precision generalized perturbation theory (EpGPT), introduced previously, to eigenvalue problems whereby previous developments focused on source driven problems only. The EpGPT collectively denotes new developments in generalized perturbation theory (GPT) that place high premium on computational efficiency in order to render GPT a standard analysis tool in routine design and safety reactor calculations. Unlike GPT, EpGPT defines a small number of what is referred to as the ‘active’ responses which are solely dependent on the physics model rather than on the responses of interest, the number of input parameters, or the number of points in the state phase space. The active responses are captured by determining all possible state variations resulting from all possible parameters perturbations. If r (the number of active responses) is much smaller than n (the size of the state space), one can show that by recasting GPT equations in terms of the active responses, all higher order responses variations can be determined to a user-defined accuracy criterion. In addition to presenting the mathematical theory of EpGPT to eigenvalue problems, illustrative numerical experiments will be conducted serving as proof of principle.},
  journal      = {Nuclear Engineering and Design},
  author       = {Wang, Congjian and Abdel-Khalik, Hany S.},
  year         = {2013},
  month        = mar,
  pages        = {130--140},
}

@article{wu_abdel-khalik_2013,
  title        = {Hybrid biasing approaches for global variance reduction},
  volume       = {72},
  ISSN         = {0969-8043},
  DOI          = {10.1016/j.apradiso.2012.09.026},
  abstractNote = {A new variant of Monte Carlo—deterministic (DT) hybrid variance reduction approach based on Gaussian process theory is presented for accelerating convergence of Monte Carlo simulation and compared with Forward-Weighted Consistent Adjoint Driven Importance Sampling (FW-CADIS) approach implemented in the SCALE package from Oak Ridge National Laboratory. The new approach, denoted the Gaussian process approach, treats the responses of interest as normally distributed random processes. The Gaussian process approach improves the selection of the weight windows of simulated particles by identifying a subspace that captures the dominant sources of statistical response variations. Like the FW-CADIS approach, the Gaussian process approach utilizes particle importance maps obtained from deterministic adjoint models to derive weight window biasing. In contrast to the FW-CADIS approach, the Gaussian process approach identifies the response correlations (via a covariance matrix) and employs them to reduce the computational overhead required for global variance reduction (GVR) purpose. The effective rank of the covariance matrix identifies the minimum number of uncorrelated pseudo responses, which are employed to bias simulated particles. Numerical experiments, serving as a proof of principle, are presented to compare the Gaussian process and FW-CADIS approaches in terms of the global reduction in standard deviation of the estimated responses.},
  journal      = {Applied Radiation and Isotopes},
  author       = {Wu, Zeyun and Abdel-Khalik, Hany S.},
  year         = {2013},
  month        = feb,
  pages        = {83--88},
}

@article{abdel-khalik_bang_wang_2013,
  title        = {Overview of hybrid subspace methods for uncertainty quantification, sensitivity analysis},
  volume       = {52},
  ISSN         = {1873-2100},
  DOI          = {10.1016/j.anucene.2012.07.020},
  abstractNote = {The role of modeling and simulation has been heavily promoted in recent years to improve understanding of complex engineering systems. To realize the benefits of modeling and simulation, concerted efforts in the areas of uncertainty quantification and sensitivity analysis are required. The manuscript intends to serve as a pedagogical presentation of the material to young researchers and practitioners with little background on the subjects. We believe this is important as the role of these subjects is expected to be integral to the design, safety, and operation of existing as well as next generation reactors. In addition to covering the basics, an overview of the current state-of-the-art will be given with particular emphasis on the challenges pertaining to nuclear reactor modeling. The second objective will focus on presenting our own development of hybrid subspace methods intended to address the explosion in the computational overhead required when handling real-world complex engineering systems.},
  journal      = {Annals of Nuclear Energy},
  author       = {Abdel-Khalik, Hany S. and Bang, Youngsuk and Wang, Congjian},
  year         = {2013},
  month        = feb,
  pages        = {28--46},
}

@article{bang_abdel-khalik_2013,
  title        = {Projection-based second order perturbation theory},
  volume       = {52},
  ISSN         = {0306-4549},
  DOI          = {10.1016/j.anucene.2012.07.009},
  abstractNote = {Reactor analysis represents a typical example of a complex engineering system that is described by multi-scale and multi-physics nonlinear models with many input parameters and output responses. Obtaining reference solutions to these models is computationally expensive which renders impractical their repeated executions for engineering-oriented studies such as design optimization, uncertainty quantification, and safety analysis. To overcome this challenge, sensitivity analysis based on first-order perturbation theory has been widely used in the reactor analysis community to estimate changes in responses of interest due to input parameter variations. Although perturbation theory has been rigorously developed over the past four decades in order to extend its applicability to estimate higher order variations, engineering applications have primarily focused on first-order perturbation theory only. This is because the computational overhead of higher order perturbation theory are often overwhelming and do not justify the development effort required for their implementation. This manuscript further develops a recently introduced higher order approach to estimate second order variations. The objective is to demonstrate that first-order perturbation theory can be employed in practical engineering calculations to estimate higher order variations. The applicability of the introduced approach is analyzed with TSUNAMI-2D for typical lattice physics calculations.},
  journal      = {Annals of Nuclear Energy},
  author       = {Bang, Youngsuk and Abdel-Khalik, Hany S.},
  year         = {2013},
  month        = feb,
  pages        = {80--85},
}

@article{abdel-khalik_2012,
  title        = {Adjoint-based sensitivity analysis for multi-component models},
  volume       = {245},
  ISSN         = {0029-5493},
  DOI          = {10.1016/j.nucengdes.2012.01.017},
  abstractNote = {In typical design calculations, a multi-component model (i.e. a chain of codes) is often employed to calculate the quantity of interest. For design optimization, sensitivity analysis studies are often required to find optimum operating conditions or to propagate uncertainties required to set design margins. This manuscript presents a hybrid approach to enable the transfer of sensitivity information between the various components in an efficient manner that precludes the need for a global sensitivity analysis procedure, often envisaged to be computationally intractable. The presented method has two advantages over existing methods which may be classified into two broad categories: brute force-type methods and amalgamated-type methods. First, the presented method determines the minimum number of adjoint evaluations for each component as opposed to the brute force-type methods which require full evaluation of all sensitivities for all responses calculated by each component in the overall model, which proves computationally prohibitive for realistic problems. Second, the new method treats each component as a black-box as opposed to amalgamated-type methods which requires explicit knowledge of the system of equations associated with each component in order to reach the minimum number of adjoint evaluations. The discussion in this manuscript will be limited to the evaluation of first-order derivatives only. Current work focuses on the extension of this methodology to capture higher order derivatives.},
  journal      = {Nuclear Engineering and Design},
  author       = {Abdel-Khalik, Hany S.},
  year         = {2012},
  month        = apr,
  pages        = {49--54},
}

@article{kennedy_rabiti_abdel-khalik_2012,
  title        = {Generalized Perturbation Theory-Free Sensitivity Analysis for Eigenvalue Problems},
  volume       = {179},
  ISSN         = {1943-7471},
  DOI          = {10.13182/nt179-169},
  abstractNote = {Generalized perturbation theory (GPT) has been recognized as the most computationally efficient approach for performing sensitivity analysis for models with many input parameters, which renders forward sensitivity analysis computationally overwhelming. In critical systems, GPT involves the solution of the adjoint form of the eigenvalue problem with a response-dependent fixed source. Although conceptually simple to implement, most neutronics codes that can solve the adjoint eigenvalue problem do not have a GPT capability unless envisioned during code development. We introduce in this manuscript a reduced-order modeling approach based on subspace methods that requires the solution of the fundamental adjoint equations but allows the generation of response sensitivities without the need to set up GPT equations, and that provides an estimate of the error resulting from the reduction. Moreover, the new approach solves the eigenvalue problem independently of the number or type of responses. This allows for an efficient computation of sensitivities when many responses are required. This paper introduces the theory and implementation details of the GPT-free approach and describes how the errors could be estimated as part of the analysis. The applicability is demonstrated by estimating the variations in the flux distribution everywhere in the phase space of a fast critical sphere and a high-temperature gas-cooled reactor prismatic lattice. The variations generated by the GPT-free approach are benchmarked to the exact variations generated by direct forward perturbations.},
  number       = {2},
  journal      = {Nuclear Technology},
  author       = {Kennedy, Chris and Rabiti, Cristian and Abdel-Khalik, Hany},
  year         = {2012},
  month        = aug,
  pages        = {169--179},
}

@article{bang_abdel-khalik_hite_2012,
  title        = {Hybrid reduced order modeling applied to nonlinear models},
  volume       = {91},
  ISSN         = {1097-0207},
  DOI          = {10.1002/nme.4298},
  number       = {9},
  journal      = {International Journal for Numerical Methods in Engineering},
  author       = {Bang, Youngsuk and Abdel-Khalik, Hany S. and Hite, Jason M.},
  year         = {2012},
  month        = aug,
  pages        = {929--949},
}

@article{bang_wang_abdel-khalik_2012,
  title        = {State-Based Adjoint Method for Reduced Order Modeling},
  volume       = {41},
  ISSN         = {1532-2424},
  DOI          = {10.1080/00411450.2012.672359},
  abstractNote = {Introduced here is an adjoint state-based method for model reduction, which provides a single solution to two classes of reduction methods that are currently in the literature. The first class, which represents the main subject of this manuscript, is concerned with linear time invariant problems where one is interested in calculating linear responses variations resulting from initial conditions perturbations. The other class focuses on perturbations introduced in the operator, which result in nonlinear responses variations. Unlike existing adjoint-based methods where an adjoint function is calculated based on a given response, the state-based method employs the state variations to set up a number of adjoint problems, each corresponding to a pseudoresponse. This manuscript extends the applicability of state-based method to generate reduced order models for linear time invariant problems. Previous developments focusing on operator perturbations are reviewed briefly to highlight the common features of the state-based algorithm as applied to these two different classes of problems. Similar to previous developments, the state-based reduction is shown to set an upper-bound on the maximum discrepancy between the reduced and original model predictions. The methodology is applied and compared to other state-of-the-art methods employing several nuclear reactor diffusion and transport models.},
  number       = {1-2},
  journal      = {Transport Theory and Statistical Physics},
  author       = {Bang, Youngsuk and Wang, Congjian and Abdel-Khalik, Hany S.},
  year         = {2012},
  pages        = {101--132},
}

@article{wang_abdel-khalik_2011,
  title        = {Exact-to-precision generalized perturbation theory for source-driven systems},
  volume       = {241},
  ISSN         = {0029-5493},
  DOI          = {10.1016/j.nucengdes.2011.09.009},
  abstractNote = {Presented in this manuscript are new developments to perturbation theory which are intended to extend its applicability to estimate, with quantifiable accuracy, the exact variations in all responses calculated by the model with respect to all possible perturbations in the model's input parameters. The new developments place high premium on reducing the associated computational overhead in order to enable the use of perturbation theory in routine reactor design calculations. By way of examples, these developments could be employed in core simulation to accurately estimate the few-group cross-sections variations resulting from perturbations in neutronics and thermal-hydraulics core conditions. These variations are currently being described using a look-up table approach, where thousands of assembly calculations are performed to capture few-group cross-sections variations for the downstream core calculations. Other applications include the efficient evaluation of surrogates for applications that require repeated model runs such as design optimization, inverse studies, uncertainty quantification, and online core monitoring. The theoretical background of these developments applied to source-driven systems and supporting numerical experiments are presented in this manuscript. Extension to eigenvalue problems will be presented in a future article.},
  number       = {12},
  journal      = {Nuclear Engineering and Design},
  author       = {Wang, Congjian and Abdel-Khalik, Hany S.},
  year         = {2011},
  month        = dec,
  pages        = {5104--5112},
}

@article{jessee_turinsky_abdel-khalik_2011,
  title        = {Many-Group Cross-Section Adjustment Techniques for Boiling Water Reactor Adaptive Simulation},
  volume       = {169},
  ISSN         = {0029-5639},
  DOI          = {10.13182/nse09-67},
  abstractNote = {Abstract Computational capability has been developed to adjust multigroup neutron cross sections, including self-shielding correction factors, to improve the fidelity of boiling water reactor (BWR) core modeling and simulation. The method involves propagating multigroup neutron cross-section uncertainties through various BWR computational models to evaluate uncertainties in key core attributes such as core keff, nodal power distributions, thermal margins, and in-core detector readings. Uncertainty-based inverse theory methods are then employed to adjust multigroup cross sections to minimize the disagreement between BWR core modeling predictions and observed (i.e., measured) plant data. For this paper, observed plant data are virtually simulated in the form of perturbed three-dimensional nodal power distributions with the perturbations sized to represent actual discrepancies between predictions and real plant data. The major focus of this work is to efficiently propagate multigroup neutron cross-section uncertainty through BWR lattice physics and core simulator calculations. The data adjustment equations are developed using a subspace approach that exploits the ill-conditioning of the multigroup cross-section covariance matrix to minimize computation and storage burden. Tikhonov regularization is also employed to improve the conditioning of the data adjustment equations. Expressions are also provided for posterior covariance matrices of both the multigroup cross-section and core attributes uncertainties.},
  number       = {1},
  journal      = {Nuclear Science and Engineering},
  author       = {Jessee, M. A. and Turinsky, P. J. and Abdel-Khalik, H. S.},
  year         = {2011},
  month        = sep,
  pages        = {40--55},
}

@article{iqbal_abdel-khalik_turinsky_2009,
  title        = {A comparative study of {ZPR-6/7} with {MCNP/5} and {MC2-2/REBUS}},
  volume       = {36},
  ISSN         = {0306-4549},
  DOI          = {10.1016/j.anucene.2009.03.005},
  abstractNote = {This work models the INL ZPR-6/7 assembly employing two different approaches: a probabilistic approach using MCNP/5 and a deterministic one using MC2-2/REBUS. With MCNP/5, each drawer of the assembly is modeled in detail with regard to geometry and fuel loading. In the deterministic approach, the MC2-2 collapses cross-sections in energy and space into a 15 few group structure homogenized spatially over each drawer for the REBUS 3D model. Various reactivity coefficients and reaction rates at different locations inside the core were evaluated and compared for both approaches and contrasted to published experimental data and were found to be in good agreement.},
  number       = {7},
  journal      = {Annals of Nuclear Energy},
  author       = {Iqbal, Masood and Abdel-Khalik, Hany and Turinsky, Paul},
  year         = {2009},
  month        = jul,
  pages        = {995--997},
}