@article{khuwaileh_turinsky_2019, title={Non-linear, time dependent target accuracy assessment algorithm for multi-physics, high dimensional nuclear reactor calculations}, volume={114}, ISSN={["0149-1970"]}, DOI={10.1016/j.pnucene.2019.01.023}, abstractNote={Safety analysis and design optimization depend on the accurate prediction of various reactor core responses. Model predictions can be enhanced by reducing the uncertainty associated with the responses of interest. Accordingly, an inverse problem analysis can be designed to provide guidance to determine the optimum experimental program to reduce the uncertainties in model parameters, e.g. cross-sections and fuel pellet-clad thermal conductivity, so as to reduce the uncertainties in constrained reactor core responses. This process is referred to as target accuracy assessment. In this work, a nonlinear algorithm to determine an optimum experimental program has been developed and tested. The algorithm is based on the construction of a surrogate model to replace the original model used to predict the core responses and uncertainties, thereby enabling the target accuracy assessment to treat non-linearity at reasonable computational cost. Subspace-based projection techniques are used to identify the influential degrees of freedom, which are then used to construct the surrogate model. Once constructed, the new computationally efficient surrogate model is used to propagate uncertainties via Monte Carlo sampling. Moreover, this work replaces the classical objective function used for nuclear data target accuracy assessment with another that factors in the financial gains of the target accuracy assessment results and replaces [or can supplement] differential experiments with many times more readily available integral experiments. Finally, the proposed algorithm is applied to a 3-dimensional fuel assembly depletion problem with thermal-hydraulics feedback using the VERA-CS core simulator. Specifically, CASL Progression Problem Number 6 is the illustrative problem employed, which resembles a pressurized water reactor fuel assembly.}, journal={PROGRESS IN NUCLEAR ENERGY}, author={Khuwaileh, Bassam A. and Turinsky, Paul J.}, year={2019}, month={Jul}, pages={227–233} } @article{khuwaileh_williams_turinsky_hartanto_2019, title={Verification of Reduced Order Modeling based Uncertainty/Sensitivity Estimator (ROMUSE)}, volume={51}, ISSN={["1738-5733"]}, DOI={10.1016/j.net.2019.01.020}, abstractNote={This paper presents a number of verification case studies for a recently developed sensitivity/uncertainty code package. The code package, ROMUSE (Reduced Order Modeling based Uncertainty/Sensitivity Estimator), is an effort to provide an analysis tool to be used in conjunction with reactor core simulators, in particular the Virtual Environment for Reactor Applications (VERA) core simulator. ROMUSE has been written in C++ and is currently capable of performing various types of parameter perturbations and associated sensitivity analysis, uncertainty quantification, surrogate model construction and subspace analysis. The current version 2.0 has the capability to interface with the Design Analysis Kit for Optimization and Terascale Applications (DAKOTA) code, which gives ROMUSE access to the various algorithms implemented within DAKOTA, most importantly model calibration. The verification study is performed via two basic problems and two reactor physics models.
The first problem is used to verify the ROMUSE single-physics gradient-based range-finding algorithm capability using an abstract quadratic model. The second problem is the Brusselator problem, which is a coupled problem representative of multi-physics problems. This problem is used to test the capability of constructing surrogates via ROMUSE-DAKOTA. Finally, light water reactor pin cell and sodium-cooled fast reactor fuel assembly problems are simulated via SCALE 6.1 to test ROMUSE's capability for uncertainty quantification and sensitivity analysis purposes.}, number={4}, journal={NUCLEAR ENGINEERING AND TECHNOLOGY}, author={Khuwaileh, Bassam and Williams, Brian and Turinsky, Paul and Hartanto, Donny}, year={2019}, month={Jul}, pages={968–976} } @article{turinsky_2018, title={Preface to Shippingport Atomic Power Station thematic issue}, volume={102}, ISSN={["0149-1970"]}, DOI={10.1016/j.pnucene.2017.03.030}, journal={PROGRESS IN NUCLEAR ENERGY}, author={Turinsky, Paul J.}, year={2018}, month={Jan}, pages={1–8} } @article{turinsky_martin_2017, title={Special issue on the "Consortium for Advanced Simulation of Light Water Reactors Research and Development Progress"}, volume={334}, ISSN={["1090-2716"]}, DOI={10.1016/j.jcp.2017.01.028}, journal={JOURNAL OF COMPUTATIONAL PHYSICS}, author={Turinsky, Paul J.
and Martin, William R.}, year={2017}, month={Apr}, pages={687–688} } @article{turinsky_kothe_2016, title={Modeling and simulation challenges pursued by the Consortium for Advanced Simulation of Light Water Reactors (CASL)}, volume={313}, ISSN={["1090-2716"]}, DOI={10.1016/j.jcp.2016.02.043}, abstractNote={The Consortium for Advanced Simulation of Light Water Reactors (CASL), the first Energy Innovation Hub of the Department of Energy, was established in 2010 with the goal of providing modeling and simulation (M&S) capabilities that support and accelerate the improvement of nuclear energy's economic competitiveness and the reduction of spent nuclear fuel volume per unit energy, all while assuring nuclear safety. To accomplish this requires advances in M&S capabilities in radiation transport, thermal-hydraulics, fuel performance and corrosion chemistry. To focus CASL's R&D, industry challenge problems have been defined, which equate with long-standing issues of the nuclear power industry that M&S can assist in addressing. To date, CASL has developed a multi-physics "core simulator" based upon pin-resolved radiation transport and subchannel (within fuel assembly) thermal-hydraulics, capitalizing on the capabilities of high performance computing. CASL's fuel performance M&S capability can also be optionally integrated into the core simulator, yielding a coupled multi-physics capability with untapped predictive potential. Material models have been developed to enhance predictive capabilities of fuel clad creep and growth, along with deeper understanding of zirconium alloy clad oxidation and hydrogen pickup. Understanding of corrosion chemistry (e.g., CRUD formation) has evolved at all scales: micro, meso and macro. CFD R&D has focused on improvement in closure models for subcooled boiling and bubbly flow, and the formulation of robust numerical solution algorithms. For multiphysics integration, several iterative acceleration methods have been assessed, illuminating areas where further research is needed. Finally, uncertainty quantification and data assimilation techniques, based upon sampling approaches, have been made more feasible for practicing nuclear engineers via R&D on dimensional reduction and biased sampling. Industry adoption of CASL's evolving M&S capabilities, which is in progress, will assist in addressing long-standing and future operational and safety challenges of the nuclear industry.}, journal={JOURNAL OF COMPUTATIONAL PHYSICS}, author={Turinsky, Paul J. and Kothe, Douglas B.}, year={2016}, month={May}, pages={367–376} } @article{hays_turinsky_2014, title={STOCHASTIC OPTIMIZATION FOR NUCLEAR FACILITY DEPLOYMENT SCENARIOS USING VISION}, volume={186}, ISSN={["1943-7471"]}, DOI={10.13182/nt13-68}, abstractNote={Abstract The process of transitioning from the current once-through nuclear fuel cycle to a hypothetical closed fuel cycle necessarily introduces a much greater degree of supply feedback and complexity. When considering such advanced technologies, it is necessary to consider when and how fuel cycle facilities can be deployed in order to avoid resource conflicts while maximizing certain stakeholder values. A multiobjective optimization capability was developed around the VISION nuclear fuel cycle simulation code to allow for the automated determination of optimum deployment scenarios and objective trade-off surfaces for dynamic fuel cycle transition scenarios.
A parallel simulated annealing optimization framework with modular objective function definitions is utilized to maximize computational power and flexibility. Three sample objective functions representing a range of economic and sustainability goals are presented, as well as representative optimization results demonstrating both robust convergence toward a set of optimum deployment configurations and a consistent set of trade-off surfaces.}, number={1}, journal={NUCLEAR TECHNOLOGY}, author={Hays, Ross and Turinsky, Paul}, year={2014}, month={Apr}, pages={76–89} } @article{heo_turinsky_doster_2013, title={Optimization of Thermal-Hydraulic Reactor System for SMRs via Data Assimilation and Uncertainty Quantification}, volume={173}, ISSN={["1943-748X"]}, DOI={10.13182/nse11-113}, abstractNote={Abstract This paper discusses the utilization of an uncertainty quantification methodology for nuclear power plant thermal-hydraulic transient predictions, with a focus on small modular reactors characterized by the integral pressurized water reactor design, to determine the value of completing experiments in reducing uncertainty. To accomplish this via the improvement of the prediction of key system attributes, e.g., minimum departure from nucleate boiling ratio, a thermal-hydraulic simulator is used to complete data assimilation for input parameters to the simulator employing experimental data generated by the virtual reactor. The mathematical approach that is used to complete this analysis depends upon whether the system responses, i.e., sensor signals, and the system attributes are or are not linearly dependent upon the parameters. For a transient producing mildly nonlinear response sensitivities, a Bayesian-type approach was used to obtain the a posteriori distributions of the parameters assuming Gaussian distributions for the input parameters and responses. For a transient producing highly nonlinear response sensitivities, the Markov chain Monte Carlo method was utilized based upon Bayes’ theorem to estimate the a posteriori distributions of the parameters. To evaluate the value of completing experiments, an optimization problem was formulated and solved. The optimization addressed both the experiments to complete and the modifications to be made to the nuclear power plant made possible by using the increased margins resulting from data assimilation. The decision variables of the experiment optimization problem include the selection of sensor types and locations and experiment type imposing realistic constraints. The decision variables of the nuclear power plant modification optimization problem include various design specifications, e.g., power rating, steam generator size, and reactor coolant pump size, with the objective of minimizing cost as constrained by required margins to accommodate the uncertainty. Since the magnitude of the uncertainty is dependent upon the experiments via data assimilation, the nuclear power plant optimization problem is treated as a suboptimization problem within the experiment optimization problem. The experiment optimization problem objective is to maximize the net savings, defined as the savings in nuclear power plant cost due to the modified design specifications minus the cost of the experiments. Both the experiment and the nuclear power plant optimization problems were solved using the simulated annealing method.}, number={3}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Heo, Jaeseok and Turinsky, Paul J. and Doster, J. 
Michael}, year={2013}, month={Mar}, pages={293–311} } @article{turinsky_2012, title={ADVANCES IN MULTI-PHYSICS AND HIGH PERFORMANCE COMPUTING IN SUPPORT OF NUCLEAR REACTOR POWER SYSTEMS MODELING AND SIMULATION}, volume={44}, ISSN={["1738-5733"]}, DOI={10.5516/net.01.2012.500}, abstractNote={Significant advances in computational performance have occurred over the past two decades, achieved not only by the introduction of more powerful processors but also by the incorporation of parallelism in computer hardware at all levels. Simultaneous with these hardware and associated system software advances have been advances in modeling physical phenomena and the numerical algorithms to allow their usage in simulation. This paper presents a review of the advances in computer performance, discusses the modeling and simulation capabilities required to address the multi-physics and multi-scale phenomena applicable to a nuclear reactor core simulator, and presents examples of relevant physics simulation codes' performance on high-performance computers.}, number={2}, journal={NUCLEAR ENGINEERING AND TECHNOLOGY}, author={Turinsky, Paul J.}, year={2012}, month={Mar}, pages={103–112} } @article{stover_turinsky_2012, title={EXPERIMENT OPTIMIZATION TO REDUCE NUCLEAR DATA UNCERTAINTIES IN SUPPORT OF REACTOR DESIGN}, volume={180}, ISSN={["1943-7471"]}, DOI={10.13182/nt12-a14635}, abstractNote={The safe and economical design of new, innovative nuclear reactors will require uncertainty reduction in basic nuclear data that are input to simulations used during reactor design. These data uncertainties propagate to uncertainties in design responses, which in turn require the reactor designer to incorporate additional safety margins into the design, often increasing the cost of the reactor. Therefore, basic nuclear data need to be improved, and this is accomplished through experimentation, which is often done using cold critical experiments. Considering the high cost of nuclear experiments, it is desired to have an optimized experiment that will provide the experimental data needed for maximum uncertainty reduction in the design responses. However, the optimization of the experiment is coupled to the reactor design itself because with reduced uncertainty in the design responses the reactor design can be re-optimized. It is thus desired to find the experiment design that gives the most optimized reactor design. Solution of this nested optimization problem is made possible by the use of the simulated annealing algorithm. Cost values for experiment design specifications and reactor design specifications are estimated and used to compute a total savings by comparing the a posteriori reactor cost to the a priori cost, accounting for the offsetting cost of the experiment. This was done for the Argonne National Laboratory-developed Advanced Burner Test Reactor design concept employing a modified Zero Power Physics Reactor as the experimental facility.}, number={2}, journal={NUCLEAR TECHNOLOGY}, author={Stover, Tracy E. and Turinsky, Paul J.}, year={2012}, month={Nov}, pages={216–230} } @article{jessee_turinsky_abdel-khalik_2011, title={Many-Group Cross-Section Adjustment Techniques for Boiling Water Reactor Adaptive Simulation}, volume={169}, ISSN={["0029-5639"]}, DOI={10.13182/nse09-67}, abstractNote={Abstract Computational capability has been developed to adjust multigroup neutron cross sections, including self-shielding correction factors, to improve the fidelity of boiling water reactor (BWR) core modeling and simulation.
The method involves propagating multigroup neutron cross-section uncertainties through various BWR computational models to evaluate uncertainties in key core attributes such as core keff, nodal power distributions, thermal margins, and in-core detector readings. Uncertainty-based inverse theory methods are then employed to adjust multigroup cross sections to minimize the disagreement between BWR core modeling predictions and observed (i.e., measured) plant data. For this paper, observed plant data are virtually simulated in the form of perturbed three-dimensional nodal power distributions with the perturbations sized to represent actual discrepancies between predictions and real plant data. The major focus of this work is to efficiently propagate multigroup neutron cross-section uncertainty through BWR lattice physics and core simulator calculations. The data adjustment equations are developed using a subspace approach that exploits the ill-conditioning of the multigroup cross-section covariance matrix to minimize computation and storage burden. Tikhonov regularization is also employed to improve the conditioning of the data adjustment equations. Expressions are also provided for posterior covariance matrices of both the multigroup cross-section and core attributes uncertainties.}, number={1}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Jessee, M. A. and Turinsky, P. J. and Abdel-Khalik, H. S.}, year={2011}, month={Sep}, pages={40–55} } @article{hays_turinsky_2011, title={BWR in-core fuel management optimization using parallel simulated annealing in FORMOSA-B}, volume={53}, ISSN={["0149-1970"]}, DOI={10.1016/j.pnucene.2010.09.002}, abstractNote={The process of finding optimized fuel reload patterns for boiling water reactors is complicated by a number of factors, including the large number of fuel assemblies involved, the three-dimensional neutronic and thermal-hydraulic variations, and the interplay of coolant flow rate with control rod programming. The FORMOSA-B code was developed to provide an automated method for finding fuel loading patterns, control rod programs and coolant flow rate schedules to minimize certain quantitative metrics of core performance while satisfying given operational constraints. One drawback of this code has been the long runtimes required for a complete cycle optimization on a desktop workstation (oftentimes several days or more). To address this shortcoming, a parallel simulated annealing algorithm has been added to the FORMOSA-B code, so that the runtimes may be greatly reduced by using a multiprocessor computer cluster. Tests of the algorithm on a sample problem indicate that it is capable of parallel efficiencies exceeding 80% when using four processors.}, number={6}, journal={PROGRESS IN NUCLEAR ENERGY}, author={Hays, Ross and Turinsky, Paul}, year={2011}, month={Aug}, pages={600–606} } @article{iqbal_abdel-khalik_turinsky_2009, title={A comparative study of ZPR-6/7 with MCNP/5 and MC2-2/REBUS}, volume={36}, ISSN={["0306-4549"]}, DOI={10.1016/j.anucene.2009.03.005}, abstractNote={This work models the INL ZPR-6/7 assembly employing two different approaches: a probabilistic approach using MCNP/5 and a deterministic one using MC2-2/REBUS. With MCNP/5, each drawer of the assembly is modeled in detail with regard to geometry and fuel loading. In the deterministic approach, MC2-2 collapses cross-sections in energy and space into a 15-group structure homogenized spatially over each drawer for the REBUS 3D model.
Various reactivity coefficients and reaction rates at different locations inside the core were evaluated for both approaches, compared against published experimental data, and found to be in good agreement.}, number={7}, journal={ANNALS OF NUCLEAR ENERGY}, author={Iqbal, Masood and Abdel-Khalik, Hany and Turinsky, Paul}, year={2009}, month={Jul}, pages={995–997} } @article{abdel-khalik_turinsky_jessee_2008, title={Efficient subspace methods-based algorithms for performing sensitivity, uncertainty, and adaptive simulation of large-scale computational models}, volume={159}, ISSN={["1943-748X"]}, DOI={10.13182/NSE159-256}, abstractNote={Abstract This paper introduces the concepts and derives the mathematical theory of efficient subspace methods (ESMs) applied to the simulation of large-scale complex models, of which nuclear reactor simulation will serve as a test basis. ESMs are intended to advance the capabilities of predictive simulation to meet the functional requirements of future energy system simulation and overcome the inadequacies of current design methods. Some of the inadequacies addressed by ESM include the lack of a rigorous approach to perform comprehensive validation of the multitudes of models and input data used in the design calculations and the lack of robust mathematical approaches to enhance the fidelity of existing and advanced computational codes. To accomplish these tasks, the computational tools must be capable of performing the following three applications with both accuracy and efficiency: (a) sensitivity analysis of key system attributes with respect to various input data; (b) uncertainty quantification for key system attributes; and (c) adaptive simulation, also known as data assimilation, for adapting existing models based on the assimilated body of experimental information to achieve the best possible prediction accuracy. These three applications, involving large-scale computational models, are now considered computationally infeasible if both the input data and key system attributes or experimental information fields are large. This paper will develop the mathematical theory of ESM-based algorithms for these three applications. The treatment in this paper is based on a linearized approximation of the associated computational models. Extension to higher-order approximations represents the focus of our ongoing research.}, number={3}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Abdel-Khalik, Hany S. and Turinsky, Paul J. and Jessee, Matthew A.}, year={2008}, month={Jul}, pages={256–272} } @article{abdel-khalik_turinsky_jessee_elkins_stover_iqbal_2008, title={Uncertainty Quantification, Sensitivity Analysis, and Data Assimilation for Nuclear Systems Simulation}, volume={109}, ISSN={["0090-3752"]}, DOI={10.1016/j.nds.2008.11.010}, abstractNote={Reliable evaluation of nuclear data will play a major role in reduction of nuclear systems simulation uncertainties via the use of advanced sensitivity analysis (SA), uncertainty quantification (UQ), and data assimilation (DA) methodologies. This follows since nuclear data have proven to constitute a major source of neutronics uncertainties. This paper will overview the use of the Efficient Subspace Method (ESM), developed at NCSU, to overcome one of the main deficiencies of existing methodologies for SA/UQ/DA, namely the inability to handle codes with large input and output (I/O) data streams, where neither the forward nor the adjoint approach alone is appropriate.
We demonstrate the functionality of ESM for an LWR core, a boiling water reactor, and a fast reactor benchmark experiment, the ZPR6/7A assembly. This work demonstrates the capability of adjusting cross section data, thereby providing guidance to cross section evaluation efforts by identification of key cross sections and associated energy ranges that contribute the most to the propagated core attributes uncertainties.}, number={12}, journal={NUCLEAR DATA SHEETS}, author={Abdel-Khalik, H. and Turinsky, P. and Jessee, M. and Elkins, J. and Stover, T. and Iqbal, M.}, year={2008}, month={Dec}, pages={2785–2790} } @article{abdel-khalik_turinsky_2005a, title={Adaptive core simulation employing discrete inverse theory - Part I: Theory}, volume={151}, number={1}, journal={Nuclear Technology}, author={Abdel-Khalik, H. S. and Turinsky, P. J.}, year={2005}, pages={21-} } @article{abdel-khalik_turinsky_2005b, title={Adaptive core simulation employing discrete inverse theory - Part II: Numerical experiments}, volume={151}, ISSN={["1943-7471"]}, DOI={10.13182/NT05-A3628}, abstractNote={Use of adaptive simulation is intended to improve the fidelity and robustness of important core attribute predictions such as core power distribution, thermal margins, and core reactivity. Adaptive simulation utilizes a selected set of past and current reactor measurements of reactor observables, i.e., in-core instrumentation readings, to adapt the simulation in a meaningful way. The companion paper, "Adaptive Core Simulation Employing Discrete Inverse Theory - Part I: Theory," describes in detail the theoretical background of the proposed adaptive techniques. This paper, Part II, demonstrates several computational experiments conducted to assess the fidelity and robustness of the proposed techniques. The intent is to check the ability of the adapted core simulator model to predict future core observables that are not included in the adaption or core observables that are recorded at core conditions that differ from those at which adaption is completed. Also, this paper demonstrates successful utilization of an efficient sensitivity analysis approach to calculate the sensitivity information required to perform the adaption for millions of input core parameters. Finally, this paper illustrates a useful application for adaptive simulation: reducing the inconsistencies between two different core simulator code systems, where the multitudes of input data to one code are adjusted to enhance the agreement between both codes for important core attributes, i.e., core reactivity and power distribution. Also demonstrated is the robustness of such an application.}, number={1}, journal={NUCLEAR TECHNOLOGY}, author={Abdel-Khalik, HS and Turinsky, PJ}, year={2005}, month={Jul}, pages={22–34} } @article{kastanya_turinsky_2005, title={Development and implementation of a Newton-BICGSTAB iterative solver in the FORMOSA-B BWR core simulator code}, volume={150}, ISSN={["1943-748X"]}, DOI={10.13182/NSE05-A2501}, abstractNote={A Newton-Krylov iterative solver has been developed to reduce the CPU execution time of boiling water reactor (BWR) core simulators implemented in the core simulator part of the Fuel Optimization for Reloads Multiple Objectives by Simulated Annealing for BWR (FORMOSA-B) code, which is an in-core fuel management optimization code for BWRs. This new solver utilizes Newton's method to explicitly treat strong nonlinearities in the problem, replacing the traditionally used nested iterative approach.
Newton’s method provides the solver with a higher-than-linear convergence rate, assuming that good initial estimates of the unknowns are provided. Within each Newton iteration, an appropriately preconditioned Krylov solver is utilized for solving the linearized system of equations. Taking advantage of the higher convergence rate provided by Newton’s method and utilizing an efficient preconditioned Krylov solver, we have developed a Newton-Krylov solver to evaluate the three-dimensional, two-group neutron diffusion equations coupled with a two-phase flow model within a BWR core simulator. Numerical tests on the new solver have shown that speedups ranging from 1.6 to 2.1, with reference to the traditional approach of employing nested iterations to treat the nonlinear feedbacks, can be achieved. However, if a preconditioned Krylov solver is employed to complete the inner iterations of the traditional approach, negligible CPU time differences are noted between the Newton-Krylov and traditional (Krylov) approaches.}, number={1}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Kastanya, DYF and Turinsky, PJ}, year={2005}, month={May}, pages={56–71} } @article{turinsky_2005, title={Nuclear fuel management optimization: A work in progress}, volume={151}, number={1}, journal={Nuclear Technology}, author={Turinsky, P. J.}, year={2005}, pages={08-} } @article{mertyurek_turinsky_2004, title={Super-nodal methods for space-time kinetics}, volume={147}, ISSN={["1943-748X"]}, DOI={10.13182/NSE04-A2422}, abstractNote={Abstract A Super-Nodal method is developed to improve computational efficiency of core simulations for three-dimensional (3-D) core neutronics models. Computational performance of the neutronics model is increased by reducing the number of spatial nodes used in the core modeling. The Super-Nodal method reduces the errors associated with the use of coarse nodes in the analyses by providing a new set of cross sections and discontinuity factors for the new nodalization. These so-called homogenization parameters are obtained by employing a consistent collapsing technique. During this research a new type of singularity, namely, “fundamental mode singularity,” is addressed in the analytical nodal method solution. The “coordinate shifting” approach is developed as a method to address this singularity. Also, the “buckling shifting” approach is developed as an alternative to address the “zero buckling singularity.” In the course of addressing the treatment of these singularities, an effort was made to provide better and more robust results from the Super-Nodal method by developing several new methods for determining the collapsed diffusion coefficient. A simple error analysis based on the relative residual in the 3-D few-group diffusion equation at the fine mesh level is also introduced in this work.}, number={2}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Mertyurek, U and Turinsky, PJ}, year={2004}, month={Jun}, pages={93–126} } @article{karve_turinsky_2001, title={FORMOSA-B: A boiling water reactor in-core fuel management optimization package III}, volume={135}, ISSN={["0029-5450"]}, DOI={10.13182/NT01-A3219}, abstractNote={Abstract As part of the continuing development of the boiling water reactor in-core fuel management optimization code FORMOSA-B, the cold shutdown margin (SDM) constraint evaluator has been improved. The SDM evaluator in FORMOSA-B had been a first-order accurate Rayleigh quotient variational technique. 
It was deemed unreliable for difficult perturbed loading patterns (LPs) and thus was replaced by a high-fidelity, robust, computationally efficient evaluator. The new model is based on the solution of the one-group diffusion equation using approximate albedo boundary conditions for a three-dimensional, variable axial node, 10 × 10 assembly subregion around the stuck rod location. The fidelity and robustness of the model are first demonstrated by performing calculations on difficult perturbed LPs and for different plant cores. It is shown that the SDM reactivity is estimated within 40 pcm for the highest worth rod and that the speedup factors are 50 to 100 for small cores (and even more for larger cores) in comparison to the full-core three-dimensional simulations. Next, the successful implementation of the model in imposing the SDM constraint for FORMOSA-B's adaptive simulated annealing (SA)-based optimization strategy is presented. The results demonstrate SA's ability to remove large SDM violations (>700 pcm) along with thermal margin and critical flow constraint violations. Finally, the importance of having the SDM constraint on during optimization is shown by comparing results with a simulation in which the constraint is off.}, number={3}, journal={NUCLEAR TECHNOLOGY}, author={Karve, AA and Turinsky, PJ}, year={2001}, month={Sep}, pages={241–251} } @article{keller_turinsky_2001, title={FORMOSA-P three-dimensional/two-dimensional geometry collapse methodology}, volume={139}, ISSN={["0029-5639"]}, DOI={10.13182/NSE01-A2234}, abstractNote={Abstract A methodology has been developed whereby a three-dimensional (3-D) geometry, nodal expansion method (NEM), pressurized water reactor (PWR) core simulator model is collapsed to form an equivalent two-dimensional (2-D) geometry model that preserves approximately, but with negligible loss of fidelity, the global quantities and axially integrated reaction rates and surface currents of the 3-D model. In comparison with typical licensed-quality 3-D models, the 2-D collapsed NEM model typically requires a factor of 50 less computational time and exhibits root-mean-square (rms) assembly relative power fraction errors, as compared with the original 3-D model, of 5 × 10⁻³ over an entire fuel cycle, and average maximum errors over the fuel cycle of 1 × 10⁻². The collapse methodology includes a pin reconstruction methodology, which exhibits assemblywise rms pin power errors of 5 × 10⁻³ and average maximum assemblywise pin power errors of 1.2 × 10⁻². When coupled with FORMOSA-P's existing assembly power response generalized perturbation theory reactor core simulator, this permits loading-pattern evaluations at a speed approximately 100 to 150 times faster than full 3-D models, providing the computational efficiency needed for efficient incore fuel management optimization using stochastic methods.}, number={3}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Keller, PM and Turinsky, PJ}, year={2001}, month={Nov}, pages={235–247} } @article{karve_turinsky_2000, title={FORMOSA-B: A boiling water reactor in-core fuel management optimization package II}, volume={131}, ISSN={["0029-5450"]}, DOI={10.13182/NT00-A3104}, abstractNote={As part of the continuing development of the boiling water reactor in-core fuel management optimization code FORMOSA-B, the fidelity of the core simulator has been improved and a control rod pattern (CRP) sampling capability has been added.
The robustness of the core simulator is first demonstrated by benchmarking against core load-follow depletion predictions of both SIMULATE-3 and MICROBURN-B2 codes. The CRP sampling capability, based on heuristic rules, is next successfully tested on a fixed fuel loading pattern (LP) to yield a feasible CRP that removes the thermal margin and critical flow constraint violations. Its performance in facilitating a spectral shift flow operation is also demonstrated, and then its significant influence on the cost of thermal margin is presented. Finally, the heuristic CRP sampling capability is coupled with the stochastic LP optimization capability in FORMOSA-B—based on simulated annealing (SA)—to solve the combined CRP-LP optimization problem. Effectiveness of the sampling in improving the efficiency of the SA adaptive algorithm is shown by comparing the results to those obtained with the sampling turned off (i.e., only LP optimization is carried out for the fixed reference CRP). The results presented clearly indicate the successful implementation of the CRP sampling algorithm and demonstrate FORMOSA-B's enhanced optimization features, which facilitate the code's usage for broader optimization studies.}, number={1}, journal={NUCLEAR TECHNOLOGY}, author={Karve, AA and Turinsky, PJ}, year={2000}, month={Jul}, pages={48–68} } @article{moore_turinsky_karve_1999, title={FORMOSA-B: A boiling water reactor in-core fuel management optimization package}, volume={126}, ISSN={["0029-5450"]}, DOI={10.13182/NT99-A2964}, abstractNote={The computational capability to determine optimal core loading patterns (LPs) for boiling water reactors (BWRs) given a reference control rod program has been developed. The design and fidelity of the reference BWR core simulator are presented. The placement of feed and reload fuel is solved by an adaptive optimization by simulated annealing (OSA) objective algorithm. Objective functions available for BWR fuel management are maximization of end-of-cycle core reactivity, minimization of peak linear power density, maximization of critical power ratio, maximization of region average discharge burnup, and minimization of total reload cost. Constraints include thermal and fuel exposure related limits and cycle energy production, when appropriate. The results presented demonstrate the utility of OSA to improve LPs in this highly nonlinear and constrained search space.}, number={2}, journal={NUCLEAR TECHNOLOGY}, author={Moore, BR and Turinsky, PJ and Karve, AA}, year={1999}, month={May}, pages={153–169} } @article{turinsky_1999, title={Mathematical optimization of incore nuclear fuel management decisions: status and trends}, volume={44}, number={7}, journal={ATW - Internationale Zeitschrift für Kernenergie}, author={Turinsky, P. J.}, year={1999}, pages={454} } @article{moore_turinsky_1998, title={Higher order generalized perturbation theory for boiling water reactor in-core fuel management optimization}, volume={130}, ISSN={["0029-5639"]}, DOI={10.13182/NSE98-A1993}, abstractNote={Boiling water reactor (BWR) loading pattern assessment requires solving the two-group, nodal form of the neutron diffusion equation and drift-flux form of the fluid equations simultaneously because these equation sets are strongly coupled via nonlinear feedback.
To reduce the computational burden associated with the calculation of the core attributes (that is, core eigenvalue and thermal margins) of a perturbed BWR loading pattern, the analytical and numerical aspects of a higher order generalized perturbation theory (GPT) method, which correctly addresses the strong nonlinear feedbacks of two-phase flow, have been established. Inclusion of Jacobian information in the definition of the generalized flux adjoints provides for a rapidly convergent iterative method for solution of the power distribution and eigenvalue of a loading pattern perturbed from a reference state. Results show that the computational speedup of GPT, compared with conventional forward solution methods demanding consistent accuracy, is highly dependent on the number of spatial nodes utilized by the core simulator, varying from superior to inferior performance as the number of nodes increases.}, number={1}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Moore, BR and Turinsky, PJ}, year={1998}, month={Sep}, pages={98–112} } @article{ye_turinsky_1998, title={Pressurized water reactor core maneuvering utilizing optimal control theory}, volume={129}, ISSN={["0029-5639"]}, DOI={10.13182/NSE98-A1967}, abstractNote={The computational capability of automatically determining the optimal control strategies for pressurized water reactor core maneuvering, in terms of an operating strategy generator (OSG), has been ...}, number={2}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Ye, JQ and Turinsky, PJ}, year={1998}, month={Jun}, pages={97–123} } @inproceedings{yacout_turinsky_al-chalabi_rasdorf_1989, title={A nuclear power plant operator aid utilizing on-line simulation models}, ISBN={0894481487}, booktitle={Proceedings of the Topical Meeting on American Nuclear Society's Advances in Nuclear Engineering Computation and Radiation Shielding, Eldorado Hotel, Santa Fe, New Mexico, April 9-13, 1989}, publisher={Santa Fe, NM: American Nuclear Society}, author={Yacout, A. M. and Turinsky, P. J. and Al-Chalabi, R. and Rasdorf, W. J.}, year={1989} }