@article{greis_nogueira_bhattacharya_spooner_schmitz_2022, title={Stability modeling for chatter avoidance in self-aware machining: an application of physics-guided machine learning}, url={https://doi.org/10.1007/s10845-022-01999-w}, DOI={10.1007/s10845-022-01999-w}, abstractNote={Physics-guided machine learning (PGML) offers a new approach to stability modeling during machining that leverages experimental data generated during the machining process while incorporating decades of theoretical process modeling efforts. This approach addresses specific limitations of machine learning models and physics-based models individually. Data-driven machine learning models are typically black box models that do not provide deep insight into the underlying physics and do not reflect physical constraints for the modeled system, sometimes yielding solutions that violate physical laws or operational constraints. In addition, acquiring the large amounts of manufacturing data needed for machine learning modeling can be costly. On the other hand, many physical processes are not completely understood by domain experts and have a high degree of uncertainty. Physics-based models must make simplifying assumptions that can compromise prediction accuracy. This research explores whether data generated by an uncertain physics-based milling stability model that is used to train a physics-guided machine learning stability model, and then updated with measured data, domain knowledge, and theory-based knowledge provides a useful approximation to the unknown true stability model for a specific set of factory operating conditions. Four novel strategies for updating the machine learning model with experimental data are explored. These updating strategies differ in their assumptions about and implementation of the type of physics-based knowledge included in the PGML model. Using a simulation experiment, these strategies achieve useful approximations of the underlying true stability model while reducing the number of experimental measurements required for model update.}, journal={Journal of Intelligent Manufacturing}, author={Greis, Noel P. and Nogueira, Monica L. and Bhattacharya, Sambit and Spooner, Catherine and Schmitz, Tony}, year={2022}, month={Nov} } @inproceedings{greis_nogueira_rohde_2021, title={Digital Twin Framework for Machine Learning-Enabled Integrated Production and Logistics Processes}, volume={630}, ISBN={978-3-030-85873-5}, ISSN={1868-422X}, url={https://doi.org/10.1007/978-3-030-85874-2_23}, DOI={10.1007/978-3-030-85874-2_23}, abstractNote={This paper offers an integrated framework bridging production and logistics processes that employs a machine learning-enabled digital twin to ensure adaptive production scheduling and resilient supply chain operations. The digital-twin based architecture will enable manufacturers to proactively manage supply chain risk in an increasingly complex and dynamic environment. This integrated framework enables “sense-and-respond” capabilities, i.e. the ability to sense potential supplier and production risks that affect ultimate delivery to the customer, to update anticipated customer delivery dates, and recommend mitigating steps that minimize any anticipated disruption. In its core functionality this framework senses disruptions at a supplier facility that cascade down the upstream supply chain and employs the predictive capabilities of its machine learning-based engine to trigger and support adaptive changes to the manufacturer’s MES system.
Any changes to the production schedule that cannot be accommodated in a revised schedule are propagated across the downstream supply chain alerting end customers to any changes.}, booktitle={Advances in Production Management Systems: Artificial Intelligence for Sustainable and Resilient Production Systems, APMS 2021, Part I}, publisher={Springer International Publishing}, author={Greis, Noel P. and Nogueira, Monica L. and Rohde, Wolfgang}, year={2021}, pages={218–227} } @article{sizemore_nogueira_greis_davies_2020, title={Application of Machine Learning to the Prediction of Surface Roughness in Diamond Machining}, volume={48}, url={http://dx.doi.org/10.1016/j.promfg.2020.05.142}, DOI={10.1016/j.promfg.2020.05.142}, abstractNote={The manufacturing process for single-point diamond turning germanium (Ge) can be complex when it comes to freeform IR optics. The multi-variant problem requires an operator to understand that the machining input parameters and choice of tooling will dictate the efficiency of generating surfaces with the appropriate tolerances. Ge is a brittle material and exhibits surface fracture when diamond turned. However, with the introduction of a negatively raked tool, surface fracture can be suppressed such that plastic flow of the material is possible. This paper focuses on the application and evaluation of machine learning methods to better assist the prediction of surface roughness parameters in Ge and provides a comparison with a well-understood ductile material, copper (Cu). Preliminary results show that both classic machine learning (ML) methods and artificial neural network (ANN) models offer improved predictive capability when compared with analytical prediction of surface roughness for both materials. Significantly, ML and ANN models were able to perform well for both Ge, a brittle material prone to surface fracture, and the more ductile Cu. ANN models offered the best prediction tool overall with minimal error. From a computational perspective, both ML and ANN models were able to achieve good results with smaller datasets than typical for many ML applications—which is beneficial since diamond turning can be costly.}, journal={Procedia Manufacturing}, publisher={Elsevier BV}, author={Sizemore, Nicholas E. and Nogueira, Monica L. and Greis, Noel P. and Davies, Matthew A.}, year={2020}, pages={1029–1040} } @inproceedings{greis_nogueira_bhattacharya_schmitz_2020, title={Physics-Guided Machine Learning for Self-Aware Machining}, url={https://aiinmanufacturing.wixsite.com/symposium/physics-guided-machine-learning-for}, booktitle={2020 AAAI Spring Symposium Series on Artificial Intelligence in Manufacturing}, author={Greis, N.P. and Nogueira, M.L. and Bhattacharya, S. and Schmitz, T.}, year={2020}, month={Mar} } @article{greis_nogueira_schmitz_dillon_2019, title={MANUFACTURING-UBER: Intelligent Operator Assignment in a Connected Factory}, volume={52}, ISSN={2405-8963}, url={https://www.sciencedirect.com/science/article/pii/S240589631931609X}, DOI={10.1016/j.ifacol.2019.11.621}, abstractNote={This paper introduces the Manufacturing-Uber concept for dynamic assignment of operators in the Connected Factory. In traditional non-IoT machining environments it is common to assign an operator to a (small) number of machines, clustered in close proximity within a cell. In contrast to “fixed” assignment within a cell, the Manufacturing-Uber approach leverages the connectivity of the IoT environment to allow on-demand “floating” operator assignment across cells.
An intelligent assignment engine determines and assigns the operator to achieve best system performance. Results show that Manufacturing-Uber outperforms fixed assignment with respect to reduction in required operators, increased machine up-time and more parts completed.}, note={9th IFAC Conference on Manufacturing Modelling, Management and Control MIM 2019}, number={13}, journal={IFAC-PapersOnLine}, publisher={Elsevier BV}, author={Greis, Noel P. and Nogueira, Monica L. and Schmitz, Tony and Dillon, Michael}, year={2019}, pages={2734–2739} } @inproceedings{sizemore_nogueira_greis_schmitz_davies_2019, place={Gaithersburg, Maryland}, title={Machine Learning Model for Surface Finish in Ultra-Precision Diamond Turning}, url={https://nvlpubs.nist.gov/nistpubs/ams/NIST.AMS.100-24.pdf#page=139}, DOI={10.6028/nist.ams.100-24}, booktitle={Proceedings of the 10th Model-Based Enterprise Summit (MBE 2019)}, publisher={NIST Pubs}, author={Sizemore, N. E. and Nogueira, M. L. and Greis, N. P. and Schmitz, T. L. and Davies, M. A.}, editor={Hedberg, Thomas D.
and Carlisle, Mark G.}, year={2019}, month={Jul}, pages={131–139} } @inbook{greis_nogueira_2017, title={A Data-Driven Approach to Food Safety Surveillance and Response}, url={http://dx.doi.org/10.1016/b978-1-78242-251-8.00005-9}, DOI={10.1016/b978-1-78242-251-8.00005-9}, abstractNote={Increasingly, new techniques of big data and predictive analytics are being marshaled to reduce the time, scale and scope of foodborne contamination events. Contamination of food can occur at any point across increasingly complicated and intersecting global food chains. By the time a food safety problem is suspected, several weeks may have passed since first contact with the tainted product. And only after a foodborne outbreak has been confirmed by laboratory tests can the responsible products be identified and recalled. The entire process from detection to product recall can take weeks, even months. Dealing with the twin problems of early detection and rapid response are the core challenges of food safety today. In this chapter we describe a prototype informatics tool called NCFEDA (North Carolina Foodborne Events Data Integration and Analysis) that builds situational awareness of emerging contamination events by fusing traditional and nontraditional data sources, predictive analytics, visualization tools, and real-time collaboration across stakeholders to reduce the latency in detecting and responding to emerging contamination events.}, booktitle={Food Protection and Security}, publisher={Woodhead Publishing}, author={Greis, N.P. and Nogueira, M.L.}, editor={Kennedy, S.}, year={2017}, pages={75–99} } @inproceedings{nogueira_greis_2013a, place={Berlin, Heidelberg}, title={An Answer Set Programming Solution for Supply Chain Traceability}, url={https://link.springer.com/chapter/10.1007/978-3-642-54105-6_14}, DOI={10.1007/978-3-642-54105-6_14}, abstractNote={Developing measures to improve the traceability of contaminated food products across the supply chain is one of the key provisions of the 2011 FDA Food Safety Modernization Act (FSMA). In the event of a recall, FSMA requires companies to provide information about their immediate suppliers and customers—what is referred to as “one step forward” and “one step backward” traceability. In this paper we implement the logic-based approach called answer set programming that uses inference rules to trace the flows of contaminated products—both upstream to the source of the contamination and downstream to consumer locations. The approach does not require common standards or unique product identifiers for tracking individual products. This elaboration-tolerant method can accommodate changes in the supply chain such as: 1) the addition of new multiple product pathways; 2) consideration of multiple ingredients in a single product; and 3) multiple products with multiple pathways. We demonstrate this highly flexible methodology for pork and peanut products.}, booktitle={Knowledge Discovery, Knowledge Engineering and Knowledge Management}, publisher={Springer Berlin Heidelberg}, author={Nogueira, Monica L. and Greis, Noel P.}, editor={Fred, Ana and Dietz, Jan L. G.
and Liu, Kecheng and Filipe, Joaquim}, year={2013}, pages={211–227} } @inproceedings{nogueira_greis_2013b, title={Supply Chain Tracing of Multiple Products under Uncertainty and Incomplete Information - An Application of Answer Set Programming}, url={http://dx.doi.org/10.5220/0004627603990406}, DOI={10.5220/0004627603990406}, abstractNote={Food supply chains are complex networks involving many organizations and food products from the farm to the consumer. The ability to quickly trace the trajectory of a tainted food product and to identify the origin of the contamination is essential to minimizing the economic and human costs of foodborne disease. Complexities arise when multiple products traverse multiple states and/or countries and when products cross multiple intersecting supply chains. In this paper we use the example of a recent Salmonella contamination involving tomatoes and peppers imported from Mexico into the U.S. to demonstrate the use of Answer Set Programming to localize the source of contamination in a complex supply chain characterized by uncertainty and incomplete information.}, booktitle={Proceedings of the International Conference on Knowledge Engineering and Ontology Development}, publisher={SciTePress}, author={Nogueira, Monica L. and Greis, Noel P.}, year={2013}, pages={399–406} } @inproceedings{nogueira_greis_2012, title={Recall-driven Product Tracing and Supply Chain Tracking using Answer Set Programming}, url={http://dx.doi.org/10.5220/0004146201250133}, DOI={10.5220/0004146201250133}, abstractNote={Incomplete information and the inability to trace the movement of contaminated products across the food chain has hindered our ability to locate and remove contaminated products once a food recall has been announced. The FDA Food Safety Modernization Act (FSMA) that was signed into law in 2011, however, supports traceability by both expanding the registration requirements for companies that are involved in food production and, in the event of a food recall, requiring companies to provide information about their immediate suppliers and customers—what is referred to as “one step forward” and “one step backward” traceability. In this paper we implement the logic-based approach called answer set programming that uses inference rules to determine the set of all companies that may be linked to a contaminated product. Unlike other approaches, we do not depend on the availability of common standards or unique identifiers. Rather, the proposed approach utilizes information about the company’s primary suppliers and customers along with their products—consistent with the “one step forward” and “one step backward” required under FSMA as noted above. We demonstrate this approach using the example of a food recall involving pork products.}, booktitle={Proceedings of the International Conference on Knowledge Engineering and Ontology Development}, publisher={SciTePress - Science and Technology Publications}, author={Nogueira, Monica L. and Greis, Noel P.}, year={2012}, pages={125–133} } @inproceedings{nogueira_greis_2011a, place={Berlin, Heidelberg}, title={Application of Answer Set Programming for Public Health Data Integration and Analysis}, url={https://link.springer.com/chapter/10.1007/978-3-642-23300-5_10}, DOI={10.1007/978-3-642-23300-5_10}, abstractNote={Public health surveillance systems routinely process massive volumes of data to identify health adverse events affecting the general population.
Surveillance and response to foodborne disease suffers from a number of systemic and other delays that hinder early detection and confirmation of emerging contamination situations. In this paper we develop an answer set programming (ASP) application to assist public health officials in detecting an emerging foodborne disease outbreak by integrating and analyzing in near real-time temporally, spatially and symptomatically diverse data. These data can be extracted from a large number of distinct information systems such as surveillance and laboratory reporting systems from health care providers, real-time complaint hotlines from consumers, and inspection reporting systems from regulatory agencies. We encode geographic ontologies in ASP to infer spatial relationships that may not be evident using traditional statistical tools. These technologies and ontologies have been implemented in a new informatics tool, the North Carolina Foodborne Events Data Integration and Analysis Tool (NCFEDA). The application was built to demonstrate the potential of situational awareness—created through real-time data fusion, analytics, visualization, and real-time communication—to reduce latency of response to foodborne disease outbreaks by North Carolina public health personnel.}, booktitle={Availability, Reliability and Security for Business, Enterprise and Health Information Systems}, publisher={Springer Berlin Heidelberg}, author={Nogueira, Monica L. and Greis, Noel P.}, editor={Tjoa, A. Min and Quirchmayr, Gerald and You, Ilsun and Xu, Lida}, year={2011}, pages={118–134} } @inproceedings{nogueira_greis_2011b, place={Berlin, Heidelberg}, title={Rule-Based Complex Event Processing for Food Safety and Public Health}, url={https://link.springer.com/chapter/10.1007/978-3-642-22546-8_31}, DOI={10.1007/978-3-642-22546-8_31}, abstractNote={The challenge for public health officials is to detect an emerging foodborne disease outbreak from a large set of simple and isolated, domain-specific events. These events can be extracted from a large number of distinct information systems such as surveillance and laboratory reporting systems from health care providers, real-time complaint hotlines from consumers, and inspection reporting systems from regulatory agencies. In this paper we formalize a foodborne disease outbreak as a complex event and apply an event-driven rule-based engine to the problem of detecting emerging events. We define an evidence set as a set of simple events that are linked symptomatically, spatially and temporally. A weighted metric is used to compute the strength of the evidence set as a basis for response by public health officials.}, booktitle={Rule-Based Reasoning, Programming, and Applications}, publisher={Springer Berlin Heidelberg}, author={Nogueira, Monica L. and Greis, Noel P.}, editor={Bassiliades, Nick and Governatori, Guido and Paschke, Adrian}, year={2011}, pages={376–383} } @book{greis_nogueira_2010, title={Food Safety—Emerging Public-Private Approaches: A Perspective for Local, State, and Federal Government Leaders}, url={http://www.businessofgovernment.org/sites/default/files/Food%20Safety.pdf}, publisher={IBM Center for The Business of Government}, author={Greis, Noel P.
and Nogueira, Monica L.}, year={2010}, pages={6–43} } @article{balduccini_gelfond_nogueira_2006, title={Answer set based design of knowledge systems}, volume={47}, url={http://dx.doi.org/10.1007/s10472-006-9026-1}, DOI={10.1007/s10472-006-9026-1}, abstractNote={The aim of this paper is to demonstrate that A-Prolog is a powerful language for the construction of reasoning systems. In fact, A-Prolog allows to specify the initial situation, the domain model, the control knowledge, and the reasoning modules. Moreover, it is efficient enough to be used for practical tasks and can be nicely integrated with programming languages such as Java. An extension of A-Prolog (CR-Prolog) allows to further improve the quality of reasoning by specifying requirements that the solutions should satisfy if at all possible. The features of A-Prolog and CR-Prolog are demonstrated by describing in detail the design of USA-Advisor, an A-Prolog based decision support system for the Space Shuttle flight controllers.}, number={1}, journal={Annals of Mathematics and Artificial Intelligence}, author={Balduccini, Marcello and Gelfond, Michael and Nogueira, Monica}, year={2006}, month={Nov}, pages={183–219} } @inbook{nogueira_balduccini_gelfond_watson_barry_2001, title={An A-Prolog Decision Support System for the Space Shuttle}, url={http://dx.doi.org/10.1007/3-540-45241-9_12}, DOI={10.1007/3-540-45241-9_12}, abstractNote={The goal of this paper is to test if a programming methodology based on the declarative language A-Prolog and the systems for computing answer sets of such programs, can be successfully applied to the development of medium size knowledge-intensive applications. We report on a successful design and development of such a system controlling some of the functions of the Space Shuttle.}, booktitle={Practical Aspects of Declarative Languages}, publisher={Springer Berlin Heidelberg}, author={Nogueira, Monica and Balduccini, Marcello and Gelfond, Michael and Watson, Richard and Barry, Matthew}, year={2001}, month={Mar}, pages={169–183} } @inbook{balduccini_gelfond_watson_nogueira_2001, title={The USA-Advisor: A Case Study in Answer Set Planning}, url={http://dx.doi.org/10.1007/3-540-45402-0_39}, DOI={10.1007/3-540-45402-0_39}, abstractNote={In this work we show how control knowledge was used to improve planning in the USA-Advisor decision support system for the Space Shuttle. The USA-Advisor is a medium size, real-world planning application for use by NASA flight controllers and contains over a dozen domain dependent and domain independent heuristics. Experimental results are presented here, illustrating how this control knowledge helps improve both the quality of plans as well as overall system performance.}, booktitle={Logic Programming and Nonmonotonic Reasoning}, publisher={Springer Berlin Heidelberg}, author={Balduccini, Marcello and Gelfond, Michael and Watson, R. and Nogueira, M.}, year={2001}, month={Sep}, pages={439–442} } @article{nogueira_nandigam_1998, title={Why Intervals? Because If We Allow Other Sets, Tractable Problems Become Intractable}, volume={4}, url={http://dx.doi.org/10.1023/a:1024475901686}, DOI={10.1023/a:1024475901686}, number={4}, journal={Reliable Computing}, publisher={Springer Science and Business Media LLC}, author={Nogueira, Monica and Nandigam, Amarendra}, year={1998}, month={Nov}, pages={389–394} }