@article{lee_nie_han_2024, title={Automatic and Real-Time Joint Tracking and Three-Dimensional Scanning for a Construction Welding Robot}, volume={150}, ISSN={1943-7862}, url={https://doi.org/10.1061/JCEMD4.COENG-14135}, DOI={10.1061/JCEMD4.COENG-14135}, abstractNote={Although welding is one of the essential steel fabrication processes, the American Welding Society expects that the labor shortage in the United States will reach a deficit of 360,000 welders by 2027. Developing an automatic robotic welding system could potentially alleviate the labor shortage and provide better welding quality. As a first step, this paper designs a system pipeline that can automatically detect different welding joints and plan and track the joints’ trajectory with the initial point alignment in real time. Few studies have achieved automatic initial point alignment in real time because the laser stripe’s deformation is not obvious at a narrow weld. In this study, the target joint’s endpoints were detected once the joint was detected on live video. Then, the joint trajectory was planned, and the robotic arm automatically aligned with the initial point and tracked the planned trajectory while scanning. The results demonstrate the accuracy and effectiveness of the proposed method.}, number={3}, journal={JOURNAL OF CONSTRUCTION ENGINEERING AND MANAGEMENT}, author={Lee, Doyun and Nie, Guang-Yu and Han, Kevin}, year={2024}, month={Mar} } @article{wang_lee_nimawat_han_gupta_2024, title={Integrated 4D Design Change Management Model for Construction Projects}, volume={150}, ISSN={1943-7862}, url={https://doi.org/10.1061/JCEMD4.COENG-14246}, DOI={10.1061/JCEMD4.COENG-14246}, number={5}, journal={JOURNAL OF CONSTRUCTION ENGINEERING AND MANAGEMENT}, author={Wang, Liannian and Lee, Joomyung and Nimawat, Jatin and Han, Kevin and Gupta, Abhinav}, year={2024}, month={May} } @article{son_han_2023, title={Automated Model-Based 3D Scan Planning for Prefabricated Building Components}, volume={37}, ISSN={1943-5487}, url={https://doi.org/10.1061/(ASCE)CP.1943-5487.0001055}, DOI={10.1061/(ASCE)CP.1943-5487.0001055}, abstractNote={Modular construction can improve construction performance (i.e., cost, schedule, and safety) by prefabricating modules at an off-site facility and installing them at a construction site. However, when defects of modules are not easily repairable on the construction site, they cause additional cost overruns and delays due to long lead times of refabrication and reshipment. Thus, quality assessment of modular components at the fabrication facility before shipment is very important. The current inspection practices rely on manual measurement, which can be imprecise, labor-intensive, and time-consuming. To address this issue, some research efforts have been made on module inspection techniques (e.g., estimates of geometric properties and surface quality) using laser-scanned data. The accuracy of these techniques relies on the quality (i.e., coverage and resolution) of the scan data. However, ensuring the consistent quality of data is a major challenge as there is little to no research on optimal scan planning for modular components. Therefore, this paper proposes a model-based 3D scan planning method for modular components that ensures user-specified scan quality.
Given a 3D computer-aided design (CAD) or building information modeling (BIM) model, scanner properties, and the user’s quality requirements, this method automatically computes the input parameters for the laser scanner (i.e., angular step and field of view) and optimal scan positions. It also predicts the scan quality and shows the areas that will not meet the user requirement due to geometric constraints (i.e., self-occluded surfaces). This study was validated through two case studies using two modular-sized structures in a fabrication facility. The results showed that the scan planner is able to accurately predict the scanning quality and ensure that the output scan will meet the user quality requirement.}, number={2}, journal={JOURNAL OF COMPUTING IN CIVIL ENGINEERING}, author={Son, Rachel Hyo and Han, Kevin}, year={2023}, month={Mar} } @article{lee_nie_han_2023, title={Vision-based inspection of prefabricated components using camera poses: Addressing inherent limitations of image-based 3D reconstruction}, volume={64}, ISSN={2352-7102}, url={https://doi.org/10.1016/j.jobe.2022.105710}, DOI={10.1016/j.jobe.2022.105710}, abstractNote={Modular construction can lead to additional cost overruns and delays when a defect is found on the construction site and is not easily repairable. Researchers have developed various methods that use image-based 3D reconstruction for quality assessment, but they have inherent limitations, such as inconsistency and dealing with surfaces with reflectivity and limited visual features. Therefore, this paper presents a vision-based quality assessment method using cameras for prefabricated components by addressing these limitations. Specifically, this paper proposes a novel quality inspection method with sub-millimeter accuracy using cameras focused on leveraging camera poses (as opposed to 3D point clouds that are often not consistent in quality) from the image-based 3D reconstruction. The 3D point estimation by computing triangulation was used for achieving accurate measurement. The proposed method is validated using six different variances and two case studies – an aluminum pipe with a reflective surface and a fabricated concrete column. The results demonstrate the accuracy and effectiveness of the proposed method.}, journal={JOURNAL OF BUILDING ENGINEERING}, author={Lee, Doyun and Nie, Guang-Yu and Han, Kevin}, year={2023}, month={Apr} } @article{liu_han_rasdorf_2022, title={Assessment and Prediction of Impact of Flight Configuration Factors on UAS-Based Photogrammetric Survey Accuracy}, volume={14}, ISSN={2072-4292}, url={https://doi.org/10.3390/rs14164119}, DOI={10.3390/rs14164119}, abstractNote={Recent advances in computer vision and camera-equipped unmanned aerial systems (UAS) for 3D modeling enable UAS-based photogrammetry surveys with high spatial-temporal resolutions. To generate consistent and high-quality 3D models using UASs, understanding how influence factors (e.g., flight height and image overlap) affect the 3D modeling accuracy and their levels of significance is important. However, there is little to no quantitative analysis that studies how these influence factors interact with and affect the accuracy as their values change. Moreover, there is little to no research that assesses more than three influence factors. Therefore, to fill this gap, this paper aims to evaluate and predict the accuracy generated by different flight combinations.
This paper presents a study that (1) assessed the significance levels of five influence factors (flight height, average image quality, image overlap, ground control point (GCP) quantity, and camera focal lengths), (2) investigated how they interact and impact 3D modeling accuracy using the multiple regression (MR) method, and (3) used the developed MR models for predicting horizontal and vertical accuracies. To build the MR model, 160 datasets were created from 40 flight missions collected at a site with a facility and open terrain. For validating the prediction model, five testing datasets were collected and used at a larger site with a complex building and open terrain. The results show that the findings of this study can help surveyors better design flight configurations that result in the highest accuracies, given different site conditions and constraints. The results also provide a reasonable prediction of accuracy given different flight configurations.}, number={16}, journal={REMOTE SENSING}, author={Liu, Yajie and Han, Kevin and Rasdorf, William}, year={2022}, month={Aug} } @article{noghabaei_liu_han_2022, title={Automated compatibility checking of prefabricated components using 3D as-built models and BIM}, volume={143}, ISSN={1872-7891}, DOI={10.1016/j.autcon.2022.104566}, abstractNote={There have been recent efforts to use reality capture technologies to perform remote quality control in construction. However, there is a lack of research efforts in detecting construction incompatibilities in modular construction using reality capture technologies. The construction incompatibilities in modular construction often cause reworks and delays in the project schedule. To address this issue, this paper presents a general compatibility analysis method that proposes scanning the modules at the manufacturing plant and the construction site and checking module-to-module compatibility remotely, prior to shipment and installation. This study provides three sample module-to-module compatibility scenarios to validate the proposed compatibility analysis. The case study results show that the compatibility analysis method was able to identify the compatibility issues with high accuracy. Lastly, the compatibility analysis method was validated in terms of accuracy and time performance in six scenarios that were defined on the modules.}, journal={AUTOMATION IN CONSTRUCTION}, author={Noghabaei, Mojtaba and Liu, Yajie and Han, Kevin}, year={2022}, month={Nov} } @article{nie_bodda_sandhu_han_gupta_2022, title={Computer-Vision-Based Vibration Tracking Using a Digital Camera: A Sparse-Optical-Flow-Based Target Tracking Method}, volume={22}, ISSN={1424-8220}, url={https://doi.org/10.3390/s22186869}, DOI={10.3390/s22186869}, abstractNote={Computer-vision-based target tracking is a technology applied to a wide range of research areas, including structural vibration monitoring. However, current target tracking methods suffer from noise in digital image processing. In this paper, a new target tracking method based on the sparse optical flow technique is introduced for improving the accuracy in tracking the target, especially when the target has a large displacement. The proposed method utilizes the Oriented FAST and Rotated BRIEF (ORB) technique, which is based on FAST (Features from Accelerated Segment Test), a feature detector, and BRIEF (Binary Robust Independent Elementary Features), a binary descriptor.
ORB maintains a variety of keypoints and combines the multi-level strategy with an optical flow algorithm to search for keypoints with large motion vectors for tracking. Then, an outlier removal method based on Hamming distance and interquartile range (IQR) score is introduced to minimize the error. The proposed target tracking method is verified through a lab experiment—a three-story shear building structure subjected to various harmonic excitations. It is compared with existing sparse-optical-flow-based target tracking methods and target tracking methods based on three other types of techniques, i.e., feature matching, dense optical flow, and template matching. The results show that the performance of target tracking is greatly improved through the use of a multi-level strategy and the proposed outlier removal method. The proposed sparse-optical-flow-based target tracking method achieves the best accuracy compared to other existing target tracking methods.}, number={18}, journal={SENSORS}, author={Nie, Guang-Yu and Bodda, Saran Srikanth and Sandhu, Harleen Kaur and Han, Kevin and Gupta, Abhinav}, year={2022}, month={Sep} } @inproceedings{design_of_computer_vision-based_robotic_welding_2022, title={Design of Computer Vision-Based Robotic Welding for Nuclear Power Plants Construction Project}, booktitle={26th International Conference on Structural Mechanics in Reactor Technology (SMiRT26)}, year={2022}, month={Jul} } @article{development_of_automated_welding_system_2022, title={Development of Automated Welding System for Construction: Focused on Robotic Arm Operation for Varying Weave Patterns}, journal={International Journal of High-Rise Buildings (IJHRB)}, year={2022}, month={Jun} } @article{crowder_lee_gupta_han_bodda_ritter_2022, title={Digital Engineering for Integrated Modeling and Simulation for Building-Piping Systems Through Interoperability Solutions}, ISSN={1943-748X}, DOI={10.1080/00295639.2022.2055705}, abstractNote={Designing piping systems for nuclear power plants involves engineers from multiple disciplines (i.e., thermal hydraulics, mechanical engineering, and structural/seismic) and close coordination with the contractors who build the plant. Any design changes during construction need to be carefully communicated and managed with all stakeholders in order to assess risks associated with the design changes. To allow the quick assessment of building and piping design changes through a streamlined building-piping coupled analysis, this paper presents a novel interoperability solution that converts bidirectionally between building information models (BIMs) and pipe stress models. Any design changes during construction that are shown in an as-built BIM are automatically converted into a pipe stress model. Any further design changes due to building-piping interaction analyses are converted back to the BIM for the contractor and other designers to access the latest model.
Two case studies are presented to illustrate the bidirectional conversion that allows an integrated coupled analysis of the building-piping system to account for their interactions.}, journal={NUCLEAR SCIENCE AND ENGINEERING}, author={Crowder, Nicholas and Lee, Joomyung and Gupta, Abhinav and Han, Kevin and Bodda, Saran and Ritter, Christopher}, year={2022}, month={May} } @inproceedings{lee_nie_han_2022, title={Real-Time and Automatic Detection of Welding Joints Using Deep Learning}, url={http://dx.doi.org/10.1061/9780784483961.063}, DOI={10.1061/9780784483961.063}, abstractNote={Welding technique plays a pivotal role in many industries, such as construction, automobile manufacturing, and nuclear power plants (NPPs). However, the shortage of skilled welding workers persists due to the severe working environment and conditions. Therefore, to conserve human labor and improve manufacturing efficiency, an automated welding process is necessary. Also, welding efficiency and quality are vital indicators requiring attention for automatic welding. Notably, in NPPs, minor welding defects can cause serious safety issues. Therefore, our research’s ultimate goal is to develop an automatic welding system to improve welding quality and manufacturing efficiency using visual sensors [e.g., a camera and light detection and ranging (LiDAR)], a robotic arm, and a welding machine. As the first step, this paper presents a method for automatically detecting different welding joints in real-time. Then, the different target joints are trained using a deep learning algorithm and detected by the camera. The results demonstrate the accuracy and effectiveness of the proposed method.}, booktitle={Construction Research Congress 2022}, publisher={American Society of Civil Engineers}, author={Lee, Doyun and Nie, Guang-Yu and Han, Kevin}, year={2022}, month={Mar} } @article{asadi_haritsa_han_ore_2021, title={Automated Object Manipulation Using Vision-Based Mobile Robotic System for Construction Applications}, volume={35}, ISSN={1943-5487}, url={http://dx.doi.org/10.1061/(asce)cp.1943-5487.0000946}, DOI={10.1061/(ASCE)CP.1943-5487.0000946}, abstractNote={In the last decade, automated object manipulation for construction applications has received much attention. However, the majority of existing systems are situated in a fixed location. They...}, number={1}, journal={JOURNAL OF COMPUTING IN CIVIL ENGINEERING}, publisher={American Society of Civil Engineers (ASCE)}, author={Asadi, Khashayar and Haritsa, Varun R. and Han, Kevin and Ore, John-Paul}, year={2021}, month={Jan} } @article{noghabaei_han_albert_2021, title={Feasibility Study to Identify Brain Activity and Eye-Tracking Features for Assessing Hazard Recognition Using Consumer-Grade Wearables in an Immersive Virtual Environment}, volume={147}, ISSN={1943-7862}, url={https://doi.org/10.1061/(ASCE)CO.1943-7862.0002130}, DOI={10.1061/(ASCE)CO.1943-7862.0002130}, abstractNote={Hazard recognition is vital to achieving effective safety management. Unmanaged or unrecognized hazards on construction sites can lead to unexpected accidents.
Recent research has identifie...}, number={9}, journal={JOURNAL OF CONSTRUCTION ENGINEERING AND MANAGEMENT}, author={Noghabaei, Mojtaba and Han, Kevin and Albert, Alex}, year={2021}, month={Sep} } @article{noghabaei_han_2021, title={Object manipulation in immersive virtual environments: hand motion tracking technology and snap-to-fit function}, volume={124}, ISSN={1872-7891}, url={http://dx.doi.org/10.1016/j.autcon.2021.103594}, DOI={10.1016/j.autcon.2021.103594}, abstractNote={There have been recent efforts to use virtual reality and virtual manipulation for construction worker training and inspection. However, there is a lack of research efforts comparing and evaluating virtual manipulation hardware for construction tasks. Moreover, the current practice of virtual manipulation has limited functionality to guide users with the placement of objects in virtual environments. To address these issues, this paper presents 1) a detailed case study that compares three types of manipulation hardware (image-based, infrared-based, and magnetic-based) for construction applications and 2) a snap-to-fit method that improves virtual manipulation (VM) by addressing limitations of advanced virtual reality interaction metaphors. The latter enhances the placement process for manipulation by comparing two models (e.g., scan and BIM models) for proper placement in assembly scenarios. The case study results show that magnetic-based systems outperform others in construction scenarios. Lastly, the snap-to-fit function was validated in terms of accuracy and time performance.}, journal={AUTOMATION IN CONSTRUCTION}, publisher={Elsevier BV}, author={Noghabaei, Mojtaba and Han, Kevin}, year={2021}, month={Apr} } @article{jeelani_asadi_ramshankar_han_albert_2021, title={Real-time vision-based worker localization & hazard detection for construction}, volume={121}, ISSN={1872-7891}, DOI={10.1016/j.autcon.2020.103448}, abstractNote={Despite training, construction workers often fail to recognize a significant proportion of hazards in construction environments. Therefore, there is a need for developing technology that assists workers and safety managers in identifying hazards in complex and dynamic construction environments. This study develops a framework for an automated system that detects hazardous conditions and objects in real-time to assist workers and managers. The framework consists of three independent pipelines for localization of workers, semantic segmentation of the visual scene around workers, and detection of static and dynamic hazards. The framework can be used to automate and augment the hazard detection ability of workers and safety managers in construction workplaces. In addition, the framework offers several computing contributions including an improved real-time worker localization method and an efficient architecture for integrating pipelines for entity localization and object detection. A system was developed based on the proposed framework as a proof of concept and was tested in indoor and outdoor construction environments.
It achieved over 93% accuracy.}, journal={AUTOMATION IN CONSTRUCTION}, author={Jeelani, Idris and Asadi, Khashayar and Ramshankar, Hariharan and Han, Kevin and Albert, Alex}, year={2021}, month={Jan} } @article{asadi_suresh_ender_gotad_maniyar_anand_noghabaei_han_lobaton_wu_2020, title={An Integrated UGV-UAV System for Construction Site Data Collection}, volume={112}, url={http://dx.doi.org/10.1016/j.autcon.2019.103068}, DOI={10.1016/j.autcon.2019.103068}, abstractNote={There have been recent efforts to increase the degree of automation and frequency of data collection for construction applications using Unmanned Aerial/Ground Vehicles (UAV/UGV). However, the current practice of data collection is manual, costly, time-consuming, and error-prone. Developing vision-based mobile robotic systems that are aware of their surroundings and capable of autonomous navigation is becoming essential to many construction applications, namely surveying, monitoring, and inspection. Nevertheless, the systems above suffer from a series of performance issues. One major problem is inefficient navigation in indoor and cluttered scenes with many obstacles and barriers, where some places are inaccessible by a UGV. To provide a solution to this problem, this paper designs a UAV-UGV team that integrates two custom-built mobile robots. The UGV autonomously navigates through space, leveraging its sensors. The UAV acts as an external eye for the UGV, observing the scene from a vantage point that is inaccessible to the UGV. The relative pose of the UAV is estimated continuously, which allows it to maintain a fixed location relative to the UGV. The key aspects for the development of this system that is capable of autonomous navigation are the localization of both UAV and UGV, mapping of the surrounding environment, and efficient path planning using multiple sensors. The proposed system is tested in an indoor and cluttered construction-like environment. The performance of the system demonstrates the feasibility of developing and deploying a robust and automated data collection system for construction applications in the near future.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Asadi, Khashayar and Suresh, Akshay Kalkunte and Ender, Alper and Gotad, Siddhesh and Maniyar, Suraj and Anand, Smit and Noghabaei, Mojtaba and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2020}, month={Apr}, pages={103068} } @article{albert_jeelani_han_2020, title={Developing hazard recognition skill among the next-generation of construction professionals}, volume={38}, ISSN={1466-433X}, DOI={10.1080/01446193.2020.1797133}, abstractNote={Globally, a large number of safety hazards remain unrecognised in construction workplaces. These unrecognised safety hazards are also likely to remain unmanaged and can potentially cascade into unexpected safety incidents. Therefore, the development of hazard recognition skill – particularly among the next-generation of construction professionals – is vital for injury prevention and safe work-operations. To foster the development of such skill, the current investigation examined the effect of administering a hazard recognition intervention to students seeking to enter the construction workforce. First, prior to introducing the intervention, the pre-intervention hazard recognition skill of the participating students was measured. Next, the intervention that included a number of programme elements was introduced.
The programme elements included (1) visual cues to promote systematic hazard recognition, (2) personalised hazard recognition performance feedback, (3) visual demonstration of common hazard recognition search weaknesses, and (4) diagnosis of hazard search weaknesses using metacognitive prompts. Finally, the post-intervention skill demonstrated by the student participants was measured and compared against their pre-intervention performance. The results suggest that the intervention was effective in improving the hazard recognition skill demonstrated by the next-generation of construction professionals. The observed effect was particularly prominent among those who demonstrated relatively lower levels of skill in the pre-intervention phase. The research also unveiled particular impediments to hazard recognition that the participants experienced.}, number={11}, journal={CONSTRUCTION MANAGEMENT AND ECONOMICS}, author={Albert, Alex and Jeelani, Idris and Han, Kevin}, year={2020}, month={Nov}, pages={1024–1039} } @article{jeelani_han_albert_2020, title={Development of virtual reality and stereo-panoramic environments for construction safety training}, volume={ahead-of-print}, url={https://doi.org/10.1108/ECAM-07-2019-0391}, DOI={10.1108/ECAM-07-2019-0391}, abstractNote={Purpose: Workers and construction professionals are generally not proficient in recognizing and managing safety hazards. Although valuable, traditional training experiences have not sufficiently addressed the issue of poor hazard recognition and management in construction. Since hazard recognition and management are cognitive skills that depend on attention, visual examination and decision-making, performance assessment and feedback in an environment that is realistic and representative of actual working conditions are important. The purpose of this paper is to propose a personalized safety training protocol that is delivered using robust, realistic and immersive environments.}, number={ahead-of-print}, journal={Engineering, Construction and Architectural Management}, publisher={Emerald}, author={Jeelani, Idris and Han, Kevin and Albert, Alex}, year={2020}, month={Apr} } @article{berglund_monroe_ahmed_noghabaei_do_pesantez_khaksar_fasaee_bardaka_han_proestos_etal_2020, title={Smart Infrastructure: A Vision for the Role of the Civil Engineering Profession in Smart Cities}, volume={26}, ISSN={1076-0342 1943-555X}, url={http://dx.doi.org/10.1061/(ASCE)IS.1943-555X.0000549}, DOI={10.1061/(ASCE)IS.1943-555X.0000549}, abstractNote={Smart city programs provide a range of technologies that can be applied to solve infrastructure problems associated with ageing infrastructure and increasing demands. The potential for infr...}, number={2}, journal={Journal of Infrastructure Systems}, publisher={American Society of Civil Engineers (ASCE)}, author={Berglund, Emily Zechman and Monroe, Jacob G. and Ahmed, Ishtiak and Noghabaei, Mojtaba and Do, Jinung and Pesantez, Jorge E. and Khaksar Fasaee, Mohammad Ali and Bardaka, Eleni and Han, Kevin and Proestos, Giorgio T.
and others}, year={2020}, month={Jun}, pages={03120001} } @article{noghabaei_heydarian_balali_han_2020, title={Trend Analysis on Adoption of Virtual and Augmented Reality in the Architecture, Engineering, and Construction Industry}, volume={5}, url={https://doi.org/10.3390/data5010026}, DOI={10.3390/data5010026}, abstractNote={With advances in Building Information Modeling (BIM), Virtual Reality (VR) and Augmented Reality (AR) technologies have many potential applications in the Architecture, Engineering, and Construction (AEC) industry. However, the AEC industry, relative to other industries, has been slow in adopting AR/VR technologies, partly due to a lack of feasibility studies examining the actual cost of implementation versus an increase in profit. The main objectives of this paper are to understand the industry trends in adopting AR/VR technologies and to identify gaps within the industry. The identified gaps can lead to opportunities for developing new tools and finding new use cases. To achieve these goals, two rounds of a survey at two different time periods (a year apart) were conducted. Responses from 158 industry experts and researchers were analyzed to assess the current state, growth, and saving opportunities for AR/VR technologies for the AEC industry. The findings demonstrate that older generations are significantly more confident about the future of AR/VR technologies and they see more benefits in AR/VR utilization. Furthermore, the research results indicate that residential and commercial sectors have adopted these tools the most, compared to other sectors, and that institutional and transportation sectors had the highest growth from 2017 to 2018. Industry experts anticipated solid growth in the use of AR/VR technologies in 5 to 10 years, with the highest expectations towards healthcare. Ultimately, the findings show a significant increase in AR/VR utilization in the AEC industry from 2017 to 2018.}, number={1}, journal={Data}, publisher={MDPI AG}, author={Noghabaei, Mojtaba and Heydarian, Arsalan and Balali, Vahid and Han, Kevin}, year={2020}, month={Mar}, pages={26} } @article{yoon_han_ham_2019, title={A Framework of Human-Motion Based Structural Dynamics Simulation Using Mobile Devices}, url={https://doi.org/10.3390/s19153258}, DOI={10.3390/s19153258}, abstractNote={Due to the nature of real-world problems in civil engineering, students have had limited hands-on experiences in structural dynamics classes. To address this challenge, this paper aims to bring real-world problems in structural dynamics into classrooms through a new interactive learning tool that promotes physical interaction among students and enhances their engagement in classrooms. The main contribution is to develop and test a new interactive computing system that simulates structural dynamics by integrating a dynamic model of a structure with multimodal sensory data obtained from mobile devices. This framework involves integrating multiple physical components, estimating students’ motions, applying these motions as inputs to a structural model for structural dynamics, and providing students with an interactive response to observe how a given structure behaves. The mobile devices will capture dynamic movements of the students in real-time and take them as inputs to the dynamic model of the structure, which will virtually simulate structural dynamics affected by moving players. Each component of synchronizing the dynamic analysis with motion sensing is tested through case studies.
The experimental results show the potential to make complex theoretical knowledge in structural dynamics more approachable, leading to more in-depth learning and memorable educational experiences in classrooms.}, journal={Sensors}, author={Yoon, Hyungchul and Han, Kevin and Ham, Youngjib}, year={2019}, month={Jul} } @article{jeelani_albert_han_azevedo_2019, title={Are Visual Search Patterns Predictive of Hazard Recognition Performance? Empirical Investigation Using Eye-Tracking Technology}, volume={145}, ISSN={0733-9364 1943-7862}, url={http://dx.doi.org/10.1061/(ASCE)CO.1943-7862.0001589}, DOI={10.1061/(ASCE)CO.1943-7862.0001589}, abstractNote={Poor hazard recognition is a widespread issue in the construction industry. When construction hazards remain unrecognized, workers are more likely to indulge in unsafe behavior, experience ...}, number={1}, journal={Journal of Construction Engineering and Management}, publisher={American Society of Civil Engineers (ASCE)}, author={Jeelani, Idris and Albert, Alex and Han, Kevin and Azevedo, Roger}, year={2019}, month={Jan}, pages={04018115} } @inproceedings{delgado_norville_han_lobaton_wu_2019, place={Raleigh, NC}, title={Assessing the Effectiveness of Individual Learning in a Realistic Engineering Design Class}, url={http://www.asee-se.org/proceedings/ASEE2019/papers2019/138.pdf}, booktitle={2019 American Society for Engineering Education Southeastern Section Conference}, publisher={American Society for Engineering Education Southeastern Section Conference}, author={Delgado, C. and Norville, K. and Han, K. and Lobaton, E. and Wu, T.}, year={2019}, month={Mar}, pages={10–12} } @article{asadi_chen_han_wu_lobaton_2019, title={LNSNet: Lightweight Navigable Space Segmentation for Autonomous Robots on Construction Sites}, volume={4}, ISSN={2306-5729}, url={http://dx.doi.org/10.3390/data4010040}, DOI={10.3390/data4010040}, abstractNote={An autonomous robot that can monitor a construction site should be able to contextually detect its surrounding environment by recognizing objects and making decisions based on its observations. Pixel-wise semantic segmentation in real-time is vital to building an autonomous and mobile robot. However, the learning models’ size and high memory usage associated with real-time segmentation are the main challenges for mobile robotics systems that have limited computing resources. To overcome these challenges, this paper presents an efficient semantic segmentation method named LNSNet (lightweight navigable space segmentation network) that can run on embedded platforms to determine navigable space in real-time. The core of the model architecture is a new block based on separable convolution, which compresses the parameters of the present residual block while maintaining accuracy and performance. LNSNet is faster, has fewer parameters and a smaller model size, and provides similar accuracy compared to existing models. A new pixel-level annotated dataset for real-time and mobile navigable space segmentation in construction environments has been constructed for the proposed method.
The results demonstrate the effectiveness and efficiency that are necessary for the future development of autonomous robotic systems.}, number={1}, journal={Data}, publisher={MDPI AG}, author={Asadi, Khashayar and Chen, Pengyu and Han, Kevin and Wu, Tianfu and Lobaton, Edgar}, year={2019}, month={Mar}, pages={40} } @inproceedings{real-time_hazard_proximity_detection_2019, title={Real-Time Hazard Proximity Detection—Localization of Workers Using Visual Data}, url={http://dx.doi.org/10.1061/9780784482438.036}, DOI={10.1061/9780784482438.036}, abstractNote={Research indicates that workers often fail to recognize a significant proportion of safety hazards. To reduce injury likelihood, efforts have traditionally focused on developing and delivering training interventions. Despite such efforts, desirable levels of hazard recognition are rarely achieved. Therefore, augmenting human abilities with a technology-driven solution to improve overall hazard recognition can yield substantial benefits. Accordingly, the objective of this study is to develop a method for localizing workers with respect to pre-identified hazards in real-time. To achieve this objective, a 3D point cloud of a construction site is created as a global map and hazard locations are marked on this map. Workers are provided with a head-mounted camera that continuously records their first-person view (FPV) videos. The image frames from these videos are localized onto the global map using bag of words (BoW) localization. Apart from estimating the proximity to safety hazards, the system can also capture large-scale data that captures unsafe behaviors (e.g., entry to restricted areas) and near-miss incidents for training purposes.}, booktitle={Computing in Civil Engineering 2019}, year={2019}, month={Jun} } @article{asadi_ramshankar_noghabaei_han_2019, title={Real-Time Image Localization and Registration with BIM Using Perspective Alignment for Indoor Monitoring of Construction}, volume={33}, url={https://doi.org/10.1061/(ASCE)CP.1943-5487.0000847}, DOI={10.1061/(ASCE)CP.1943-5487.0000847}, abstractNote={Construction performance monitoring has been identified as a key component that leads to the success of a construction project. Real-time and frequent monitoring will enable early detection...}, number={5}, journal={Journal of Computing in Civil Engineering}, publisher={American Society of Civil Engineers (ASCE)}, author={Asadi, Khashayar and Ramshankar, Hariharan and Noghabaei, Mojtaba and Han, Kevin}, year={2019}, month={Sep}, pages={04019031} } @inproceedings{asadi_chen_han_wu_lobaton_2019a, title={Real-Time Scene Segmentation Using a Light Deep Neural Network Architecture for Autonomous Robot Navigation on Construction Sites}, url={http://dx.doi.org/10.1061/9780784482438.041}, DOI={10.1061/9780784482438.041}, abstractNote={Camera-equipped unmanned vehicles (UVs) have received a lot of attention in data collection for construction monitoring applications. To develop an autonomous platform, the UV should be able to process multiple modules (e.g., context-awareness, control, localization, and mapping) on an embedded platform. Pixel-wise semantic segmentation provides a UV with the ability to be contextually aware of its surrounding environment. However, in the case of mobile robotic systems with limited computing resources, the large size of the segmentation model and high memory usage require high computing resources, which is a major challenge for mobile UVs (e.g., a small-scale vehicle with limited payload and space).
To overcome this challenge, this paper presents a light and efficient deep neural network architecture to run on an embedded platform in real-time. The proposed model segments navigable space on an image sequence (i.e., a video stream), which is essential for an autonomous vehicle that is based on machine vision. The results demonstrate the performance efficiency of the proposed architecture compared to the existing models and suggest possible improvements that could make the model even more efficient, which is necessary for the future development of autonomous robotic systems.}, booktitle={Computing in Civil Engineering 2019}, publisher={American Society of Civil Engineers}, author={Asadi, Khashayar and Chen, Pengyu and Han, Kevin and Wu, Tianfu and Lobaton, Edgar}, year={2019}, month={Jun} } @article{noghabaei_heydarian_balali_han_2019, title={Trend Analysis on Adoption of Virtual and Augmented Reality in the Architecture, Engineering, and Construction Industry}, volume={12}, url={https://doi.org/10.20944/preprints201912.0369.v1}, DOI={10.20944/preprints201912.0369.v1}, abstractNote={With advances in Building Information Modeling (BIM), Virtual Reality (VR) and Augmented Reality (AR) technologies have many potential applications in the Architecture, Engineering, and Construction (AEC) industry. However, the AEC industry, relative to other industries, has been slow in adopting AR/VR technologies, partly due to a lack of feasibility studies examining the actual cost of implementation versus an increase in profit. The main objectives of this paper are to understand the industry trends in adopting AR/VR technologies and to identify gaps within the industry. The identified gaps can lead to opportunities for developing new tools and finding new use cases. To achieve these goals, two rounds of a survey at two different time periods (a year apart) were conducted. Responses from 158 industry experts and researchers were analyzed to assess the current state, growth, and saving opportunities for AR/VR technologies for the AEC industry. The findings demonstrate that older generations are significantly more confident about the future of AR/VR technologies and they see more benefits in AR/VR utilization. Furthermore, the research results indicate that residential and commercial sectors have adopted these tools the most, compared to other sectors, and that institutional and transportation sectors had the highest growth from 2017 to 2018. Industry experts anticipated solid growth in the use of AR/VR technologies in 5 to 10 years, with the highest expectations towards healthcare. Ultimately, the findings show a significant increase in AR/VR utilization in the AEC industry from 2017 to 2018.}, publisher={MDPI AG}, author={Noghabaei, Mojtaba and Heydarian, Arsalan and Balali, Vahid and Han, Kevin}, year={2019}, month={Dec} } @inproceedings{noghabaei_asadi_han_2019, title={Virtual Manipulation in an Immersive Virtual Environment: Simulation of Virtual Assembly}, url={http://dx.doi.org/10.1061/9780784482421.013}, DOI={10.1061/9780784482421.013}, abstractNote={To address the lack of research efforts in virtual assembly of modules and training, this paper presents virtual manipulation of building objects in an Immersive Virtual Environment (IVE). A worker wearing a Virtual Reality (VR) head-mounted device (HMD) virtually performs an assembly of multiple modules while identifying any issues. Hand motions of the worker are tracked by a motion sensor mounted on the HMD.
The worker can be graded based on his/her overall performance and speed during this VR simulation. The developed VR simulation can ultimately enable workers to identify unforeseen issues (e.g., not enough clearance for an object to be installed). The presented method can solve current deficiencies in discrepancy detection in 3D scanned models of elements. The developed VR platform can also be used for interactive training and simulation sessions that can potentially improve efficiency and help achieve better work performance for assemblies of complex systems.}, booktitle={Computing in Civil Engineering 2019}, author={Noghabaei, M. and Asadi, K. and Han, K.}, year={2019}, month={Jun}, pages={95–102} } @inproceedings{asadi_jain_qin_sun_noghabaei_cole_han_lobaton_2019, title={Vision-Based Obstacle Removal System for Autonomous Ground Vehicles Using a Robotic Arm}, url={http://dx.doi.org/10.1061/9780784482438.042}, DOI={10.1061/9780784482438.042}, abstractNote={Over the past few years, the use of camera-equipped robotic platforms for data collection and visual monitoring applications has grown exponentially. Cluttered construction sites with many objects (e.g., bricks, pipes, etc.) on the ground are challenging environments for a mobile unmanned ground vehicle (UGV) to navigate. To address this issue, this study presents a mobile UGV equipped with a stereo camera and a robotic arm that can remove obstacles along the UGV's path. To achieve this objective, the surrounding environment is captured by the stereo camera and obstacles are detected. The obstacle's relative location to the UGV is sent to the robotic arm module through Robot Operating System (ROS). Then, the robotic arm picks up and removes the obstacle. The proposed method will greatly enhance the degree of automation and the frequency of data collection for construction monitoring. The proposed system is validated through two case studies. The results successfully demonstrate the detection and removal of obstacles, serving as one of the enabling factors for developing an autonomous UGV with various construction operating applications.}, booktitle={Computing in Civil Engineering 2019}, publisher={American Society of Civil Engineers}, author={Asadi, Khashayar and Jain, Rahul and Qin, Ziqian and Sun, Mingda and Noghabaei, Mojtaba and Cole, Jeremy and Han, Kevin and Lobaton, Edgar}, year={2019}, month={Jun} } @inproceedings{jeelani_han_albert_2018, title={Automating Analysis of Construction Workers' Viewing Patterns for Personalized Safety Training and Management}, url={https://doi.org/10.22260/ISARC2018/0131}, DOI={10.22260/ISARC2018/0131}, abstractNote={Unrecognized hazards increase the likelihood of workplace fatalities and injuries substantially. However, recent research has demonstrated that a large proportion of hazards remain unrecognized in dynamic construction environments. Recent studies have suggested a strong correlation between viewing patterns of workers and their hazard recognition performance. Hence, it is important to study and analyze the viewing patterns of workers to gain a better understanding of their hazard recognition performance. The objective of this exploratory research is to explore hazard recognition as a visual search process and to identify the various visual search factors that affect the process of hazard recognition.
Further, the study also proposes a framework to develop a vision-based tool capable of recording and analyzing the viewing patterns of construction workers and generating feedback for personalized training and proactive safety management.}, booktitle={Proceedings of the 35th International Symposium on Automation and Robotics in Construction (ISARC)}, publisher={International Association for Automation and Robotics in Construction (IAARC)}, author={Jeelani, Idris and Han, Kevin and Albert, Alex}, year={2018}, month={Jul} } @article{jeelani_han_albert_2018a, title={Automating and scaling personalized safety training using eye-tracking data}, volume={93}, ISSN={0926-5805}, url={http://dx.doi.org/10.1016/j.autcon.2018.05.006}, DOI={10.1016/j.autcon.2018.05.006}, abstractNote={Research has shown that a large proportion of hazards remain unrecognized, which exposes construction workers to unanticipated safety risks. Recent studies have also found that a strong correlation exists between viewing patterns of workers, captured using eye-tracking devices, and their hazard recognition performance. Therefore, it is important to analyze the viewing patterns of workers to gain a better understanding of their hazard recognition performance. From the training standpoint, scan paths and attention maps, generated using eye-tracking technology, can be used effectively to provide personalized and focused feedback to workers. Such feedback is used to communicate the search process deficiency to workers in order to trigger self-reflection and subsequently improve their hazard recognition performance. This paper proposes a computer vision-based method that tracks workers on a construction site and automatically locates their fixation points, collected using a wearable eye-tracker, on a 3D point cloud. This data is then used to analyze their viewing behavior and compute their attention distribution. The presented case studies validate the proposed method.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Jeelani, Idris and Han, Kevin and Albert, Alex}, year={2018}, month={Sep}, pages={63–77} } @inproceedings{asadi_ramshankar_pullagurla_bhandare_shanbhag_mehta_kundu_han_lobaton_wu_2018, title={Building an Integrated Mobile Robotic System for Real-Time Applications in Construction}, url={https://doi.org/10.22260/ISARC2018/0063}, DOI={10.22260/ISARC2018/0063}, abstractNote={One of the major challenges of a real-time autonomous robotic system for construction monitoring is to simultaneously localize, map, and navigate over the lifetime of the robot, with little or no human intervention. Simultaneous Localization and Mapping (SLAM) and context-awareness are two active research areas in the computer vision and robotics communities. The studies that integrate both in real-time into a single modular framework for construction monitoring still need further investigation. A monocular vision system and real-time scene understanding are computationally heavy, and the major state-of-the-art algorithms are tested on high-end desktops and/or servers with high CPU and/or GPU computing capabilities, which affects their mobility and deployment for real-world applications. To address these challenges and achieve automation, this paper proposes an integrated robotic computer vision system, which generates a real-world spatial map of the obstacles and traversable space present in the environment in near real-time.
This is done by integrating contextual awareness and visual SLAM into a ground robotics agent. This paper presents the hardware utilization and performance of the aforementioned system for three different outdoor environments, which demonstrates the applicability of this pipeline to diverse outdoor scenes in near real-time. The entire system is also self-contained and does not require user input, which demonstrates the potential of this computer vision system for autonomous navigation.}, booktitle={Proceedings of the International Symposium on Automation and Robotics in Construction (IAARC)}, publisher={International Association for Automation and Robotics in Construction (IAARC)}, author={Asadi, Khashayar and Ramshankar, Hariharan and Pullagurla, Harish and Bhandare, Aishwarya and Shanbhag, Suraj and Mehta, Pooja and Kundu, Spondon and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2018}, month={Jul} } @article{han_degol_golparvar-fard_2018, title={Geometry- and Appearance-Based Reasoning of Construction Progress Monitoring}, volume={144}, ISSN={0733-9364 1943-7862}, url={http://dx.doi.org/10.1061/(ASCE)CO.1943-7862.0001428}, DOI={10.1061/(ASCE)CO.1943-7862.0001428}, abstractNote={Although adherence to project schedules and budgets is most highly valued by project owners, more than 53% of typical construction projects are behind schedule and more than 66% suffer from...}, number={2}, journal={Journal of Construction Engineering and Management}, publisher={American Society of Civil Engineers (ASCE)}, author={Han, Kevin and Degol, Joseph and Golparvar-Fard, Mani}, year={2018}, month={Feb}, pages={04017110} } @inproceedings{balali_noghabaei_heydarian_han_2018, title={Improved Stakeholder Communication and Visualizations: Real-Time Interaction and Cost Estimation within Immersive Virtual Environments}, ISBN={9780784481264}, url={http://dx.doi.org/10.1061/9780784481264.051}, DOI={10.1061/9780784481264.051}, abstractNote={One of the main desired qualities of efficient project management in the construction industry is the ability to visualize a construction project in 2D drawings and execute them accurately and efficiently on-site. However, there are several types of delays that significantly influence project durations, often resulting from misunderstandings and miscommunications among parties (owners, contractors, and other stakeholders). One of the more common causes is change orders, which are often cited as a major reason behind delays in construction. Making changes after a building enters the construction phase can be very expensive, conflicting, and time-consuming. Such delays may arise due to the lack of communication and coordination among the owner and the contractors regarding the change orders. In most instances, it is difficult to form a mutual agreement on accepting a change order when there is a predicted price difference. To minimize the impact of change orders on the design and total cost, an approach that allows communication and interaction with 3D models through advanced visualization tools (i.e., virtual and augmented reality environments) can be effective. To further identify the influence of immersive virtual environments (IVEs) on project management, a systematic approach is proposed through which stakeholders can: (1) visualize and interact with 3D models in one-to-one scaled realistic virtual environments (fully immersive); and (2) visualize the dollar amount changes as the results of change orders.
The results of the presented case study show that clients can exercise the ability to make changes virtually well before actual construction begins.}, booktitle={Construction Research Congress 2018}, publisher={American Society of Civil Engineers}, author={Balali, Vahid and Noghabaei, Mojtaba and Heydarian, Arsalan and Han, Kevin}, year={2018}, month={Mar} } @article{ramachandra_nawathe_monroe_han_ham_vatsavai_2018, title={Real-Time Energy Audit of Built Environments: Simultaneous Localization and Thermal Mapping}, volume={24}, ISSN={1943-555X}, DOI={10.1061/(ASCE)IS.1943-555X.0000431}, abstractNote={Leveraging thermography for managing built environments has become prevalent as a robust tool for detecting, analyzing, and reporting their performance in a nondestructive manner. Despite m...}, number={3}, journal={JOURNAL OF INFRASTRUCTURE SYSTEMS}, author={Ramachandra, Bharathkumar and Nawathe, Pranav and Monroe, Jacob and Han, Kevin and Ham, Youngjib and Vatsavai, Ranga Raju}, year={2018}, month={Sep} } @inproceedings{asadi_han_2018, title={Real-Time Image-to-BIM Registration Using Perspective Alignment for Automated Construction Monitoring}, ISBN={9780784481264}, url={http://dx.doi.org/10.1061/9780784481264.038}, DOI={10.1061/9780784481264.038}, booktitle={Construction Research Congress 2018}, publisher={American Society of Civil Engineers}, author={Asadi, Khashayar and Han, Kevin}, year={2018}, month={Mar} } @inproceedings{jeelani_han_albert_2018b, title={Scaling Personalized Safety Training Using Automated Feedback Generation}, ISBN={9780784481288}, url={http://dx.doi.org/10.1061/9780784481288.020}, DOI={10.1061/9780784481288.020}, abstractNote={Hazard identification is one of the primary steps in effective safety management. However, research has shown that a large proportion of construction hazards remain unrecognized in workplaces, which exposes workers to unanticipated risks. While researchers and practitioners have developed several training programs to improve hazard recognition levels, much of this effort has proceeded with limited understanding of factors that impede hazard recognition at the worker level. Recent studies have suggested a strong correlation between viewing patterns of workers and their hazard recognition performance. Hence, it is important to study and analyze the viewing patterns of workers to gain a better understanding of their hazard recognition performance. From the training point of view, scan paths and attention maps can be used very effectively to provide personalized and focused feedback to workers to communicate search process deficiency, trigger self-reflection processes, and improve subsequent hazard search performance. However, providing personalized visual attention feedback requires the recording of scan paths of individual workers and examining the data manually, which is very time-consuming and susceptible to human errors. Consequently, the manual process can result in inaccurate assessments and inefficient feedback. In an attempt to automate and scale up the generation of personalized feedback, this paper proposes a computer vision-based method that tracks workers on a construction site and automatically locates their fixation points (collected by a wearable eye tracker). Using the localized positions of workers and the fixation data from the wearable eye tracker, each worker’s viewing behavior (i.e., attention distribution, fixation points, etc.) is monitored and analyzed to generate personalized feedback for training purposes.
The presented case study validates the proposed method and the results show the potential of automating and scaling up personalized feedback generation.}, booktitle={Construction Research Congress 2018}, publisher={American Society of Civil Engineers}, author={Jeelani, Idris and Han, Kevin and Albert, Alex}, year={2018}, month={Mar} } @article{asadi_ramshankar_pullagurla_bhandare_shanbhag_mehta_kundu_han_lobaton_wu_2018a, title={Vision-based integrated mobile robotic system for real-time applications in construction}, volume={96}, ISSN={0926-5805}, url={http://dx.doi.org/10.1016/j.autcon.2018.10.009}, DOI={10.1016/j.autcon.2018.10.009}, abstractNote={To increase the degree of automation and frequency of data collection for monitoring construction sites, there has been a rapid increase in the number of studies, in the past few years, that developed and/or examined mobile robotic applications in construction. These vision-based platforms capable of autonomous navigation and scene understanding are becoming essential in many construction applications, namely construction site surveying, work-in-progress monitoring, and existing structure inspection. Simultaneous Localization and Mapping (SLAM) and object recognition for proper context-aware motion planning are some of the core vision techniques that are driving innovation for these robotic systems. To characterize the limitations of current techniques on real-time performance and identify challenges in integration and implementation for construction applications, this paper proposes a mobile robotic platform that incorporates a stack of embedded platforms with integrated Graphical Processing Units (GPUs). This paper presents three case studies to evaluate the performance of the proposed system. The results demonstrate the robustness and feasibility of developing and deploying an autonomous system in the near future.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Asadi, Khashayar and Ramshankar, Hariharan and Pullagurla, Harish and Bhandare, Aishwarya and Shanbhag, Suraj and Mehta, Pooja and Kundu, Spondon and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2018}, month={Dec}, pages={470–482} } @article{han_golparvar-fard_2017, title={Crowdsourcing BIM-guided collection of construction material library from site photologs}, volume={5}, url={https://doi.org/10.1186/s40327-017-0052-3}, DOI={10.1186/s40327-017-0052-3}, number={1}, journal={Visualization in Engineering}, publisher={Springer Science and Business Media LLC}, author={Han, Kevin and Golparvar-Fard, Mani}, year={2017}, month={Dec} } @inproceedings{jeelani_han_albert_2017, title={Development of Immersive Personalized Training Environment for Construction Workers}, url={http://dx.doi.org/10.1061/9780784480830.050}, DOI={10.1061/9780784480830.050}, abstractNote={The ability of workers to recognize and manage construction hazards is essential for effective safety management. However, studies have unanimously demonstrated that a large proportion of construction hazards remain unrecognized in dynamic work environments. Such poor hazard recognition levels have been partly attributed to the pervasive use of unengaging and ineffective training practices within construction. To improve training effectiveness, recent efforts have focused on assessing the learning needs of particular workers, and customizing training experiences accordingly to maximize training outcomes.
This paper builds upon the previous research by developing an immersive safety training environment that provides a more effective personalized training experience for workers. After development, the degree of realism and immersive experience offered by the training environment was measured and found to be 73% of the real environment. The findings of this study will be useful to practicing professionals seeking to improve training efforts and safety training outcomes.}, booktitle={Computing in Civil Engineering 2017}, author={Jeelani, I. and Han, K. and Albert, A.}, year={2017}, month={Jun}, pages={407–415} } @inproceedings{boroujeni_han_2017, title={Perspective-Based Image-to-BIM Alignment for Automated Visual Data Collection and Construction Performance Monitoring}, url={http://dx.doi.org/10.1061/9780784480830.022}, DOI={10.1061/9780784480830.022}, abstractNote={In efforts to automate construction performance monitoring, past studies have worked on vision-based registration of image to BIM and 3D point clouds to BIM. The continuous development of simultaneous localization and mapping (SLAM) enabled real-time estimation of locations and orientations of a camera while incrementally reconstructing a 3D scene. However, it localizes a camera to an arbitrary local coordinate system and produces a low-resolution and noisy point cloud that is not suitable for quality assessment of a structure. For the architecture/engineering/construction industry, a better and more realistic approach is to localize with respect to building information models (BIMs) in real time and post-process 3D dense reconstruction. This approach will allow project management teams to better communicate quality and progress using visuals associated with locations shown with BIMs. Moreover, it will automate image-to-BIM and image-based point cloud-to-BIM registration, enhancing past studies that attempt to automate image-based progress detection and quality assessment. On the other hand, the current state-of-the-art method for registering an image-based point cloud to a BIM requires selection of the correspondences. To address these challenges and achieve automation, this paper presents a new localization method that aligns an image to a BIM by detecting and matching perspectives of the image and the BIM. The results demonstrate the potential for enabling automated visual data collection (as-built aligned with as-planned) for performance monitoring.}, booktitle={Computing in Civil Engineering 2017}, author={Boroujeni, K. A. and Han, K.}, year={2017}, month={Jun}, pages={171–178} } @article{han_golparvar-fard_2017, title={Potential of big visual data and building information modeling for construction performance analytics: An exploratory study}, volume={73}, ISSN={["1872-7891"]}, url={https://doi.org/10.1016/j.autcon.2016.11.004}, DOI={10.1016/j.autcon.2016.11.004}, abstractNote={The ever-increasing volume of visual data due to recent advances in smart devices and camera-equipped platforms provides an unprecedented opportunity to visually capture the actual status of construction sites at a fraction of the cost of alternative methods. Most efforts on documenting as-built status, however, stop at collecting visual data and updating BIM. Hundreds of images and videos are captured, but most of them soon become useless without being properly localized with plan documents and time. 
To take full advantage of visual data for construction performance analytics, three aspects (reliability, relevance, and speed) of capturing, analyzing, and reporting visual data are critical. This paper 1) investigates current strategies for leveraging emerging big visual data and BIM in construction performance monitoring from these three aspects, and 2) characterizes gaps in knowledge via case studies and structures a road map for research in visual sensing and analytics.}, journal={AUTOMATION IN CONSTRUCTION}, publisher={Elsevier BV}, author={Han, Kevin K. and Golparvar-Fard, Mani}, year={2017}, month={Jan}, pages={184–198} } @inproceedings{han_muthukumar_golparvar-fard_2016, title={Enhanced Appearance-Based Material Classification for the Monitoring of Operation-Level Construction Progress through the Removal of Occlusions}, DOI={10.1061/9780784479827.089}, abstractNote={Tracking and visualizing the status of work-in-progress on construction sites can minimize the gap between short-term and long-term planning and lower coordination costs. Over the past few years, there have been studies on leveraging appearance information in point clouds and 4D building information models (BIM) to automatically detect progress deviations. These methods rely on libraries of construction material images to train the underlying material recognition models. Areas of the images corresponding to BIM elements are extracted and tested with the trained material recognition model. While successful results have been achieved, these extracted images contain occlusions that cause some BIM elements to be misclassified. To improve the accuracy, this paper presents a method that removes occlusions prior to performing material recognition. By creating 3D depth maps and simple linear iterative clustering (SLIC) superpixels, occlusions present in the images are removed. The presented method is validated with four case studies. The experimental results show improved accuracies compared against those of the previous method without occlusion removal.}, booktitle={Construction Research Congress 2016}, publisher={American Society of Civil Engineers}, author={Han, Kevin K. and Muthukumar, Banu and Golparvar-Fard, Mani}, year={2016}, month={May} } @article{xu_jiang_wang_han_ameen_khan_chang_liu_2016, title={Large-area, uniform and low-cost dual-mode plasmonic naked-eye colorimetry and SERS sensor with handheld Raman spectrometer}, volume={8}, ISSN={2040-3364 2040-3372}, url={http://dx.doi.org/10.1039/C5NR08357E}, DOI={10.1039/C5NR08357E}, abstractNote={We demonstrated a highly sensitive, wafer-scale, highly uniform plasmonic nano-mushroom substrate based on plastic for naked-eye plasmonic colorimetry and surface-enhanced Raman spectroscopy (SERS). We gave it the name FlexBrite. The dual-mode functionality of FlexBrite allows for label-free qualitative analysis by SERS with an enhancement factor (EF) of 10⁸ and label-free quantitative analysis by naked-eye colorimetry with a sensitivity of 611 nm RIU⁻¹. The SERS EF of FlexBrite in the wet state was found to be 4.81 × 10⁸, 7 times stronger than in the dry state, making FlexBrite suitable for aqueous environments such as microfluidic systems. The label-free detection of biotin-streptavidin interaction by both SERS and colorimetry was demonstrated with FlexBrite. The detection of trace amounts of the narcotic drug methamphetamine in drinking water by SERS was implemented with a handheld Raman spectrometer and FlexBrite. 
This plastic-based dual-mode nano-mushroom substrate has the potential to be used as a sensing platform for easy and fast analysis in chemical and biological assays.}, number={11}, journal={Nanoscale}, publisher={Royal Society of Chemistry (RSC)}, author={Xu, Zhida and Jiang, Jing and Wang, Xinhao and Han, Kevin and Ameen, Abid and Khan, Ibrahim and Chang, Te-Wei and Liu, Gang Logan}, year={2016}, pages={6162–6172} } @article{ham_han_lin_golparvar-fard_2016, title={Visual monitoring of civil infrastructure systems via camera-equipped Unmanned Aerial Vehicles (UAVs): a review of related works}, volume={4}, ISSN={2213-7459}, url={http://dx.doi.org/10.1186/s40327-015-0029-z}, DOI={10.1186/s40327-015-0029-z}, number={1}, journal={Visualization in Engineering}, publisher={Springer Science and Business Media LLC}, author={Ham, Youngjib and Han, Kevin K. and Lin, Jacob J and Golparvar-Fard, Mani}, year={2016}, month={Jan} } @inproceedings{lin_han_golparvar-fard_2015, title={A Framework for Model-Driven Acquisition and Analytics of Visual Data Using UAVs for Automated Construction Progress Monitoring}, DOI={10.1061/9780784479247.020}, abstractNote={Automated assessment of work-in-progress using large collections of site images and four-dimensional (4D) building information modelling (BIM) has the potential to significantly improve the efficiency of construction project controls. Nevertheless, today's manual procedures for taking site photos do not support the desired frequency or completeness for automated progress monitoring. While the usage of Unmanned Aerial Vehicles for acquisition of site images has gained popularity, their application for addressing issues associated with image-based progress monitoring, and particularly leveraging 4D BIM for steering the data collection process, has not been investigated before. By presenting examples from two case studies conducted on real-world construction projects, this paper suggests a framework for model-driven acquisition and analytics of progress images. In particular, the potential of spatial (geometry, appearance, and interconnectivity) and temporal information in 4D BIM for autonomous data acquisition and analytics that guarantees completeness and accuracy for both as-built modeling and monitoring work-in-progress at the schedule task-level is discussed.}, booktitle={Computing in Civil Engineering 2015}, publisher={American Society of Civil Engineers}, author={Lin, Jacob J. and Han, Kevin K. and Golparvar-Fard, Mani}, year={2015}, month={Jun} } @article{han_golparvar-fard_2015, title={Appearance-based material classification for monitoring of operation-level construction progress using 4D BIM and site photologs}, volume={53}, ISSN={0926-5805}, url={http://dx.doi.org/10.1016/J.AUTCON.2015.02.007}, DOI={10.1016/J.AUTCON.2015.02.007}, abstractNote={This paper presents a new appearance-based material classification method for monitoring construction progress deviations at the operational level. The method leverages 4D Building Information Models (BIM) and 3D point cloud models generated from site photologs using Structure-from-Motion techniques. To initialize, a user manually assigns correspondences between the point cloud model and BIM, which automatically brings the photos and the 4D BIM into alignment from all camera viewpoints. Through reasoning about occlusion, each BIM element is back-projected on all images that see that element. From these back-projections, several 2D patches are sampled per element and are classified into different material types. To perform material classification, the expected material type information is derived from BIM. Then the image patches are convolved with texture and color filters, and their concatenated vector-quantized responses are compared with multiple discriminative material classification models that are relevant to the expected progress of that element. For each element, a quantized histogram of the observed material types is formed, and the material type with the highest appearance frequency infers the appearance and thus the state of progress. To validate, four new datasets of incomplete and noisy point cloud models are introduced, which are assembled from real-world construction site images and BIMs. An extended version of the Construction Material Library (CML) is also introduced for training/testing the material classifiers. The material classification shows an average accuracy of 92.4% for CML image patches of 100 × 100 pixels. 
The experiments on those four datasets show an accuracy of 95.9%, demonstrating the potential of the appearance-based recognition method for inferring the actual state of construction progress for BIM elements.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Han, Kevin K. and Golparvar-Fard, Mani}, year={2015}, month={May}, pages={44–57} } @inproceedings{han_golparvar-fard_2015, title={BIM-Assisted Structure-from-Motion for Analyzing and Visualizing Construction Progress Deviations through Daily Site Images and BIM}, DOI={10.1061/9780784479247.074}, abstractNote={In an effort to document work-in-progress, many construction companies take hundreds of images on their project sites on a daily basis. These images, together with 4D BIM, can serve as a great resource for analyzing progress deviations. To facilitate image-vs.-BIM comparison, several methods have been introduced that tie all images together in 3D using standard Structure-from-Motion (SfM) procedures. The resulting point clouds are then superimposed with the 4D BIM, resulting in back-projection of BIM on all images that were successfully registered through the SfM procedure. However, site images often exhibit wide baselines and thus are not successfully registered with BIM. To address current limitations, this paper presents a method, together with experimental results, that leverages BIM as a prior to initiate the SfM procedure. It is shown that by interactively guiding BIM into one or a few images that have significant overlap with the rest, the proposed BIM-assisted SfM procedure results in more complete point clouds and also generates more accurate BIM overlays on site images.}, booktitle={Computing in Civil Engineering 2015}, publisher={American Society of Civil Engineers}, author={Han, Kevin K. and Golparvar-Fard, Mani}, year={2015}, month={Jun} } @article{han_cline_golparvar-fard_2015, title={Formalized knowledge of construction sequencing for visual monitoring of work-in-progress via incomplete point clouds and low-LoD 4D BIMs}, volume={29}, ISSN={1474-0346}, DOI={10.1016/j.aei.2015.10.006}, abstractNote={Over the last few years, new methods have been developed that detect construction progress deviations by comparing laser-scanning or image-based point clouds with 4D BIM. To create complete as-built models, these methods require the visual sensors to have a proper line-of-sight and field-of-view to building elements. For reporting progress deviations, they also require Building Information Modeling (BIM) and schedule Work-Breakdown-Structure (WBS) with a high Level of Development (LoD). While certain logic behind sequences of construction activities can augment 4D BIM with lower LoDs to support making inferences about states of progress under limited visibility, its application in visual monitoring systems has not been explored. To address these limitations, this paper formalizes an ontology that models construction sequencing rationale, such as physical relationships among components. It also presents a classification mechanism that integrates this ontology with BIM to infer states of progress for partially and fully occluded components. The ontology and classification mechanism are validated using a Charrette test and by presenting their application together with BIM and as-built data on real-world projects. The results demonstrate the effectiveness and generality of the proposed ontology. 
It also illustrates how the classification mechanism augments 4D BIM at lower LoDs and WBS to enable visual progress assessment for partially and fully occluded BIM elements and provide detailed operational-level progress information.}, number={4}, journal={Advanced Engineering Informatics}, publisher={Elsevier BV}, author={Han, K. and Cline, D. and Golparvar-Fard, M.}, year={2015}, month={Oct}, pages={889–901} } @inproceedings{han_golparvar-fard_2014, title={Automated Monitoring of Operation-level Construction Progress Using 4D BIM and Daily Site Photologs}, DOI={10.1061/9780784413517.106}, abstractNote={Recent research efforts on improving construction progress monitoring have mainly focused on model-based assessment methods. In these methods, the expected performance is typically modeled with 4D BIM and the actual performance is sensed through 3D image-based reconstruction methods or laser scanning. Previous research on 4-Dimensional Augmented Reality (D4AR) models – which fuse 4D BIM with point clouds generated from daily site photologs – and on laser scan-vs.-BIM comparison has shown that it is possible to conduct occupancy-based assessments and, as an indicator of progress, detect whether or not BIM elements are present in the scene. However, to detect deviations beyond the typical Work Breakdown Structure (WBS) in 4D BIM, these methods also need to capture operation-level details (e.g., the current stage of concrete placement: formwork, rebars, concrete). To overcome current limitations, this paper presents methods for sampling and recognizing construction materials from image-based point cloud data and using that information in a statistical form to infer the state of progress. The proposed method is validated using the D4AR model generated for a building construction site. The preliminary experimental results show that it is feasible to sample and detect construction materials from the images that are registered to a point cloud model and use frequency histograms of the detected materials to infer the actual state of progress for BIM elements.}, booktitle={Construction Research Congress 2014}, publisher={American Society of Civil Engineers}, author={Han, Kevin K. and Golparvar-Fard, Mani}, year={2014}, month={May} } @inproceedings{han_golparvar-fard_2014, title={Multi-Sample Image-Based Material Recognition and Formalized Sequencing Knowledge for Operation-Level Construction Progress Monitoring}, DOI={10.1061/9780784413616.046}, abstractNote={This paper presents a new method for operation-level monitoring of construction progress using image-based 3D point clouds and 4D Building Information Models (BIM). Previous research on comparing point clouds to 4D BIM has proven the practicality of performing progress monitoring by occupancy-based assessment: detecting whether BIM elements are present in the scene. Nonetheless, without appearance information, operation-level monitoring – e.g., distinguishing formwork from concrete surfaces during concrete placement – is still challenging. By leveraging the interconnectivity of site images and BIM-registered point clouds, this paper presents a new method for densely sampling and extracting 2D patches from all site images from which BIM elements are expected to be visible. Our method reasons about occlusions in the scene and classifies the material in each image patch. 
By formalizing the sequencing knowledge of construction operations for progress monitoring purposes and using a histogram-based representation for possible types of construction materials, our method can accurately detect the current state of progress for BIM elements in the presence of occlusions. We introduce a new image dataset for material recognition and present promising results on operation-level progress monitoring on an actual concrete building construction site. Our method addresses the challenges of working with non-detailed BIM or high-level work breakdown structures.}, booktitle={Computing in Civil and Building Engineering (2014)}, publisher={American Society of Civil Engineers}, author={Han, Kevin K. and Golparvar-Fard, Mani}, year={2014}, month={Jun} }