@article{sanders_xie_chen_2023, title={A comparison of the psychological effects of robot motion in physical and virtual environments}, volume={112}, ISSN={["1872-9126"]}, url={https://doi.org/10.1016/j.apergo.2023.104039}, DOI={10.1016/j.apergo.2023.104039}, abstractNote={A mixed-methods approach was used to assess the fidelity of virtual environments as ergonomic assessment tools for human-robot interaction. Participants performed a visual search task in the physical environment while a nearby collaborative robot repeatedly extended its arm toward them. This scenario was reconstructed in two virtual environments with different levels of graphical detail. Measures of presence, task performance, workload, and anxiety were taken to determine the effect of robot motion in all three environments. Task performance decreased in response to robot motion in the physical environment, while workload and anxiety increased. This simple effect of motion was consistent across environments for measures of task performance and anxiety. However, people performed faster overall in virtual reality, and the effect of motion on workload was greatly reduced in virtual reality. Results in the virtual environments suggest that people were distracted by the sound of the robot, and that presence was affected by manipulations of immersion and coherence.}, journal={APPLIED ERGONOMICS}, author={Sanders, Nathan Edward and Xie, Ziyang and Chen, Karen B.}, year={2023}, month={Oct} }

@article{xie_lu_wang_li_xu_2023, title={An Image-Based Human-Robot Collision Avoidance Scheme: A Proof of Concept}, volume={6}, ISSN={["2472-5846"]}, url={https://doi.org/10.1080/24725838.2023.2222651}, DOI={10.1080/24725838.2023.2222651}, abstractNote={Occupational Applications: In modern industrial plants, collisions between humans and robots pose a significant risk to occupational safety. To address this concern, we sought to devise a reliable human-robot collision avoidance system employing computer vision. This system enables proactive prevention of dangerous collisions between humans and robots. In contrast to previous approaches, we used a standard RGB camera, making implementation more convenient and cost-effective. Furthermore, the proposed method greatly extends the effective detection range compared to previous studies, thereby enhancing its utility for monitoring large-scale workplaces.}, journal={IISE TRANSACTIONS ON OCCUPATIONAL ERGONOMICS & HUMAN FACTORS}, author={Xie, Ziyang and Lu, Lu and Wang, Hanwen and Li, Li and Xu, Xu}, year={2023}, month={Jun} }

@article{xie_lu_wang_su_liu_xu_2023, title={Improving Workers' Musculoskeletal Health During Human-Robot Collaboration Through Reinforcement Learning}, volume={5}, ISSN={["1547-8181"]}, url={https://doi.org/10.1177/00187208231177574}, DOI={10.1177/00187208231177574}, abstractNote={OBJECTIVE This study aims to improve workers' postures and thus reduce the risk of musculoskeletal disorders in human-robot collaboration by developing a novel model-free reinforcement learning method. BACKGROUND Human-robot collaboration has been a flourishing work configuration in recent years. Yet, it could lead to work-related musculoskeletal disorders if the collaborative tasks result in awkward postures for workers.
METHODS The proposed approach follows two steps: first, a 3D human skeleton reconstruction method was adopted to calculate workers' continuous awkward posture (CAP) score; second, an online gradient-based reinforcement learning algorithm was designed to dynamically improve workers' CAP score by adjusting the positions and orientations of the robot end effector. RESULTS In an empirical experiment, the proposed approach significantly improved the CAP scores of the participants during a human-robot collaboration task when compared with scenarios where the robot and participants worked together at a fixed position or at the individual's elbow height. The questionnaire outcomes also showed that the working posture resulting from the proposed approach was preferred by the participants. CONCLUSION The proposed model-free reinforcement learning method can learn optimal worker postures without the need for specific biomechanical models. The data-driven nature of this method makes it adaptive, providing personalized optimal work postures. APPLICATION The proposed method can be applied to improve occupational safety in factories where collaborative robots are deployed. Specifically, the personalized robot working positions and orientations can proactively reduce exposure to awkward postures that increase the risk of musculoskeletal disorders. The algorithm can also reactively protect workers by reducing the load on specific joints.}, journal={HUMAN FACTORS}, author={Xie, Ziyang and Lu, Lu and Wang, Hanwen and Su, Bingyi and Liu, Yunan and Xu, Xu}, year={2023}, month={May} }

@article{wang_xie_lu_su_jung_xu_2022, title={A mobile platform-based app to assist undergraduate learning of human kinematics in biomechanics courses}, volume={142}, ISSN={["1873-2380"]}, url={https://doi.org/10.1016/j.jbiomech.2022.111243}, DOI={10.1016/j.jbiomech.2022.111243}, abstractNote={Whole-body biomechanics examines different physical characteristics of human body movement by applying principles of Newtonian mechanics. Therefore, undergraduate biomechanics courses are highly demanding in mathematics and physics. While the inclusion of laboratory experiences can augment student comprehension of biomechanics concepts, the cost and expertise associated with experimental equipment can be a barrier to offering laboratory sessions. In this study, we developed a mobile app to facilitate learning human kinematics in biomechanics curricula. First, a mobile computer-vision algorithm based on the Convolutional Pose Machine (CPM), MobileNet V2, and the TensorFlow Lite framework is adopted to reconstruct 2D human poses from images collected by a mobile device camera. Key joint locations are then fed into the human kinematics variable estimator for kinematics analysis. Simultaneously, students can view various kinematics data for a selected joint or body segment in real time through the user interface of the mobile device.
The proposed app can serve as an instructional tool to assist in conducting human motion experiments in biomechanics courses.}, journal={JOURNAL OF BIOMECHANICS}, author={Wang, Hanwen and Xie, Ziyang and Lu, Lu and Su, Bingyi and Jung, Sehee and Xu, Xu}, year={2022}, month={Sep} }

@article{lu_xie_wang_li_xu_2022, title={Mental stress and safety awareness during human-robot collaboration - Review}, volume={105}, ISSN={["1872-9126"]}, url={https://doi.org/10.1016/j.apergo.2022.103832}, DOI={10.1016/j.apergo.2022.103832}, abstractNote={Human-robot collaboration (HRC) is an emerging research area that has gained tremendous attention in both academia and industry. Yet, having humans and robots share the workplace has led to safety concerns. In particular, the mental stress and safety awareness of human teammates during HRC remain poorly understood but are of great importance to workplace safety. In this manuscript, we reviewed twenty-five studies to understand the relationships between HRC and workers' mental stress or safety awareness. Specifically, we aimed to understand: (1) robot-related factors that may affect human workers' mental stress or safety awareness, (2) measurements that could be used to evaluate workers' mental stress in HRC, and (3) various methods for measuring safety awareness that had been adopted or could be applied in HRC. According to our literature review, robot-related factors, including robot characteristics, social touching, and trajectory, are related to workers' mental stress or safety awareness. For the measurement of mental stress and safety awareness, each method mentioned has its own validity and rationale. Additionally, potential co-robot actions to lower mental stress or improve safety awareness, as well as future implications, are discussed.}, journal={APPLIED ERGONOMICS}, publisher={Elsevier BV}, author={Lu, Lu and Xie, Ziyang and Wang, Hanwen and Li, Li and Xu, Xu}, year={2022}, month={Nov} }

@article{xie_li_xu_2022, title={Real-Time Driving Distraction Recognition Through a Wrist-Mounted Accelerometer}, volume={64}, ISSN={["1547-8181"]}, url={https://doi.org/10.1177/0018720821995000}, DOI={10.1177/0018720821995000}, abstractNote={Objective We propose a method for recognizing driver distraction in real time using a wrist-worn inertial measurement unit (IMU).}, number={8}, journal={HUMAN FACTORS}, publisher={SAGE Publications}, author={Xie, Ziyang and Li, Li and Xu, Xu}, year={2022}, month={Dec}, pages={1412–1428} }

@article{wu_cui_baker_mahendran_xie_zhu_2021, title={A Biaxially Stretchable and Self-Sensing Textile Heater Using Silver Nanowire Composite}, volume={13}, ISSN={["1944-8252"]}, url={https://doi.org/10.1021/acsami.1c17651}, DOI={10.1021/acsami.1c17651}, abstractNote={Wearable heaters have garnered significant attention from academia and industry for their great potential in thermotherapy. Silver nanowire (AgNW) is a promising conductive material for flexible and stretchable electrodes. Here, a resistive, biaxially stretchable heater based on an AgNW composite is reported for the first time, where an AgNW percolation network is encased in a thin polyimide (PI) film and integrated with a highly stretchable textile. The AgNW/PI is patterned with a 2D kirigami structure, which enables constant resistance under large tensile strain (up to 100% uniaxial strain and 50% biaxial strain).
The heater can achieve a high temperature of ∼140 °C with a low current of 0.125 A, fast heating and cooling rates of ∼16.5 and ∼14.1 °C s⁻¹, respectively, and stable performance over 400 heating cycles. A feedback control system is developed to maintain a constant heating temperature under changes in the temperature of the surrounding environment. Demonstrations of thermotherapy applied to the curvilinear surface of the knee using the stretchable heater illustrate its promising potential for wearable applications.}, number={49}, journal={ACS APPLIED MATERIALS & INTERFACES}, author={Wu, Shuang and Cui, Zheng and Baker, G. Langston and Mahendran, Siddarth and Xie, Ziyang and Zhu, Yong}, year={2021}, month={Dec}, pages={59085–59091} }

@article{wang_xie_lu_li_xu_2021, title={A computer-vision method to estimate joint angles and L5/S1 moments during lifting tasks through a single camera}, volume={129}, ISSN={["1873-2380"]}, url={https://doi.org/10.1016/j.jbiomech.2021.110860}, DOI={10.1016/j.jbiomech.2021.110860}, abstractNote={Weight lifting is a risk factor for work-related low-back musculoskeletal disorders (MSDs). From an ergonomics perspective, it is important to measure workers' body motion during a lifting task and estimate low-back joint moments to ensure that low-back biomechanical loadings are within the failure tolerance. With the recent development of advanced deep neural networks, an increasing number of computer vision algorithms have been presented to estimate 3D human poses from videos. In this study, we first performed 3D pose estimation of lifting tasks using a single RGB camera and VideoPose3D, an open-source library with a fully convolutional model. Joint angle trajectories and the L5/S1 joint moment were then calculated following a top-down inverse-dynamics biomechanical model. To evaluate the accuracy of the computer-vision-based angular trajectories and L5/S1 joint moments, we conducted an experiment in which participants performed a variety of lifting tasks. The body motions of the participants were concurrently captured by an RGB camera and a laboratory-grade motion tracking system. The body joint angles and L5/S1 joint moments obtained from the camera were compared with those obtained from the motion tracking system. The results showed a strong correlation (r > 0.9, RMSE < 10°) between the two methods for shoulder flexion, trunk flexion, trunk rotation, and elbow flexion. The computer-vision-based method also yielded a good estimate of the total L5/S1 moment and the L5/S1 moment in the sagittal plane (r > 0.9, RMSE < 20 N·m). This study showed that computer vision could help safety practitioners quickly identify jobs with high MSD risks from field survey videos.}, journal={JOURNAL OF BIOMECHANICS}, author={Wang, Hanwen and Xie, Ziyang and Lu, Lu and Li, Li and Xu, Xu}, year={2021}, month={Dec} }

@article{li_prabhu_xie_wang_lu_xu_2021, title={Lifting Posture Prediction With Generative Models for Improving Occupational Safety}, volume={51}, ISSN={["2168-2305"]}, url={https://doi.org/10.1109/THMS.2021.3102511}, DOI={10.1109/THMS.2021.3102511}, abstractNote={Lifting tasks have been identified as highly associated with work-related low back pain. Posture prediction can be used to simulate workers' postures during lifting tasks and thus facilitate the prevention of low back pain (LBP). This study adopts two generative models, a conditional variational autoencoder and a conditional generative adversarial network, to predict lifting postures.
A regular feed-forward neural network (FNN) built upon previous studies is also investigated for comparison purposes. Ground-truth lifting posture data collected by a motion capture system are used for training and testing the models. The models are trained with datasets of different sizes and with different loss functions, and the results are compared. The conditional variational autoencoder and the regular FNN achieved comparable top performance in lifting posture prediction in terms of accuracy and posture validity. Both generative models are able to partially capture the variability of constrained postures. Overall, the results show that a generative model can predict postures with reasonable accuracy and validity (RMSE of coordinates = 0.049 m; RMSE of joint angles = 19.58°). The predicted postures can support biomechanical analysis and ergonomic assessment of a lifting task to reduce the risk of low back injuries.}, number={5}, journal={IEEE TRANSACTIONS ON HUMAN-MACHINE SYSTEMS}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Li, Li and Prabhu, Saiesh and Xie, Ziyang and Wang, Hanwen and Lu, Lu and Xu, Xu}, year={2021}, month={Oct}, pages={494–503} }

@article{li_xie_xu_2020, title={MOPED25: A multimodal dataset of full-body pose and motion in occupational tasks}, volume={113}, ISSN={["1873-2380"]}, url={https://doi.org/10.1016/j.jbiomech.2020.110086}, DOI={10.1016/j.jbiomech.2020.110086}, abstractNote={In recent years, there has been a trend of using images and deep neural network-based computer vision algorithms to perform postural evaluation in the workplace safety and ergonomics community. The performance of these computer vision algorithms, however, relies heavily on the generalizability of the posture dataset used for algorithm training. Current open-access posture datasets from the computer vision community mainly focus on the poses and motions of daily activities and lack workplace context. In this study, a new posture dataset, MOPED25 (Multimodal Occupational Posture Dataset with 25 tasks), is presented. This dataset includes full-body kinematics data and synchronized videos of 11 participants performing commonly seen workplace tasks. All the data have been made publicly available online. This dataset can serve as a benchmark for developing more robust computer vision algorithms for postural evaluation at workplaces.}, journal={JOURNAL OF BIOMECHANICS}, author={Li, Li and Xie, Ziyang and Xu, Xu}, year={2020}, month={Dec} }