@article{wu_lin_jin_weng_xu_zhang_xu_xiang_sun_ye_2024, title={CCHA YOLO for mycelium clamp connection (CC) and hyphae Autolysis (HA) detection under microscopy imaging and web deployment}, volume={201}, ISSN={["1095-9149"]}, DOI={10.1016/j.microc.2024.110483}, abstractNote={Microscopic examination is commonly employed to assess edible fungal mycelium vitality. However, this method can become time-intensive when evaluating a substantial volume of hyphae samples, which implies an urgent need to develop an accurate and automatic determination method. The challenges of mycelium detection come mostly from the multi-scale target detection under various magnifications. In this study, microscopic images of 10 edible fungi strains under different magnification scales or stain colors were collected to create a dataset. An improved multi-scale object detection model for mycelium vitality detection, CCHA YOLO, was proposed by enhancing the Backbone via combining Yolov8m and Swin Transformer (SwinT). Meanwhile, the Convolutional Block Attention Module (CBAM) was introduced to the Head, as well as optimized post-processing techniques to further promote model performance. The results indicated that CCHA YOLO achieved a mAP50:95 (mean average precision) of 89.02 % with a computational load of 98.61 GFLOPs. Additionally, it indicates a 16.67 % accuracy enhancement, needing only 11.3 more computational operations compared to the baseline YOLOv8m. In the meantime, CCHA YOLO was deployed on the web-based edge to facilitate the detection of microscopic images, highlighting the practical applicability of CCHA YOLO in determining mycelium vitality.}, journal={MICROCHEMICAL JOURNAL}, author={Wu, Libin and Lin, Shaodan and Jin, Wensong and Weng, Haiyong and Xu, Jinchai and Zhang, Lintong and Xu, Yawen and Xiang, Lirong and Sun, Shujing and Ye, Dapeng}, year={2024}, month={Jun} } @article{zhang_he_wu_quesada_xiang_2024, title={Development of a bionic hexapod robot with adaptive gait and clearance for enhanced agricultural field scouting}, volume={11}, ISSN={["2296-9144"]}, DOI={10.3389/frobt.2024.1426269}, abstractNote={High agility, maneuverability, and payload capacity, combined with small footprints, make legged robots well-suited for precision agriculture applications. In this study, we introduce a novel bionic hexapod robot designed for agricultural applications to address the limitations of traditional wheeled and aerial robots. The robot features a terrain-adaptive gait and adjustable clearance to ensure stability and robustness over various terrains and obstacles. Equipped with a high-precision Inertial Measurement Unit (IMU), the robot is able to monitor its attitude in real time to maintain balance. To enhance obstacle detection and self-navigation capabilities, we have designed an advanced version of the robot equipped with an optional advanced sensing system. This advanced version includes LiDAR, stereo cameras, and distance sensors to enable obstacle detection and self-navigation capabilities. We have tested the standard version of the robot under different ground conditions, including hard concrete floors, rugged grass, slopes, and uneven field with obstacles. The robot maintains good stability with pitch angle fluctuations ranging from −11.5° to 8.6° in all conditions and can walk on slopes with gradients up to 17°. These trials demonstrated the robot’s adaptability to complex field environments and validated its ability to maintain stability and efficiency. 
In addition, the terrain-adaptive algorithm is more energy efficient than traditional obstacle avoidance algorithms, reducing energy consumption by 14.4% for each obstacle crossed. Combined with its flexible and lightweight design, our robot shows significant potential in improving agricultural practices by increasing efficiency, lowering labor costs, and enhancing sustainability. In our future work, we will further develop the robot’s energy efficiency, durability in various environmental conditions, and compatibility with different crops and farming methods.}, journal={FRONTIERS IN ROBOTICS AND AI}, author={Zhang, Zhenghua and He, Weilong and Wu, Fan and Quesada, Lina and Xiang, Lirong}, year={2024}, month={Sep} } @article{li_xu_xiang_chen_zhuang_yin_li_2024, title={Foundation models in smart agriculture: Basics, opportunities, and challenges}, volume={222}, ISSN={["1872-7107"]}, DOI={10.1016/j.compag.2024.109032}, abstractNote={The past decade has witnessed the rapid development and adoption of machine and deep learning (ML & DL) methodologies in agricultural systems, showcased by great successes in applications such as smart crop management, smart plant breeding, smart livestock farming, precision aquaculture farming, and agricultural robotics. However, these conventional ML/DL models have certain limitations: they heavily rely on large, costly-to-acquire labeled datasets for training, require specialized expertise for development and maintenance, and are mostly tailored for specific tasks, thus lacking generalizability. Recently, large pre-trained models, also known as foundation models (FMs), have demonstrated remarkable successes in language, vision, and decision-making tasks across various domains. These models are trained on a vast amount of data from multiple domains and modalities. Once trained, they can accomplish versatile tasks with just minor fine-tuning and minimal task-specific labeled data. Despite their proven effectiveness and huge potential, there has been little exploration of applying FMs to agriculture artificial intelligence (AI). Therefore, this study aims to explore the potential of FMs in the field of smart agriculture. In particular, conceptual tools and technical background are presented to facilitate the understanding of the problem space and uncover new research directions in this field. To this end, recent FMs in the general computer science (CS) domain are reviewed, and the models are categorized into four categories: language FMs, vision FMs, multimodal FMs, and reinforcement learning FMs. Subsequently, the process of developing agriculture FMs (AFMs) is outlined and their potential applications in smart agriculture are discussed. In addition, the unique challenges and risks associated with developing AFMs are discussed, including model training, validation, and deployment. Through this study, the advancement of AI in agriculture is explored by introducing AFMs as a promising paradigm that can significantly mitigate the reliance on extensive labeled datasets and enhance the efficiency, effectiveness, and generalization of agricultural AI systems. 
To facilitate further research, a well-classified and actively updated list of papers on AFMs is organized and accessible at https://github.com/JiajiaLi04/Agriculture-Foundation-Models.}, journal={COMPUTERS AND ELECTRONICS IN AGRICULTURE}, author={Li, Jiajia and Xu, Mingle and Xiang, Lirong and Chen, Dong and Zhuang, Weichao and Yin, Xunyuan and Li, Zhaojian}, year={2024}, month={Jul} } @article{wu_xiang_you_tang_gai_2024, title={Plant-Denoising-Net (PDN): A plant point cloud denoising network based on density gradient field learning}, volume={210}, ISSN={["1872-8235"]}, DOI={10.1016/j.isprsjprs.2024.03.010}, abstractNote={Effective point cloud denoising is critical in 3D plant phenotyping applications, which reduces interference in subsequent algorithms and improves the accuracy of plant phenotypes measurement. Deep learning-based point cloud denoising algorithms have shown excellent denoising performance on simple CAD models. However, these algorithms suffer from issues including over-smoothing or shrinkage and low efficiency when applied on density uneven, incomplete, various types of noise and complex plant point clouds. We proposed a plant point cloud denoising network (PDN) based on point cloud density gradient field learning, which can effectively address the challenges posed by plant point clouds. PDN consists of three main modules: point density feature (PDF) exception module, umbrella operator feature (UOF) computation module, and point density gradient (DG) estimation module. The performance of PDN was evaluated in experiments using point clouds of multiple plant species with noise of different types. Under different levels of Gaussian noise, our method achieved a relative performance improvement of 7.6%-19.3% compared to the state-of-the-art baseline methods, reaching state-of-the-art denoising performance. For noise of different types, the majority of our denoising results outperformed the baseline methods. In addition, our method was 0.5 and 8.6 times faster than the baseline methods when processing point clouds with low and high noise level, respectively. The good robustness, generalization, and computational efficacy of PDN are expected to facilitate the acquisition of high-precision 3D point clouds for various plant species, enhance the versatility of 3D phenotyping methods, improve the accuracy of the measurement of structural phenotypes, and increase the throughput of data processing, therefore facilitate the development of modern breeding research. The source code and the datasets used in this study is available on GitHub at https://github.com/suetme/PDN-plant-denoising-net.}, journal={ISPRS JOURNAL OF PHOTOGRAMMETRY AND REMOTE SENSING}, author={Wu, Jianeng and Xiang, Lirong and You, Hui and Tang, Lie and Gai, Jingyao}, year={2024}, month={Apr}, pages={282–299} } @article{he_gage_rellan-alvarez_xiang_2024, title={Swin-Roleaf: A new method for characterizing leaf azimuth angle in large-scale maize plants}, volume={224}, ISSN={["1872-7107"]}, url={https://doi.org/10.1016/j.compag.2024.109120}, DOI={10.1016/j.compag.2024.109120}, journal={COMPUTERS AND ELECTRONICS IN AGRICULTURE}, author={He, Weilong and Gage, Joseph L. 
and Rellan-Alvarez, Ruben and Xiang, Lirong}, year={2024}, month={Sep} } @article{chen_xiang_zhao_liu_jia_2024, title={Unraveling Nature's Color Palette: The Chemistry, Biosynthesis and Applications in Health Promotion of Anthocyanins-A Comprehensive Review}, volume={9}, ISSN={["1525-6103"]}, DOI={10.1080/87559129.2024.2404471}, journal={FOOD REVIEWS INTERNATIONAL}, author={Chen, Bojian and Xiang, Lirong and Zhao, Danyue and Liu, Zunying and Jia, Fei}, year={2024}, month={Sep} } @misc{xiang_wang_2023, title={A review of three-dimensional vision techniques in food and agriculture applications}, volume={5}, ISSN={["2772-3755"]}, DOI={10.1016/j.atech.2023.100259}, abstractNote={In recent years, three-dimensional (3D) machine vision techniques have been widely employed in agriculture and food systems, leveraging advanced deep learning technologies. However, with the rapid development of three-dimensional (3D) imaging techniques, the lack of a systematic review has hindered our ability to identify the most suitable imaging systems for specific agricultural and food applications. In this review, a variety of 3D imaging techniques are introduced, with their working principles and applications in agriculture and food systems. These techniques include Structure lighting-based 3D imaging, Multiview 3D imaging system, Time of Flight (ToF)-based 3D imaging system, Lighting Detection and Ranging (LiDAR), and Depth estimation from monocular image. Furthermore, the three-dimensional image analysis methods applied to these 3D imaging techniques are described and discussed in this review.}, journal={SMART AGRICULTURAL TECHNOLOGY}, author={Xiang, Lirong and Wang, Dongyi}, year={2023}, month={Oct} } @article{lin_li_huang_cheng_xiang_ye_weng_2023, title={Early Detection of Rice Blast Using a Semi-Supervised Contrastive Unpaired Translation Iterative Network Based on UAV Images}, volume={12}, ISSN={["2223-7747"]}, DOI={10.3390/plants12213675}, abstractNote={Rice blast has caused major production losses in rice, and thus the early detection of rice blast plays a crucial role in global food security. In this study, a semi-supervised contrastive unpaired translation iterative network is specifically designed based on unmanned aerial vehicle (UAV) images for rice blast detection. It incorporates multiple critic contrastive unpaired translation networks to generate fake images with different disease levels through an iterative process of data augmentation. These generated fake images, along with real images, are then used to establish a detection network called RiceBlastYolo. Notably, the RiceBlastYolo model integrates an improved fpn and a general soft labeling approach. The results show that the detection precision of RiceBlastYolo is 99.51% under intersection over union (IOU0.5) conditions and the average precision is 98.75% under IOU0.5–0.9 conditions. The precision and recall rates are respectively 98.23% and 99.99%, which are higher than those of common detection models (YOLO, YOLACT, YOLACT++, Mask R-CNN, and Faster R-CNN). Additionally, external data also verified the ability of the model. 
The findings demonstrate that our proposed model can accurately identify rice blast under field-scale conditions.}, number={21}, journal={PLANTS-BASEL}, author={Lin, Shaodan and Li, Jiayi and Huang, Deyao and Cheng, Zuxin and Xiang, Lirong and Ye, Dapeng and Weng, Haiyong}, year={2023}, month={Nov} } @article{xiang_gai_bao_yu_schnable_tang_2023, title={Field-based robotic leaf angle detection and characterization of maize plants using stereo vision and deep convolutional neural networks}, volume={2}, ISSN={["1556-4967"]}, url={http://dx.doi.org/10.1002/rob.22166}, DOI={10.1002/rob.22166}, abstractNote={Maize (Zea mays L.) is one of the three major cereal crops in the world. Leaf angle is an important architectural trait of crops due to its substantial role in light interception by the canopy and hence photosynthetic efficiency. Traditionally, leaf angle has been measured using a protractor, a process that is both slow and laborious. Efficiently measuring leaf angle under field conditions via imaging is challenging due to leaf density in the canopy and the resulting occlusions. However, advances in imaging technologies and machine learning have provided new tools for image acquisition and analysis that could be used to characterize leaf angle using three‐dimensional (3D) models of field‐grown plants. In this study, PhenoBot 3.0, a robotic vehicle designed to traverse between pairs of agronomically spaced rows of crops, was equipped with multiple tiers of PhenoStereo cameras to capture side‐view images of maize plants in the field. PhenoStereo is a customized stereo camera module with integrated strobe lighting for high‐speed stereoscopic image acquisition under variable outdoor lighting conditions. An automated image processing pipeline (AngleNet) was developed to measure leaf angles of nonoccluded leaves. In this pipeline, a novel representation form of leaf angle as a triplet of keypoints was proposed. The pipeline employs convolutional neural networks to detect each leaf angle in two‐dimensional images and 3D modeling approaches to extract quantitative data from reconstructed models. Satisfactory accuracies in terms of correlation coefficient (r) and mean absolute error (MAE) were achieved for leaf angle and internode heights. Our study demonstrates the feasibility of using stereo vision to investigate the distribution of leaf angles in maize under field conditions. The proposed system is an efficient alternative to traditional leaf angle phenotyping and thus could accelerate breeding for improved plant architecture.}, journal={JOURNAL OF FIELD ROBOTICS}, author={Xiang, Lirong and Gai, Jingyao and Bao, Yin and Yu, Jianming and Schnable, Patrick S. S. and Tang, Lie}, year={2023}, month={Feb} } @article{wang_xiang_morota_wickens_cushon_brooks_yu_2023, title={Shinyanimalcv: Interactive Web Application for Object Detection and Three-Dimensional Visualization of Animals Using Computer Vision}, volume={101}, ISSN={["1525-3163"]}, DOI={10.1093/jas/skad281.294}, abstractNote={Abstract Precision livestock farming applies integrated sensors and information technology to provide individualized animal care in a timely manner. A computer vision system, a non-intrusive and cost-efficient sensor technology, may be useful for monitoring animals to support farm management decisions.
While the advancement in camera sensors provides a great opportunity for producers to improve animal health and welfare sustainably, the limited availability of user-friendly image data processing software tools substantially hinders the implementation of computer vision in livestock production systems. The objective of this study was to develop ShinyAnimalCV, which is a Shiny-based interactive animal computer vision web application. This software tool offers a user-friendly graphical user interface for object detection and three-dimensional visualization. The object detection module employs the Mask-RCNN to precisely segment the masks (regions) of the focal animals from two-dimensional images. A Python-based computer vision library OpenCV was used to draw minimum bounding boxes covering the segmented masks for object detection. The three-dimensional visualization module takes the depth map file captured from a top-view three-dimensional camera as an input, which contains the numerical distances between the camera and the objects (animals and background). The depth map file was first converted to a heatmap image, followed by identifying and segmenting the animal from the background using the Mask-RCNN model. The object detection module returns detection results, including the detected animal’s location, class/type, and morphological traits (e.g., body length and width). The visualization module generates a segmented mask to extract the height of the animal from the depth map file, which can be used to interactively visualize the three-dimensional surface of the animal and estimate its morphological traits, including length, width, height, and volume. By integrating these two modules into R Shiny, we deployed ShinyAnimalCV on a cloud server with pre-trained Mask-RCNN models using pig and cattle data to allow users to upload custom data and perform object detection and three-dimensional surface visualization. The features extracted by ShinyAnimalCV are expected to be useful for performing animal identification, feed intake monitoring, body weight predictions, and body condition score estimations. We conclude that the newly developed ShinyAnimalCV could facilitate the application of computer vision in the animal science community.}, journal={JOURNAL OF ANIMAL SCIENCE}, author={Wang, Jin and Xiang, Lirong and Morota, Gota and Wickens, Carissa and Cushon, Emily and Brooks, Samantha and Yu, Haipeng}, year={2023}, month={Nov}, pages={244–245} } @article{gai_wang_xie_xiang_wang_2023, title={Spectroscopic determination of chlorophyll content in sugarcane leaves for drought stress detection}, volume={11}, ISSN={["1573-1618"]}, DOI={10.1007/s11119-023-10082-0}, journal={PRECISION AGRICULTURE}, author={Gai, Jingyao and Wang, Jingyong and Xie, Sasa and Xiang, Lirong and Wang, Ziting}, year={2023}, month={Nov} } @article{ye_weng_xiang_jia_xu_2023, title={Synchronously Predicting Tea Polyphenol and Epigallocatechin Gallate in Tea Leaves Using Fourier Transform-Near-Infrared Spectroscopy and Machine Learning}, volume={28}, ISSN={["1420-3049"]}, DOI={10.3390/molecules28145379}, abstractNote={Tea polyphenol and epigallocatechin gallate (EGCG) were considered as key components of tea. The rapid prediction of these two components can be beneficial for tea quality control and product development for tea producers, breeders and consumers. 
This study aimed to develop reliable models for tea polyphenols and EGCG content prediction during the breeding process using Fourier Transform–near infrared (FT-NIR) spectroscopy combined with machine learning algorithms. Various spectral preprocessing methods including Savitzky–Golay smoothing (SG), standard normal variate (SNV), vector normalization (VN), multiplicative scatter correction (MSC) and first derivative (FD) were applied to improve the quality of the collected spectra. Partial least squares regression (PLSR) and least squares support vector regression (LS-SVR) were introduced to establish models for tea polyphenol and EGCG content prediction based on different preprocessed spectral data. Variable selection algorithms, including competitive adaptive reweighted sampling (CARS) and random forest (RF), were further utilized to identify key spectral bands to improve the efficiency of the models. The results demonstrate that the optimal model for tea polyphenols calibration was the LS-SVR with Rp = 0.975 and RPD = 4.540 based on SG-smoothed full spectra. For EGCG detection, the best model was the LS-SVR with Rp = 0.936 and RPD = 2.841 using full original spectra as model inputs. The application of variable selection algorithms further improved the predictive performance of the models. The LS-SVR model for tea polyphenols prediction with Rp = 0.978 and RPD = 4.833 used 30 CARS-selected variables, while the LS-SVR model build on 27 RF-selected variables achieved the best predictive ability with Rp = 0.944 and RPD = 3.049, respectively, for EGCG prediction. The results demonstrate a potential of FT-NIR spectroscopy combined with machine learning for the rapid screening of genotypes with high tea polyphenol and EGCG content in tea leaves.}, number={14}, journal={MOLECULES}, author={Ye, Sitan and Weng, Haiyong and Xiang, Lirong and Jia, Liangquan and Xu, Jinchai}, year={2023}, month={Jul} } @article{wang_hu_xiang_morota_brooks_wickens_miller-cushon_yu_2024, title={Technical note: ShinyAnimalCV: open-source cloud-based web application for object detection, segmentation, and three-dimensional visualization of animals using computer vision}, volume={102}, ISSN={["1525-3163"]}, DOI={10.1093/jas/skad416}, abstractNote={Abstract Computer vision (CV), a non-intrusive and cost-effective technology, has furthered the development of precision livestock farming by enabling optimized decision-making through timely and individualized animal care. The availability of affordable two- and three-dimensional camera sensors, combined with various machine learning and deep learning algorithms, has provided a valuable opportunity to improve livestock production systems. However, despite the availability of various CV tools in the public domain, applying these tools to animal data can be challenging, often requiring users to have programming and data analysis skills, as well as access to computing resources. Moreover, the rapid expansion of precision livestock farming is creating a growing need to educate and train animal science students in CV. This presents educators with the challenge of efficiently demonstrating the complex algorithms involved in CV. Thus, the objective of this study was to develop ShinyAnimalCV, an open-source cloud-based web application designed to facilitate CV teaching in animal science. 
This application provides a user-friendly interface for performing CV tasks, including object segmentation, detection, three-dimensional surface visualization, and extraction of two- and three-dimensional morphological features. Nine pre-trained CV models using top-view animal data are included in the application. ShinyAnimalCV has been deployed online using cloud computing platforms. The source code of ShinyAnimalCV is available on GitHub, along with detailed documentation on training CV models using custom data and deploying ShinyAnimalCV locally to allow users to fully leverage the capabilities of the application. ShinyAnimalCV can help to support the teaching of CV, thereby laying the groundwork to promote the adoption of CV in the animal science community.}, journal={JOURNAL OF ANIMAL SCIENCE}, author={Wang, Jin and Hu, Yu and Xiang, Lirong and Morota, Gota and Brooks, Samantha A. and Wickens, Carissa L. and Miller-Cushon, Emily K. and Yu, Haipeng}, year={2024}, month={Jan} } @inproceedings{xiang_liu_raj_tang_2022, place={Houston, TX}, title={Detection and characterization of maize plant architectural traits in the field using stereo vision and deep convolutional neural networks}, booktitle={2022 ASABE Annual International Meeting}, author={Xiang, L. and Liu, X. and Raj, A. and Tang, L.}, year={2022} } @inproceedings{liu_xiang_raj_tang_2022, title={In-field soybean seed pod phenotyping on harvest stocks using 3D imaging and deep learning}, booktitle={2022 ASABE Annual International Meeting}, author={Liu, X. and Xiang, L. and Raj, A. and Tang, L.}, year={2022} } @inproceedings{xiang_liu_raj_yu_schnable_tang_2022, title={Robotic Field-based Plant Architectural Traits Characterization Using Stereo Vision and Deep Neural Networks}, booktitle={Fourth International Workshop on Machine Learning for Cyber-Agricultural Systems (MLCAS2022)}, author={Xiang, L. and Liu, X. and Raj, A. and Yu, J. and Schnable, P.S. and Tang, L.}, year={2022} } @article{wang_xiang_tang_jiang_2021, title={A Convolutional Neural Network-Based Method for Corn Stand Counting in the Field}, volume={21}, ISSN={1424-8220}, url={http://dx.doi.org/10.3390/s21020507}, DOI={10.3390/s21020507}, abstractNote={Accurate corn stand count in the field at early season is of great interest to corn breeders and plant geneticists. However, the commonly used manual counting method is time consuming, laborious, and prone to error. Nowadays, unmanned aerial vehicles (UAV) tend to be a popular base for plant-image-collecting platforms. However, detecting corn stands in the field is a challenging task, primarily because of camera motion, leaf fluttering caused by wind, shadows of plants caused by direct sunlight, and the complex soil background. As for the UAV system, there are mainly two limitations for early seedling detection and counting. First, flying height cannot ensure a high resolution for small objects. It is especially difficult to detect early corn seedlings at around one week after planting, because the plants are small and difficult to differentiate from the background. Second, the battery life and payload of UAV systems cannot support long-duration online counting work. In this research project, we developed an automated, robust, and high-throughput method for corn stand counting based on color images extracted from video clips. A pipeline developed based on the YoloV3 network and Kalman filter was used to count corn seedlings online. 
The results demonstrate that our method is accurate and reliable for stand counting, achieving an accuracy of over 98% at growth stages V2 and V3 (vegetative stages with two and three visible collars) with an average frame rate of 47 frames per second (FPS). This pipeline can also be mounted easily on manned cart, tractor, or field robotic systems for online corn counting.}, number={2}, journal={Sensors}, publisher={MDPI AG}, author={Wang, Le and Xiang, Lirong and Tang, Lie and Jiang, Huanyu}, year={2021}, month={Jan}, pages={507} } @inproceedings{xiang_gai_bao_yu_schnable_tang_2021, title={AngleNet: Leaf Angle Detection and Characterization of Maize Plants in the Field Based on Stereo Vision and Deep Convolutional Neural Network}, booktitle={2021 ASABE Annual International Virtual Meeting}, author={Xiang, L. and Gai, J. and Bao, Y. and Yu, J. and Schnable, P.S. and Tang, L.}, year={2021} } @misc{bao_gai_xiang_tang_2021, title={Field Robotic Systems for High-Throughput Plant Phenotyping: A Review and a Case Study}, ISBN={9783030737337 9783030737344}, ISSN={2662-3188 2662-3196}, url={http://dx.doi.org/10.1007/978-3-030-73734-4_2}, DOI={10.1007/978-3-030-73734-4_2}, abstractNote={Continuous crop improvement is essential to meet the growing demands for food, feed, fuel, and fiber around the globe. High-throughput plant phenotyping (HTPP) aims to break the bottleneck in plant breeding programs where phenotypic data are mostly collected with inefficient manual methods. With the recent rapid advancements and applications of robotics in many industries, field robots are also expected to bring transformational changes to HTPP applications. This chapter presents an updated review of the infield ground-based robotic HTPP systems developed so far. Moreover, we report a case study of an autonomous mobile phenotyping robot, PhenoBot 3.0, for row crop phenotyping, focusing on the development and evaluation of the navigation system for the articulated steering, a four-wheel-drive robot with an extremely tall sensor mast. Several navigation techniques were integrated to achieve robustness at different corn plant growth stages. Additionally, we briefly review the major sensing technologies for field-based HTPP and present a vision sensor, PhenoStereo, to show the promising potential of integrating conventional stereo imaging with the state-of-the-art visual perception techniques for plant organ phenotyping applications. As an example, we show that a highly accurate estimation of sorghum stem diameter can be achieved with PhenoStereo.
With this chapter, our goal is to provide valuable insights and guidance on the development of infield ground robotic HTPP systems to researchers and practitioners.}, journal={Concepts and Strategies in Plant Sciences}, publisher={Springer International Publishing}, author={Bao, Yin and Gai, Jingyao and Xiang, Lirong and Tang, Lie}, year={2021}, pages={13–38} } @article{xiang_tang_gai_wang_2021, title={Measuring Stem Diameter of Sorghum Plants in the Field Using a High-Throughput Stereo Vision System}, volume={64}, ISSN={2151-0040}, url={http://dx.doi.org/10.13031/trans.14156}, DOI={10.13031/trans.14156}, abstractNote={Highlights: A custom-built camera module named PhenoStereo was developed for high-throughput field-based plant phenotyping. Novel integration of strobe lights facilitated application of PhenoStereo in various environmental conditions. Image-derived stem diameters were found to have high correlations with ground truth, which outperformed any previously reported sensing approach. PhenoStereo showed promising potential to characterize a broad spectrum of plant phenotypes. Abstract: The stem diameter of sorghum plants is an important trait for evaluation of stalk strength and biomass potential, but it is a challenging sensing task to automate in the field due to the complexity of the imaging object and the environment. In recent years, stereo vision has offered a viable three-dimensional (3D) solution due to its high spatial resolution and wide selection of camera modules. However, the performance of in-field stereo imaging for plant phenotyping is adversely affected by textureless regions, occlusion of plants, variable outdoor lighting, and wind conditions. In this study, a portable stereo imaging module named PhenoStereo was developed for high-throughput field-based plant phenotyping. PhenoStereo features a self-contained embedded design, which makes it capable of capturing images at 14 stereoscopic frames per second. In addition, a set of customized strobe lights is integrated to overcome lighting variations and enable the use of high shutter speed to overcome motion blur. PhenoStereo was used to acquire a set of sorghum plant images, and an automated point cloud data processing pipeline was developed to automatically extract the stems and then quantify their diameters via an optimized 3D modeling process. The pipeline employed a mask region convolutional neural network (Mask R-CNN) for detecting stalk contours and a semi-global block matching (SGBM) stereo matching algorithm for generating disparity maps. The correlation coefficient (r) between the image-derived stem diameters and the ground truth was 0.97 with a mean absolute error (MAE) of 1.44 mm, which outperformed any previously reported sensing approach. These results demonstrate that, with proper customization, stereo vision can be an effective sensing method for field-based plant phenotyping using high-fidelity 3D models reconstructed from stereoscopic images. Based on the results from sorghum plant stem diameter sensing, this proposed stereo sensing approach can likely be extended to characterize a broad range of plant phenotypes, such as the leaf angle and tassel shape of maize plants and the seed pods and stem nodes of soybean plants.
Keywords: Field-based high-throughput phenotyping, Point cloud, Stem diameter, Stereo vision.}, number={6}, journal={Transactions of the ASABE}, publisher={American Society of Agricultural and Biological Engineers (ASABE)}, author={Xiang, Lirong and Tang, Lie and Gai, Jingyao and Wang, Le}, year={2021}, pages={1999–2010} } @article{xiang_nolan_bao_elmore_tuel_gai_shah_wang_huser_hurd_et al._2021, title={Robotic Assay for Drought (RoAD): an automated phenotyping system for brassinosteroid and drought responses}, volume={107}, ISSN={0960-7412 1365-313X}, url={http://dx.doi.org/10.1111/tpj.15401}, DOI={10.1111/tpj.15401}, abstractNote={Brassinosteroids (BRs) are a group of plant steroid hormones involved in regulating growth, development, and stress responses. Many components of the BR pathway have previously been identified and characterized. However, BR phenotyping experiments are typically performed in a low‐throughput manner, such as on Petri plates. Additionally, the BR pathway affects drought responses, but drought experiments are time consuming and difficult to control. To mitigate these issues and increase throughput, we developed the Robotic Assay for Drought (RoAD) system to perform BR and drought response experiments in soil‐grown Arabidopsis plants. RoAD is equipped with a robotic arm, a rover, a bench scale, a precisely controlled watering system, an RGB camera, and a laser profilometer. It performs daily weighing, watering, and imaging tasks and is capable of administering BR response assays by watering plants with Propiconazole (PCZ), a BR biosynthesis inhibitor. We developed image processing algorithms for both plant segmentation and phenotypic trait extraction to accurately measure traits including plant area, plant volume, leaf length, and leaf width. We then applied machine learning algorithms that utilize the extracted phenotypic parameters to identify image‐derived traits that can distinguish control, drought‐treated, and PCZ‐treated plants. We carried out PCZ and drought experiments on a set of BR mutants and Arabidopsis accessions with altered BR responses. Finally, we extended the RoAD assays to perform BR response assays using PCZ in Zea mays (maize) plants. This study establishes an automated and non‐invasive robotic imaging system as a tool to accurately measure morphological and growth‐related traits of Arabidopsis and maize plants in 3D, providing insights into the BR‐mediated control of plant growth and stress responses.}, number={6}, journal={The Plant Journal}, publisher={Wiley}, author={Xiang, Lirong and Nolan, Trevor M. and Bao, Yin and Elmore, Mitch and Tuel, Taylor and Gai, Jingyao and Shah, Dylan and Wang, Ping and Huser, Nicole M. and Hurd, Ashley M. and et al.}, year={2021}, month={Aug}, pages={1837–1853} } @article{gai_xiang_tang_2021, title={Using a depth camera for crop row detection and mapping for under-canopy navigation of agricultural robotic vehicle}, volume={188}, ISSN={0168-1699}, url={http://dx.doi.org/10.1016/j.compag.2021.106301}, DOI={10.1016/j.compag.2021.106301}, abstractNote={Computer vision provides local environmental information for robotic navigation in crop fields. It is particularly useful for robots operating under canopies of tall plants such as corns (Zea mays) and sorghums (Sorghum bicolor), where GPS signal is not always receivable. However, the development of under-canopy navigation systems is still an open research area.
The key contribution of our work is the development of a vision-based system for under-canopy navigation using a Time-of-Flight (ToF) camera. In the system, a novel algorithm was used to detect parallel crop rows from depth images taken under crop canopies. Two critical tasks in navigation were accomplished based on the detection results: 1) generating crop field maps as occupancy grids when reliable robot localization is available (from other sources such as GPS and IMU), and 2) providing inter-row vehicle positioning data when the field map is available and the localization is not reliable. The proposed system was evaluated in field tests. The test results showed that the proposed system was able to map the crop rows with mean absolute errors (MAE) of 3.4 cm and 3.6 cm in corn and sorghum fields, respectively. It provides lateral positioning data with MAE of 5.0 cm and 4.2 cm for positioning in corn and sorghum crop rows, respectively. The potential and limitations of using ToF cameras for under-canopy navigation were discussed.}, journal={Computers and Electronics in Agriculture}, publisher={Elsevier BV}, author={Gai, Jingyao and Xiang, Lirong and Tang, Lie}, year={2021}, month={Sep}, pages={106301} } @inproceedings{xiang_gai_tang_2020, place={Tucson, AZ}, title={Developing a high-throughput stereo vision system for plant phenotyping}, booktitle={Phenome 2020}, author={Xiang, L. and Gai, J. and Tang, L.}, year={2020} } @inproceedings{gai_tuel_xiang_tang_2020, title={Developing the Control System of an Autonomous Robot for Field-based Maize/Sorghum Plant Phenotyping}, booktitle={2020 ASABE Annual International Virtual Meeting}, author={Gai, J. and Tuel, T. and Xiang, L. and Tang, L.}, year={2020} } @inproceedings{gai_tuel_xiang_tang_2020, title={PhenoBot 3.0 - an Autonomous Robot for Field-based Maize/Sorghum Plant Phenotyping}, booktitle={Phenome 2020}, author={Gai, J. and Tuel, T. and Xiang, L. and Tang, L.}, year={2020} } @inproceedings{xiang_tang_gai_wang_2020, title={PhenoStereo: a high-throughput stereo vision system for field-based plant phenotyping - with an application in sorghum stem diameter estimation}, url={http://dx.doi.org/10.13031/aim.202001190}, DOI={10.13031/aim.202001190}, abstractNote={In recent years, three-dimensional (3D) sensing has gained great interest in plant phenotyping because it can represent the 3D nature of plant architecture. Among all available 3D imaging technologies, stereo vision offers a viable solution due to its high spatial resolution and wide selection of camera modules. However, the performance of in-field stereo imaging for plant phenotyping has been adversely affected by textureless regions and occlusions of plants, and variable outdoor lighting and wind conditions. In this research, a portable stereo imaging module named PhenoStereo was developed for high-throughput field-based plant phenotyping. PhenoStereo featured a self-contained embedded design, which made it capable of capturing images at 14 stereoscopic frames per second. In addition, a set of customized strobe lights was integrated to overcome lighting variations and enable the use of high shutter speed to overcome motion blurs. The stem diameter of sorghum plants is an important trait for stalk strength and biomass potential evaluation but has been identified as a challenging sensing task to automate in the field due to the complexity of the imaging object and the environment.
To that end, PhenoStereo was used to acquire a set of sorghum plant images, and an automated point cloud data processing pipeline was also developed to automatically extract the stems and then quantify their diameters via an optimized 3D modeling process. The pipeline employed a Mask R-CNN deep learning network for detecting stalk contours and a Semi-Global Block Matching stereo matching algorithm for generating disparity maps. The correlation coefficient (r) between the image-derived stem diameters and the ground truth was 0.97 with a mean absolute error (MAE) of 1.44 mm, which outperformed any previously reported sensing approach. These results demonstrated that, with proper customization, stereo vision can be a highly desirable sensing method for field-based plant phenotyping using high-fidelity 3D models reconstructed from stereoscopic images. With the promising results from sorghum plant stem diameter sensing, this proposed stereo sensing approach can likely be extended to characterize a broad spectrum of plant phenotypes such as leaf angle and tassel shape of maize plants and seed pods and stem nodes of soybean plants.}, booktitle={2020 ASABE Annual International Virtual Meeting, July 13-15, 2020}, publisher={American Society of Agricultural and Biological Engineers}, author={Xiang, Lirong and Tang, Lie and Gai, Jingyao and Wang, Le}, year={2020} } @article{xiang_bao_tang_ortiz_salas-fernandez_2019, title={Automated morphological traits extraction for sorghum plants via 3D point cloud data analysis}, volume={162}, ISSN={0168-1699}, url={http://dx.doi.org/10.1016/j.compag.2019.05.043}, DOI={10.1016/j.compag.2019.05.043}, abstractNote={The ability to correlate morphological traits of plants with their genotypes plays an important role in plant phenomics research. However, measuring phenotypes manually is time-consuming, labor intensive, and prone to human errors. The 3D surface model of a plant can potentially provide an efficient and accurate way to digitize plant architecture. This study focused on the extraction of morphological traits at multiple developmental timepoints from sorghum plants grown under controlled conditions. A non-destructive 3D scanning system using a commodity depth camera was implemented to capture sequential images of a plant at different heights. To overcome the challenges of overlapping tillers, an algorithm was developed to first search for the stem in the merged point cloud data, and then the associated leaves. A 3D skeletonization algorithm was created by slicing the point cloud along the vertical direction, and then linking the connected Euclidean clusters between adjacent layers. Based on the structural clues of the sorghum plant, heuristic rules were implemented to separate overlapping tillers. Finally, each individual leaf was automatically segmented, and multiple parameters were obtained from the skeleton and the reconstructed point cloud including: plant height, stem diameter, leaf angle, and leaf surface area. The results showed high correlations between the manual measurements and the estimated values generated by the system.
Statistical analyses between biomass and extracted traits revealed that stem volume was a promising predictor of shoot fresh weight and shoot dry weight, and the total leaf area was strongly correlated to shoot biomass at early stages.}, journal={Computers and Electronics in Agriculture}, publisher={Elsevier BV}, author={Xiang, Lirong and Bao, Yin and Tang, Lie and Ortiz, Diego and Salas-Fernandez, Maria G.}, year={2019}, month={Jul}, pages={951–961} } @inproceedings{xiang_bao_nolan_yin_tang_2019, title={Robotic imaging-based methods for leaf segmentation and growth tracking in Arabidopsis}, booktitle={2019 ASABE Annual International Meeting}, author={Xiang, L. and Bao, Y. and Nolan, T. and Yin, Y. and Tang, L.}, year={2019} } @inproceedings{xiang_bao_tang_salas-fernandez_2018, title={Automated morphological trait extraction for sorghum plants via 3D point cloud data analysis}, booktitle={2018 ASABE Annual International Meeting}, author={Xiang, L. and Bao, Y. and Tang, L. and Salas-Fernandez, M.G.}, year={2018} } @article{hu_wang_xiang_wu_jiang_2018, title={Automatic Non-Destructive Growth Measurement of Leafy Vegetables Based on Kinect}, volume={18}, ISSN={1424-8220}, url={http://dx.doi.org/10.3390/s18030806}, DOI={10.3390/s18030806}, abstractNote={Non-destructive plant growth measurement is essential for plant growth and health research. As a 3D sensor, Kinect v2 has huge potentials in agriculture applications, benefited from its low price and strong robustness. The paper proposes a Kinect-based automatic system for non-destructive growth measurement of leafy vegetables. The system used a turntable to acquire multi-view point clouds of the measured plant. Then a series of suitable algorithms were applied to obtain a fine 3D reconstruction for the plant, while measuring the key growth parameters including relative/absolute height, total/projected leaf area and volume. In experiment, 63 pots of lettuce in different growth stages were measured. The result shows that the Kinect-measured height and projected area have fine linear relationship with reference measurements. While the measured total area and volume both follow power law distributions with reference data. All these data have shown good fitting goodness (R2 = 0.9457–0.9914). In the study of biomass correlations, the Kinect-measured volume was found to have a good power law relationship (R2 = 0.9281) with fresh weight. In addition, the system practicality was validated by performance and robustness analysis.}, number={3}, journal={Sensors}, publisher={MDPI AG}, author={Hu, Yang and Wang, Le and Xiang, Lirong and Wu, Qian and Jiang, Huanyu}, year={2018}, month={Mar}, pages={806} } @article{xiang_ma_zhao_liu_he_feng_2017, title={Comparative Analysis of Chemometrics Method on Heavy Metal Detection in Soil with Laser-Induced Breakdown Spectroscopy}, volume={37}, url={http://www.gpxygpfx.com/EN/Y2017/V37/I12/3871}, number={12}, journal={Spectroscopy and Spectral Analysis}, author={Xiang, L.R. and Ma, Z.H. and Zhao, X.Y. and Liu, F. and He, Y. and Feng, L.}, year={2017}, pages={3871–3876} }