@article{wu_lin_jin_weng_xu_zhang_xu_xiang_sun_ye_2024, title={CCHA YOLO for mycelium clamp connection (CC) and hyphae Autolysis (HA) detection under microscopy imaging and web deployment}, volume={201}, ISSN={1095-9149}, DOI={10.1016/j.microc.2024.110483}, abstractNote={Microscopic examination is commonly employed to assess edible fungal mycelium vitality. However, this method can become time-intensive when evaluating a substantial volume of hyphae samples, which implies an urgent need to develop an accurate and automatic determination method. The challenges of mycelium detection arise mostly from multi-scale target detection under various magnifications. In this study, microscopic images of 10 edible fungi strains under different magnification scales or stain colors were collected to create a dataset. An improved multi-scale object detection model for mycelium vitality detection, CCHA YOLO, was proposed by enhancing the Backbone via combining YOLOv8m and Swin Transformer (SwinT). Meanwhile, the Convolutional Block Attention Module (CBAM) was introduced to the Head, and post-processing techniques were optimized to further improve model performance. The results indicated that CCHA YOLO achieved an mAP50:95 (mean average precision) of 89.02% with a computational load of 98.61 GFLOPs, a 16.67% accuracy improvement over the baseline YOLOv8m at the cost of only 11.3 additional GFLOPs. In addition, CCHA YOLO was deployed as a web-based edge application to facilitate the detection of microscopic images, highlighting its practical applicability in determining mycelium vitality.}, journal={MICROCHEMICAL JOURNAL}, author={Wu, Libin and Lin, Shaodan and Jin, Wensong and Weng, Haiyong and Xu, Jinchai and Zhang, Lintong and Xu, Yawen and Xiang, Lirong and Sun, Shujing and Ye, Dapeng}, year={2024}, month={Jun} } @article{li_xu_xiang_chen_zhuang_yin_li_2024, title={Foundation models in smart agriculture: Basics, opportunities, and challenges}, volume={222}, ISSN={1872-7107}, DOI={10.1016/j.compag.2024.109032}, abstractNote={The past decade has witnessed the rapid development and adoption of machine and deep learning (ML & DL) methodologies in agricultural systems, showcased by great successes in applications such as smart crop management, smart plant breeding, smart livestock farming, precision aquaculture farming, and agricultural robotics. However, these conventional ML/DL models have certain limitations: they heavily rely on large, costly-to-acquire labeled datasets for training, require specialized expertise for development and maintenance, and are mostly tailored for specific tasks, thus lacking generalizability. Recently, large pre-trained models, also known as foundation models (FMs), have demonstrated remarkable successes in language, vision, and decision-making tasks across various domains. These models are trained on a vast amount of data from multiple domains and modalities. Once trained, they can accomplish versatile tasks with just minor fine-tuning and minimal task-specific labeled data. Despite their proven effectiveness and huge potential, there has been little exploration of applying FMs to agricultural artificial intelligence (AI). Therefore, this study aims to explore the potential of FMs in the field of smart agriculture. In particular, conceptual tools and technical background are presented to facilitate the understanding of the problem space and uncover new research directions in this field.
To this end, recent FMs in the general computer science (CS) domain are reviewed, and the models are categorized into four categories: language FMs, vision FMs, multimodal FMs, and reinforcement learning FMs. Subsequently, the process of developing agriculture FMs (AFMs) is outlined and their potential applications in smart agriculture are discussed. In addition, the unique challenges and risks associated with developing AFMs are discussed, including model training, validation, and deployment. Through this study, the advancement of AI in agriculture is explored by introducing AFMs as a promising paradigm that can significantly mitigate the reliance on extensive labeled datasets and enhance the efficiency, effectiveness, and generalization of agricultural AI systems. To facilitate further research, a well-classified and actively updated list of papers on AFMs is organized and accessible at https://github.com/JiajiaLi04/Agriculture-Foundation-Models.}, journal={COMPUTERS AND ELECTRONICS IN AGRICULTURE}, author={Li, Jiajia and Xu, Mingle and Xiang, Lirong and Chen, Dong and Zhuang, Weichao and Yin, Xunyuan and Li, Zhaojian}, year={2024}, month={Jul} } @article{wu_xiang_you_tang_gai_2024, title={Plant-Denoising-Net (PDN): A plant point cloud denoising network based on density gradient field learning}, volume={210}, ISSN={1872-8235}, DOI={10.1016/j.isprsjprs.2024.03.010}, journal={ISPRS JOURNAL OF PHOTOGRAMMETRY AND REMOTE SENSING}, author={Wu, Jianeng and Xiang, Lirong and You, Hui and Tang, Lie and Gai, Jingyao}, year={2024}, month={Apr}, pages={282–299} } @article{he_gage_rellan-alvarez_xiang_2024, title={Swin-Roleaf: A new method for characterizing leaf azimuth angle in large-scale maize plants}, volume={224}, ISSN={1872-7107}, url={https://doi.org/10.1016/j.compag.2024.109120}, DOI={10.1016/j.compag.2024.109120}, journal={COMPUTERS AND ELECTRONICS IN AGRICULTURE}, author={He, Weilong and Gage, Joseph L. and Rellan-Alvarez, Ruben and Xiang, Lirong}, year={2024}, month={Sep} } @article{wang_hu_xiang_morota_brooks_wickens_miller-cushon_yu_2024, title={Technical note: ShinyAnimalCV: open-source cloud-based web application for object detection, segmentation, and three-dimensional visualization of animals using computer vision}, volume={102}, ISSN={1525-3163}, DOI={10.1093/jas/skad416}, journal={JOURNAL OF ANIMAL SCIENCE}, author={Wang, Jin and Hu, Yu and Xiang, Lirong and Morota, Gota and Brooks, Samantha A. and Wickens, Carissa L. and Miller-Cushon, Emily K. and Yu, Haipeng}, year={2024}, month={Jan} } @misc{xiang_wang_2023, title={A review of three-dimensional vision techniques in food and agriculture applications}, volume={5}, ISSN={2772-3755}, DOI={10.1016/j.atech.2023.100259}, abstractNote={In recent years, three-dimensional (3D) machine vision techniques have been widely employed in agriculture and food systems, leveraging advanced deep learning technologies. However, with the rapid development of 3D imaging techniques, the lack of a systematic review has hindered our ability to identify the most suitable imaging systems for specific agricultural and food applications. In this review, a variety of 3D imaging techniques are introduced, with their working principles and applications in agriculture and food systems. These techniques include structured light-based 3D imaging, multi-view 3D imaging, Time-of-Flight (ToF)-based 3D imaging, Light Detection and Ranging (LiDAR), and depth estimation from monocular images.
Furthermore, the three-dimensional image analysis methods applied to these 3D imaging techniques are described and discussed in this review.}, journal={SMART AGRICULTURAL TECHNOLOGY}, author={Xiang, Lirong and Wang, Dongyi}, year={2023}, month={Oct} } @article{lin_li_huang_cheng_xiang_ye_weng_2023, title={Early Detection of Rice Blast Using a Semi-Supervised Contrastive Unpaired Translation Iterative Network Based on UAV Images}, volume={12}, ISSN={2223-7747}, DOI={10.3390/plants12213675}, abstractNote={Rice blast has caused major production losses in rice, and thus the early detection of rice blast plays a crucial role in global food security. In this study, a semi-supervised contrastive unpaired translation iterative network is specifically designed based on unmanned aerial vehicle (UAV) images for rice blast detection. It incorporates multiple critic contrastive unpaired translation networks to generate fake images with different disease levels through an iterative process of data augmentation. These generated fake images, along with real images, are then used to establish a detection network called RiceBlastYolo. Notably, the RiceBlastYolo model integrates an improved FPN (feature pyramid network) and a general soft labeling approach. The results show that the detection precision of RiceBlastYolo is 99.51% under intersection over union (IOU0.5) conditions and the average precision is 98.75% under IOU0.5–0.9 conditions. The precision and recall rates are 98.23% and 99.99%, respectively, which are higher than those of common detection models (YOLO, YOLACT, YOLACT++, Mask R-CNN, and Faster R-CNN). Additionally, external data also verified the ability of the model. The findings demonstrate that our proposed model can accurately identify rice blast under field-scale conditions.}, number={21}, journal={PLANTS-BASEL}, author={Lin, Shaodan and Li, Jiayi and Huang, Deyao and Cheng, Zuxin and Xiang, Lirong and Ye, Dapeng and Weng, Haiyong}, year={2023}, month={Nov} } @article{xiang_gai_bao_yu_schnable_tang_2023, title={Field-based robotic leaf angle detection and characterization of maize plants using stereo vision and deep convolutional neural networks}, volume={2}, ISSN={1556-4967}, url={http://dx.doi.org/10.1002/rob.22166}, DOI={10.1002/rob.22166}, journal={JOURNAL OF FIELD ROBOTICS}, author={Xiang, Lirong and Gai, Jingyao and Bao, Yin and Yu, Jianming and Schnable, Patrick S.
and Tang, Lie}, year={2023}, month={Feb} } @article{wang_xiang_morota_wickens_cushon_brooks_yu_2023, title={ShinyAnimalCV: Interactive Web Application for Object Detection and Three-Dimensional Visualization of Animals Using Computer Vision}, volume={101}, ISSN={1525-3163}, DOI={10.1093/jas/skad281.294}, journal={JOURNAL OF ANIMAL SCIENCE}, author={Wang, Jin and Xiang, Lirong and Morota, Gota and Wickens, Carissa and Cushon, Emily and Brooks, Samantha and Yu, Haipeng}, year={2023}, month={Nov}, pages={244–245} } @article{gai_wang_xie_xiang_wang_2023, title={Spectroscopic determination of chlorophyll content in sugarcane leaves for drought stress detection}, volume={11}, ISSN={1573-1618}, DOI={10.1007/s11119-023-10082-0}, journal={PRECISION AGRICULTURE}, author={Gai, Jingyao and Wang, Jingyong and Xie, Sasa and Xiang, Lirong and Wang, Ziting}, year={2023}, month={Nov} } @article{ye_weng_xiang_jia_xu_2023, title={Synchronously Predicting Tea Polyphenol and Epigallocatechin Gallate in Tea Leaves Using Fourier Transform-Near-Infrared Spectroscopy and Machine Learning}, volume={28}, ISSN={1420-3049}, DOI={10.3390/molecules28145379}, abstractNote={Tea polyphenol and epigallocatechin gallate (EGCG) are considered key components of tea. The rapid prediction of these two components can be beneficial for tea quality control and product development for tea producers, breeders and consumers. This study aimed to develop reliable models for tea polyphenol and EGCG content prediction during the breeding process using Fourier Transform–near infrared (FT-NIR) spectroscopy combined with machine learning algorithms. Various spectral preprocessing methods including Savitzky–Golay smoothing (SG), standard normal variate (SNV), vector normalization (VN), multiplicative scatter correction (MSC) and first derivative (FD) were applied to improve the quality of the collected spectra. Partial least squares regression (PLSR) and least squares support vector regression (LS-SVR) were introduced to establish models for tea polyphenol and EGCG content prediction based on different preprocessed spectral data. Variable selection algorithms, including competitive adaptive reweighted sampling (CARS) and random forest (RF), were further utilized to identify key spectral bands to improve the efficiency of the models. The results demonstrate that the optimal model for tea polyphenol calibration was the LS-SVR with Rp = 0.975 and RPD = 4.540 based on SG-smoothed full spectra. For EGCG detection, the best model was the LS-SVR with Rp = 0.936 and RPD = 2.841 using full original spectra as model inputs. The application of variable selection algorithms further improved the predictive performance of the models. The LS-SVR model for tea polyphenol prediction achieved Rp = 0.978 and RPD = 4.833 using 30 CARS-selected variables, while the LS-SVR model built on 27 RF-selected variables achieved the best predictive ability for EGCG prediction with Rp = 0.944 and RPD = 3.049.
The results demonstrate the potential of FT-NIR spectroscopy combined with machine learning for the rapid screening of genotypes with high tea polyphenol and EGCG content in tea leaves.}, number={14}, journal={MOLECULES}, author={Ye, Sitan and Weng, Haiyong and Xiang, Lirong and Jia, Liangquan and Xu, Jinchai}, year={2023}, month={Jul} } @inproceedings{xiang_liu_raj_tang_2022, place={Houston, TX}, title={Detection and characterization of maize plant architectural traits in the field using stereo vision and deep convolutional neural networks}, booktitle={2022 ASABE Annual International Meeting}, author={Xiang, L. and Liu, X. and Raj, A. and Tang, L.}, year={2022} } @inproceedings{liu_xiang_raj_tang_2022, title={In-field soybean seed pod phenotyping on harvest stocks using 3D imaging and deep learning}, booktitle={2022 ASABE Annual International Meeting}, author={Liu, X. and Xiang, L. and Raj, A. and Tang, L.}, year={2022} } @inproceedings{xiang_liu_raj_yu_schnable_tang_2022, title={Robotic Field-based Plant Architectural Traits Characterization Using Stereo Vision and Deep Neural Networks}, booktitle={Fourth International Workshop on Machine Learning for Cyber-Agricultural Systems (MLCAS2022)}, author={Xiang, L. and Liu, X. and Raj, A. and Yu, J. and Schnable, P.S. and Tang, L.}, year={2022} } @article{wang_xiang_tang_jiang_2021, title={A Convolutional Neural Network-Based Method for Corn Stand Counting in the Field}, volume={21}, ISSN={1424-8220}, url={http://dx.doi.org/10.3390/s21020507}, DOI={10.3390/s21020507}, abstractNote={Accurate corn stand count in the field at early season is of great interest to corn breeders and plant geneticists. However, the commonly used manual counting method is time-consuming, laborious, and prone to error. Nowadays, unmanned aerial vehicles (UAVs) tend to be a popular base for plant-image-collecting platforms. However, detecting corn stands in the field is a challenging task, primarily because of camera motion, leaf fluttering caused by wind, shadows of plants caused by direct sunlight, and the complex soil background. As for the UAV system, there are mainly two limitations for early seedling detection and counting. First, flying height cannot ensure a high resolution for small objects. It is especially difficult to detect early corn seedlings at around one week after planting, because the plants are small and difficult to differentiate from the background. Second, the battery life and payload of UAV systems cannot support long-duration online counting work. In this research project, we developed an automated, robust, and high-throughput method for corn stand counting based on color images extracted from video clips. A pipeline developed based on the YOLOv3 network and Kalman filter was used to count corn seedlings online. The results demonstrate that our method is accurate and reliable for stand counting, achieving an accuracy of over 98% at growth stages V2 and V3 (vegetative stages with two and three visible collars) with an average frame rate of 47 frames per second (FPS).
This pipeline can also be mounted easily on manned carts, tractors, or field robotic systems for online corn counting.}, number={2}, journal={Sensors}, publisher={MDPI AG}, author={Wang, Le and Xiang, Lirong and Tang, Lie and Jiang, Huanyu}, year={2021}, month={Jan}, pages={507} } @inproceedings{xiang_gai_bao_yu_schnable_tang_2021, title={AngleNet: Leaf Angle Detection and Characterization of Maize Plants in the Field Based on Stereo Vision and Deep Convolutional Neural Network}, booktitle={2021 ASABE Annual International Virtual Meeting}, author={Xiang, L. and Gai, J. and Bao, Y. and Yu, J. and Schnable, P.S. and Tang, L.}, year={2021} } @misc{bao_gai_xiang_tang_2021, title={Field Robotic Systems for High-Throughput Plant Phenotyping: A Review and a Case Study}, ISBN={9783030737337 9783030737344}, ISSN={2662-3188 2662-3196}, url={http://dx.doi.org/10.1007/978-3-030-73734-4_2}, DOI={10.1007/978-3-030-73734-4_2}, abstractNote={Continuous crop improvement is essential to meet the growing demands for food, feed, fuel, and fiber around the globe. High-throughput plant phenotyping (HTPP) aims to break the bottleneck in plant breeding programs where phenotypic data are mostly collected with inefficient manual methods. With the recent rapid advancements and applications of robotics in many industries, field robots are also expected to bring transformational changes to HTPP applications. This chapter presents an updated review of the in-field ground-based robotic HTPP systems developed so far. Moreover, we report a case study of an autonomous mobile phenotyping robot, PhenoBot 3.0, for row crop phenotyping, focusing on the development and evaluation of the navigation system for this articulated-steering, four-wheel-drive robot with an extremely tall sensor mast. Several navigation techniques were integrated to achieve robustness at different corn plant growth stages. Additionally, we briefly review the major sensing technologies for field-based HTPP and present a vision sensor, PhenoStereo, to show the promising potential of integrating conventional stereo imaging with state-of-the-art visual perception techniques for plant organ phenotyping applications. As an example, we show that a highly accurate estimation of sorghum stem diameter can be achieved with PhenoStereo.
With this chapter, our goal is to provide valuable insights and guidance on the development of in-field ground robotic HTPP systems to researchers and practitioners.}, journal={Concepts and Strategies in Plant Sciences}, publisher={Springer International Publishing}, author={Bao, Yin and Gai, Jingyao and Xiang, Lirong and Tang, Lie}, year={2021}, pages={13–38} } @article{xiang_tang_gai_wang_2021, title={Measuring Stem Diameter of Sorghum Plants in the Field Using a High-Throughput Stereo Vision System}, volume={64}, ISSN={2151-0040}, url={http://dx.doi.org/10.13031/trans.14156}, DOI={10.13031/trans.14156}, number={6}, journal={Transactions of the ASABE}, publisher={American Society of Agricultural and Biological Engineers (ASABE)}, author={Xiang, Lirong and Tang, Lie and Gai, Jingyao and Wang, Le}, year={2021}, pages={1999–2010} } @article{xiang_nolan_bao_elmore_tuel_gai_shah_wang_huser_hurd_et al._2021, title={Robotic Assay for Drought (RoAD): an automated phenotyping system for brassinosteroid and drought responses}, volume={107}, ISSN={0960-7412 1365-313X}, url={http://dx.doi.org/10.1111/tpj.15401}, DOI={10.1111/tpj.15401}, number={6}, journal={The Plant Journal}, publisher={Wiley}, author={Xiang, Lirong and Nolan, Trevor M. and Bao, Yin and Elmore, Mitch and Tuel, Taylor and Gai, Jingyao and Shah, Dylan and Wang, Ping and Huser, Nicole M. and Hurd, Ashley M. and et al.}, year={2021}, month={Aug}, pages={1837–1853} } @article{gai_xiang_tang_2021, title={Using a depth camera for crop row detection and mapping for under-canopy navigation of agricultural robotic vehicle}, volume={188}, ISSN={0168-1699}, url={http://dx.doi.org/10.1016/j.compag.2021.106301}, DOI={10.1016/j.compag.2021.106301}, abstractNote={Computer vision provides local environmental information for robotic navigation in crop fields. It is particularly useful for robots operating under the canopies of tall plants such as corn (Zea mays) and sorghum (Sorghum bicolor), where GPS signals are not always receivable. However, the development of under-canopy navigation systems is still an open research area. The key contribution of our work is the development of a vision-based system for under-canopy navigation using a Time-of-Flight (ToF) camera. In the system, a novel algorithm was used to detect parallel crop rows from depth images taken under crop canopies. Two critical tasks in navigation were accomplished based on the detection results: 1) generating crop field maps as occupancy grids when reliable robot localization is available (from other sources such as GPS and IMU), and 2) providing inter-row vehicle positioning data when the field map is available and the localization is not reliable. The proposed system was evaluated in field tests. The test results showed that the proposed system was able to map the crop rows with mean absolute errors (MAE) of 3.4 cm and 3.6 cm in corn and sorghum fields, respectively. It provided lateral positioning data with MAEs of 5.0 cm and 4.2 cm for positioning in corn and sorghum crop rows, respectively.
The potential and limitations of using ToF cameras for under-canopy navigation were discussed.}, journal={Computers and Electronics in Agriculture}, publisher={Elsevier BV}, author={Gai, Jingyao and Xiang, Lirong and Tang, Lie}, year={2021}, month={Sep}, pages={106301} } @inproceedings{xiang_gai_tang_2020, place={Tucson, AZ}, title={Developing a high-throughput stereo vision system for plant phenotyping}, booktitle={Phenome 2020}, author={Xiang, L. and Gai, J. and Tang, L.}, year={2020} } @inproceedings{gai_tuel_xiang_tang_2020, title={Developing the Control System of an Autonomous Robot for Field-based Maize/Sorghum Plant Phenotyping}, booktitle={2020 ASABE Annual International Virtual Meeting}, author={Gai, J. and Tuel, T. and Xiang, L. and Tang, L.}, year={2020} } @inproceedings{gai_tuel_xiang_tang_2020a, title={PhenoBot 3.0 - an Autonomous Robot for Field-based Maize/Sorghum Plant Phenotyping}, booktitle={Phenome 2020}, author={Gai, J. and Tuel, T. and Xiang, L. and Tang, L.}, year={2020} } @inproceedings{xiang_tang_gai_wang_2020, title={PhenoStereo: a high-throughput stereo vision system for field-based plant phenotyping - with an application in sorghum stem diameter estimation}, url={http://dx.doi.org/10.13031/aim.202001190}, DOI={10.13031/aim.202001190}, abstractNote={In recent years, three-dimensional (3D) sensing has gained great interest in plant phenotyping because it can represent the 3D nature of plant architecture. Among all available 3D imaging technologies, stereo vision offers a viable solution due to its high spatial resolution and wide selection of camera modules. However, the performance of in-field stereo imaging for plant phenotyping has been adversely affected by textureless regions and occlusions of plants, and variable outdoor lighting and wind conditions. In this research, a portable stereo imaging module, namely PhenoStereo, was developed for high-throughput field-based plant phenotyping. PhenoStereo featured a self-contained embedded design, which made it capable of capturing images at 14 stereoscopic frames per second. In addition, a set of customized strobe lights was integrated to overcome lighting variations and enable the use of high shutter speeds to overcome motion blur. The stem diameter of sorghum plants is an important trait for stalk strength and biomass potential evaluation but has been identified as a challenging sensing task to automate in the field due to the complexity of the imaging object and the environment. To that end, PhenoStereo was used to acquire a set of sorghum plant images, and an automated point cloud data processing pipeline was also developed to automatically extract the stems and then quantify their diameters via an optimized 3D modeling process. The pipeline employed a Mask R-CNN deep learning network for detecting stalk contours and a Semi-Global Block Matching stereo matching algorithm for generating disparity maps. The correlation coefficient (r) between the image-derived stem diameters and the ground truth was 0.97 with a mean absolute error (MAE) of 1.44 mm, which outperformed all previously reported sensing approaches. These results demonstrated that, with proper customization, stereo vision can be a highly desirable sensing method for field-based plant phenotyping using high-fidelity 3D models reconstructed from stereoscopic images.
With the promising results from sorghum plant stem diameter sensing, this proposed stereo sensing approach can likely be extended to characterize a broad spectrum of plant phenotypes, such as the leaf angle and tassel shape of maize plants and the seed pods and stem nodes of soybean plants.}, booktitle={2020 ASABE Annual International Virtual Meeting, July 13-15, 2020}, publisher={American Society of Agricultural and Biological Engineers}, author={Xiang, Lirong and Tang, Lie and Gai, Jingyao and Wang, Le}, year={2020} } @article{xiang_bao_tang_ortiz_salas-fernandez_2019, title={Automated morphological traits extraction for sorghum plants via 3D point cloud data analysis}, volume={162}, ISSN={0168-1699}, url={http://dx.doi.org/10.1016/j.compag.2019.05.043}, DOI={10.1016/j.compag.2019.05.043}, abstractNote={The ability to correlate morphological traits of plants with their genotypes plays an important role in plant phenomics research. However, measuring phenotypes manually is time-consuming, labor-intensive, and prone to human errors. The 3D surface model of a plant can potentially provide an efficient and accurate way to digitize plant architecture. This study focused on the extraction of morphological traits at multiple developmental timepoints from sorghum plants grown under controlled conditions. A non-destructive 3D scanning system using a commodity depth camera was implemented to capture sequential images of a plant at different heights. To overcome the challenges of overlapping tillers, an algorithm was developed to first search for the stem in the merged point cloud data, and then the associated leaves. A 3D skeletonization algorithm was created by slicing the point cloud along the vertical direction, and then linking the connected Euclidean clusters between adjacent layers. Based on the structural clues of the sorghum plant, heuristic rules were implemented to separate overlapping tillers. Finally, each individual leaf was automatically segmented, and multiple parameters were obtained from the skeleton and the reconstructed point cloud, including plant height, stem diameter, leaf angle, and leaf surface area. The results showed high correlations between the manual measurements and the estimated values generated by the system. Statistical analyses between biomass and extracted traits revealed that stem volume was a promising predictor of shoot fresh weight and shoot dry weight, and the total leaf area was strongly correlated to shoot biomass at early stages.}, journal={Computers and Electronics in Agriculture}, publisher={Elsevier BV}, author={Xiang, Lirong and Bao, Yin and Tang, Lie and Ortiz, Diego and Salas-Fernandez, Maria G.}, year={2019}, month={Jul}, pages={951–961} } @inproceedings{xiang_bao_nolan_yin_tang_2019, title={Robotic imaging-based methods for leaf segmentation and growth tracking in Arabidopsis}, booktitle={2019 ASABE Annual International Meeting}, author={Xiang, L. and Bao, Y. and Nolan, T. and Yin, Y. and Tang, L.}, year={2019} } @inproceedings{xiang_bao_tang_salas-fernandez_2018, title={Automated morphological trait extraction for sorghum plants via 3D point cloud data analysis}, booktitle={2018 ASABE Annual International Meeting}, author={Xiang, L. and Bao, Y. and Tang, L.
and Salas-Fernandez, M.G.}, year={2018} } @article{hu_wang_xiang_wu_jiang_2018, title={Automatic Non-Destructive Growth Measurement of Leafy Vegetables Based on Kinect}, volume={18}, ISSN={1424-8220}, url={http://dx.doi.org/10.3390/s18030806}, DOI={10.3390/s18030806}, abstractNote={Non-destructive plant growth measurement is essential for plant growth and health research. As a 3D sensor, Kinect v2 has huge potential in agricultural applications, benefiting from its low price and strong robustness. This paper proposes a Kinect-based automatic system for the non-destructive growth measurement of leafy vegetables. The system used a turntable to acquire multi-view point clouds of the measured plant. A series of suitable algorithms was then applied to obtain a fine 3D reconstruction of the plant, while measuring the key growth parameters including relative/absolute height, total/projected leaf area, and volume. In the experiment, 63 pots of lettuce at different growth stages were measured. The results show that the Kinect-measured height and projected area have good linear relationships with the reference measurements, while the measured total area and volume both follow power-law relationships with the reference data. All of these fits showed good goodness of fit (R2 = 0.9457–0.9914). In the study of biomass correlations, the Kinect-measured volume was found to have a good power-law relationship (R2 = 0.9281) with fresh weight. In addition, the system practicality was validated by performance and robustness analysis.}, number={3}, journal={Sensors}, publisher={MDPI AG}, author={Hu, Yang and Wang, Le and Xiang, Lirong and Wu, Qian and Jiang, Huanyu}, year={2018}, month={Mar}, pages={806} } @article{xiang_ma_zhao_liu_he_feng_2017, title={Comparative Analysis of Chemometrics Method on Heavy Metal Detection in Soil with Laser-Induced Breakdown Spectroscopy}, volume={37}, url={http://www.gpxygpfx.com/EN/Y2017/V37/I12/3871}, number={12}, journal={Spectroscopy and Spectral Analysis}, author={Xiang, L.R. and Ma, Z.H. and Zhao, X.Y. and Liu, F. and He, Y. and Feng, L.}, year={2017}, pages={3871–3876} }