@article{reynolds_wilkins_martin_taggart_rivera_tunc-ozdemir_rufty_lobaton_bozkurt_daniele_2024, title={Evaluating Bacterial Nanocellulose Interfaces for Recording Surface Biopotentials from Plants}, volume={24}, ISSN={["1424-8220"]}, url={https://doi.org/10.3390/s24072335}, DOI={10.3390/s24072335}, abstractNote={The study of plant electrophysiology offers promising techniques to track plant health and stress in vivo for both agricultural and environmental monitoring applications. Use of superficial electrodes on the plant body to record surface potentials may provide new phenotyping insights. Bacterial nanocellulose (BNC) is a flexible, optically translucent, and water-vapor-permeable material with low manufacturing costs, making it an ideal substrate for non-invasive and non-destructive plant electrodes. This work presents BNC electrodes with screen-printed carbon (graphite) ink-based conductive traces and pads. It investigates the potential of these electrodes for plant surface electrophysiology measurements in comparison to commercially available standard wet gel and needle electrodes. The electrochemically active surface area and impedance of the BNC electrodes varied based on the annealing temperature and time over the ranges of 50 °C to 90 °C and 5 to 60 min, respectively. The water vapor transfer rate and optical transmittance of the BNC substrate were measured to estimate the level of occlusion caused by these surface electrodes on the plant tissue. The total reduction in chlorophyll content under the electrodes was measured after the electrodes were placed on maize leaves for up to 300 h, showing that the BNC caused only a 16% reduction. Maize leaf transpiration was reduced by only 20% under the BNC electrodes after 72 h compared to a 60% reduction under wet gel electrodes in 48 h. On three different model plants, BNC–carbon ink surface electrodes and standard invasive needle electrodes were shown to have a comparable signal quality, with a correlation coefficient of >0.9, when measuring surface biopotentials induced by acute environmental stressors. These are strong indications of the superior performance of the BNC substrate with screen-printed graphite ink as an electrode material for plant surface biopotential recordings.}, number={7}, journal={SENSORS}, author={Reynolds, James and Wilkins, Michael and Martin, Devon and Taggart, Matthew and Rivera, Kristina R. and Tunc-Ozdemir, Meral and Rufty, Thomas and Lobaton, Edgar and Bozkurt, Alper and Daniele, Michael A.}, year={2024}, month={Apr} } @article{banerjee_reynolds_taggart_daniele_bozkurt_lobaton_2024, title={Quantifying Visual Differences in Drought Stressed Maize through Reflectance and Data-Driven Analysis}, url={https://doi.org/10.20944/preprints202404.1949.v1}, DOI={10.20944/preprints202404.1949.v1}, abstractNote={Environmental factors, such as drought-stress, significantly impact maize growth and productivity worldwide. To improve yield and quality, effective strategies for early detection and mitigation of drought-stress in maize are essential. This paper presents a detailed analysis of three imaging trials conducted to detect drought-stress in maize plants using an existing, custom-developed, low cost, high throughput phenotyping platform. We propose a pipeline for early detection of water stress in maize plants using a Vision Transformer classifier and analysis of distributions of near-infrared (NIR) reflectance from the plants. 
We also explored suitable regions on the plant that are more sensitive to drought-stress and show that the region surrounding the youngest expanding leaf (YEL) and the stem can be used as a more consistent alternative to analysis involving just the YEL. Our results show good separation between well-watered and drought-stressed trials for two out of the three imaging trials both in terms of classification accuracy from data-driven features as well as through analysis of histograms of NIR reflectance.}, author={Banerjee, Sanjana and Reynolds, James and Taggart, Matthew and Daniele, Michael A. and Bozkurt, Alper and Lobaton, Edgar}, year={2024}, month={Apr} } @article{banerjee_reynolds_taggart_daniele_bozkurt_lobaton_2024, title={Quantifying Visual Differences in Drought-Stressed Maize through Reflectance and Data-Driven Analysis}, volume={5}, ISSN={["2673-2688"]}, url={https://doi.org/10.3390/ai5020040}, DOI={10.3390/ai5020040}, abstractNote={Environmental factors, such as drought stress, significantly impact maize growth and productivity worldwide. To improve yield and quality, effective strategies for early detection and mitigation of drought stress in maize are essential. This paper presents a detailed analysis of three imaging trials conducted to detect drought stress in maize plants using an existing, custom-developed, low-cost, high-throughput phenotyping platform. A pipeline is proposed for early detection of water stress in maize plants using a Vision Transformer classifier and analysis of distributions of near-infrared (NIR) reflectance from the plants. A classification accuracy of 85% was achieved in one of our trials, using hold-out trials for testing. Suitable regions on the plant that are more sensitive to drought stress were explored, and it was shown that the region surrounding the youngest expanding leaf (YEL) and the stem can be used as a more consistent alternative to analysis involving just the YEL. Experiments in search of an ideal window size showed that small bounding boxes surrounding the YEL and the stem area of the plant perform better in separating drought-stressed and well-watered plants than larger window sizes enclosing most of the plant. The results presented in this work show good separation between well-watered and drought-stressed categories for two out of the three imaging trials, both in terms of classification accuracy from data-driven features as well as through analysis of histograms of NIR reflectance.}, number={2}, journal={AI}, author={Banerjee, Sanjana and Reynolds, James and Taggart, Matthew and Daniele, Michael and Bozkurt, Alper and Lobaton, Edgar}, year={2024}, month={Jun}, pages={790–802} } @article{soleimani_guo_haley_jacks_lobaton_2024, title={The Impact of Pause and Filler Word Encoding on Dementia Detection with Contrastive Learning}, volume={14}, ISSN={["2076-3417"]}, url={https://www.mdpi.com/2076-3417/14/19/8879}, DOI={10.3390/app14198879}, abstractNote={Dementia is primarily caused by neurodegenerative diseases like Alzheimer’s disease (AD). It affects millions worldwide, making detection and monitoring crucial. This study focuses on the detection of dementia from speech transcripts of controls and dementia groups. We propose encoding in-text pauses and filler words (e.g., “uh” and “um”) in text-based language models and thoroughly evaluating their impact on performance (e.g., accuracy). Additionally, we suggest using contrastive learning to improve performance in a multi-task framework. 
Our results demonstrate the effectiveness of our approaches in enhancing the model’s performance, achieving 87% accuracy and an 86% f1-score. Compared to the state of the art, our approach has similar performance despite having significantly fewer parameters. This highlights the importance of pause and filler word encoding on the detection of dementia.}, number={19}, journal={APPLIED SCIENCES-BASEL}, author={Soleimani, Reza and Guo, Shengjie and Haley, Katarina L. and Jacks, Adam and Lobaton, Edgar}, year={2024}, month={Oct} } @article{dieffenderfer_brewer_noonan_smith_eichenlaub_haley_jacks_lobaton_neupert_hess_et al._2023, title={A Wearable System for Continuous Monitoring and Assessment of Speech, Gait, and Cognitive Decline for Early Diagnosis of ADRD}, ISSN={["1558-4615"]}, DOI={10.1109/EMBC40787.2023.10339986}, abstractNote={Early detection of cognitive decline is essential to study mild cognitive impairment and Alzheimer’s Disease in order to develop targeted interventions and prevent or stop the progression of dementia. This requires continuous and longitudinal assessment and tracking of the related physiological and behavioral changes during daily life. In this paper, we present a low cost and low power wearable system custom designed to track the trends in speech, gait, and cognitive stress while also considering the important human factor needs such as privacy and compliance. In the form factors of a wristband and waist-patch, this multimodal, multi-sensor system measures inertial signals, sound, heart rate, electrodermal activity and pulse transit time. A total power consumption of 2.6 mW without any duty cycling allows for more than 3 weeks of run time between charges when 1500 mAh batteries are used. Clinical Relevance— Much earlier detection of Alzheimer’s disease and related dementias may be possible by continuous monitoring of physiological and behavioral state using application specific wearable sensors during the activities of daily life.}, journal={2023 45TH ANNUAL INTERNATIONAL CONFERENCE OF THE IEEE ENGINEERING IN MEDICINE & BIOLOGY SOCIETY, EMBC}, author={Dieffenderfer, James and Brewer, Alec and Noonan, Maxwell A. and Smith, Madeline and Eichenlaub, Emily and Haley, Katarina L. and Jacks, Adam and Lobaton, Edgar and Neupert, Shevaun D. and Hess, Thomas M. and et al.}, year={2023} } @article{barahona_mills_hernandez_bozkurt_carpenter_lobaton_2023, title={Adolescent Asthma Monitoring: A Preliminary Study of Audio and Spirometry Modalities}, ISSN={["1558-4615"]}, DOI={10.1109/EMBC40787.2023.10340643}, abstractNote={Asthma patients’ sleep quality is correlated with how well their asthma symptoms are controlled. In this paper, deep learning techniques are explored to improve forecasting of forced expiratory volume in one second (FEV1) by using audio data from participants and test whether auditory sleep disturbances are correlated with poorer asthma outcomes. These are applied to a representative data set of FEV1 collected from a commercially available spirometer and audio spectrograms collected overnight using a smartphone. A model for detecting nonverbal vocalizations including coughs, sneezes, sighs, snoring, throat clearing, sniffs, and breathing sounds was trained and used to capture nightly sleep disturbances. 
Our preliminary analysis found significant improvement in FEV1 forecasting when using overnight nonverbal vocalization detections as an additional feature for regression using XGBoost over using only spirometry data. Clinical relevance— This preliminary study establishes up to 30% improvement of FEV1 forecasting using features generated by deep learning techniques over only spirometry-based features.}, journal={2023 45TH ANNUAL INTERNATIONAL CONFERENCE OF THE IEEE ENGINEERING IN MEDICINE & BIOLOGY SOCIETY, EMBC}, author={Barahona, Jeffrey A. and Mills, Katie and Hernandez, Michelle and Bozkurt, Alper and Carpenter, Delesha and Lobaton, Edgar J.}, year={2023} } @article{soleimani_barahona_chen_bozkurt_daniele_pozdin_lobaton_2023, title={An Overview of and Advances in Modeling and Interoperability of Deep Neural Sleep Staging}, url={https://www.mdpi.com/2673-9488/4/1/1}, DOI={10.3390/physiologia4010001}, abstractNote={Sleep staging has a very important role in diagnosing patients with sleep disorders. In general, this task is very time-consuming for physicians to perform. Deep learning shows great potential to automate this process and remove physician bias from decision making. In this study, we aim to identify recent trends on performance improvement and the causes for these trends. Recent papers on sleep stage classification and interpretability are investigated to explore different modeling and data manipulation techniques, their efficiency, and recent advances. We identify an improvement in performance up to 12% on standard datasets over the last 5 years. The improvements in performance do not appear to be necessarily correlated to the size of the models, but instead seem to be caused by incorporating new architectural components, such as the use of transformers and contrastive learning.}, journal={Physiologia}, author={Soleimani, Reza and Barahona, Jeffrey and Chen, Yuhan and Bozkurt, Alper and Daniele, Michael and Pozdin, Vladimir A. and Lobaton, Edgar}, year={2023}, month={Dec} } @article{queener_ahmmed_victorio_twiddy_dehn_brewer_lobaton_bozkurt_pozdin_daniele_2023, place={Vienna, Austria}, title={Conformal Micropatterned Organic-Metal Electrodes for Physiological Recording}, ISSN={["1930-0395"]}, url={http://dx.doi.org/10.1109/sensors56945.2023.10324963}, DOI={10.1109/SENSORS56945.2023.10324963}, abstractNote={Conformal electrodes provide a soft and conforming interface with the skin for reduced impedance, comfortable skin contact, and improved signal quality compared to commercial electrodes. In this paper, we present conformal micropatterned organic-metal (CMOM) electrodes and our investigation on the effect of perforation micropatterning and PEDOT:PSS coating. CMOM electrodes were characterized then evaluated in vivo against commercial-off-the-shelf electrodes. PEDOT:PSS was found to reduce the overall impedance in each electrode variant, resulting in a >97% decrease in impedance at low frequencies. The change in impedance at high frequencies was not significant for the control or $30\ \mu \mathrm{m}$ vias electrodes, but the impedance was significantly greater following EPD for $60\ \mu \mathrm{m}$ vias electrodes.}, journal={2023 IEEE SENSORS}, author={Queener, Kirstie M. 
and Ahmmed, Parvez and Victorio, Mauro and Twiddy, Jack and Dehn, Ashley and Brewer, Alec and Lobaton, Edgar and Bozkurt, Alper and Pozdin, Vladimir and Daniele, Michael}, year={2023} } @article{reynolds_taggart_martin_lobaton_cardoso_daniele_bozkurt_2023, title={Rapid Drought Stress Detection in Plants Using Bioimpedance Measurements and Analysis}, url={https://doi.org/10.1109/TAFE.2023.3330583}, DOI={10.1109/TAFE.2023.3330583}, abstractNote={Smart farming is the targeted use of phenotyping for the rapid, continuous, and accurate assessment of plant health in the field. Bioimpedance monitoring can play a role in smart farming as a phenotyping method, which is now accessible thanks to recent efforts to commoditize and miniaturize electronics. Here, we demonstrate that bioimpedance measurements reflect the physiological changes in live plant tissue with induced alterations in their environmental conditions. When plants were exposed to $-$1.0 MPa polyethylene glycol, to simulate drought conditions, the extracellular resistance was observed to increase prior to the intercellular resistance, where the low frequency bioimpedance measurements increased by 25% within one hour. Similar patterns were observed when drought stress was applied to the plants by water withholding, with a bioimpedance increase within a matter of a few hours. The bioimpedance measurements were also compared with leaf relative water content, imaging, and field transpirable soil water, which reinforced these findings. These preliminary results suggest that bioimpedance can function as a phenotyping tool for continuous and real time monitoring of plant stress to allow the development of strategies to prevent damage from environmental stresses such as drought.}, journal={IEEE Transactions on AgriFood Electronics}, author={Reynolds, James and Taggart, Matt and Martin, Devon and Lobaton, Edgar and Cardoso, Amanda and Daniele, Michael and Bozkurt, Alper}, year={2023} } @article{chen_attri_barahona_hernandez_carpenter_bozkurt_lobaton_2023, title={Robust Cough Detection With Out-of-Distribution Detection}, volume={27}, ISSN={["2168-2208"]}, url={https://doi.org/10.1109/JBHI.2023.3264783}, DOI={10.1109/JBHI.2023.3264783}, abstractNote={Cough is an important defense mechanism of the respiratory system and is also a symptom of lung diseases, such as asthma. Acoustic cough detection collected by portable recording devices is a convenient way to track potential condition worsening for patients who have asthma. However, the data used in building current cough detection models are often clean, containing a limited set of sound categories, and thus perform poorly when they are exposed to a variety of real-world sounds which could be picked up by portable recording devices. The sounds that are not learned by the model are referred to as Out-of-Distribution (OOD) data. In this work, we propose two robust cough detection methods combined with an OOD detection module, that removes OOD data without sacrificing the cough detection performance of the original system. These methods include adding a learning confidence parameter and maximizing entropy loss. 
Our experiments show that 1) the OOD system can produce dependable In-Distribution (ID) and OOD results at a sampling rate above 750 Hz; 2) the OOD sample detection tends to perform better for larger audio window sizes; 3) the model’s overall accuracy and precision get better as the proportion of OOD samples increases in the acoustic signals; 4) a higher percentage of OOD data is needed to realize performance gains at lower sampling rates. The incorporation of OOD detection techniques improves cough detection performance by a significant margin and provides a valuable solution to real-world acoustic cough detection problems.}, number={7}, journal={IEEE JOURNAL OF BIOMEDICAL AND HEALTH INFORMATICS}, author={Chen, Yuhan and Attri, Pankaj and Barahona, Jeffrey and Hernandez, Michelle L. and Carpenter, Delesha and Bozkurt, Alper and Lobaton, Edgar}, year={2023}, month={Jul}, pages={3210–3221} } @article{nguyen_holt_knauer_abner_lobaton_young_2023, title={Towards rapid weight assessment of finishing pigs using a handheld, mobile RGB-D camera}, volume={226}, ISSN={["1537-5129"]}, url={https://doi.org/10.1016/j.biosystemseng.2023.01.005}, DOI={10.1016/j.biosystemseng.2023.01.005}, abstractNote={Pig weight measurement is essential for monitoring performance, welfare, and production value. Weight measurement using a scale provides the most accurate results; however, it is time consuming and may increase animal stress. Subjective visual evaluations, even when conducted by an experienced caretaker, lack consistency and accuracy. Optical sensing systems provide alternative methods for estimating pig weight, but studies examining these systems only focus on images taken from stationary cameras. This study fills a gap in existing technology through examining a handheld, portable RGB-D imaging system for estimating pig weight. An Intel RealSense camera collected RGB-D data from finishing pigs at various market weights. 3D point clouds were computed for each pig, and latent features from a 3D generative model were used to predict pig weights using three regression models (SVR, MLP and AdaBoost). These models were compared to two baseline models: median prediction and linear regression using body dimension measurements as predictor variables. Using 10-fold cross validation mean absolute error (MAE) and root-mean-square error (RMSE), all three latent feature models performed better than the median prediction model (MAE = 12.3 kg, RMSE = 16.0 kg) but did not outperform linear regression between weight and girth measurements (MAE = 4.06 kg, RMSE = 4.94 kg). Of the models under consideration, SVR performed best (MAE = 9.25 kg, RMSE = 12.3 kg, mean absolute percentage error = 7.54%) when tested on unseen data. This research is an important step towards developing rapid pig body weight estimation methods from a handheld, portable imaging system by leveraging deep learning feature outputs and depth imaging technology.}, journal={BIOSYSTEMS ENGINEERING}, author={Nguyen, Anh H. and Holt, Jonathan P. and Knauer, Mark T. and Abner, Victoria A. and Lobaton, Edgar J. 
and Young, Sierra N.}, year={2023}, month={Feb}, pages={155–168} } @article{graham_park_billings_hulse-kemp_haigler_lobaton_2022, title={Efficient imaging and computer vision detection of two cell shapes in young cotton fibers}, volume={11}, ISSN={["2168-0450"]}, url={https://doi.org/10.1002/aps3.11503}, DOI={10.1002/aps3.11503}, abstractNote={Premise: The shape of young cotton (Gossypium) fibers varies within and between commercial cotton species, as revealed by previous detailed analyses of one cultivar of G. hirsutum and one of G. barbadense. Both narrow and wide fibers exist in G. hirsutum cv. Deltapine 90, which may impact the quality of our most abundant renewable textile material. More efficient cellular phenotyping methods are needed to empower future research efforts. Methods: We developed semi‐automated imaging methods for young cotton fibers and a novel machine learning algorithm for the rapid detection of tapered (narrow) or hemisphere (wide) fibers in homogeneous or mixed populations. Results: The new methods were accurate for diverse accessions of G. hirsutum and G. barbadense and at least eight times more efficient than manual methods. Narrow fibers dominated in the three G. barbadense accessions analyzed, whereas the three G. hirsutum accessions showed a mixture of tapered and hemisphere fibers in varying proportions. Discussion: The use or adaptation of these improved methods will facilitate experiments with higher throughput to understand the biological factors controlling the variable shapes of young cotton fibers or other elongating single cells. This research also enables the exploration of links between early cell shape and mature cotton fiber quality in diverse field‐grown cotton accessions.}, journal={APPLICATIONS IN PLANT SCIENCES}, author={Graham, Benjamin P. and Park, Jeremy and Billings, Grant T. and Hulse-Kemp, Amanda M. and Haigler, Candace H. and Lobaton, Edgar}, year={2022}, month={Nov} } @article{soleimani_lobaton_2022, title={Enhancing Inference on Physiological and Kinematic Periodic Signals via Phase-Based Interpretability and Multi-Task Learning}, volume={13}, ISSN={["2078-2489"]}, url={https://www.mdpi.com/2078-2489/13/7/326}, DOI={10.3390/info13070326}, abstractNote={Physiological and kinematic signals from humans are often used for monitoring health. Several processes of interest (e.g., cardiac and respiratory processes, and locomotion) demonstrate periodicity. Training models for inference on these signals (e.g., detection of anomalies, and extraction of biomarkers) require large amounts of data to capture their variability, which are not readily available. This hinders the performance of complex inference models. In this work, we introduce a methodology for improving inference on such signals by incorporating phase-based interpretability and other inference tasks into a multi-task framework applied to a generative model. For this purpose, we utilize phase information as a regularization term and as an input to the model and introduce an interpretable unit in a neural network, which imposes an interpretable structure on the model. This imposition helps us in the smooth generation of periodic signals that can aid in data augmentation tasks. 
We demonstrate the impact of our framework on improving the overall inference performance on ECG signals and inertial signals from gait locomotion.}, number={7}, journal={INFORMATION}, author={Soleimani, Reza and Lobaton, Edgar}, year={2022}, month={Jul} } @article{richmond_cole_dangler_daniele_marchitto_lobaton_2022, title={Forabot: Automated Planktic Foraminifera Isolation and Imaging}, volume={23}, ISSN={["1525-2027"]}, url={http://dx.doi.org/10.1029/2022gc010689}, DOI={10.1029/2022gc010689}, abstractNote={Physical inspection and sorting of foraminifera is a necessity in many research labs, as foraminifera serve as paleoenvironmental and chronostratigraphic indicators. In order to gain counts of species from samples, analyze chemical compositions, or extract morphological properties of foraminifera, research labs require human time and effort handling and sorting these microscopic fossils. The presented work describes Forabot, an open‐source system which can physically manipulate individual foraminifera for imaging and isolation with minimal human interaction. The major components to build a Forabot are outlined in this work, with supplementary information available which allows for other researchers to build a Forabot with low‐cost, off‐the‐shelf components. From a washed and sieved sample of hundreds of foraminifera, the Forabot is shown to be capable of isolating and imaging individual forams. The timing of the Forabot’s current pipeline allows for the processing of up to 27 foram specimens per hour, a rate that can be improved for future classification purposes by reducing image quality and/or quantity. Along with the physical descriptions, the image processing and classification pipelines are also reviewed. A proof‐of‐concept classifier utilizes a finetuned VGG‐16 network to achieve a classification accuracy of 79% on a validation set of foraminifera images collected with Forabot. In conclusion, the system is able to be built by researchers for a low cost, effectively manipulate foraminifera with few mistakes, provide quality images for future research, and classify the species of imaged forams.}, number={12}, journal={GEOCHEMISTRY GEOPHYSICS GEOSYSTEMS}, publisher={American Geophysical Union (AGU)}, author={Richmond, Turner and Cole, Jeremy and Dangler, Gabriella and Daniele, Michael and Marchitto, Thomas and Lobaton, Edgar}, year={2022}, month={Dec} } @article{li_zhong_lobaton_huang_2022, title={Fusion of Human Gaze and Machine Vision for Predicting Intended Locomotion Mode}, volume={30}, ISSN={["1558-0210"]}, url={https://doi.org/10.1109/TNSRE.2022.3168796}, DOI={10.1109/TNSRE.2022.3168796}, abstractNote={Predicting the user’s intended locomotion mode is critical for wearable robot control to assist the user’s seamless transitions when walking on changing terrains. Although machine vision has recently proven to be a promising tool in identifying upcoming terrains in the travel path, existing approaches are limited to environment perception rather than human intent recognition that is essential for coordinated wearable robot operation. Hence, in this study, we aim to develop a novel system that fuses the human gaze (representing user intent) and machine vision (capturing environmental information) for accurate prediction of the user’s locomotion mode. The system possesses multimodal visual information and recognizes user’s locomotion intent in a complex scene, where multiple terrains are present. 
Additionally, based on the dynamic time warping algorithm, a fusion strategy was developed to align temporal predictions from individual modalities while producing flexible decisions on the timing of locomotion mode transition for wearable robot control. System performance was validated using experimental data collected from five participants, showing high accuracy (over 96% in average) of intent recognition and reliable decision-making on locomotion transition with adjustable lead time. The promising results demonstrate the potential of fusing human gaze and machine vision for locomotion intent recognition of lower limb wearable robots.}, journal={IEEE TRANSACTIONS ON NEURAL SYSTEMS AND REHABILITATION ENGINEERING}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Li, Minhan and Zhong, Boxuan and Lobaton, Edgar and Huang, He}, year={2022}, pages={1103–1112} } @article{da silva_zhong_chen_lobaton_2022, title={Improving Performance and Quantifying Uncertainty of Body-Rocking Detection Using Bayesian Neural Networks}, volume={13}, ISSN={["2078-2489"]}, url={https://www.mdpi.com/2078-2489/13/7/338}, DOI={10.3390/info13070338}, abstractNote={Body-rocking is an undesired stereotypical motor movement performed by some individuals, and its detection is essential for self-awareness and habit change. We envision a pipeline that includes inertial wearable sensors and a real-time detection system for notifying the user so that they are aware of their body-rocking behavior. For this task, similarities of body rocking to other non-related repetitive activities may cause false detections which prevent continuous engagement, leading to alarm fatigue. We present a pipeline using Bayesian Neural Networks with uncertainty quantification for jointly reducing false positives and providing accurate detection. We show that increasing model capacity does not consistently yield higher performance by itself, while pairing it with the Bayesian approach does yield significant improvements. Disparities in uncertainty quantification are better quantified by calibrating them using deep neural networks. We show that the calibrated probabilities are effective quality indicators of reliable predictions. Altogether, we show that our approach provides additional insights on the role of Bayesian techniques in deep learning as well as aids in accurate body-rocking detection, improving our prior work on this subject.}, number={7}, journal={Information}, publisher={MDPI AG}, author={da Silva, Rafael Luiz and Zhong, Boxuan and Chen, Yuhan and Lobaton, Edgar}, year={2022}, month={Jul}, pages={338} } @article{twiddy_taggart_reynolds_sharkey_rufty_lobaton_bozkurt_daniele_2022, title={Real-Time Monitoring of Plant Stalk Growth Using a Flexible Printed Circuit Board Sensor}, ISSN={["1930-0395"]}, DOI={10.1109/SENSORS52175.2022.9967167}, abstractNote={Monitoring of plant growth within agriculture is essential for ensuring the survival of crops and optimization of resources in the face of environmental and industrial challenges. Herein, we describe a low-cost and easily deployable flexible circuit board sensor for measurement of plant stalk growth, providing for remote tracking of plant development on an industrial scale. Three circuit topologies and measurement strategies - “ladder-type,” “multiplex-type,” and “mixed-type” - are initially assessed off-plant in a simulated growth experiment. 
Further development of the “multiplex-type” sensor and on-plant validation demonstrates its ability to quantify stalk growth as a proxy for plant development.}, journal={2022 IEEE SENSORS}, author={Twiddy, Jack and Taggart, Matthew and Reynolds, James and Sharkey, Chris and Rufty, Thomas and Lobaton, Edgar and Bozkurt, Alper and Daniele, Michael}, year={2022} } @article{mondino_wagner_russell_lobaton_griffith_gruen_lascelles_olby_2022, title={Static posturography as a novel measure of the effects of aging on postural control in dogs}, volume={17}, ISSN={["1932-6203"]}, url={http://dx.doi.org/10.1371/journal.pone.0268390}, DOI={10.1371/journal.pone.0268390}, abstractNote={Aging is associated with impairment in postural control in humans. While dogs are a powerful model for the study of aging, the associations between age and postural control in this species have not yet been elucidated. The aims of this work were to establish a reliable protocol to measure center of pressure excursions in standing dogs and to determine age-related changes in postural sway. Data were obtained from 40 healthy adult dogs (Group A) and 28 senior dogs (Group B) during seven trials (within one session of data collection) of quiet standing on a pressure sensitive walkway system. Velocity, acceleration, root mean square, 95% ellipse area, range and frequency revolve were recorded as measures of postural sway. In Group A, reliability was assessed with intraclass correlation, and the effect of morphometric variables was evaluated using linear regression. By means of stepwise linear regression we determined that root mean square overall and acceleration in the craniocaudal direction were the best variables able to discriminate between Group A and Group B. The relationship between these two center-of-pressure (COP) measures and the dogs’ fractional lifespan was examined in both groups and the role of pain and proprioceptive deficits was evaluated in Group B. All measures except for frequency revolve showed good to excellent reliability. Weight, height and length were correlated with most of the measures. Fractional lifespan impacted postural control in Group B but not Group A. Joint pain and its interaction with proprioceptive deficits influence postural sway especially in the acceleration in the craniocaudal direction, while fractional lifespan was most important in the overall COP displacement. In conclusion, our study found that pressure sensitive walkway systems are a reliable tool to evaluate postural sway in dogs; and that postural sway is affected by morphometric parameters and increases with age and joint pain.}, number={7}, journal={PLOS ONE}, publisher={Public Library of Science (PLoS)}, author={Mondino, Alejandra and Wagner, Grant and Russell, Katharine and Lobaton, Edgar and Griffith, Emily and Gruen, Margaret and Lascelles, B. Duncan X. and Olby, Natasha Jane}, editor={Evans, RichardEditor}, year={2022}, month={Jul} } @inbook{latif_dieffenderfer_silva_lobaton_bozkurt_2023, title={Wearable Cyberphysical Systems for Biomedicine}, url={http://dx.doi.org/10.1016/b978-0-12-822548-6.00124-2}, DOI={10.1016/b978-0-12-822548-6.00124-2}, abstractNote={This chapter surveys the state-of-the-art related to the building blocks of wearable cyberphysical systems for health monitoring and highlights its potential to revolutionize healthcare, specifically chronic disease management. 
The common sensing modalities and their corresponding wearable form factors are summarized for the application areas of cardiovascular diseases, asthma and chronic obstructive pulmonary disease. The use of these measurements with estimation approaches using signal processing and machine learning techniques is also reviewed. The outcomes of these estimation tasks can be used to provide feedback internally to optimize device performance and externally to the users about their health situation. The chapter concludes with a discussion of deployment barriers for wearable cyberphysical systems in real life.}, booktitle={Encyclopedia of Sensors and Biosensors}, publisher={Elsevier}, author={Latif, Tahmid and Dieffenderfer, James and Silva, Rafael Luiz and Lobaton, Edgar and Bozkurt, Alper}, year={2023}, pages={63–85} } @article{haque_lobaton_nelson_yencho_pecota_mierop_kudenov_boyette_williams_2021, title={Computer vision approach to characterize size and shape phenotypes of horticultural crops using high-throughput imagery}, volume={182}, ISSN={0168-1699}, url={http://dx.doi.org/10.1016/j.compag.2021.106011}, DOI={10.1016/j.compag.2021.106011}, abstractNote={For many horticultural crops, variation in quality (e.g., shape and size) contributes significantly to the crop's market value. Metrics characterizing less subjective harvest quantities (e.g., yield and total biomass) are routinely monitored. In contrast, metrics quantifying more subjective crop quality characteristics such as ideal size and shape remain difficult to characterize objectively at the production-scale due to the lack of modular technologies for high-throughput sensing and computation. Several horticultural crops are sent to packing facilities after having been harvested, where they are sorted into boxes and containers using high-throughput scanners. These scanners capture images of each fruit or vegetable being sorted and packed, but the images are typically used solely for sorting purposes and promptly discarded. With further analysis, these images could offer unparalleled insight on how crop quality metrics vary at the industrial production-scale and provide further insight into how these characteristics translate to overall market value. At present, methods for extracting and quantifying quality characteristics of crops using images generated by existing industrial infrastructure have not been developed. Furthermore, prior studies that investigated horticultural crop quality metrics, specifically of size and shape, used a limited number of samples, did not incorporate deformed or non-marketable samples, and did not use images captured from high-throughput systems. In this work, using sweetpotato (SP) as a use case, we introduce a computer vision algorithm for quantifying shape and size characteristics in a high-throughput manner. This approach generates 3D model of SPs from two 2D images captured by an industrial sorter 90 degrees apart and extracts 3D shape features in a few hundred milliseconds. We applied the 3D reconstruction and feature extraction method to thousands of image samples to demonstrate how variations in shape features across SP cultivars can be quantified. We created a SP shape dataset containing SP images, extracted shape features, and qualitative shape types (U.S. No. 1 or Cull). We used this dataset to develop a neural network-based shape classifier that was able to predict Cull vs. U.S. No. 1 SPs with 84.59% accuracy. 
In addition, using univariate Chi-squared tests and random forest, we identified the most important features for determining qualitative shape type (U.S. No. 1 or Cull) of the SPs. Our study serves as a key step towards enabling big data analytics for industrial SP agriculture. The methodological framework is readily transferable to other horticultural crops, particularly those that are sorted using commercial imaging equipment.}, journal={Computers and Electronics in Agriculture}, publisher={Elsevier BV}, author={Haque, Samiul and Lobaton, Edgar and Nelson, Natalie and Yencho, G. Craig and Pecota, Kenneth V. and Mierop, Russell and Kudenov, Michael W. and Boyette, Mike and Williams, Cranos M.}, year={2021}, month={Mar}, pages={106011} } @article{zhong_silva_tran_huang_lobaton_2021, title={Efficient Environmental Context Prediction for Lower Limb Prostheses}, volume={52}, ISSN={["2168-2232"]}, url={https://doi.org/10.1109/TSMC.2021.3084036}, DOI={10.1109/TSMC.2021.3084036}, abstractNote={Environmental context prediction is important for wearable robotic applications, such as terrain-adaptive control. System efficiency is critical for wearable robots, in which system resources (e.g., processors and memory) are highly constrained. This article aims to address the system efficiency of real-time environmental context prediction for lower limb prostheses. First, we develop an uncertainty-aware frame selection strategy that can dynamically select frames according to lower limb motion and uncertainty captured by Bayesian neural networks (BNNs) for environment prediction. We further propose a dynamic Bayesian gated recurrent unit (D-BGRU) network to address the inconsistent frame rate which is a side effect of the dynamic frame selection. Second, we investigate the effects on the tradeoff between computational complexity and environment prediction accuracy of adding additional sensing modalities (e.g., GPS and an on-glasses camera) into the system. Finally, we implement and optimize our framework for embedded hardware, and evaluate the real-time inference accuracy and efficiency of classifying six types of terrains. The experiments show that our proposed frame selection strategy can reduce more than 90% of the computations without sacrificing environment prediction accuracy, and can be easily extended to the situation of multimodality fusion. We achieve around 93% prediction accuracy with less than one frame to be processed per second. Our model has 6.4 million 16-bit float numbers and takes 44 ms to process each frame on a lightweight embedded platform (NVIDIA Jetson TX2).}, number={6}, journal={IEEE TRANSACTIONS ON SYSTEMS MAN CYBERNETICS-SYSTEMS}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Zhong, Boxuan and Silva, Rafael Luiz and Tran, Michael and Huang, He and Lobaton, Edgar}, year={2021}, month={Jun} } @misc{chakraborty_hoque_jeem_biswas_bardhan_lobaton_2021, title={Fashion Recommendation Systems, Models and Methods: A Review}, volume={8}, ISSN={["2227-9709"]}, url={http://dx.doi.org/10.3390/informatics8030049}, DOI={10.3390/informatics8030049}, abstractNote={In recent years, the textile and fashion industries have witnessed an enormous amount of growth in fast fashion. On e-commerce platforms, where numerous choices are available, an efficient recommendation system is required to sort, order, and efficiently convey relevant product content or information to users. 
Image-based fashion recommendation systems (FRSs) have attracted a huge amount of attention from fast fashion retailers as they provide a personalized shopping experience to consumers. With the technological advancements, this branch of artificial intelligence exhibits a tremendous amount of potential in image processing, parsing, classification, and segmentation. Despite its huge potential, the number of academic articles on this topic is limited. The available studies do not provide a rigorous review of fashion recommendation systems and the corresponding filtering techniques. To the best of the authors’ knowledge, this is the first scholarly article to review the state-of-the-art fashion recommendation systems and the corresponding filtering techniques. In addition, this review also explores various potential models that could be implemented to develop fashion recommendation systems in the future. This paper will help researchers, academics, and practitioners who are interested in machine learning, computer vision, and fashion retailing to understand the characteristics of the different fashion recommendation systems.}, number={3}, journal={INFORMATICS-BASEL}, publisher={MDPI AG}, author={Chakraborty, Samit and Hoque, Md Saiful and Jeem, Naimur Rahman and Biswas, Manik Chandra and Bardhan, Deepayan and Lobaton, Edgar}, year={2021}, month={Sep} } @article{abdelkhalek_qiu_hernandez_bozkurt_lobaton_2021, title={Investigating the Relationship between Cough Detection and Sampling Frequency for Wearable Devices}, ISSN={["1558-4615"]}, url={http://dx.doi.org/10.1109/embc46164.2021.9630082}, DOI={10.1109/EMBC46164.2021.9630082}, abstractNote={Cough detection can provide an important marker to monitor chronic respiratory conditions. However, manual techniques which require human expertise to count coughs are both expensive and time-consuming. Recent Automatic Cough Detection Algorithms (ACDAs) have shown promise to meet clinical monitoring requirements, but only in recent years they have made their way to non-clinical settings due to the required portability of sensing technologies and the extended duration of data recording. More precisely, these ACDAs operate at high sampling frequencies, which leads to high power consumption and computing requirements, making these difficult to implement on a wearable device. Additionally, reproducibility of their performance is essential. Unfortunately, as the majority of ACDAs were developed using private clinical data, it is difficult to reproduce their results. We, hereby, present an ACDA that meets clinical monitoring requirements and reliably operates at a low sampling frequency. This ACDA is implemented using a convolutional neural network (CNN), and publicly available data. It achieves a sensitivity of 92.7%, a specificity of 92.3%, and an accuracy of 92.5% using a sampling frequency of just 750 Hz. We also show that a low sampling frequency allows us to preserve patients’ privacy by obfuscating their speech, and we analyze the trade-off between speech obfuscation for privacy and cough detection accuracy. Clinical relevance—This paper presents a new cough detection technique and preliminary analysis on the trade-off between detection accuracy and obfuscation of speech for privacy. 
These findings indicate that, using a publicly available dataset, we can sample signals at 750 Hz while still maintaining a sensitivity above 90%, suggested to be sufficient for clinical monitoring [1].}, journal={2021 43RD ANNUAL INTERNATIONAL CONFERENCE OF THE IEEE ENGINEERING IN MEDICINE & BIOLOGY SOCIETY (EMBC)}, publisher={IEEE}, author={Abdelkhalek, Mahmoud and Qiu, Jinyi and Hernandez, Michelle and Bozkurt, Alper and Lobaton, Edgar}, year={2021}, pages={7103–7107} } @article{cole_bozkurt_lobaton_2021, title={Simultaneous Localization of Biobotic Insects using Inertial Data and Encounter Information}, ISSN={["1558-4615"]}, url={http://dx.doi.org/10.1109/embc46164.2021.9629542}, DOI={10.1109/EMBC46164.2021.9629542}, abstractNote={Several recent research efforts have shown that the bioelectrical stimulation of their neuro-mechanical system can control the locomotion of Madagascar hissing cockroaches (Gromphadorhina portentosa). This has opened the possibility of using these insects to explore centimeter-scale environments, such as rubble piles in urban disaster areas. We present an inertial navigation system based on machine learning modules that is capable of localizing groups of G. portentosa carrying thorax-mounted inertial measurement units. The proposed navigation system uses the agents’ encounters with one another as signals of opportunity to increase tracking accuracy. Results are shown for five agents that are operating on a planar (2D) surface in controlled laboratory conditions. Trajectory reconstruction accuracy is improved by 16% when we use encounter information for the agents, and up to 27% when we add a heuristic that corrects speed estimates via a search for an optimal speed-scaling factor.}, journal={2021 43RD ANNUAL INTERNATIONAL CONFERENCE OF THE IEEE ENGINEERING IN MEDICINE & BIOLOGY SOCIETY (EMBC)}, publisher={IEEE}, author={Cole, Jeremy and Bozkurt, Alper and Lobaton, Edgar}, year={2021}, pages={4649–4653} } @article{chen_wilkins_barahona_rosenbaum_daniele_lobaton_2021, title={Toward Automated Analysis of Fetal Phonocardiograms: Comparing Heartbeat Detection from Fetal Doppler and Digital Stethoscope Signals}, ISSN={["1558-4615"]}, url={http://dx.doi.org/10.1109/embc46164.2021.9629814}, DOI={10.1109/EMBC46164.2021.9629814}, abstractNote={Longitudinal fetal health monitoring is essential for high-risk pregnancies. Heart rate and heart rate variability are prime indicators of fetal health. In this work, we implemented two neural network architectures for heartbeat detection on a set of fetal phonocardiogram signals captured using fetal Doppler and a digital stethoscope. We test the efficacy of these networks using the raw signals and the hand-crafted energy from the signal. The results show a Convolutional Neural Network is the most efficient at identifying the S1 waveforms in a heartbeat, and its performance is improved when using the energy of the Doppler signals. We further discuss issues, such as low Signal-to-Noise Ratios (SNR), present in the training of a model based on the stethoscope signals. Finally, we show that we can improve the SNR, and subsequently the performance of the stethoscope, by matching the energy from the stethoscope to that of the Doppler signal.}, journal={2021 43rd Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC)}, publisher={IEEE}, author={Chen, Yuhan and Wilkins, Michael D. and Barahona, Jeffrey and Rosenbaum, Alan J. 
and Daniele, Michael and Lobaton, Edgar}, year={2021}, pages={975–979} } @article{martin_reynolds_daniele_lobaton_bozkurt_2021, title={Towards Continuous Plant Bioimpedance Fitting and Parameter Estimation}, ISSN={["1930-0395"]}, url={http://dx.doi.org/10.1109/sensors47087.2021.9639492}, DOI={10.1109/SENSORS47087.2021.9639492}, abstractNote={The push to advance artificial intelligence, internet of things, and big data analysis all pave the way to automated and systematic optimization in precision agriculture and smart farming applications. These advancements lead to many benefits, including the optimization of primary production, prevention of spoilage via supply chain management, and detection of crop failure risk. Noninvasive impedance sensors serve as a promising candidate for monitoring plant health wirelessly and play a major role in this optimization problem. In this study, we developed a software pipeline to support impedance sensing applications and, as a proof of concept, applied this to track longitudinal consistent bioimpedance data from the V4 leaf midrib in maize plants. The script uses the single-shell equivalent circuit model to represent the extracellular fluid, cellular membrane, and intracellular fluid as a simplified resistive-capacitive circuit, where these elements’ parameters are estimated with complex nonlinear least squares. The double-shell model extends the single-shell model to account for the effects of the relatively large plant cell vacuole. Limit cases for impedance are utilized for specific parameters as an alternative method of estimation. We investigated a complex analysis-based modification to the objective function and model optimization for the data pipeline automation. Various weighing functions are applied and checked against one another. Additionally, a custom graphical user interface was developed to assist with parameter initialization for correcting potential convergence issues and understanding the influence of each parameter on the dataset. We demonstrated that the analysis of an example longitudinal dataset was able to reveal a time series for parameter fitting.}, journal={2021 IEEE SENSORS}, publisher={IEEE}, author={Martin, Devon and Reynolds, James and Daniele, Michael and Lobaton, Edgar and Bozkurt, Alper}, year={2021} } @article{mohaddes_silva_akbulut_zhou_tanneeru_lobaton_lee_misra_2020, title={A Pipeline for Adaptive Filtering and Transformation of Noisy Left-Arm ECG to Its Surrogate Chest Signal}, volume={9}, url={https://doi.org/10.3390/electronics9050866}, DOI={10.3390/electronics9050866}, abstractNote={The performance of a low-power single-lead armband in generating electrocardiogram (ECG) signals from the chest and left arm was validated against a BIOPAC MP160 benchtop system in real-time. The filtering performance of three adaptive filtering algorithms, namely least mean squares (LMS), recursive least squares (RLS), and extended kernel RLS (EKRLS) in removing white (W), power line interference (PLI), electrode movement (EM), muscle artifact (MA), and baseline wandering (BLW) noises from the chest and left-arm ECG was evaluated with respect to the mean squared error (MSE). Filter parameters of the used algorithms were adjusted to ensure optimal filtering performance. LMS was found to be the most effective adaptive filtering algorithm in removing all noises with minimum MSE. However, for removing PLI with a maximal signal-to-noise ratio (SNR), RLS showed lower MSE values than LMS when the step size was set to $1 \times 10^{-5}$. 
We proposed a transformation framework to convert the denoised left-arm and chest ECG signals to their low-MSE and high-SNR surrogate chest signals. With wide applications in wearable technologies, the proposed pipeline was found to be capable of establishing a baseline for comparing left-arm signals with original chest signals, getting one step closer to making use of the left-arm ECG in clinical cardiac evaluations.}, number={5}, journal={Electronics}, publisher={MDPI AG}, author={Mohaddes, Farzad and Silva, Rafael and Akbulut, Fatma and Zhou, Yilu and Tanneeru, Akhilesh and Lobaton, Edgar and Lee, Bongmook and Misra, Veena}, year={2020}, month={May}, pages={866} } @article{asadi_suresh_ender_gotad_maniyar_anand_noghabaei_han_lobaton_wu_2020, title={An integrated UGV-UAV system for construction site data collection}, volume={112}, url={http://dx.doi.org/10.1016/j.autcon.2019.103068}, DOI={10.1016/j.autcon.2019.103068}, abstractNote={There have been recent efforts to increase the degree of automation and frequency of data collection for construction applications using Unmanned Aerial/Ground Vehicles (UAV/UGV). However, the current practice of data collection is traditionally performed, which is manual, costly, time-consuming, and error-prone. Developing vision-based mobile robotic systems that are aware of its surrounding and capable of autonomous navigation are becoming essential to many construction applications, namely surveying, monitoring, and inspection. Nevertheless, the systems above suffer from a series of performance issues. One major problem is inefficient navigation in indoor and cluttered scenes with many obstacles and barriers, where some places are inaccessible by a UGV. To provide a solution to this problem, this paper designs a UAV-UGV team that integrates two custom-built mobile robots. The UGV autonomously navigates through space, leveraging its sensors. The UAV acts as an external eye for the UGV, observing the scene from a vantage point that is inaccessible to the UGV. The relative pose of the UAV is estimated continuously, which allows it to maintain a fixed location that is relative to the UGV. The key aspects for the development of this system that is capable of autonomous navigation are the localization of both UAV and UGV, mapping of the surrounding environment, and efficient path planning using multiple sensors. The proposed system is tested in an indoor and cluttered construction-like environment. The performance of the system demonstrates the feasibility of developing and deploying a robust and automated data collection system for construction applications in the near future.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Asadi, Khashayar and Suresh, Akshay Kalkunte and Ender, Alper and Gotad, Siddhesh and Maniyar, Suraj and Anand, Smit and Noghabaei, Mojtaba and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2020}, month={Apr}, pages={103068} } @article{ramos-giraldo_reberg-horton_locke_mirsky_lobaton_2020, title={Drought Stress Detection Using Low-Cost Computer Vision Systems and Machine Learning Techniques}, volume={22}, ISSN={1520-9202 1941-045X}, url={http://dx.doi.org/10.1109/MITP.2020.2986103}, DOI={10.1109/MITP.2020.2986103}, abstractNote={The real-time detection of drought stress has major implications for preventing cash crop yield loss due to variable weather conditions and ongoing climate change. 
The most widely used indicator of drought sensitivity/tolerance in corn and soybean is the presence or absence of leaf wilting during periods of water stress. We develop a low-cost automated drought detection system using computer vision coupled with machine learning (ML) algorithms that document the drought response in corn and soybeans field crops. Using ML, we predict the drought status of crop plants with more than 80% accuracy relative to expert-derived visual drought ratings.}, number={3}, journal={IT Professional}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Ramos-Giraldo, Paula and Reberg-Horton, Chris and Locke, Anna M. and Mirsky, Steven and Lobaton, Edgar}, year={2020}, month={May}, pages={27–29} } @article{ge_richmond_zhong_marchitto_lobaton_2021, title={Enhancing the morphological segmentation of microscopic fossils through Localized Topology-Aware Edge Detection}, volume={45}, ISSN={["1573-7527"]}, url={https://doi.org/10.1007/s10514-020-09950-9}, DOI={10.1007/s10514-020-09950-9}, number={5}, journal={AUTONOMOUS ROBOTS}, publisher={Springer Science and Business Media LLC}, author={Ge, Qian and Richmond, Turner and Zhong, Boxuan and Marchitto, Thomas M. and Lobaton, Edgar J.}, year={2021}, month={Jun}, pages={709–723} } @article{zhong_silva_li_huang_lobaton_2021, title={Environmental Context Prediction for Lower Limb Prostheses With Uncertainty Quantification}, volume={18}, ISSN={["1558-3783"]}, url={https://doi.org/10.1109/TASE.2020.2993399}, DOI={10.1109/TASE.2020.2993399}, abstractNote={Reliable environmental context prediction is critical for wearable robots (e.g., prostheses and exoskeletons) to assist terrain-adaptive locomotion. This article proposed a novel vision-based context prediction framework for lower limb prostheses to simultaneously predict human’s environmental context for multiple forecast windows. By leveraging the Bayesian neural networks (BNNs), our framework can quantify the uncertainty caused by different factors (e.g., observation noise, and insufficient or biased training) and produce a calibrated predicted probability for online decision-making. We compared two wearable camera locations (a pair of glasses and a lower limb device), independently and conjointly. We utilized the calibrated predicted probability for online decision-making and fusion. We demonstrated how to interpret deep neural networks with uncertainty measures and how to improve the algorithms based on the uncertainty analysis. The inference time of our framework on a portable embedded system was less than 80 ms/frame. The results in this study may lead to novel context recognition strategies in reliable decision-making, efficient sensor fusion, and improved intelligent system design in various applications. Note to Practitioners—This article was motivated by two practical problems in computer vision for wearable robots: First, the performance of deep neural networks is challenged by real-life disturbances. However, reliable confidence estimation is usually unavailable and the factors causing failures are hard to identify. Second, evaluating wearable robots by intuitive trial and error is expensive due to the need for human experiments. Our framework produces a calibrated predicted probability as well as three uncertainty measures. The calibrated probability makes it easy to customize prediction decision criteria by considering how much the corresponding application can tolerate error. 
This study demonstrated a practical procedure to interpret and improve the performance of deep neural networks with uncertainty quantification. We anticipate that our methodology could be extended to other applications as a general scientific and efficient procedure of evaluating and improving intelligent systems.}, number={2}, journal={IEEE TRANSACTIONS ON AUTOMATION SCIENCE AND ENGINEERING}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Zhong, Boxuan and Silva, Rafael Luiz and Li, Minhan and Huang, He and Lobaton, Edgar}, year={2021}, month={Apr}, pages={458–470} } @inproceedings{silva_starliper_bhosale_taggart_ranganath_sarje_daniele_bozkurt_rufty_lobaton_2020, title={Feasibility Study of Water Stress Detection in Plants using a High-Throughput Low-Cost System}, url={http://dx.doi.org/10.1109/sensors47125.2020.9278711}, DOI={10.1109/sensors47125.2020.9278711}, abstractNote={The importance of efficient water management for crops and its impact on canopy development is undeniable, where high-throughput systems connected to the cloud have a great potential to aggregate measurements for a cyber-physical analysis. In this work, we develop a vision based high-throughput system to aid the identification and early detection of stress caused by water deficit in maize plants. We show that image processing techniques combined with a near infrared camera allow the distinction between a well watered maize plant and one that has its water supply neglected. Our preliminary results demonstrate the potential viability of applying predictive models for early water stress detection.}, booktitle={2020 IEEE SENSORS}, publisher={IEEE}, author={Silva, Rafael L. and Starliper, Nathan and Bhosale, Dinesh Kiran and Taggart, Matthew and Ranganath, Rakshita and Sarje, Trupti and Daniele, Michael and Bozkurt, Alper and Rufty, Thomas and Lobaton, Edgar}, year={2020}, month={Oct} } @article{cole_bozkurt_lobaton_2020, title={Localization of Biobotic Insects Using Low-Cost Inertial Measurement Units}, volume={20}, ISSN={["1424-8220"]}, url={https://doi.org/10.3390/s20164486}, DOI={10.3390/s20164486}, abstractNote={Disaster robotics is a growing field that is concerned with the design and development of robots for disaster response and disaster recovery. These robots assist first responders by performing tasks that are impractical or impossible for humans. Unfortunately, current disaster robots usually lack the maneuverability to efficiently traverse these areas, which often necessitate extreme navigational capabilities, such as centimeter-scale clearance. Recent work has shown that it is possible to control the locomotion of insects such as the Madagascar hissing cockroach (Gromphadorhina portentosa) through bioelectrical stimulation of their neuro-mechanical system. This provides access to a novel agent that can traverse areas that are inaccessible to traditional robots. In this paper, we present a data-driven inertial navigation system that is capable of localizing cockroaches in areas where GPS is not available. We pose the navigation problem as a two-point boundary-value problem where the goal is to reconstruct a cockroach’s trajectory between the starting and ending states, which are assumed to be known. We validated our technique using nine trials that were conducted in a circular arena using a biobotic agent equipped with a thorax-mounted, low-cost inertial measurement unit. Results show that we can achieve centimeter-level accuracy. 
This is accomplished by estimating the cockroach's velocity, using regression models trained to estimate speed and heading from the inertial signals themselves, and by solving an optimization problem so that the boundary-value constraints are satisfied.}, number={16}, journal={SENSORS}, publisher={MDPI AG}, author={Cole, Jeremy and Bozkurt, Alper and Lobaton, Edgar}, year={2020}, month={Aug} } @misc{ramos-giraldo_reberg-horton_mirsky_lobaton_locke_henriquez_zuniga_minin_2020, title={Low-cost Smart Camera System for Water Stress Detection in Crops}, url={http://dx.doi.org/10.1109/SENSORS47125.2020.9278744}, DOI={10.1109/sensors47125.2020.9278744}, abstractNote={The availability of easy-to-use, low-cost, and highly scalable tools makes it possible to achieve rapid and widespread adoption of precision agriculture. In this paper we outline the development of a smart camera system to detect drought stress in corn and soybean crops. The system comprises a Raspberry Pi Zero W, Raspberry Pi Camera, WittyPi mini, a cooling and solar power system, temperature sensors both inside and outside of the box, and infrared canopy temperature and light sensors. The system was built to collect data in a configurable time frame and has an embedded machine-learning (ML) processing system. The camera was configured using an Internet of Things (IoT) platform to manage the device and send images to the Cloud. One of the challenges for this system was to effectively implement machine learning models on this limited-resource embedded platform. We achieved an accuracy of 74% with the embedded machine learning algorithm when classifying water stress in soybeans.}, journal={2020 IEEE SENSORS}, publisher={IEEE}, author={Ramos-Giraldo, Paula and Reberg-Horton, S. Chris and Mirsky, Steven and Lobaton, Edgar and Locke, Anna M. and Henriquez, Esleyther and Zuniga, Ane and Minin, Artem}, year={2020}, month={Oct} } @inproceedings{latif_gonzalez_dieffenderfer_liao_hernandez_misra_lobaton_bozkurt_2020, title={Preliminary Assessment of Human Biological Responses to Low-level Ozone}, url={http://dx.doi.org/10.1109/sensors47125.2020.9278620}, DOI={10.1109/sensors47125.2020.9278620}, abstractNote={Multi-modal wearable sensors monitoring physiology and environment simultaneously would offer great promise for managing respiratory health, especially for asthmatic patients. In this study, we present a preliminary investigation of the correlation between ozone exposure, heart rate, heart rate variability, and lung function. As the first step, we tested the effect of low-level ozone exposure in a sample of four healthy individuals. Test subjects underwent controlled exposure from 0.06 to 0.08 ppm of ozone and filtered air on two separate exposure days. Our results indicate an increase in mean heart rate in three out of four test subjects when exposed to ozone. We have also observed that changes in mean heart rate have a positive correlation with changes in lung function and a negative correlation with changes in neutrophil count.
These results provide a baseline understanding of healthy subjects as a control group.}, booktitle={2020 IEEE SENSORS}, publisher={IEEE}, author={Latif, Tahmid and Gonzalez, Laura and Dieffenderfer, James and Liao, Yuwei and Hernandez, Michelle and Misra, Veena and Lobaton, Edgar and Bozkurt, Alper}, year={2020}, month={Oct} } @article{zhong_huang_lobaton_2022, title={Reliable Vision-Based Grasping Target Recognition for Upper Limb Prostheses}, volume={52}, ISSN={["2168-2275"]}, url={https://doi.org/10.1109/TCYB.2020.2996960}, DOI={10.1109/TCYB.2020.2996960}, abstractNote={Computer vision has shown promising potential in wearable robotics applications (e.g., human grasping target prediction and context understanding). However, in practice, the performance of computer vision algorithms is challenged by insufficient or biased training, observation noise, cluttered background, etc. By leveraging Bayesian deep learning (BDL), we have developed a novel, reliable vision-based framework to assist upper limb prosthesis grasping during arm reaching. This framework can measure different types of uncertainties from the model and data for grasping target recognition in realistic and challenging scenarios. A probability calibration network was developed to fuse the uncertainty measures into one calibrated probability for online decision making. We formulated the problem as the prediction of grasping target while arm reaching. Specifically, we developed a 3-D simulation platform to simulate and analyze the performance of vision algorithms under several common challenging scenarios in practice. In addition, we integrated our approach into a shared control framework of a prosthetic arm and demonstrated its potential at assisting human participants with fluent target reaching and grasping tasks.}, number={3}, journal={IEEE TRANSACTIONS ON CYBERNETICS}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Zhong, Boxuan and Huang, He and Lobaton, Edgar}, year={2022}, month={Mar}, pages={1750–1762} } @inproceedings{silva_stone_lobaton_2019, title={A Feasibility Study of a Wearable Real-Time Notification System for Self-Awareness of Body-Rocking Behavior}, url={http://dx.doi.org/10.1109/embc.2019.8857221}, DOI={10.1109/embc.2019.8857221}, abstractNote={Wearable sensors have been shown to be effective for promoting self-awareness, wellness and re-education. In this work, we perform a preliminary study analyzing the real-time detection and annotation of body-rocking behavior in individuals, which is a type of Stereotypical Motor Movement (SMM). We develop a platform for real-time annotation and detection using wireless inertial sensors and an embedded device. The annotations are analyzed in order to study the duration and frequency of the behavior, and they are corrected offline in order to better understand any offsets in the real-time annotation procedure. Finally, we show the feasibility of a real-time feedback system based on a proof of concept algorithm and the necessary computation resources to execute it.}, booktitle={2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Silva, Rafael L. 
and Stone, Emily and Lobaton, Edgar}, year={2019}, month={Jul} } @article{starliper_mohammadzadeh_songkakul_hernandez_bozkurt_lobaton_2019, title={Activity-Aware Wearable System for Power-Efficient Prediction of Physiological Responses}, volume={19}, ISSN={["1424-8220"]}, url={https://doi.org/10.3390/s19030441}, DOI={10.3390/s19030441}, abstractNote={Wearable health monitoring has emerged as a promising solution to the growing need for remote health assessment and growing demand for personalized preventative care and wellness management. Vital signs can be monitored and alerts can be made when anomalies are detected, potentially improving patient outcomes. One major challenge for the use of wearable health devices is their energy efficiency and battery-lifetime, which motivates the recent efforts towards the development of self-powered wearable devices. This article proposes a method for context aware dynamic sensor selection for power optimized physiological prediction using multi-modal wearable data streams. We first cluster the data by physical activity using the accelerometer data, and then fit a group lasso model to each activity cluster. We find the optimal reduced set of groups of sensor features, in turn reducing power usage by duty cycling these and optimizing prediction accuracy. We show that using activity state-based contextual information increases accuracy while decreasing power usage. We also show that the reduced feature set can be used in other regression models increasing accuracy and decreasing energy burden. We demonstrate the potential reduction in power usage using a custom-designed multi-modal wearable system prototype.}, number={3}, journal={SENSORS}, author={Starliper, Nathan and Mohammadzadeh, Farrokh and Songkakul, Tanner and Hernandez, Michelle and Bozkurt, Alper and Lobaton, Edgar}, year={2019}, month={Feb} } @article{mitra_marchitto_ge_zhong_kanakiya_cook_fehrenbacher_ortiz_tripati_lobaton_2019, title={Automated species-level identification of planktic foraminifera using convolutional neural networks, with comparison to human performance}, volume={147}, ISSN={["1872-6186"]}, url={http://dx.doi.org/10.1016/j.marmicro.2019.01.005}, DOI={10.1016/j.marmicro.2019.01.005}, abstractNote={Picking foraminifera from sediment samples is an essential, but repetitive and low-reward task that is well-suited for automation. The first step toward building a picking robot is the development of an automated identification system. We use machine learning techniques to train convolutional neural networks (CNNs) to identify six species of extant planktic foraminifera that are widely used by paleoceanographers, and to distinguish the six species from other taxa. We employ CNNs that were previously built and trained for image classification. Foraminiferal training and identification use reflected light microscope digital images taken at 16 different illumination angles using a light-emitting diode (LED) ring. Overall machine accuracy, as a combination of precision and recall, is better than 80% even with limited training. We compare machine performance to that of human pickers (six experts and five novices) by tasking each with the identification of 540 specimens based on images. Experts achieved comparable precision but poorer recall relative to the machine, with an average accuracy of 63%. Novices scored lower than experts on both precision and recall, for an overall accuracy of 53%. 
The machine achieved fairly uniform performance across the six species, while participants' scores were strongly species-dependent, commensurate with their past experience and expertise. The machine was also less sensitive to specimen orientation (umbilical versus spiral views) than the humans. These results demonstrate that our approach can provide a versatile ‘brain' for an eventual automated robotic picking system.}, journal={MARINE MICROPALEONTOLOGY}, publisher={Elsevier BV}, author={Mitra, R. and Marchitto, T. M. and Ge, Q. and Zhong, B. and Kanakiya, B. and Cook, M. S. and Fehrenbacher, J. S. and Ortiz, J. D. and Tripati, A. and Lobaton, E.}, year={2019}, month={Mar}, pages={16–24} } @inproceedings{li_zhong_liu_lee_fylstra_lobaton_huang_2019, title={Gaze Fixation Comparisons Between Amputees and Able-bodied Individuals in Approaching Stairs and Level-ground Transitions: A Pilot Study}, ISBN={9781538613115}, url={http://dx.doi.org/10.1109/embc.2019.8857388}, DOI={10.1109/embc.2019.8857388}, abstractNote={This paper aims to investigate the visual strategy of transtibial amputees while they are approaching the transition between level-ground and stairs and compare it with that of able-bodied individuals. To this end, we conducted a pilot study where two transtibial amputee subjects and two able-bodied subjects transitioned from level-ground to stairs and vice versa while wearing eye-tracking glasses to record gaze fixations. To investigate how vision functions in both populations when preparing for locomotion on new terrain, gaze fixation behavior before reaching the new terrain was analyzed and compared between the two populations across all transition cases in the study. Our results showed that, unlike the able-bodied population, amputees directed most of their fixations at the transition region prior to reaching new terrain. Furthermore, amputees showed a greater need for visual information in transition regions before navigating onto stairs than before navigating onto level ground. The insights into amputees' visual behavior gained from the study may guide the future development of technologies related to intention prediction and locomotion recognition for amputees.}, booktitle={2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Li, Minhan and Zhong, Boxuan and Liu, Ziwei and Lee, I-Chieh and Fylstra, Bretta L. and Lobaton, Edgar and Huang, He Helen}, year={2019}, month={Jul}, pages={3163–3166} } @article{asadi_chen_han_wu_lobaton_2019, title={LNSNet: Lightweight Navigable Space Segmentation for Autonomous Robots on Construction Sites}, volume={4}, ISSN={2306-5729}, url={http://dx.doi.org/10.3390/data4010040}, DOI={10.3390/data4010040}, abstractNote={An autonomous robot that can monitor a construction site should be able to contextually detect its surrounding environment by recognizing objects and making decisions based on its observations. Pixel-wise semantic segmentation in real time is vital to building an autonomous and mobile robot. However, the learning models' size and high memory usage associated with real-time segmentation are the main challenges for mobile robotics systems that have limited computing resources. To overcome these challenges, this paper presents an efficient semantic segmentation method named LNSNet (lightweight navigable space segmentation network) that can run on embedded platforms to determine navigable space in real time.
The core of the model architecture is a new block based on separable convolution that compresses the parameters of the standard residual block while maintaining accuracy and performance. LNSNet is faster, has fewer parameters and a smaller model size, and provides accuracy similar to that of existing models. A new pixel-level annotated dataset for real-time and mobile navigable space segmentation in construction environments has been constructed for the proposed method. The results demonstrate the effectiveness and efficiency necessary for the future development of autonomous robotic systems.}, number={1}, journal={Data}, publisher={MDPI AG}, author={Asadi, Khashayar and Chen, Pengyu and Han, Kevin and Wu, Tianfu and Lobaton, Edgar}, year={2019}, month={Mar}, pages={40} } @inproceedings{asadi_chen_han_wu_lobaton_2019, title={Real-Time Scene Segmentation Using a Light Deep Neural Network Architecture for Autonomous Robot Navigation on Construction Sites}, url={http://dx.doi.org/10.1061/9780784482438.041}, DOI={10.1061/9780784482438.041}, abstractNote={Camera-equipped unmanned vehicles (UVs) have received a lot of attention in data collection for construction monitoring applications. To develop an autonomous platform, the UV should be able to process multiple modules (e.g., context-awareness, control, localization, and mapping) on an embedded platform. Pixel-wise semantic segmentation provides a UV with the ability to be contextually aware of its surrounding environment. However, in the case of mobile robotic systems with limited computing resources, the large size of the segmentation model and its high memory usage require high computing resources, which is a major challenge for mobile UVs (e.g., a small-scale vehicle with limited payload and space). To overcome this challenge, this paper presents a light and efficient deep neural network architecture to run on an embedded platform in real time. The proposed model segments navigable space on an image sequence (i.e., a video stream), which is essential for an autonomous vehicle that is based on machine vision. The results demonstrate the performance efficiency of the proposed architecture compared to the existing models and suggest possible improvements that could make the model even more efficient, which is necessary for the future development of autonomous robotic systems.}, booktitle={Computing in Civil Engineering 2019}, publisher={American Society of Civil Engineers}, author={Asadi, Khashayar and Chen, Pengyu and Han, Kevin and Wu, Tianfu and Lobaton, Edgar}, year={2019}, month={Jun} } @inproceedings{gonzalez_paniagua_starliper_lobaton_2019, title={Signal Quality for RR Interval Prediction on Wearable Sensors}, url={http://dx.doi.org/10.1109/embc.2019.8857398}, DOI={10.1109/embc.2019.8857398}, abstractNote={Physiological responses are essential for health monitoring. Wearable devices are providing growing populations of people with the ability to monitor physiological signals during their day-to-day activities. However, wearable devices are particularly susceptible to degradation of signal quality due to noise from motion artifacts, environment, and user error.
In this paper, we compare the impact of including signal quality information on predictive models for RR intervals in a real-world setting.}, booktitle={2019 41st Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Gonzalez, Laura and Paniagua, Thomas and Starliper, Nathan and Lobaton, Edgar}, year={2019}, month={Jul} } @inproceedings{asadi_jain_qin_sun_noghabaei_cole_han_lobaton_2019, title={Vision-Based Obstacle Removal System for Autonomous Ground Vehicles Using a Robotic Arm}, url={http://dx.doi.org/10.1061/9780784482438.042}, DOI={10.1061/9780784482438.042}, abstractNote={Over the past few years, the use of camera-equipped robotic platforms for data collection and visual monitoring applications has grown exponentially. Cluttered construction sites with many objects (e.g., bricks, pipes, etc.) on the ground are challenging environments for a mobile unmanned ground vehicle (UGV) to navigate. To address this issue, this study presents a mobile UGV equipped with a stereo camera and a robotic arm that can remove obstacles along the UGV's path. To achieve this objective, the surrounding environment is captured by the stereo camera and obstacles are detected. The obstacle's location relative to the UGV is sent to the robotic arm module through the Robot Operating System (ROS). Then, the robotic arm picks up and removes the obstacle. The proposed method will greatly enhance the degree of automation and the frequency of data collection for construction monitoring. The proposed system is validated through two case studies. The results successfully demonstrate the detection and removal of obstacles, serving as one of the enabling factors for developing an autonomous UGV with various construction operating applications.}, booktitle={Computing in Civil Engineering 2019}, publisher={American Society of Civil Engineers}, author={Asadi, Khashayar and Jain, Rahul and Qin, Ziqian and Sun, Mingda and Noghabaei, Mojtaba and Cole, Jeremy and Han, Kevin and Lobaton, Edgar}, year={2019}, month={Jun} } @inproceedings{gonzalez_zhong_lobaton_2018, title={A Framework for Physiological Response Prediction with Joint Activity State Optimization}, url={http://dx.doi.org/10.1109/embc.2018.8513170}, DOI={10.1109/embc.2018.8513170}, abstractNote={Physiological responses are essential for health monitoring. However, modeling the complex interactions between them across activity and environmental factors can be challenging. In this paper, we introduce a framework that identifies the state of an individual based on their activity, trains predictive models for their physiological response within these states, and jointly optimizes for the states and the models.
We apply this framework to respiratory rate prediction based on heart rate and physical activity, and test it on a dataset of 9 individuals performing various activities of daily life.}, booktitle={2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Gonzalez, Laura and Zhong, Boxuan and Lobaton, Edgar}, year={2018}, month={Jul} } @inproceedings{asadi_ramshankar_pullagurla_bhandare_shanbhag_mehta_kundu_han_lobaton_wu_2018, title={Building an Integrated Mobile Robotic System for Real-Time Applications in Construction}, url={http://dx.doi.org/10.22260/isarc2018/0063}, DOI={10.22260/isarc2018/0063}, abstractNote={One of the major challenges of a real-time autonomous robotic system for construction monitoring is to simultaneously localize, map, and navigate over the lifetime of the robot, with little or no human intervention. Simultaneous Localization and Mapping (SLAM) and context awareness are two active research areas in the computer vision and robotics communities. Studies that integrate both in real time into a single modular framework for construction monitoring still require further investigation. Monocular vision and real-time scene understanding are computationally heavy, and the major state-of-the-art algorithms are tested on high-end desktops and/or servers with high CPU and/or GPU computing capabilities, which affects their mobility and deployment for real-world applications. To address these challenges and achieve automation, this paper proposes an integrated robotic computer vision system, which generates a real-world spatial map of the obstacles and traversable space present in the environment in near real time. This is done by integrating contextual awareness and visual SLAM into a ground robotic agent. This paper presents the hardware utilization and performance of the aforementioned system for three different outdoor environments, which demonstrates the applicability of this pipeline to diverse outdoor scenes in near real time. The entire system is also self-contained and does not require user input, which demonstrates the potential of this computer vision system for autonomous navigation.}, booktitle={Proceedings of the International Symposium on Automation and Robotics in Construction (IAARC)}, publisher={International Association for Automation and Robotics in Construction (IAARC)}, author={Asadi, Khashayar and Ramshankar, Hariharan and Pullagurla, Harish and Bhandare, Aishwarya and Shanbhag, Suraj and Mehta, Pooja and Kundu, Spondon and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2018}, month={Jul} } @inproceedings{mohammadzadeh_nam_lobaton_2018, title={Prediction of Physiological Response over Varying Forecast Lengths with a Wearable Health Monitoring Platform}, url={http://dx.doi.org/10.1109/embc.2018.8512276}, DOI={10.1109/embc.2018.8512276}, abstractNote={The goal of this study is to characterize the accuracy of prediction of physiological responses for varying forecast lengths using multi-modal data streams from wearable health monitoring platforms. We specifically focus on predicting breathing rate due to its significance in medical and exercise physiology research. We implement a nonlinear support vector machine regression model for accurate prediction of future values of these physiological signals with forecast windows of up to one minute. We explore the effects of heart rate and various other sensing modalities in prediction of breathing rate.
Results reveal that including other physiological responses and activity information captured by inertial measurements in the regression model improves the breathing rate prediction accuracy. We carried out experiments by collecting and analyzing physiological and activity data outside the lab using a wearable platform composed of various off-the-shelf sensors.}, booktitle={2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Mohammadzadeh, Farrokh and Nam, Chang S. and Lobaton, Edgar}, year={2018}, month={Jul} } @article{asadi_ramshankar_pullagurla_bhandare_shanbhag_mehta_kundu_han_lobaton_wu_2018, title={Vision-based integrated mobile robotic system for real-time applications in construction}, volume={96}, ISSN={0926-5805}, url={http://dx.doi.org/10.1016/j.autcon.2018.10.009}, DOI={10.1016/j.autcon.2018.10.009}, abstractNote={To increase the degree of automation and frequency of data collection for monitoring construction sites, there has been a rapid increase in the number of studies, in the past few years, that developed and/or examined mobile robotic applications in construction. These vision-based platforms capable of autonomous navigation and scene understanding are becoming essential in many construction applications, namely construction sites surveying, work-in-progress monitoring, and existing structure inspection. Simultaneous Localization and Mapping (SLAM) and object recognition for proper context-aware motion planning are some of the core vision techniques that are driving innovation for these robotic systems. To characterize the limitations of current techniques on real-time performance and identify challenges in integration and implementation for construction applications, this paper proposes a mobile robotic platform that incorporates a stack of embedded platforms with integrated Graphical Processing Units (GPUs). This paper presents three case studies to evaluate the performance of the proposed system. The results demonstrate the robustness and feasibility of developing and deploying an autonomous system in the near future.}, journal={Automation in Construction}, publisher={Elsevier BV}, author={Asadi, Khashayar and Ramshankar, Hariharan and Pullagurla, Harish and Bhandare, Aishwarya and Shanbhag, Suraj and Mehta, Pooja and Kundu, Spondon and Han, Kevin and Lobaton, Edgar and Wu, Tianfu}, year={2018}, month={Dec}, pages={470–482} } @inproceedings{diaz_da silva_zhong_huang_lobaton_2018, title={Visual Terrain Identification and Surface Inclination Estimation for Improving Human Locomotion with a Lower-Limb Prosthetic}, volume={2018-July}, ISBN={9781538636466}, url={http://dx.doi.org/10.1109/embc.2018.8512614}, DOI={10.1109/embc.2018.8512614}, abstractNote={Lower-limb robotic prosthetics can benefit from context awareness to provide comfort and safety to the amputee. In this work, we developed a terrain identification and surface inclination estimation system for a prosthetic leg using visual and inertial sensors. We built a dataset from which images with high sharpness are selected using the IMU signal. The images are used for terrain identification and inclination is also computed simultaneously. With such information, the control of a robotized prosthetic leg can be adapted to changes in its surrounding.}, booktitle={2018 40th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Diaz, Jean P. and da Silva, Rafael L. 
and Zhong, Boxuan and Huang, He Helen and Lobaton, Edgar}, year={2018}, month={Jul}, pages={1817–1820} } @inproceedings{zhong_ge_kanakiya_mitra_marchitto_lobaton_2017, title={A comparative study of image classification algorithms for Foraminifera identification}, url={http://dx.doi.org/10.1109/ssci.2017.8285164}, DOI={10.1109/ssci.2017.8285164}, abstractNote={Identifying Foraminifera (or forams for short) is essential for oceanographic and geoscience research as well as petroleum exploration. Currently, this is mostly accomplished using trained human pickers, routinely taking weeks or even months to accomplish the task. In this paper, a foram identification pipeline is proposed to automatically identify forams based on computer vision and machine learning techniques. A microscope-based image capturing system is used to collect a labelled image data set. Various popular image classification algorithms are adapted to this specific task and evaluated under various conditions. Finally, the potential of a weighted cross-entropy loss function in adjusting the trade-off between precision and recall is tested. The classification algorithms provide competitive results when compared to the human experts' labeling of the data set.}, booktitle={2017 IEEE Symposium Series on Computational Intelligence (SSCI)}, publisher={IEEE}, author={Zhong, Boxuan and Ge, Q. and Kanakiya, B. and Mitra, R. and Marchitto, T. and Lobaton, Edgar}, year={2017}, month={Nov}, pages={3199–3206} } @inproceedings{cole_mohammadzadeh_bollinger_latif_bozkurt_lobaton_2017, title={A study on motion mode identification for cyborg roaches}, url={http://dx.doi.org/10.1109/icassp.2017.7952637}, DOI={10.1109/icassp.2017.7952637}, abstractNote={This paper demonstrates the ability to accurately detect the movement state of Madagascar hissing cockroaches equipped with a custom board containing a five-degree-of-freedom inertial measurement unit. The cockroach moves freely through an unobstructed arena while wirelessly transmitting its accelerometer and gyroscope data. Multiple window sizes, features, and classifiers are assessed. An in-depth analysis of the classification results is performed to better understand the strengths and weaknesses of the classifier and feature set. The conclusions of this study show promise for future work on cockroach motion mode identification and localization.}, booktitle={2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={Cole, Jeremy and Mohammadzadeh, Farrokh and Bollinger, Christopher and Latif, Tahmid and Bozkurt, Alper and Lobaton, Edgar}, year={2017}, month={Mar} } @article{lokare_zhong_lobaton_2017, title={Activity-Aware Physiological Response Prediction Using Wearable Sensors}, volume={2}, ISSN={2411-5134}, url={http://dx.doi.org/10.3390/inventions2040032}, DOI={10.3390/inventions2040032}, abstractNote={Prediction of physiological responses can have a number of applications in the health and medical fields. However, this can be a challenging task due to interdependencies between these responses, physical activities, environmental factors and the individual's mental state. In this work, we focus on forecasting physiological responses in dynamic scenarios where individuals are performing exercises and complex activities of daily life. We minimize the effect of environmental and physiological factors in order to focus on the effect of physical activities.
In particular, we focus on forecasting heart rate and respiratory rate due to their relevance in medical and fitness training. We aim to forecast these physiological responses up to 60 s into the future, study the effect of different predictors that incorporate different sensing modalities and different amounts of historical data and analyze the performance of various strategies for prediction. Activity information is incorporated by clustering the data streams and fitting different predictive models per cluster. The effect of clustering is also studied by performing a hierarchical analysis on the clustering parameter, and we observe that activity clustering does improve the performance in our proposed methodology when predicting physiological response across modalities.}, number={4}, journal={Inventions}, publisher={MDPI AG}, author={Lokare, Namita and Zhong, Boxuan and Lobaton, Edgar}, year={2017}, month={Nov}, pages={32} } @inproceedings{dirafzoon_latif_gong_sichitiu_bozkurt_lobaton_2017, title={Biobotic motion and behavior analysis in response to directional neurostimulation}, url={http://dx.doi.org/10.1109/icassp.2017.7952598}, DOI={10.1109/icassp.2017.7952598}, abstractNote={This paper presents preliminary results for motion behavior analysis of Madagascar hissing cockroach biobots subject to stochastic and periodic neurostimulation pulses corresponding to randomly applied right and left turn, and move forward commands. We present our experimental setup and propose an unguided search strategy based stimulation profile designed for exploration of unknown environments. We study a probabilistic motion model fitted to the trajectories of biobots, perturbed from their natural motion by the stimulation pulses. Furthermore, we provide a statistical assessment of the biobotic directional response to turn commands and its correlation with stimuli profile over time. This study paves the way towards reliable control for more realistic under-rubble search and rescue applications.}, booktitle={2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={Dirafzoon, Alireza and Latif, Tahmid and Gong, Fengyuan and Sichitiu, Mihail and Bozkurt, Alper and Lobaton, Edgar}, year={2017}, month={Mar} } @inproceedings{ge_zhong_kanakiya_mitra_marchitto_lobaton_2017, title={Coarse-to-fine foraminifera image segmentation through 3D and deep features}, url={http://dx.doi.org/10.1109/ssci.2017.8280982}, DOI={10.1109/ssci.2017.8280982}, abstractNote={Foraminifera are single-celled marine organisms, which are usually less than 1 mm in diameter. One of the most common tasks associated with foraminifera is the species identification of thousands of foraminifera contained in rock or ocean sediment samples, which can be a tedious manual procedure. Thus an automatic visual identification system is desirable. Some of the primary criteria for foraminifera species identification come from the characteristics of the shell itself. As such, segmentation of chambers and apertures in foraminifera images would provide powerful features for species identification. Nevertheless, none of the existing image-based, automatic classification approaches make use of segmentation, partly due to the lack of accurate segmentation methods for foraminifera images. In this paper, we propose a learning-based edge detection pipeline, using a coarse-to-fine strategy, to extract the vague edges from foraminifera images for segmentation using a relatively small training set. 
The experiments demonstrate our approach is able to segment chambers and apertures of foraminifera correctly and has the potential to provide useful features for species identification and other applications such as morphological study of foraminifera shells and foraminifera dataset labeling.}, booktitle={2017 IEEE Symposium Series on Computational Intelligence (SSCI)}, publisher={IEEE}, author={Ge, Qian and Zhong, Boxuan and Kanakiya, Bhargav and Mitra, Ritayan and Marchitto, Thomas and Lobaton, Edgar}, year={2017}, month={Nov} } @inproceedings{zhong_qin_yang_chen_mudrick_taub_azevedo_lobaton_2017, title={Emotion recognition with facial expressions and physiological signals}, url={http://dx.doi.org/10.1109/ssci.2017.8285365}, DOI={10.1109/ssci.2017.8285365}, abstractNote={This paper proposes a temporal information preserving multi-modal emotion recognition framework based on physiological and facial expression data streams. The performance of each component is evaluated and compared individually and after data fusion. Specifically, we compared the effect of different views of cameras on facial expressions for emotion recognition, and combined these views to achieve better performance. A Temporal Information Preserving Framework (TIPF) is proposed to more accurately model the relationships between emotional and physiological states over time. Additionally, different fusion strategies are compared when combining information from different time periods and modalities. The experiments show that, TIPF significantly improves the emotion recognition performance when physiological signals are used and the best performance is achieved when fusing facial expressions and physiological data.}, booktitle={2017 IEEE Symposium Series on Computational Intelligence (SSCI)}, publisher={IEEE}, author={Zhong, Boxuan and Qin, Zikun and Yang, Shuo and Chen, Junyu and Mudrick, Nicholas and Taub, Michelle and Azevedo, Roger and Lobaton, Edgar}, year={2017}, month={Nov}, pages={1170–1177} } @inproceedings{lokare_samadi_zhong_gonzalez_mohammadzadeh_lobaton_2017, title={Energy-efficient activity recognition via multiple time-scale analysis}, url={http://dx.doi.org/10.1109/ssci.2017.8285176}, DOI={10.1109/ssci.2017.8285176}, abstractNote={In this work, we propose a novel power-efficient strategy for supervised human activity recognition using a multiple time-scale approach, which takes into account various window sizes. We assess the proposed methodology on our new multimodal dataset for activities of daily life (ADL), which combines the use of physiological and inertial sensors from multiple wearable devices. We aim to develop techniques that can run efficiently in wearable devices for real-time activity recognition. 
Our analysis shows that the proposed approach Sequential Maximum-Likelihood (SML) achieves high F1 score across all activities while providing lower power consumption than the standard Maximum-Likelihood (ML) approach.}, booktitle={2017 IEEE Symposium Series on Computational Intelligence (SSCI)}, publisher={IEEE}, author={Lokare, Namita and Samadi, Shamim and Zhong, Boxuan and Gonzalez, Laura and Mohammadzadeh, Farrokh and Lobaton, Edgar}, year={2017}, month={Nov}, pages={1466–1472} } @inproceedings{ge_lobaton_2017, title={Obstacle detection in outdoor scenes based on multi-valued stereo disparity maps}, url={http://dx.doi.org/10.1109/ssci.2017.8280990}, DOI={10.1109/ssci.2017.8280990}, abstractNote={In this paper, we propose a methodology for robust obstacle detection in outdoor scenes for autonomous driving applications using a multi-valued stereo disparity approach. Traditionally, disparity maps computed from stereo pairs only provide a single estimated disparity value for each pixel. However, disparity computation suffers heavily from reflections, lack of texture and repetitive patterns of objects. This may lead to wrong estimates, which can introduce some bias on obstacle detection approaches that make use of the disparity map. To overcome this problem, instead of a single-valued disparity estimation, we propose making use of multiple candidates per pixel. The candidates are selected from a statistical analysis that characterizes the performance of the underlying matching cost function based on two metrics: The number of candidates extracted, and the distance from these candidates to the true disparity value. Then, we construct an aggregate occupancy map in u-disparity space from which obstacle detection is obtained. Experiments show that our approach can recover the correct structure of obstacles on the scene when traditional estimation approaches fail.}, booktitle={2017 IEEE Symposium Series on Computational Intelligence (SSCI)}, publisher={IEEE}, author={Ge, Qian and Lobaton, Edgar}, year={2017}, month={Nov} } @inproceedings{richmond_lokare_lobaton_2017, title={Robust trajectory-based density estimation for geometric structure recovery}, url={http://dx.doi.org/10.23919/eusipco.2017.8081400}, DOI={10.23919/eusipco.2017.8081400}, abstractNote={We propose a method to both quickly and robustly extract geometric information from trajectory data. While point density may be of interest in some applications, trajectories provide different guarantees about our data such as path densities as opposed to location densities provided by points. We aim to utilize the concise nature of quadtrees in two dimensions to reduce run time complexity of counting trajectories in a neighborhood. We compare the accuracy of our methodology to a common current practice for subsampling a structure. Our results show that the proposed method is able to capture the geometric structure. 
We find an improvement in performance over the current practice in that our method is able to extract only the salient data and ignore trajectory outliers.}, booktitle={2017 25th European Signal Processing Conference (EUSIPCO)}, publisher={IEEE}, author={Richmond, Turner and Lokare, Namita and Lobaton, Edgar}, year={2017}, month={Aug}, pages={1210–1214} } @inproceedings{cole_agcayazi_latif_bozkurt_lobaton_2017, title={Speed estimation based on gait analysis for biobotic agents}, url={http://dx.doi.org/10.1109/icsens.2017.8234224}, DOI={10.1109/icsens.2017.8234224}, abstractNote={Biobotic agents equipped with inertial sensors can be used to explore areas where Global Navigation Satellite Systems such as GPS are not accessible. A common challenge in inertial navigation systems is how to compensate for the error accrued from the inertial measurements. We propose that the distinctive rocking motion present in the gait of some biobotic agents can be used to directly compute the speed of the agent from the inertial measurements, bypassing the need to integrate the speed from the inertial sensors. We present preliminary results highlighting a trend between gait and speed, using measurements obtained from a 9-degree-of-freedom inertial measurement unit mounted on a biobotic agent.}, booktitle={2017 IEEE SENSORS}, publisher={IEEE}, author={Cole, Jeremy and Agcayazi, Talha and Latif, Tahmid and Bozkurt, Alper and Lobaton, Edgar}, year={2017}, month={Oct} } @article{bozkurt_lobaton_sichitiu_2016, title={A Biobotic Distributed Sensor Network for Under-Rubble Search and Rescue}, volume={49}, ISSN={["1558-0814"]}, url={http://dx.doi.org/10.1109/mc.2016.136}, DOI={10.1109/mc.2016.136}, abstractNote={Utilizing the latest neural engineering developments, researchers have enabled biobotic insects that function as search-and-rescue agents to help map under-rubble environments and locate survivors and hazardous conditions. The Web extra at http://youtu.be/oJXEPcv-FMw is a video in which authors Alper Bozkurt, Edgar Lobaton, and Mihail Sichitiu demonstrate the acoustic steering of roach biobots to search for disaster victims trapped under rubble.}, number={5}, journal={COMPUTER}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Bozkurt, Alper and Lobaton, Edgar and Sichitiu, Mihail}, year={2016}, month={May}, pages={38–46} } @article{pomann_staicu_lobaton_mejia_dewey_reich_sweeney_shinohara_2016, title={A LAG FUNCTIONAL LINEAR MODEL FOR PREDICTION OF MAGNETIZATION TRANSFER RATIO IN MULTIPLE SCLEROSIS LESIONS}, volume={10}, ISSN={["1932-6157"]}, url={http://dx.doi.org/10.1214/16-aoas981}, DOI={10.1214/16-aoas981}, abstractNote={We propose a lag functional linear model to predict a response using multiple functional predictors observed at discrete grids with noise. Two procedures are proposed to estimate the regression parameter functions: (1) an approach that ensures smoothness for each value of time using generalized cross-validation; and (2) a global smoothing approach using a restricted maximum likelihood framework. Numerical studies are presented to analyze predictive accuracy in many realistic scenarios. The methods are employed to estimate a magnetic resonance imaging (MRI)-based measure of tissue damage (the magnetization transfer ratio, or MTR) in lesions of multiple sclerosis (MS), a disease that causes damage to the myelin sheaths around axons in the central nervous system.
Our method of estimation of MTR within lesions is useful retrospectively in research applications where MTR was not acquired, as well as in clinical practice settings where acquiring MTR is not currently part of the standard of care. The model facilitates the use of commonly acquired imaging modalities to estimate MTR within lesions, and outperforms cross-sectional models that do not account for temporal patterns of lesion development and repair.}, number={4}, journal={Annals of Applied Statistics}, publisher={Institute of Mathematical Statistics}, author={Pomann, Gina-Maria and Staicu, Ana-Maria and Lobaton, Edgar J. and Mejia, Amanda F. and Dewey, Blake E. and Reich, Daniel S. and Sweeney, Elizabeth M. and Shinohara, Russell T.}, year={2016}, month={Dec}, pages={2325–2348} } @article{dirafzoon_bozkurt_lobaton_2017, title={A framework for mapping with biobotic insect networks: From local to global maps}, volume={88}, url={http://dx.doi.org/10.1016/j.robot.2016.11.004}, DOI={10.1016/j.robot.2016.11.004}, abstractNote={We present an approach for global exploration and mapping of unknown environments using a swarm of cyborg insects, known as biobots, for emergency response scenarios under minimal sensing and localization constraints. We exploit natural stochastic motion models and controlled locomotion of biobots in conjunction with an aerial leader to explore and map a domain of interest. A sliding window strategy is adopted to construct local maps from coordinate free encounter information of the agents by means of local metric estimation. Robust topological features from these local representations are extracted using topological data analysis and a classification scheme. These maps are then merged into a global map which can be visualized using a graphical representation, that integrates geometric as well as topological features of the environment. Simulation and experimental results with biologically inspired robotic platform are presented to illustrate and verify the correctness of our approach, which provides building blocks for SLAM with biobotic insects.}, journal={Robotics and Autonomous Systems}, publisher={Elsevier BV}, author={Dirafzoon, Alireza and Bozkurt, Alper and Lobaton, Edgar}, year={2017}, month={Feb}, pages={79–96} } @inproceedings{dirafzoon_lokare_lobaton_2016, title={Action classification from motion capture data using topological data analysis}, url={http://dx.doi.org/10.1109/globalsip.2016.7906043}, DOI={10.1109/globalsip.2016.7906043}, abstractNote={This paper proposes a novel framework for activity recognition from 3D motion capture data using topological data analysis (TDA). We extract point clouds describing the oscillatory patterns of body joints from the principal components of their time series using Taken's delay embedding. Topological persistence from TDA is exploited to extract topological invariants of the constructed point clouds. We propose a feature extraction method from persistence diagrams in order to generate robust low dimensional features used for classification of different activities. 
Our experimental results demonstrate high separability of the generated features and, as a result, high classification accuracy.}, booktitle={2016 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Dirafzoon, Alireza and Lokare, Namita and Lobaton, Edgar}, year={2016}, month={Dec} } @inproceedings{xiong_latif_lobaton_bozkurt_sichitiu_2016, title={Characterization of RSS variability for biobot localization using 802.15.4 radios}, DOI={10.1109/wisnet.2016.7444305}, abstractNote={A cyber-physically organized swarm of insect biobots or biological robots can aid first responders in search-and-rescue scenarios after natural disasters or earthquakes by establishing an under-rubble sensor network. In such a network, the nodes are represented by the insect biobots equipped with electronic backpacks utilizing a system-on-chip. This application requires effective real-time localization of the mobile sensor nodes. Radio signal strength (RSS) is a measurement of the received signal power, and can be used in estimating the distance between two nodes, which then can help localize the biobotic sensor nodes in the future. This paper investigates RSS variability and its suitability for biobotic localization.}, booktitle={2016 IEEE Topical Conference on Wireless Sensors and Sensor Networks (WiSNet)}, author={Xiong, H. and Latif, T. and Lobaton, Edgar and Bozkurt, A. and Sichitiu, Mihail L.}, year={2016}, pages={1–3} } @inproceedings{lokare_gonzalez_lobaton_2016, title={Comparing wearable devices with wet and textile electrodes for activity recognition}, url={http://dx.doi.org/10.1109/embc.2016.7591492}, DOI={10.1109/embc.2016.7591492}, abstractNote={This paper explores the idea of identifying activities from muscle activation, which is captured by wearable ECG recording devices that use wet and textile electrodes. Most of the devices available today filter out the high-frequency components to retain only the signal related to the ECG. We explain how the high-frequency components that correspond to muscle activation can be extracted from the recorded signal and used to identify activities. We note that it is possible to obtain good performance for both the wet and dry electrodes. However, we observed that signals from the dry textile electrodes introduce fewer artifacts associated with muscle activation.}, booktitle={2016 38th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Lokare, Namita and Gonzalez, Laura and Lobaton, Edgar}, year={2016}, month={Aug}, pages={3539–3542} } @article{ge_lobaton_2016, title={Consensus-Based Image Segmentation via Topological Persistence}, ISSN={["2160-7508"]}, url={http://dx.doi.org/10.1109/cvprw.2016.135}, DOI={10.1109/cvprw.2016.135}, abstractNote={Image segmentation is one of the most important low-level operations in image processing and computer vision. It is unlikely for a single algorithm with a fixed set of parameters to segment various images successfully due to variations between images. However, it can be observed that the desired segmentation boundaries are often detected more consistently than other boundaries in the output of state-of-the-art segmentation algorithms. In this paper, we propose a new approach to capture the consensus of information from a set of segmentations generated by varying parameters of different algorithms.
The probability of a segmentation curve being present is estimated based on our probabilistic image segmentation model. A connectivity probability map is constructed and persistent segments are extracted by applying topological persistence to the probability map. Finally, a robust segmentation is obtained with the detection of certain segmentation curves guaranteed. The experiments demonstrate our algorithm is able to consistently capture the curves present within the segmentation set.}, journal={PROCEEDINGS OF 29TH IEEE CONFERENCE ON COMPUTER VISION AND PATTERN RECOGNITION WORKSHOPS, (CVPRW 2016)}, publisher={IEEE}, author={Ge, Qian and Lobaton, Edgar}, year={2016}, pages={1050–1057} } @article{dirafzoon_bozkurt_lobaton_2017, title={Geometric Learning and Topological Inference With Biobotic Networks}, volume={3}, ISSN={["2373-776X"]}, url={http://dx.doi.org/10.1109/tsipn.2016.2623093}, DOI={10.1109/tsipn.2016.2623093}, abstractNote={In this study, we present and analyze a framework for geometric and topological estimation for mapping of unknown environments. We consider agents mimicking motion behaviors of cyborg insects, known as biobots, and exploit coordinate-free local interactions among them to infer geometric and topological information about the environment, under minimal sensing and localization constraints. A metric estimation procedure is presented over a graphical representation referred to as the encounter graph in order to construct a geometric point cloud using manifold learning techniques. Topological data analysis (TDA) along with the proposed classification method is used to infer robust topological features of the space (e.g., existence of obstacles). We examine the asymptotic behavior of the proposed metric in terms of the convergence to the geodesic distances in the underlying manifold of the domain, and provide stability analysis results for the topological persistence. The proposed framework and its convergences and stability analysis are demonstrated through numerical simulations and experiments with Hexbugs.}, number={1}, journal={IEEE TRANSACTIONS ON SIGNAL AND INFORMATION PROCESSING OVER NETWORKS}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Dirafzoon, Alireza and Bozkurt, Alper and Lobaton, Edgar}, year={2017}, month={Mar}, pages={200–215} } @inproceedings{lokare_benavides_juneja_lobaton_2016, title={Hierarchical activity clustering analysis for robust graphical structure recovery}, url={http://dx.doi.org/10.1109/globalsip.2016.7906000}, DOI={10.1109/globalsip.2016.7906000}, abstractNote={In this paper we propose a hierarchical activity clustering methodology which incorporates the use of topological persistence analysis. Our clustering methodology captures the hierarchies present in the data and is therefore able to show the dependencies that exist between these activities. We make use of an aggregate persistence diagram to select robust graphical structures present within the dataset. These models are stable over a bound and provide accurate classification results. This approach allows us to select parameters based on the amount of temporal information needed to maintain high accuracy. 
The key innovations presented in this paper include the hierarchical characterization of activities over a temporal parameter, as well as characterization and parameter selection based on the stability of the results using persistence analysis.}, booktitle={2016 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Lokare, Namita and Benavides, Daniel and Juneja, Sahil and Lobaton, Edgar}, year={2016}, month={Dec} } @inproceedings{latif_yang_lobaton_bozkurt_2016, title={Preliminary statistical assessment towards characterization of biobotic control}, url={http://dx.doi.org/10.1109/embc.2016.7591162}, DOI={10.1109/embc.2016.7591162}, abstractNote={Biobotic research involving neurostimulation of instrumented insects to control their locomotion is showing potential as an alternative solution for the development of centimeter-scale distributed swarm robotics. To improve the reliability of biobotic agents, their control mechanism needs to be precisely characterized. To achieve this goal, this paper presents our initial efforts for statistical assessment of the angular response of roach biobots to the applied bioelectrical stimulus. Subsequent findings can help to understand the effect of each stimulation parameter, individually or collectively, and eventually reach reliable and consistent biobotic control suitable for real-life scenarios.}, booktitle={2016 38th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (EMBC)}, publisher={IEEE}, author={Latif, Tahmid and Yang, Meng and Lobaton, Edgar and Bozkurt, Alper}, year={2016}, month={Aug}, pages={2184–2187} } @article{bozkurt_lobaton_sichitiu_2015, title={Biobotic Insect Sensor Networks for Search and Rescue}, volume={2}, number={2}, journal={Journal of Homeland Defense & Security Information Analysis Center}, author={Bozkurt, A. and Lobaton, E. and Sichitiu, M.}, year={2015} } @inproceedings{dirafzoon_lobaton_bozkurt_2015, title={Exploration and topological mapping with Hexbugs}, url={http://dx.doi.org/10.1145/2737095.2737137}, DOI={10.1145/2737095.2737137}, abstractNote={In this demonstration, we present a topological mapping system to be used with biobotic insects in order to sketch maps of unknown arenas using only neighbor-to-neighbor interactions among the agents. Biobotic insects fuse the locomotory advantages of insects with wireless sensing technology in the form of electronic backpacks to function as search-and-rescue agents. Our mapping approach is designed for emergency response scenarios, where traditional mapping approaches may fail due to lack of localization information. We instead demonstrate the performance of our proposed approach using Hexbugs, which emulate the natural random motion of biobots. The Hexbugs are dispersed into a maze with unknown structure (Fig. 1), and their local interactions are captured through a visual tracking system.
Such information is then fed into a data analysis engine in order to robustly find the topological structure of the maze.}, booktitle={Proceedings of the 14th International Conference on Information Processing in Sensor Networks}, publisher={ACM}, author={Dirafzoon, Alireza and Lobaton, Edgar and Bozkurt, Alper}, year={2015}, month={Apr} } @article{ge_lokare_lobaton_2015, title={Non-Rigid Image Registration under Non-Deterministic Deformation Bounds}, volume={9287}, ISSN={["1996-756X"]}, url={http://dx.doi.org/10.1117/12.2072530}, DOI={10.1117/12.2072530}, abstractNote={Image registration aims to identify the mapping between corresponding locations in an anatomic structure. Most traditional approaches solve this problem by minimizing some error metric. However, they do not quantify the uncertainty behind their estimates and the feasibility of other solutions. In this work, it is assumed that two images of the same anatomic structure are related via a Lipschitz non-rigid deformation (the registration map). An approach for identifying point correspondences with zero false-negative rate and high precision is introduced under this assumption. This methodology is then extended to registration of regions in an image which is posed as a graph matching problem with geometric constraints. The outcome of this approach is a homeomorphism with uncertainty bounds characterizing its accuracy over the entire image domain. The method is tested by applying deformation maps to the LPBA40 dataset.}, journal={10TH INTERNATIONAL SYMPOSIUM ON MEDICAL INFORMATION PROCESSING AND ANALYSIS}, publisher={SPIE}, author={Ge, Qian and Lokare, Namita and Lobaton, Edgar}, editor={Romero, Eduardo and Lepore, Natasha}, year={2015} } @inproceedings{chattopadhyay_ge_wei_lobaton_2015, title={Robust multi-target tracking in outdoor traffic scenarios via persistence topology based robust motion segmentation}, url={http://dx.doi.org/10.1109/globalsip.2015.7418308}, DOI={10.1109/globalsip.2015.7418308}, abstractNote={In this paper, we present a motion segmentation based robust multi-target tracking technique for on-road obstacles. Our approach uses depth imaging information, and integrates persistence topology for segmentation and min-max network flow for tracking. To reduce time as well as computational complexity, the max flow problem is solved using a dynamic programming algorithm. We classify the sensor reading into regions of stationary and moving parts by aligning occupancy maps obtained from the disparity images and then incorporate a Kalman filter in the network flow algorithm to track the moving objects robustly. Our algorithm has been tested on several real-life stereo datasets and the results show that there is an improvement by a factor of three in robustness when comparing performance with and without the topological persistent detections. 
We also measure the accuracy of our algorithm using popular evaluation metrics for segmentation and tracking, and the results are promising.}, booktitle={2015 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Chattopadhyay, Somrita and Ge, Qian and Wei, Chunpeng and Lobaton, Edgar}, year={2015}, month={Dec}, pages={805–809} } @article{bozkurt_lobaton_sichitiu_hedrick_latif_dirafzoon_whitmire_verderber_marin_xiong_et al._2014, title={Biobotic Insect Swarm based Sensor Networks for Search and Rescue}, volume={9091}, ISSN={["1996-756X"]}, url={http://dx.doi.org/10.1117/12.2053906}, DOI={10.1117/12.2053906}, abstractNote={The potential benefits of distributed robotics systems in applications requiring situational awareness, such as search-and-rescue in emergency situations, are indisputable. The efficiency of such systems requires robotic agents capable of coping with uncertain and dynamic environmental conditions. For example, after an earthquake, a tremendous effort is spent for days to reach surviving victims, where robotic swarms or other distributed robotic systems might play a great role in achieving this faster. However, current technology falls short of offering centimeter scale mobile agents that can function effectively under such conditions. Insects, the inspiration of many robotic swarms, exhibit an unmatched ability to navigate through such environments while successfully maintaining control and stability. We have benefitted from recent developments in neural engineering and neuromuscular stimulation research to fuse the locomotory advantages of insects with the latest developments in wireless networking technologies to enable biobotic insect agents to function as search-and-rescue agents. Our research efforts towards this goal include development of biobot electronic backpack technologies, establishment of biobot tracking testbeds to evaluate locomotion control efficiency, investigation of biobotic control strategies with Gromphadorhina portentosa cockroaches and Manduca sexta moths, establishment of a localization and communication infrastructure, modeling and controlling collective motion by learning deterministic and stochastic motion models, topological motion modeling based on these models, and the development of a swarm robotic platform to be used as a testbed for our algorithms.}, journal={SIGNAL PROCESSING, SENSOR/INFORMATION FUSION, AND TARGET RECOGNITION XXIII}, publisher={SPIE}, author={Bozkurt, A. and Lobaton, E. and Sichitiu, Mihail L. and Hedrick, T. and Latif, T. and Dirafzoon, A. and Whitmire, E. and Verderber, A. and Marin, J. and Xiong, H. and et al.}, editor={Kadar, Ivan}, year={2014} } @inproceedings{lokare_ge_snyder_jewell_allibhai_lobaton_2014, title={Manifold learning approach to curve identification with applications to footprint segmentation}, url={http://dx.doi.org/10.1109/cimsivp.2014.7013288}, DOI={10.1109/cimsivp.2014.7013288}, abstractNote={Recognition of animals via images of their footprints is a non-invasive technique recently adopted by researchers interested in monitoring endangered species. One of the challenges that they face is the extraction of features from these images, which are required for this approach. These features are points along the boundary curve of the footprints. In this paper, we propose an innovative technique for extracting these curves from depth images. 
We formulate the problem of identification of the boundary of the footprint as a pattern recognition problem of a stochastic process over a manifold. This methodology also has applications in the segmentation of biological tissue for medical purposes and in the tracking of extreme weather patterns. The problem of pattern identification in the manifold is posed as a shortest path problem, where the path with the smallest cost is identified as the one with the highest likelihood to belong to the stochastic process. Our methodology is tested on a new dataset of normalized depth images of tiger footprints with ground truth selected by experts in the field.}, booktitle={2014 IEEE Symposium on Computational Intelligence for Multimedia, Signal and Vision Processing (CIMSIVP)}, publisher={IEEE}, author={Lokare, Namita and Ge, Qian and Snyder, Wesley and Jewell, Zoe and Allibhai, Sky and Lobaton, Edgar}, year={2014}, month={Dec} } @inproceedings{dirafzoon_betthauser_schornick_benavides_lobaton_2014, title={Mapping of unknown environments using minimal sensing from a stochastic swarm}, url={http://dx.doi.org/10.1109/iros.2014.6943102}, DOI={10.1109/iros.2014.6943102}, abstractNote={Swarms consisting of cyborg-insects or millirobots can be used for mapping and exploration of unstructured environments in emergency-response situations. Under extreme conditions, traditional localization techniques may fail to provide reliable position estimates. Instead, we propose a robust approach to obtain a topological map of an unknown environment using encounter information from a swarm of agents following a stochastic motion model via the use of tools from topological data analysis. A classification approach is introduced to determine the persistent topology features of the space. The approach is analyzed using simulation and experimental data using a swarm robotic platform called the WolfBot. For all experiments, the agents are programmed to follow a stochastic motion model and only rely on encounter information between agents to construct a map of the environment. The results indicate that the proposed approach can identify robust topological features with high accuracy.}, booktitle={2014 IEEE/RSJ International Conference on Intelligent Robots and Systems}, publisher={IEEE}, author={Dirafzoon, Alireza and Betthauser, Joseph and Schornick, Jeff and Benavides, Daniel and Lobaton, Edgar}, year={2014}, month={Sep}, pages={3842–3849} } @inproceedings{dirafzoon_bethhauser_schornick_cole_bozkurt_lobaton_2014, title={Poster abstract: Cyborg-insect networks for mapping of unknown environments}, DOI={10.1109/iccps.2014.6843729}, abstractNote={Cyborg-insect networks are systems that take advantage of existing biological platforms such as cockroaches [2] by attaching small instrumented payloads for sensing and motion control. These agents can be used in applications such as mapping and exploration of the environment for emergency response (e.g., search and rescue operations after earthquakes, tsunamis, hurricanes, etc.). These agents can gain access to locations that may not be reachable otherwise by moving underground through smaller locations. The power limitations of such platforms place restrictions on sensing, communication, and motion control. Hence, traditional mapping and exploration techniques may not perform well under these adverse conditions. We propose a robust approach to obtain a topological map of an unknown environment using the coordinate-free sensory data obtained from these cyborg-insect networks. 
In order to minimize control input, we take advantage of the natural behavior of insects to estimate a topological model of the environment based only on neighbor-to-neighbor interactions.}, booktitle={2014 ACM/IEEE International Conference on Cyber-Physical Systems (ICCPS)}, author={Dirafzoon, A. and Betthauser, J. and Schornick, J. and Cole, J. and Bozkurt, A. and Lobaton, Edgar}, year={2014}, pages={216–216} } @inproceedings{wei_ge_chattopadhyay_lobaton_2014, title={Robust obstacle segmentation based on topological persistence in outdoor traffic scenes}, url={http://dx.doi.org/10.1109/civts.2014.7009483}, DOI={10.1109/civts.2014.7009483}, abstractNote={In this paper, a new methodology for robust segmentation of obstacles from stereo disparity maps in an on-road environment is presented. We first construct a probability-of-occupancy map using the UV-disparity methodology. Traditionally, a simple threshold has been applied to segment obstacles from the occupancy map based on the connectivity of the resulting regions; however, this outcome is sensitive to the choice of parameter value. In our proposed method, instead of simple thresholding, we perform a topological persistence analysis on the constructed occupancy map. The topological framework hierarchically encodes all possible segmentation results as a function of the threshold; thus, we can identify the regions that are most persistent. This leads to a more robust segmentation. The approach is analyzed using real stereo image pairs from standard datasets.}, booktitle={2014 IEEE Symposium on Computational Intelligence in Vehicles and Transportation Systems (CIVTS)}, publisher={IEEE}, author={Wei, Chunpeng and Ge, Qian and Chattopadhyay, Somrita and Lobaton, Edgar}, year={2014}, month={Dec} } @inproceedings{betthauser_benavides_schornick_o'hara_patel_cole_lobaton_2014, title={WolfBot: A distributed mobile sensing platform for research and education}, url={http://dx.doi.org/10.1109/aseezone1.2014.6820632}, DOI={10.1109/aseezone1.2014.6820632}, abstractNote={Mobile sensor networks are often composed of agents with weak processing capabilities and some means of mobility. However, recent developments in embedded systems have enabled more powerful and portable processing units capable of analyzing complex data streams in real time. Systems with such capabilities are able to perform tasks such as 3D visual localization and tracking of targets. They are also well-suited for environmental monitoring using a combination of cameras, microphones, and sensors for temperature, air-quality, and pressure. Still, there are few compact platforms that combine state-of-the-art hardware with accessible software, an open-source design, and an affordable price. In this paper, we present an in-depth comparison of several mobile distributed sensor network platforms, and we introduce the WolfBot platform, which offers a balance between capabilities, accessibility, cost, and an open design. 
Experiments analyzing its computer-vision capabilities, power consumption, and system integration are provided.}, booktitle={Proceedings of the 2014 Zone 1 Conference of the American Society for Engineering Education}, publisher={IEEE}, author={Betthauser, Joseph and Benavides, Daniel and Schornick, Jeff and O'Hara, Neal and Patel, Jimit and Cole, Jeremy and Lobaton, Edgar}, year={2014}, month={Apr} } @article{chen_hong_naikal_sastry_tygar_yan_yang_chang_lin_wang_et al._2013, title={A Low-Bandwidth Camera Sensor Platform with Applications in Smart Camera Networks}, volume={9}, ISSN={["1550-4867"]}, url={http://dx.doi.org/10.1145/2422966.2422978}, DOI={10.1145/2422966.2422978}, abstractNote={Smart camera networks have recently emerged as a new class of sensor network infrastructure that is capable of supporting high-power in-network signal processing and enabling a wide range of applications. In this article, we provide an exposition of our efforts to build a low-bandwidth wireless camera network platform, called CITRIC, and its applications in smart camera networks. The platform integrates a camera, a microphone, a frequency-scalable (up to 624 MHz) CPU, 16 MB FLASH, and 64 MB RAM onto a single device. The device then connects with a standard sensor network mote to form a wireless camera mote. With reasonably low power consumption and extensive algorithmic libraries running on a decent operating system that is easy to program, CITRIC is ideal for research and applications in distributed image and video processing. Its capabilities of in-network image processing also reduce communication requirements, which has been high in other existing camera networks with centralized processing. Furthermore, the mote easily integrates with other low-bandwidth sensor networks via the IEEE 802.15.4 protocol. To justify the utility of CITRIC, we present several representative applications. In particular, concrete research results will be demonstrated in two areas, namely, distributed coverage hole identification and distributed object recognition.}, number={2}, journal={ACM TRANSACTIONS ON SENSOR NETWORKS}, publisher={Association for Computing Machinery (ACM)}, author={Chen, Phoebus and Hong, Kirak and Naikal, Nikhil and Sastry, S. Shankar and Tygar, Doug and Yan, Posu and Yang, Allen Y. and Chang, Lung-Chung and Lin, Leon and Wang, Simon and et al.}, year={2013}, month={Mar} } @inproceedings{lobaton_fu_torres_alterovitz_2013, title={Continuous shape estimation of continuum robots using X-ray images}, url={http://dx.doi.org/10.1109/icra.2013.6630653}, DOI={10.1109/icra.2013.6630653}, abstractNote={We present a new method for continuously and accurately estimating the shape of a continuum robot during a medical procedure using a small number of X-ray projection images (e.g., radiographs or fluoroscopy images). Continuum robots have curvilinear structure, enabling them to maneuver through constrained spaces by bending around obstacles. Accurately estimating the robot's shape continuously over time is crucial for the success of procedures that require avoidance of anatomical obstacles and sensitive tissues. Online shape estimation of a continuum robot is complicated by uncertainty in its kinematic model, movement of the robot during the procedure, noise in X-ray images, and the clinical need to minimize the number of X-ray images acquired. Our new method integrates kinematics models of the robot with data extracted from an optimally selected set of X-ray projection images. 
Our method represents the shape of the continuum robot over time as a deformable surface which can be described as a linear combination of time and space basis functions. We take advantage of probabilistic priors and numeric optimization to select optimal camera configurations, thus minimizing the expected shape estimation error. We evaluate our method using simulated concentric tube robot procedures and demonstrate that obtaining between 3 and 10 images from viewpoints selected by our method enables online shape estimation with errors significantly lower than using the kinematic model alone or using randomly spaced viewpoints.}, booktitle={2013 IEEE International Conference on Robotics and Automation}, publisher={IEEE}, author={Lobaton, Edgar J. and Fu, Jinghua and Torres, Luis G. and Alterovitz, Ron}, year={2013}, month={May}, pages={725–732} } @inproceedings{dirafzoon_lobaton_2013, title={Topological mapping of unknown environments using an unlocalized robotic swarm}, url={http://dx.doi.org/10.1109/iros.2013.6697160}, DOI={10.1109/iros.2013.6697160}, abstractNote={Mapping and exploration are essential tasks for swarm robotic systems. These tasks become extremely challenging when localization information is not available. In this paper, we explore how stochastic motion models and weak encounter information can be exploited to learn topological information about an unknown environment. Our system behavior mimics a probabilistic motion model of cockroaches, as it is inspired by current biobotic (cyborg insect) systems. We employ tools from algebraic topology to extract spatial information of the environment based on neighbor to neighbor interactions among the biologically inspired agents with no need for localization data. This information is used to build a map of persistent topological features of the environment. We analyze the performance of our estimation and propose a switching control mechanism for the motion models to extract features of complex environments in an effective way.}, booktitle={2013 IEEE/RSJ International Conference on Intelligent Robots and Systems}, publisher={IEEE}, author={Dirafzoon, Alireza and Lobaton, Edgar}, year={2013}, month={Nov}, pages={5545–5551} } @article{vasudevan_kurillo_lobaton_bernardin_kreylos_bajcsy_nahrstedt_2011, title={High-Quality Visualization for Geographically Distributed 3-D Teleimmersive Applications}, volume={13}, url={http://dx.doi.org/10.1109/tmm.2011.2123871}, DOI={10.1109/tmm.2011.2123871}, abstractNote={The growing popularity of 3-D movies has led to the rapid development of numerous affordable consumer 3-D displays. In contrast, the development of technology to generate 3-D content has lagged behind considerably. In spite of significant improvements to the quality of imaging devices, the accuracy of the algorithms that generate 3-D data, and the hardware available to render such data, the algorithms available to calibrate, reconstruct, and then visualize such data remain difficult to use, extremely noise sensitive, and unreasonably slow. In this paper, we present a multi-camera system that creates a highly accurate (on the order of a centimeter), 3-D reconstruction of an environment in real-time (under 30 ms) that allows for remote interaction between users. This paper focuses on addressing the aforementioned deficiencies by describing algorithms to calibrate, reconstruct, and render objects in the system. 
We demonstrate the accuracy and speed of our results on a variety of benchmarks and data collected from our own system.}, number={3}, journal={IEEE Transactions on Multimedia}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Vasudevan, R and Kurillo, G and Lobaton, E and Bernardin, T and Kreylos, O and Bajcsy, R and Nahrstedt, K}, year={2011}, month={Jun}, pages={573–584} } @inproceedings{lobaton_zhang_patil_alterovitz_2011, title={Planning curvature-constrained paths to multiple goals using circle sampling}, url={http://dx.doi.org/10.1109/icra.2011.5980446}, DOI={10.1109/icra.2011.5980446}, abstractNote={We present a new sampling-based method for planning optimal, collision-free, curvature-constrained paths for nonholonomic robots to visit multiple goals in any order. Rather than sampling configurations as in standard sampling-based planners, we construct a roadmap by sampling circles of constant curvature and then generating feasible transitions between the sampled circles. We provide a closed-form formula for connecting the sampled circles in 2D and generalize the approach to 3D workspaces. We then formulate the multigoal planning problem as finding a minimum directed Steiner tree over the roadmap. Since optimally solving the multi-goal planning problem requires exponential time, we propose greedy heuristics to efficiently compute a path that visits multiple goals. We apply the planner in the context of medical needle steering where the needle tip must reach multiple goals in soft tissue, a common requirement for clinical procedures such as biopsies, drug delivery, and brachytherapy cancer treatment. We demonstrate that our multi-goal planner significantly decreases tissue that must be cut when compared to sequential execution of single-goal plans.}, booktitle={2011 IEEE International Conference on Robotics and Automation}, publisher={IEEE}, author={Lobaton, Edgar and Zhang, Jinghe and Patil, Sachin and Alterovitz, Ron}, year={2011}, month={May} } @inproceedings{lobaton_vasudevan_alterovitz_bajcsy_2011, title={Robust topological features for deformation invariant image matching}, url={http://dx.doi.org/10.1109/iccv.2011.6126538}, DOI={10.1109/iccv.2011.6126538}, abstractNote={Local photometric descriptors are a crucial low level component of numerous computer vision algorithms. In practice, these descriptors are constructed to be invariant to a class of transformations. However, the development of a descriptor that is simultaneously robust to noise and invariant under general deformation has proven difficult. In this paper, we introduce the Topological-Attributed Relational Graph (T-ARG), a new local photometric descriptor constructed from homology that is provably invariant to locally bounded deformation. This new robust topological descriptor is backed by a formal mathematical framework. We apply T-ARG to a set of benchmark images to evaluate its performance. 
Results indicate that T-ARG significantly outperforms traditional descriptors for noisy, deforming images.}, booktitle={2011 International Conference on Computer Vision}, publisher={IEEE}, author={Lobaton, Edgar and Vasudevan, Ram and Alterovitz, Ron and Bajcsy, Ruzena}, year={2011}, month={Nov} } @article{lobaton_vasudevan_bajcsy_sastry_2010, title={A Distributed Topological Camera Network Representation for Tracking Applications}, volume={19}, url={http://dx.doi.org/10.1109/tip.2010.2052273}, DOI={10.1109/tip.2010.2052273}, abstractNote={Sensor networks have been widely used for surveillance, monitoring, and tracking. Camera networks, in particular, provide a large amount of information that has traditionally been processed in a centralized manner employing a priori knowledge of camera location and of the physical layout of the environment. Unfortunately, these conventional requirements are far too demanding for ad-hoc distributed networks. In this article, we present a simplicial representation of a camera network called the camera network complex (CN-complex), that accurately captures topological information about the visual coverage of the network. This representation provides a coordinate-free calibration of the sensor network and demands no localization of the cameras or objects in the environment. A distributed, robust algorithm, validated via two experimental setups, is presented for the construction of the representation using only binary detection information. We demonstrate the utility of this representation in capturing holes in the coverage, performing tracking of agents, and identifying homotopic paths.}, number={10}, journal={IEEE Transactions on Image Processing}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Lobaton, Edgar and Vasudevan, Ramanarayan and Bajcsy, Ruzena and Sastry, Shankar}, year={2010}, month={Oct}, pages={2516–2529} } @inproceedings{vasudevan_lobaton_kurillo_bajcsy_bernardin_hamann_nahrstedt_2010, title={A methodology for remote virtual interaction in teleimmersive environments}, url={http://dx.doi.org/10.1145/1730836.1730871}, DOI={10.1145/1730836.1730871}, abstractNote={Though the quality of imaging devices, the accuracy of algorithms that construct 3D data, and the hardware available to render such data have all improved, the algorithms available to calibrate, reconstruct, and then visualize such data are difficult to use, extremely noise sensitive, and unreasonably slow. In this paper, we describe a multi-camera system that creates a highly accurate (on the order of a centimeter), 3D reconstruction of an environment in real time (under 30 ms) that allows for remote interaction between users. The paper addresses the aforementioned deficiencies by featuring an overview of the technology and algorithms used to calibrate, reconstruct, and render objects in the system. The algorithm produces partial 3D meshes, instead of dense point clouds, which are combined on the renderer to create a unified model of the environment. The chosen representation of the data allows for high compression ratios for transfer to remote sites. 
We demonstrate the accuracy and speed of our results on a variety of benchmarks and data collected from our own system.}, booktitle={Proceedings of the first annual ACM SIGMM conference on Multimedia systems}, publisher={ACM}, author={Vasudevan, Ram and Lobaton, Edgar and Kurillo, Gregorij and Bajcsy, Ruzena and Bernardin, Tony and Hamann, Bernd and Nahrstedt, Klara}, year={2010}, month={Feb} } @inbook{lobaton_vasudevan_bajcsy_alterovitz_2010, title={Local Occlusion Detection under Deformations Using Topological Invariants}, ISBN={9783642155574 9783642155581}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-15558-1_8}, DOI={10.1007/978-3-642-15558-1_8}, abstractNote={Occlusions provide critical cues about the 3D structure of man-made and natural scenes. We present a mathematical framework and algorithm to detect and localize occlusions in image sequences of scenes that include deforming objects. Our occlusion detector works under far weaker assumptions than other detectors. We prove that occlusions in deforming scenes occur when certain well-defined local topological invariants are not preserved. Our framework employs these invariants to detect occlusions with a zero false positive rate under assumptions of bounded deformations and color variation. The novelty and strength of this methodology is that it does not rely on spatio-temporal derivatives or matching, which can be problematic in scenes including deforming objects, but is instead based on a mathematical representation of the underlying cause of occlusions in a deforming 3D scene. We demonstrate the effectiveness of the occlusion detector using image sequences of natural scenes, including deforming cloth and hand motions.}, booktitle={Computer Vision – ECCV 2010}, publisher={Springer Berlin Heidelberg}, author={Lobaton, Edgar and Vasudevan, Ram and Bajcsy, Ruzena and Alterovitz, Ron}, year={2010}, pages={101–114} } @inproceedings{vasudevan_zhou_kurillo_lobaton_bajcsy_nahrstedt_2010, title={Real-time stereo-vision system for 3D teleimmersive collaboration}, url={http://dx.doi.org/10.1109/icme.2010.5582538}, DOI={10.1109/icme.2010.5582538}, abstractNote={Though the variety of desktop real time stereo vision systems has grown considerably in the past several years, few make any verifiable claims about the accuracy of the algorithms used to construct 3D data or describe how the data generated by such systems, which is large in size, can be effectively distributed. In this paper, we describe a system that creates an accurate (on the order of a centimeter), 3D reconstruction of an environment in real time (under 30 ms) that also allows for remote interaction between users. This paper addresses how to reconstruct, compress, and visualize the 3D environment. In contrast to most commercial desktop real time stereo vision systems our algorithm produces 3D meshes instead of dense point clouds, which we show allows for better quality visualizations. The chosen representation of the data also allows for high compression ratios for transfer to remote sites. 
We demonstrate the accuracy and speed of our results on a variety of benchmarks.}, booktitle={2010 IEEE International Conference on Multimedia and Expo}, publisher={IEEE}, author={Vasudevan, Ram and Zhou, Zhong and Kurillo, Gregorij and Lobaton, Edgar and Bajcsy, Ruzena and Nahrstedt, Klara}, year={2010}, month={Jul} } @inproceedings{algebraic_approach_to_recovering_topological_information_in_distributed_camera_networks_2009, title={Algebraic approach to recovering topological information in distributed camera networks}, url={https://ieeexplore.ieee.org/document/5211929}, booktitle={2009 International Conference on Information Processing in Sensor Networks}, year={2009} } @article{lobaton_bayen_2009, title={Modeling and Optimization Analysis of a Single-Flagellum Micro-Structure Through the Method of Regularized Stokeslets}, volume={17}, url={http://dx.doi.org/10.1109/tcst.2008.2011889}, DOI={10.1109/tcst.2008.2011889}, abstractNote={Bacteria such as Rhodobacter sphaeroides use a single flagellum for propulsion and change of orientation. These types of simple organisms have inspired microrobotic designs with potential applications in medicine, which motivates this work. In this paper, an elastic model for a single-flagellum micro-structure is presented and followed by an analysis of the system based on optimization. The model is based on the method of Regularized Stokeslets which allows for a discretization of the system into particles connected by spring forces. The optimization analysis leads to the design of an optimal elasticity distribution that maximizes the mean forward speed of the structure. These elasticity coefficients are obtained through the use of adjoint-based optimization. The results are illustrated through simulations showing improvement in the swimming pattern of the micro-structure.}, number={4}, journal={IEEE Transactions on Control Systems Technology}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Lobaton, E.J. and Bayen, A.M.}, year={2009}, month={Jul}, pages={907–916} } @inproceedings{lobaton_vasudevan_sastry_bajcsy_2009, title={Robust construction of the Camera Network Complex for topology recovery}, url={http://dx.doi.org/10.1109/icdsc.2009.5289401}, DOI={10.1109/icdsc.2009.5289401}, abstractNote={While performing tasks such as estimating the topology of camera network coverage or coordinate-free object tracking and navigation, knowledge of camera position and other geometric constraints about the environment are considered unnecessary. Instead, topological information captured by the construction of a simplicial representation called the CN-Complex can be utilized to perform these tasks. This representation can be thought of as a generalization of the so-called vision graph of a camera network. The construction of this simplicial complex consists of two steps: the decomposition of the camera coverage through the detection of occlusion events, and the discovery of overlapping areas between the multiple decomposed regions. In this paper, we present an algorithm for performing both of these tasks in the presence of multiple targets and noisy observations. The algorithm exploits temporal correlations of the detections to estimate probabilities of overlap in a distributed manner. No correspondence, appearance models, or tracking are utilized. Instead of applying a single threshold on the probabilities, we analyze the persistence of the topological features in our representation through a filtration process. 
We demonstrate the validity of our approach through simulation and an experimental setup.}, booktitle={2009 Third ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC)}, publisher={IEEE}, author={Lobaton, Edgar and Vasudevan, Ram and Sastry, Shankar and Bajcsy, Ruzena}, year={2009}, month={Aug} } @inproceedings{kurillo_vasudevan_lobaton_bajcsy_2008, title={A Framework for Collaborative Real-Time 3D Teleimmersion in a Geographically Distributed Environment}, url={http://dx.doi.org/10.1109/ism.2008.32}, DOI={10.1109/ism.2008.32}, abstractNote={In this paper, we present a framework for immersive 3D video conferencing and geographically distributed collaboration. Our multi-camera system performs a full-body 3D reconstruction of users in real time and renders their image in a virtual space allowing remote interaction between users and the virtual environment. The paper features an overview of the technology and algorithms used for calibration, capturing, and reconstruction. We introduce stereo mapping using adaptive triangulation which allows for fast (under 25 ms) and robust real-time 3D reconstruction. The chosen representation of the data provides high compression ratios for transfer to a remote site. The algorithm produces partial 3D meshes, instead of dense point clouds, which are combined on the renderer to create a unified model of the user. We have successfully demonstrated the use of our system in various applications such as remote dancing and immersive Tai Chi learning.}, booktitle={2008 Tenth IEEE International Symposium on Multimedia}, publisher={IEEE}, author={Kurillo, Gregorij and Vasudevan, Ramanarayan and Lobaton, Edgar and Bajcsy, Ruzena}, year={2008}, month={Dec} } @inproceedings{chen_ahammad_boyer_huang_lin_lobaton_meingast_oh_wang_yan_et al._2008, title={CITRIC: A low-bandwidth wireless camera network platform}, url={http://dx.doi.org/10.1109/icdsc.2008.4635675}, DOI={10.1109/icdsc.2008.4635675}, abstractNote={In this paper, we propose and demonstrate a novel wireless camera network system, called CITRIC. The core component of this system is a new hardware platform that integrates a camera, a frequency-scalable (up to 624 MHz) CPU, 16MB FLASH, and 64MB RAM onto a single device. The device then connects with a standard sensor network mote to form a camera mote. The design enables in-network processing of images to reduce communication requirements, which has traditionally been high in existing camera networks with centralized processing. We also propose a back-end client/server architecture to provide a user interface to the system and support further centralized processing for higher-level applications. Our camera mote enables a wider variety of distributed pattern recognition applications than traditional platforms because it provides more computing power and tighter integration of physical components while still consuming relatively little power. Furthermore, the mote easily integrates with existing low-bandwidth sensor networks because it can communicate over the IEEE 802.15.4 protocol with other sensor network platforms. 
We demonstrate our system on three applications: image compression, target tracking, and camera localization.}, booktitle={2008 Second ACM/IEEE International Conference on Distributed Smart Cameras}, publisher={IEEE}, author={Chen, Phoebus and Ahammad, Parvez and Boyer, Colby and Huang, Shih-I and Lin, Leon and Lobaton, Edgar and Meingast, Marci and Oh, Songhwai and Wang, Simon and Yan, Posu and et al.}, year={2008}, month={Sep} } @article{lobaton_salamon_2007, title={Computation of constant mean curvature surfaces: Application to the gas–liquid interface of a pressurized fluid on a superhydrophobic surface}, volume={314}, ISSN={0021-9797}, url={http://dx.doi.org/10.1016/j.jcis.2007.05.059}, DOI={10.1016/j.jcis.2007.05.059}, abstractNote={The interface shape separating a gas layer within a superhydrophobic surface consisting of a square lattice of posts from a pressurized liquid above the surface is computed numerically. The interface shape is described by a constant mean curvature surface that satisfies the Young-Laplace equation with the three-phase gas-liquid-solid contact line assumed pinned at the post outer edge. The numerical method predicts the existence of constant mean curvature solutions from the planar, zero curvature solution up to a maximum curvature that is dependent on the post shape, size and pitch. An overall force balance between surface tension and pressure forces acting on the interface yields predictions for the maximum curvature that agree with the numerical simulations to within one percent for convex shapes such as circular and square posts, but significantly over predicts the maximum curvature for non-convex shapes such as a circular post with a sinusoidal surface perturbation. Changing the post shape to increase the contact line length, while maintaining constant post area, results in increases of 2 to 12% in the maximum computable curvature for contact line length increases of 11 to 77%. Comparisons are made to several experimental studies for interface shape and pressure stability.}, number={1}, journal={Journal of Colloid and Interface Science}, publisher={Elsevier BV}, author={Lobaton, E.J. and Salamon, T.R.}, year={2007}, month={Oct}, pages={184–198} } @inproceedings{lobaton_bayen_2007, title={Modeling and Optimization Analysis of Single Flagellum Bacterial Motion}, url={http://dx.doi.org/10.1109/acc.2007.4282439}, DOI={10.1109/acc.2007.4282439}, abstractNote={Bacteria such as Rhodobacter sphaeroides use a single flagellum for propulsion and change of orientation. Simple organisms such as this have inspired nanorobotic designs with potential applications in medicine which motivates the present work. In this article, an elastic model for a single flagellum bacterium is presented and followed by an analysis of the system based on optimization. The model is based on the method of Regularized Stokeslet which allows for a discretization of the system into particles which are connected by spring forces. An optimal elasticity distribution that maximizes the mean forward speed is obtained. These elasticity coefficients are obtained through the use of an adjoint-based optimization scheme. 
The results are illustrated through a simulation showing improvement in the swimming pattern of the bacteria.}, booktitle={2007 American Control Conference}, publisher={IEEE}, author={Lobaton, Edgar and Bayen, Alexandre}, year={2007}, month={Jul} } @article{ahuja_taylor_lifton_sidorenko_salamon_lobaton_kolodner_krupenkin_2008, title={Nanonails: A Simple Geometrical Approach to Electrically Tunable Superlyophobic Surfaces}, volume={24}, ISSN={0743-7463 1520-5827}, url={http://dx.doi.org/10.1021/la702327z}, DOI={10.1021/la702327z}, abstractNote={In this work, dynamically tunable, superlyophobic surfaces capable of undergoing a transition from profound superlyophobic behavior to almost complete wetting have been demonstrated for the first time. In the initial state, with no voltage applied, these surfaces exhibit contact angles as high as 150 degrees for a wide variety of liquids with surface tensions ranging from 21.8 mN/m (ethanol) to 72.0 mN/m (water). Upon application of an electrical voltage, a transition from the superlyophobic state to wetting is observed. We have examined experimentally and theoretically the nature of these transitions. The reported results provide novel methods of manipulating liquids on the microscale.}, number={1}, journal={Langmuir}, publisher={American Chemical Society (ACS)}, author={Ahuja, A. and Taylor, J. A. and Lifton, V. and Sidorenko, A. A. and Salamon, T. R. and Lobaton, E. J. and Kolodner, P. and Krupenkin, T. N.}, year={2008}, month={Jan}, pages={9–14} } @article{flores_lobaton_mendezdiez_tlupova_cortez_2005, title={A study of bacterial flagellar bundling}, volume={67}, ISSN={0092-8240}, url={http://dx.doi.org/10.1016/J.BULM.2004.06.006}, DOI={10.1016/J.BULM.2004.06.006}, abstractNote={Certain bacteria, such as Escherichia coli (E. coli) and Salmonella typhimurium (S. typhimurium), use multiple flagella often concentrated at one end of their bodies to induce locomotion. Each flagellum is formed in a left-handed helix and has a motor at the base that rotates the flagellum in a corkscrew motion. We present a computational model of the flagellar motion and their hydrodynamic interaction. The model is based on the equations of Stokes flow to describe the fluid motion. The elasticity of the flagella is modeled with a network of elastic springs while the motor is represented by a torque at the base of each flagellum. The fluid velocity due to the forces is described by regularized Stokeslets and the velocity due to the torques by the associated regularized rotlets. Their expressions are derived. The model is used to analyze the swimming motion of a single flagellum and of a group of three flagella in close proximity to one another. When all flagellar motors rotate counterclockwise, the hydrodynamic interaction can lead to bundling. We present an analysis of the flow surrounding the flagella. When at least one of the motors changes its direction of rotation, the same initial conditions lead to a tumbling behavior characterized by the separation of the flagella, changes in their orientation, and no net swimming motion. The analysis of the flow provides some intuition for these processes.}, number={1}, journal={Bulletin of Mathematical Biology}, publisher={Springer Science and Business Media LLC}, author={Flores, H and Lobaton, E and Mendezdiez, S and Tlupova, S and Cortez, R}, year={2005}, month={Jan}, pages={137–168} }