@article{vincent_ward_moore_chen_pak_yepremyan_wilson_goh_2023,
  title={CLOVER: Contrastive Learning for Onboard Vision-Enabled Robotics},
  ISSN={1533-6794},
  DOI={10.2514/1.A35767},
  abstractNote={Current deep-learning models employed by the planetary science community are constrained by a dearth of annotated training data for planetary images. Current models also frequently suffer from inductive bias due to domain shifts when the same model is applied to data obtained from different spacecraft or different time periods. Moreover, power and compute constraints preclude state-of-the-art vision models from being deployed on robotic spacecraft. In this research, we propose a self-supervised learning (SSL) framework that leverages contrastive learning techniques to improve upon state-of-the-art performance on several published Mars computer vision benchmarks. Our SSL framework enables models to be trained using fewer labels, generalize well to different tasks, and achieve higher computational efficiency. Results on published Mars computer vision benchmarks show that contrastive pretraining outperforms plain supervised learning by 2–10%. We further investigate the importance of dataset heterogeneity in mixed-domain contrastive pretraining. Using self-supervised distillation, we were also able to train a compact ResNet-18 student model to achieve better accuracy than its ResNet-152 teacher model while having 5.2 times fewer parameters. We expect these SSL techniques to be relevant to the planning of future robotic missions and to the remote-sensing identification of target destinations with high scientific value.},
  journal={Journal of Spacecraft and Rockets},
  author={Vincent, Grace M. and Ward, Isaac R. and Moore, Charles and Chen, Jingdao and Pak, Kai and Yepremyan, Alice and Wilson, Brian and Goh, Edwin Y.},
  year={2023},
  month={Dec}
}

@inproceedings{goh_ward_vincent_pak_chen_wilson_2023,
  title={Self-supervised Distillation for Computer Vision Onboard Planetary Robots},
  ISSN={1095-323X},
  DOI={10.1109/AERO55745.2023.10115598},
  abstractNote={In situ exploration of planets beyond Mars will largely depend on autonomous robotic agents for the foreseeable future. These autonomous planetary explorers need to perceive and understand their surroundings in order to make decisions that maximize science return and minimize risk. Deep learning has demonstrated strong performance on a variety of computer vision and image processing tasks, and has become the main approach for powering terrestrial autonomous systems, from robotic vacuum cleaners to self-driving cars. However, deep learning systems require significant volumes of annotated data to optimize the models' parameters, a luxury not afforded by in situ missions to new locations in our Solar System. Moreover, space-qualified hardware used on robotic space missions relies on legacy technologies due to power constraints and extensive flight qualification requirements (e.g., radiation tolerance), resulting in computational limitations that prevent the use of deep learning models for real-time robotic perception tasks (e.g., obstacle detection, terrain segmentation). In this paper, we address these two challenges by leveraging self-supervised distillation to train small, efficient deep learning models that can match or outperform state-of-the-art results obtained by significantly larger models on Mars image classification and terrain segmentation tasks. Using a set of 100,000 unlabeled images taken by Curiosity and large self-supervised vision models, we distill a variety of small model architectures and evaluate their performance on the published test sets for the MSL classification benchmark and the AI4Mars segmentation benchmark. Experimental results show that on the MSL v2.1 classification task, the best-performing student ResNet-18 model achieves a model compression ratio of 5.2 when distilled from a pretrained ResNet-152 teacher model. In addition, we show that using in-domain images for distillation and increasing the size of the distillation dataset both have a positive effect on downstream vision tasks. Overall, the results indicate that self-supervised distillation enables small models to achieve state-of-the-art performance on the benchmark datasets, supporting the feasibility of real-time inference with these small distilled models on next-generation flight hardware such as the High Performance Spaceflight Computer (HPSC).},
  booktitle={2023 IEEE Aerospace Conference},
  author={Goh, Edwin and Ward, Isaac R. and Vincent, Grace and Pak, Kai and Chen, Jingdao and Wilson, Brian},
  year={2023}
}

@inproceedings{vincent_pak_martinez_goh_wang_bue_holt_wilson_2023,
  title={Unsupervised SAR Images for Submesoscale Oceanic Eddy Detection},
  ISSN={2153-6996},
  DOI={10.1109/IGARSS52108.2023.10282488},
  abstractNote={Accurate and efficient identification of submesoscale ocean eddies is crucial for understanding ocean circulation, tracer mixing, and energy transfer, especially in coastal regions. However, current methodologies face challenges due to their reliance on extremely large datasets that typically require specialized domain expertise. To address these limitations, we propose a novel semi-supervised framework in this study. Leveraging self-supervised contrastive learning, we extract meaningful features from unlabeled SAR images and fine-tune them using a small set of labeled images. By employing the SimCLR and MoCo algorithms, we achieve promising outcomes and superior performance in SAR-based submesoscale eddy detection, surpassing supervised techniques. The proposed approach yields a top F1-Macro of 0.85 on Mediterranean SAR patches and 0.80-0.83 on California SAR patches.},
  booktitle={IGARSS 2023 - 2023 IEEE International Geoscience and Remote Sensing Symposium},
  author={Vincent, Grace and Pak, Kai and Martinez, Diego and Goh, Edwin and Wang, Jinbo and Bue, Brian and Holt, Ben and Wilson, Brian},
  year={2023},
  pages={2065--2068}
}