@article{asthana_krim_sun_roheda_xie_2021,
  title={Atlantic Hurricane Activity Prediction: A Machine Learning Approach},
  volume={12},
  number={4},
  ISSN={2073-4433},
  url={https://doi.org/10.3390/atmos12040455},
  DOI={10.3390/atmos12040455},
  abstractNote={Long-term hurricane predictions have been of acute interest as a means of protecting communities from loss of life and environmental damage. Such predictions help by providing early warning guidance for proper precaution and planning. In this paper, we present a machine learning model capable of making good preseason predictions of Atlantic hurricane activity. The development of this model entails a judicious and non-linear fusion of various data modalities such as sea-level pressure (SLP), sea surface temperature (SST), and wind. A Convolutional Neural Network (CNN) was utilized as a feature extractor for each data modality, followed by feature-level fusion to achieve proper inference. This highly non-linear model was further shown to have the potential to make skillful predictions up to 18 months in advance.},
  journal={Atmosphere},
  publisher={MDPI AG},
  author={Asthana, Tanmay and Krim, Hamid and Sun, Xia and Roheda, Siddharth and Xie, Lian},
  year={2021},
  month={Apr}
}

@article{roheda_krim_luo_wu_2021,
  title={Event Driven Sensor Fusion},
  volume={188},
  ISSN={1872-7557},
  url={https://doi.org/10.1016/j.sigpro.2021.108241},
  DOI={10.1016/j.sigpro.2021.108241},
  abstractNote={Multi-sensor fusion has long been of interest in target detection and tracking. Different sensors are capable of observing different characteristics of a target, hence providing additional information toward determining a target’s identity. If used constructively, any additional information should have a positive impact on the performance of the system. In this paper, we consider such a scenario and present a principled approach toward ensuring constructive combination of the various sensors. We look at decision-level sensor fusion in a different light, wherein each sensor is said to make a decision on the occurrence of certain events that it is capable of observing, rather than on whether a certain target is present. These events are formalized for each sensor according to the attributes it can potentially extract to define targets. The proposed technique also explores the extent of dependence between the features/events being observed by the sensors, and hence generates more informed probability distributions over the events. We study two different datasets: the first combines a radar sensor with an optical sensor for detection of space debris, while the second combines a seismic sensor with an acoustic sensor in order to detect human and vehicular targets in a field of interest. Provided some additional information about the features of the object, this fusion technique can outperform existing decision-level fusion approaches that do not take into account the relationship between different features. Furthermore, this paper also addresses coping with damaged sensors by learning a hidden space between sensor modalities, which can be exploited to safeguard detection performance.},
  journal={Signal Processing},
  author={Roheda, Siddharth and Krim, Hamid and Luo, Zhi-Quan and Wu, Tianfu},
  year={2021},
  month={Nov}
}

@article{roheda_krim_riggan_2021,
  title={Robust Multi-Modal Sensor Fusion: An Adversarial Approach},
  volume={21},
  number={2},
  pages={1885–1896},
  ISSN={1558-1748},
  url={https://doi.org/10.1109/JSEN.2020.3018698},
  DOI={10.1109/JSEN.2020.3018698},
  abstractNote={In recent years, multi-modal fusion has attracted a lot of research interest, both in academia and in industry. Multi-modal fusion entails the combination of information from a set of different types of sensors. Exploiting complementary information from different sensors, we show that target detection and classification problems can greatly benefit from this fusion approach, resulting in a performance increase. To achieve this gain, the information fusion from various sensors is shown to require a principled strategy to ensure that additional information is constructively used and has a positive impact on performance. We subsequently demonstrate the viability of the proposed fusion approach by weakening the strong dependence on the functionality of all sensors, hence introducing additional flexibility in our solution and lifting the severe limitation in unconstrained surveillance settings with potential environmental impact. Our proposed data-driven approach to multi-modal fusion exploits selected optimal features from an estimated latent space of data across all modalities. This hidden space is learned via a generative network conditioned on individual sensor modalities. The hidden space, as an intrinsic structure, is then exploited in detecting damaged sensors and in subsequently safeguarding the performance of the fused sensor system. Experimental results show that such an approach can achieve automatic system robustness against noisy/damaged sensors.},
  journal={IEEE Sensors Journal},
  author={Roheda, Siddharth and Krim, Hamid and Riggan, Benjamin S.},
  year={2021},
  month={Jan}
}