@string{smarthealth = {Smart Health}}

@article{shen_li_2025,
  title   = {A low-channel {EEG}-to-speech conversion approach for assisting people with communication disorders},
  volume  = {3},
  doi     = {10.1016/j.smhl.2025.100568},
  journal = smarthealth,
  author  = {Shen, Kunning and Li, Huining},
  year    = {2025},
  month   = mar,
}

@article{zhong_jia_li_2025,
  title        = {An adaptive multimodal fusion framework for smartphone-based medication adherence monitoring of {Parkinson}'s disease},
  doi          = {10.1016/j.smhl.2025.100561},
  abstractNote = {Ensuring medication adherence for Parkinson's disease (PD) patients is crucial to relieve patients' symptoms and better customizing regimens according to patient's clinical responses. However, traditional self-management approaches are often error-prone and have limited effectiveness in improving adherence. While smartphone-based solutions have been introduced to monitor various PD metrics, including medication adherence, these methods often rely on single-modality data or fail to fully leverage the advantages of multimodal integration. To address the issues, we present an adaptive multimodal fusion framework for monitoring medication adherence of PD based on a smartphone. Specifically, we segment and transform raw data from sensors to spectrograms. Then, we integrate multimodal data with quantification of their qualities and perform gradient modulation based on the contribution of each modality. Afterward, we monitor medication adherence in PD patients by detecting their medicine intake status. We evaluate the performance with the dataset from daily-life scenarios involving 455 patients. The results show that our work can achieve around 94\% accuracy in medication adherence monitoring, indicating that our proposed framework is a promising tool to facilitate medication adherence monitoring in PD patients' daily lives.},
  journal      = smarthealth,
  author       = {Zhong, Chongxin and Jia, Jinyuan and Li, Huining},
  year         = {2025},
  month        = mar,
}

@article{wang_chen_li_xu_chang_li_2025,
  title   = {Continuous prediction of user dropout in a mobile mental health intervention program: An exploratory machine learning approach},
  doi     = {10.1016/j.smhl.2025.100565},
  journal = smarthealth,
  author  = {Wang, Pinxiang and Chen, Hanqi and Li, Zhouyu and Xu, Wenyao and Chang, Yu-Ping and Li, Huining},
  year    = {2025},
  month   = mar,
}

@inproceedings{ranganath_zhang_fu_jia_chen_li_xu_2025,
  title        = {Hand-Grip Strength Estimation through Bioacoustic Sensing},
  booktitle    = {IEEE International Conference on Body Sensor Networks (BSN)},
  doi          = {10.1109/bsn66969.2025.11337954},
  abstractNote = {In order to determine the overall health of an individual, hand grip strength has emerged as a reliable and widely used indicator of muscular and functional health. However, the conventional devices for measuring grip strength, such as dynamometers, require direct interaction with a bulky external device. In this work, we propose a novel, cost-effective approach to estimate grip strength using bio-acoustic signals captured from the forearm via a compact armband equipped with low-power MEMS microphones. Our method performs well on grip strength classification with an accuracy of 93.33\%, and as a proof of concept, demonstrates a promising direction for non-invasive grip strength estimation.},
  author       = {Ranganath, Rakshita and Zhang, Zhi and Fu, Yuliang and Jia, Jinyuan and Chen, Ying and Li, Huining and Xu, Chenhan},
  year         = {2025},
  month        = nov,
}

@inproceedings{ashik_xie_chen_xu_li_2025,
  title        = {Language-Agnostic Speech Biomarker Exploration for Early Dementia Screening},
  booktitle    = {IEEE International Conference on Body Sensor Networks (BSN)},
  doi          = {10.1109/bsn66969.2025.11337743},
  abstractNote = {Early dementia detection is a global healthcare priority in diverse populations. In this study, we propose a language-agnostic screening pipeline for dementia detection in the early stage. First, we use speaker diarization to isolate the speech of the target subject from a conversational recording. From the extracted speech segments, we derive a set of acoustic features (e.g., spectral centroid, pitch mean, mel-frequency cepstral coefficients) and linguistic features (e.g., normalized tone contrast, articulation clarity coefficient, articulatory effort coefficient). These features are used to train a ResNet-based binary classifier to distinguish between Healthy Controls (HC) and individuals with Mild Cognitive Impairment (MCI). We evaluated the trained model on a held-out test set comprising speakers of previously unseen languages, achieving an accuracy of 70\%. This cross-lingual transfer performance highlights the potential of our approach for scalable, language-independent dementia screening.},
  author       = {Ashik, Josh and Xie, Zongxing and Chen, Ying and Xu, Chenhan and Li, Huining},
  year         = {2025},
  month        = nov,
}

@inproceedings{fu_zhang_ranganath_li_liu_sui_li_xu_2025,
  title        = {{TouchWave}: Exploring {mmWave}-Based Non-Contact Fingertip-Force Sensing in Activities of Daily Living},
  booktitle    = {IEEE International Conference on Body Sensor Networks (BSN)},
  doi          = {10.1109/bsn66969.2025.11337366},
  abstractNote = {Fingertip forces are important biomarkers for the detection and management of various conditions, including stroke and Parkinson's disease. This paper presents TouchWave, a non-contact sensing system designed to monitor fingertip forces during activities of daily living (ADL). TouchWave leverages under-cabinet millimeter-wave (mmWave) sensors to capture both macroscopic hand movements and subtle biomechanical cues associated with fingertip force production. A novel signal processing scheme is developed to suppress noise while preserving force-related information in the mmWave signals. Additionally, a hybrid deep neural network model is proposed to estimate high-fidelity fingertip forces. A comprehensive evaluation involving 21 participants demonstrates the effectiveness of TouchWave in both controlled settings and ADL scenarios.},
  author       = {Fu, Yuliang and Zhang, Zhi and Ranganath, Rakshita and Li, Zhizhen and Liu, Yuchen and Sui, Ning and Li, Huining and Xu, Chenhan},
  year         = {2025},
  month        = nov,
}

@inproceedings{zhong_guo_gehi_xu_li_2025,
  title        = {Wearable {PPG}-to-Multi-Lead {ECG} Conversion for Cardiac Monitoring},
  booktitle    = {IEEE International Conference on Body Sensor Networks (BSN)},
  doi          = {10.1109/bsn66969.2025.11337909},
  abstractNote = {The electrocardiogram (ECG) has been the gold standard for heart disease evaluation due to the rich information about the electrical activity of the heart contained in it. However, existing ECG monitoring devices either lack the capability for continuous monitoring or are unable to support multi-lead ECG recordings. To address the issues, we propose an approach for generating multi-lead ECG from photoplethysmogram (PPG), which can be passively monitored by wearable devices such as smartwatches. The PPG collected from wearable devices is first passed to a trained conditional diffusion model to generate the single-lead ECG, and then through a long short-term memory (LSTM) model to construct and predict the multi-lead ECG. The final outputs can be used to monitor and detect abnormal cardiac patterns in daily life. We evaluate the performance of our proposed approach with the dataset collected from daily-life scenarios involving 32 subjects. The results show that our approach can generate multi-lead ECGs accurately. In addition, a case study is conducted using data collected from the hospital, which demonstrates the effectiveness of our approach in detecting ST elevation. (ST elevation refers to an upward deviation of the ST segment on an electrocardiogram (ECG) from the baseline, indicating a potential heart attack or other cardiac issues. It is a crucial diagnostic finding in acute myocardial infarction (heart attack) and requires prompt medical attention. It is a key indicator of myocardial ischemia in practice.)},
  author       = {Zhong, Chongxin and Guo, Zhishan and Gehi, Anil Kishin and Xu, Chenhan and Li, Huining},
  year         = {2025},
  month        = nov,
}

@inproceedings{zhang_zhong_fu_chen_jia_li_xu_2025,
  title        = {{mV-IMU}: {mmWave}-Enabled Virtual Inertia Measurement Unit for High-Fidelity Activities of Daily Living Monitoring},
  booktitle    = {IEEE International Conference on Body Sensor Networks (BSN)},
  doi          = {10.1109/bsn66969.2025.11337466},
  abstractNote = {Monitoring human motion through inertial metrics is vital for healthcare, rehabilitation, and activity recognition. Traditional approaches rely on wearable inertial measurement units (IMUs), which, despite their accuracy, impose burdens due to their intrusive nature, limiting long-term usability. To mitigate this, recent advances explore device-free alternatives, such as pose-based inertial inference from video or mmWave sensing. However, inertial signals derived from pose tracking are prone to error amplification during differentiation. In this paper, we present mV-IMU, a novel mmWave-enabled Virtual Inertial Measurement Unit framework that bypasses pose estimation altogether to directly reconstruct body accelerations from raw mmWave signals. Our approach leverages a deep inertia reconstruction model trained on kinematics-informed features extracted from mmWave point clouds, integrated with a physics-guided optimization scheme for enhanced accuracy. Extensive evaluations show that mV-IMU achieves inertial measurement fidelity close to wearable IMUs, enabling practical, non-intrusive motion monitoring for smart healthcare and rehabilitation contexts.},
  author       = {Zhang, Zhi and Zhong, Chongxin and Fu, Yuliang and Chen, Ying and Jia, Jinyuan and Li, Huining and Xu, Chenhan},
  year         = {2025},
  month        = nov,
}

@article{zhang_li_xu_song_li_xue_wu_xu_2025,
  title        = {{mmHand}: Toward Pixel-Level-Accuracy Hand Localization Using a Single Commodity {mmWave} Device},
  volume       = {12},
  number       = {12},
  doi          = {10.1109/JIOT.2025.3546560},
  abstractNote = {The hand localization problem has been a longstanding focus due to its many applications. The task involves modeling the hand as a singular point and determining its position within a defined coordinate system. However, due to data modality limitations, existing hand localization technologies face several challenges. For example, vision-based localization raises privacy concerns, while wearable-based methods compromise user comfort. In this article, we introduce mmHand, a new device-free, privacy-preserving dynamic hand localization system with pixel-level accuracy, using a single commodity mmWave device. We first propose a mmImage generation tool to fully extract spatial information from raw mmWave data and introduce a novel 2-D image-format representation of mmWave data. Next, we design a framework that provides a new quality evaluation method and pixel space labeling for the mmWave data. Finally, we present a cross-modality spatial feature-enhanced model with high spatial feature extraction capabilities, which can accurately localize hand positions at the pixel level in the mmWave radar U-V pixel coordinate system. We evaluate the system with experiments on 12 subjects in three scenarios, and the results across four metrics demonstrate the effectiveness of our hand localization system.},
  journal      = {IEEE Internet of Things Journal},
  author       = {Zhang, Xiaoyu and Li, Zhengxiong and Xu, Chenhan and Song, Luchuan and Li, Huining and Xue, Hongfei and Wu, Yingxiao and Xu, Wenyao},
  year         = {2025},
  month        = feb,
  pages        = {20800--20814},
}

@inproceedings{li_qian_ma_xu_li_li_lin_huang_xu_2023,
  title        = {{TherapyPal}: Towards a Privacy-Preserving Companion Diagnostic Tool based on Digital Symptomatic Phenotyping},
  booktitle    = {Proceedings of the ACM International Conference on Mobile Computing and Networking (MobiCom)},
  doi          = {10.1145/3570361.3592499},
  abstractNote = {As the demand for precision medicine rapidly grows, companion diagnostics is proposed to monitor and evaluate therapeutic effects for adjusting medicine plans in time. Although a set of clinical companion diagnostics tools (e.g., polymerase chain reaction) have been investigated, they are expensive and only accessible in a lab environment, which hinders the promotion to broader patients. In light of this situation, we take the first steps towards developing a real-world companion diagnostic tool by leveraging mobile technology. In this paper, we present TherapyPal, a privacy-preserving medicine effectiveness computational framework by harnessing semantic hashing-based digital symptomatic phenotyping. Specifically, sensor data captured from daily-life activities is first transformed into spectrograms. Then, we develop a hashing learning network to extract privacy-masked symptomatic phenotypes on smartphones. Afterward, symptomatic hashes at different medicine states are fed to a contrastive learning network in the cloud for treatment effectiveness detection. To evaluate the performance, we conduct a clinical study among 65 Parkinson's disease (PD) patients under dopaminergic drug treatment. The results show that TherapyPal can achieve around 84.1\% medicine effectiveness detection accuracy among patients and above 0.925 privacy-masked scores for protecting each private attribute, which validates the reliability and security of TherapyPal to be used as a real-world companion diagnostics tool.},
  author       = {Li, Huining and Qian, Xiaoye and Ma, Ruokai and Xu, Chenhan and Li, Zhengxiong and Li, Dongmei and Lin, Feng and Huang, Ming-Chun and Xu, Wenyao},
  year         = {2023},
  month        = sep,
}

@article{li_chen_qian_chen_li_bhattacharjee_zhang_huang_xu_2022,
  title        = {An explainable {COVID-19} detection system based on human sounds},
  doi          = {10.1016/j.smhl.2022.100332},
  abstractNote = {Acoustic signals generated by the human body have often been used as biomarkers to diagnose and monitor diseases. As the pathogenesis of COVID-19 indicates impairments in the respiratory system, digital acoustic biomarkers of COVID-19 are under investigation. In this paper, we explore an accurate and explainable COVID-19 diagnosis approach based on human speech, cough, and breath data using the power of machine learning. We first analyze our design space considerations from the data aspect and model aspect. Then, we perform data augmentation, Mel-spectrogram transformation, and develop a deep residual architecture-based model for prediction. Experimental results show that our system outperforms the baseline, with the ROC-AUC result increased by 5.47\%. Finally, we perform an interpretation analysis based on the visualization of the activation map to further validate the model.},
  journal      = smarthealth,
  author       = {Li, Huining and Chen, Xingyu and Qian, Xiaoye and Chen, Huan and Li, Zhengxiong and Bhattacharjee, Soumyadeep and Zhang, Hanbin and Huang, Ming-Chun and Xu, Wenyao},
  year         = {2022},
  month        = oct,
}

@article{li_chen_xu_li_zhang_qian_li_huang_xu_2022,
  title        = {{NeuralGait}},
  doi          = {10.1145/3569476},
  abstractNote = {Brain health attracts more recent attention as the population ages. Smartphone-based gait sensing and analysis can help identify the risks of brain diseases in daily life for prevention. Existing gait analysis approaches mainly hand-craft temporal gait features or developing CNN-based feature extractors, but they are either prone to lose some inconspicuous pathological information or are only dedicated to a single brain disease screening. We discover that the relationship between gait segments can be used as a principle and generic indicator to quantify multiple pathological patterns. In this paper, we propose NeuralGait, a pervasive smartphone-cloud system that passively captures and analyzes principle gait segments relationship for brain health assessment. On the smartphone end, inertial gait data are collected while putting the smartphone in the pants pocket. We then craft local temporal-frequent gait domain features and develop a self-attention-based gait segment relationship encoder. Afterward, the domain features and relation features are fed to a scalable RiskNet in the cloud for brain health assessment. We also design a pathological hot update protocol to efficiently add new brain diseases in the RiskNet. NeuralGait is practical as it provides brain health assessment with no burden in daily life. In the experiment, we recruit 988 healthy people and 417 patients with a single or combination of PD, TBI, and stroke, and evaluate the brain health assessment using a set of specifically designed metrics including global accuracy, exact accuracy, sensitivity, and false alarm rate. We also demonstrate the generalization (e.g., analysis of feature effectiveness and model efficiency) and inclusiveness of NeuralGait.},
  journal      = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies},
  author       = {Li, Huining and Chen, Huan and Xu, Chenhan and Li, Zhengxiong and Zhang, Hanbin and Qian, Xiaoye and Li, Dongmei and Huang, Ming-Chun and Xu, Wenyao},
  year         = {2022},
  month        = dec,
}

@article{li_chen_xu_das_chen_li_xiao_huang_xu_2021,
  title   = {Privacy computing using deep compression learning techniques for neural decoding},
  doi     = {10.1016/j.smhl.2021.100229},
  journal = smarthealth,
  author  = {Li, Huining and Chen, Huan and Xu, Chenhan and Das, Anarghya and Chen, Xingyu and Li, Zhengxiong and Xiao, Jian and Huang, Ming-Chun and Xu, Wenyao},
  year    = {2021},
  month   = nov,
}

@article{li_zheng_zhong_xu_roma_lamkin_visger_chang_xu_2021,
  title        = {Stress prediction using micro-{EMA} and machine learning during {COVID-19} social isolation},
  doi          = {10.1016/j.smhl.2021.100242},
  abstractNote = {Accurately predicting users' perceived stress is beneficial to aid early intervention and prevent both mental illness and physical disease during the COVID-19 pandemic. However, the existing perceived stress predicting system needs to collect a large amount of previous data for training but has a limited prediction range (i.e., next 1-2 days). Therefore, we propose a perceived stress prediction system based on the history data of micro-EMA for identifying risks 7 days earlier. Specifically, we first select and deliver an optimal set of micro-EMA questions to users every Monday, Wednesday, and Friday for reducing the burden. Then, we extract time-series features from the past micro-EMA responses and apply an Elastic net regularization model to discard redundant features. After that, selected features are fed to an ensemble prediction model for forecasting fine-grained perceived stress in the next 7 days. Experiment results show that our proposed prediction system can achieve around 4.26 (10.65\% of the scale) mean absolute error for predicting the next 7 day's PSS scores, and higher than 81\% accuracy for predicting the next 7 day's stress labels.},
  journal      = smarthealth,
  author       = {Li, Huining and Zheng, Enhao and Zhong, Zijian and Xu, Chenhan and Roma, Nicole and Lamkin, Steven and Von Visger, Tania T. and Chang, Yu-Ping and Xu, Wenyao},
  year         = {2021},
  month        = nov,
}

@article{li_xu_rathore_li_zhang_song_wang_su_lin_ren_et al._2021,
  title        = {{VocalPrint}: A {mmWave}-Based Unmediated Vocal Sensing System for Secure Authentication},
  doi          = {10.1109/TMC.2021.3084971},
  abstractNote = {With the continuing growth of voice-controlled devices, voice metrics have been widely used for user identification. However, voice biometrics is vulnerable to replay attacks and ambient noise. We identify that the fundamental vulnerability in voice biometrics is rooted in its indirect sensing modality (e.g., microphone). In this paper, we present VocalPrint, a resilient mmWave interrogation system which directly captures and analyzes the vocal vibrations for user authentication. Specifically, VocalPrint exploits the unique disturbance of the skin-reflect radio frequency (RF) signals around the near-throat region of the user, caused by the vocal vibrations. The complex ambient noise is isolated from the RF signal using a novel resilience-aware clutter suppression approach for preserving fine-grained vocal biometric properties. Afterward, we extract the vocal tract and vocal source features and input them into an ensemble classifier for authentication. VocalPrint is practical as it allows the effortless transition to a smartphone while having sufficient usability due to its non-contact nature. Our experimental results from 41 participants with different interrogation distances, orientations, and body motions show that VocalPrint achieves over 96 percent authentication accuracy even under unfavorable conditions. We demonstrate the resilience of our system against complex noise interference and spoof attacks of various threat levels.},
  journal      = {IEEE Transactions on Mobile Computing},
  author       = {Li, Huining and Xu, Chenhan and Rathore, Aditya Singh and Li, Zhengxiong and Zhang, Hanbin and Song, Chen and Wang, Kun and Su, Lu and Lin, Feng and Ren, Kui and others},
  year         = {2021},
  month        = may,
}