@article{farhangi_bian_huang_xiong_wang_guo_2023, title={AA-forecast: anomaly-aware forecast for extreme events}, volume={37}, ISSN={1384-5810 1573-756X}, url={http://dx.doi.org/10.1007/s10618-023-00919-7}, DOI={10.1007/s10618-023-00919-7}, abstractNote={Time series models often are impacted by extreme events and anomalies, both prevalent in real-world datasets. Such models require careful probabilistic forecasts, which is vital in risk management for extreme events such as hurricanes and pandemics. However, it's challenging to automatically detect and learn from extreme events and anomalies in large-scale datasets, which often results in extra manual effort. Here, we propose an anomaly-aware forecast framework that leverages the effects of anomalies to improve its prediction accuracy during the presence of extreme events. Our model is trained to extract anomalies automatically and incorporates them through an attention mechanism to increase the accuracy of forecasts during extreme events. Moreover, the framework employs a dynamic uncertainty optimization algorithm that reduces the uncertainty of forecasts in an online manner. The proposed framework demonstrated consistently superior accuracy with less uncertainty on three datasets with different varieties of anomalies over the current prediction models.}, number={3}, journal={Data Mining and Knowledge Discovery}, publisher={Springer Science and Business Media LLC}, author={Farhangi, Ashkan and Bian, Jiang and Huang, Arthur and Xiong, Haoyi and Wang, Jun and Guo, Zhishan}, year={2023}, month={Mar}, pages={1209–1229} } @article{al arafat_vaidhun_liu_yang_guo_2023, title={Compositional Mixed-Criticality Systems with Multiple Executions and Resource-Budgets Model}, ISSN={["1545-3421"]}, DOI={10.1109/RTAS58335.2023.00013}, abstractNote={Software reusability and system modularity are key features of modern autonomous systems. As a consequence, there is a rapid shift towards hierarchical and compositional architecture, as evidenced by AUTOSAR in automobiles and ROS2 in robotics. The resource-budget supply model is widely applied in the real-time analysis of such systems. Meanwhile, real-time systems with multiple critical levels have received significant attention from the research community and industry. These systems are designed with multiple execution budgets for multiple system-critical levels. Existing studies on mixed-criticality systems consider a dedicated resource supply. This paper considers a novel generalized system model with multiple execution estimations and resource-budget supplies for compositional systems. An analytical model and a demand-bound function-based schedulability test are presented for the EDF-based scheduler in the proposed compositional mixed-criticality system. A range for setting the resource supply period is derived to ensure the schedulability of workloads when supply budgets are known. 
The general performance of the scheduling framework and its wider applicability are further demonstrated and evaluated using synthetic workloads and resource models, where synthetic workload parameters are derived through a case study on an autonomous driving system.}, journal={2023 IEEE 29TH REAL-TIME AND EMBEDDED TECHNOLOGY AND APPLICATIONS SYMPOSIUM, RTAS}, author={Al Arafat, Abdullah and Vaidhun, Sudharsan and Liu, Liangkai and Yang, Kecheng and Guo, Zhishan}, year={2023}, pages={67–79} } @article{hossain_guo_choi_2023, title={Estimation of Lower Extremity Joint Moments and 3D Ground Reaction Forces Using IMU Sensors in Multiple Walking Conditions: A Deep Learning Approach}, volume={27}, ISSN={["2168-2208"]}, url={https://doi.org/10.1109/JBHI.2023.3262164}, DOI={10.1109/JBHI.2023.3262164}, abstractNote={Human kinetics, specifically joint moments and ground reaction forces (GRFs), can provide important clinical information and can be used to control assistive devices. Traditionally, collection of kinetics is mostly limited to the lab environment because it relies on data that are measured from a motion capture system and floor-embedded force plates to calculate the dynamics via musculoskeletal models. This spatially limited method makes it extremely challenging to measure kinetics outside the laboratory in a variety of walking conditions due to the expensive device setup and large space required. Recently, employing machine learning with IMU sensors has been suggested as an alternative method for biomechanical analyses. Although these methods enable estimating human kinetic data outside the laboratory by linking IMU sensor data with a kinetics dataset, they still show inaccurate kinetic estimates even in highly repeatable single walking conditions due to the employment of generic deep learning algorithms. Thus, this paper proposes a novel deep learning model, Kinetics-FM-DLR-Ensemble-Net, for single limb prediction of hip, knee, and ankle joint moments and 3-dimensional GRFs using three IMU sensors on the thigh, shank, and foot under several representative walking conditions in daily living, such as treadmill, level-ground, stair, and ramp. This is the first study that implements both joint moments and GRFs in multiple walking conditions using IMU sensors via deep learning. Our deep learning model is versatile and accurate for identifying human kinetics across diverse subjects and walking conditions and outperforms state-of-the-art deep learning models for kinetics estimation by a large margin.}, number={6}, journal={IEEE JOURNAL OF BIOMEDICAL AND HEALTH INFORMATICS}, author={Hossain, Md Sanzid Bin and Guo, Zhishan and Choi, Hwan}, year={2023}, month={Jun}, pages={2829–2840} } @article{gray_moraes_bian_wang_tian_wilson_huang_xiong_guo_2023, title={GLARE: A Dataset for Traffic Sign Detection in Sun Glare}, volume={7}, ISSN={["1558-0016"]}, url={https://doi.org/10.1109/TITS.2023.3294411}, DOI={10.1109/TITS.2023.3294411}, abstractNote={Real-time machine learning object detection algorithms are often found within autonomous vehicle technology and depend on quality datasets. It is essential that these algorithms work correctly in everyday conditions as well as under strong sun glare. Reports indicate glare is one of the two most prominent environment-related reasons for crashes. However, existing datasets, such as the Laboratory for Intelligent & Safe Automobiles Traffic Sign (LISA) Dataset and the German Traffic Sign Recognition Benchmark, do not reflect the existence of sun glare at all. 
This paper presents the GLARE (GLARE is available at: https://github.com/NicholasCG/GLARE_Dataset) traffic sign dataset: a collection of images with U.S.-based traffic signs under heavy visual interference by sunlight. GLARE contains 2,157 images of traffic signs with sun glare, pulled from 33 videos of dashcam footage of roads in the United States. It provides an essential enrichment to the widely used LISA Traffic Sign dataset. Our experimental study shows that although several state-of-the-art baseline architectures have demonstrated good performance on traffic sign detection in conditions without sun glare in the past, they performed poorly when tested against GLARE (e.g., average mAP0.5:0.95 of 19.4). We also notice that current architectures have better detection performance when trained on images of traffic signs in sun glare (e.g., average mAP0.5:0.95 of 39.6), and perform best when trained on a mixture of conditions (e.g., average mAP0.5:0.95 of 42.3).}, journal={IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS}, author={Gray, Nicholas and Moraes, Megan and Bian, Jiang and Wang, Alex and Tian, Allen and Wilson, Kurt and Huang, Yan and Xiong, Haoyi and Guo, Zhishan}, year={2023}, month={Jul} } @article{vaidhun_she_gu_das_yang_guo_2023, title={Precise Mixed-Criticality Scheduling on Varying-Speed Multiprocessors}, url={https://doi.org/10.1109/TC.2022.3197078}, DOI={10.1109/TC.2022.3197078}, abstractNote={While traditional real-time systems analysis requires single pessimistic estimates to represent system parameters, the mixed-criticality (MC) design proposes to use multiple estimates of system parameters with different levels of pessimism, resulting in low critical workloads sacrificed at run-time in order to provide guarantees to high critical workloads. Shortcomings of the MC design were improved recently by the precise MC scheduling technique in which the processor speed is increased at run-time to provide guarantees to both low and high critical workloads. Aiming to extend the precise MC scheduling to multiprocessor computing platforms, this paper proposes three novel scheduling algorithms that are based on virtual-deadline and fluid-scheduling approaches. We prove the correctness of our proposed algorithms through schedulability analysis and also present their theoretical effectiveness via speedup bounds and approximation factor calculations. Finally, we evaluate their performance experimentally via randomly generated task sets and demonstrate that the fluid-scheduling algorithms outperform the virtual-deadline algorithm.}, journal={IEEE Transactions on Computers}, author={Vaidhun, Sudharsan and She, Tianning and Gu, Qijun and Das, Sajal K. and Yang, Kecheng and Guo, Zhishan}, year={2023}, month={Jan} } @article{sun_duan_li_guan_guo_deng_tan_2023, title={Real-Time Scheduling of Autonomous Driving System with Guaranteed Timing Correctness}, ISSN={["1545-3421"]}, DOI={10.1109/RTAS58335.2023.00022}, abstractNote={In the autonomous driving (AD) system, complex data dependencies exist between tasks with different activation rates, making it very hard to analyze systems’ real-time behaviors. This paper formulates the AD system as a multi-rate DAG and proposes an integrated framework to co-analyze the schedulability of individual tasks and the end-to-end latency of task chains in the multi-rate DAG. Integer linear programming (ILP) techniques are developed to guide how to drop redundant workload to increase the chance that timing requirements can be met. 
This paper proposes an analysis framework that enables an automated process in which designs of the AD system are created, analyzed and refined in an iterative way, i.e., the analysis result in the last iteration provides valuable guidance to redesign the AD system in the next iteration. Experiments are conducted to evaluate the performance of our analysis method.}, journal={2023 IEEE 29TH REAL-TIME AND EMBEDDED TECHNOLOGY AND APPLICATIONS SYMPOSIUM, RTAS}, author={Sun, Jinghao and Duan, Kailu and Li, Xisheng and Guan, Nan and Guo, Zhishan and Deng, Qingxu and Tan, Guozhen}, year={2023}, pages={185–197} } @article{ahmed_al arafat_rizve_hossain_guo_rakin_2023, title={SSDA: Secure Source-Free Domain Adaptation}, ISSN={["1550-5499"]}, DOI={10.1109/ICCV51070.2023.01757}, abstractNote={Source-free domain adaptation (SFDA) is a popular unsupervised domain adaptation method where a pre-trained model from a source domain is adapted to a target domain without accessing any source data. Despite rich results in this area, existing literature overlooks the security challenges of the unsupervised SFDA setting in the presence of a malicious source domain owner. This work investigates the effect of a source adversary which may inject a hidden malicious behavior (Backdoor/Trojan) during source training and potentially transfer it to the target domain even after benign training by the victim (target domain owner). Our investigation of the current SFDA setting reveals that because of the unique challenges present in SFDA (e.g., no source data, target label), defending against backdoor attacks using existing defenses becomes practically ineffective in protecting the target model. To address this, we propose a novel target domain protection scheme called secure source-free domain adaptation (SSDA). SSDA adopts a single-shot model compression of a pre-trained source model and a novel knowledge transfer scheme with a spectral-norm-based loss penalty for target training. The proposed static compression and the dynamic training loss penalty are designed to suppress the malicious channels responsive to the backdoor during the adaptation stage. At the same time, the knowledge transfer from an uncompressed auxiliary model helps to recover the benign test accuracy. Our extensive evaluation on multiple datasets and domain tasks against recent backdoor attacks reveals that the proposed SSDA can successfully defend against strong backdoor attacks with little to no degradation in test accuracy compared to the vulnerable baseline SFDA methods. Our code is available at https://github.com/ML-Security-Research-LAB/SSDA.}, journal={2023 IEEE/CVF INTERNATIONAL CONFERENCE ON COMPUTER VISION (ICCV 2023)}, author={Ahmed, Sabbir and Al Arafat, Abdullah and Rizve, Mamshad Nayeem and Hossain, Rahim and Guo, Zhishan and Rakin, Adnan Siraj}, year={2023}, pages={19123–19133} } @article{abdelzaher_agrawal_baruah_burns_davis_guo_hu_2023, title={Scheduling IDK classifiers with arbitrary dependences to minimize the expected time to successful classification}, volume={3}, ISSN={["1573-1383"]}, DOI={10.1007/s11241-023-09395-0}, journal={REAL-TIME SYSTEMS}, author={Abdelzaher, Tarek and Agrawal, Kunal and Baruah, Sanjoy and Burns, Alan and Davis, Robert I. 
and Guo, Zhishan and Hu, Yigong}, year={2023}, month={Mar} } @article{reghenzani_guo_fornaciari_2023, title={Software Fault Tolerance in Real-Time Systems: Identifying the Future Research Questions}, volume={55}, ISSN={0360-0300 1557-7341}, url={http://dx.doi.org/10.1145/3589950}, DOI={10.1145/3589950}, abstractNote={Tolerating hardware faults in modern architectures is becoming a prominent problem due to the miniaturization of the hardware components, their increasing complexity, and the necessity to reduce costs. Software-Implemented Hardware Fault Tolerance approaches have been developed to improve system dependability regarding hardware faults without resorting to custom hardware solutions. However, these come at the expense of making the satisfaction of the timing constraints of the applications/activities harder from a scheduling standpoint. This article surveys the current state-of-the-art of fault tolerance approaches when used in the context of real-time systems, identifying the main challenges and the cross-links between these two topics. We propose a joint scheduling-failure analysis model that highlights the formal interactions among software fault tolerance mechanisms and timing properties. This model allows us to present and discuss many open research questions with the final aim to spur future research activities.}, number={14S}, journal={ACM Computing Surveys}, publisher={Association for Computing Machinery (ACM)}, author={Reghenzani, Federico and Guo, Zhishan and Fornaciari, William}, year={2023}, month={Mar} } @article{zhou_guo_dong_yang_2023, title={TensorRT Implementations of Model Quantization on Edge SoC}, ISSN={["2771-3067"]}, DOI={10.1109/MCSoC60832.2023.00078}, abstractNote={Deep neural networks have shown remarkable capabilities in computer vision applications. However, their complex architectures can pose challenges for efficient real-time deployment on edge devices, as they require significant computational resources and energy costs. To overcome these challenges, TensorRT has been developed to optimize neural network models trained on major frameworks to speed up inference and minimize latency. It enables inference optimization using techniques such as model quantization which reduces computations by lowering the precision of the data type. The focus of our paper is to evaluate the effectiveness of TensorRT for model quantization. We conduct a comprehensive assessment of the accuracy, inference time, and throughput of TensorRT quantized models on an edge device. Our findings indicate that the quantization in TensorRT significantly enhances the efficiency of inference metrics while maintaining a high level of inference accuracy. Additionally, we explore various workflows for implementing quantization using TensorRT and discuss their advantages and disadvantages. 
Based on our analysis of these workflows, we provide recommendations for selecting an appropriate workflow for different application scenarios.}, journal={2023 IEEE 16TH INTERNATIONAL SYMPOSIUM ON EMBEDDED MULTICORE/MANY-CORE SYSTEMS-ON-CHIP, MCSOC}, author={Zhou, Yuxiao and Guo, Zhishan and Dong, Zheng and Yang, Kecheng}, year={2023}, pages={486–493} } @article{moniruzzaman_yin_bin hossain_choi_guo_2023, title={Wearable Motion Capture: Reconstructing and Predicting 3D Human Poses From Wearable Sensors}, volume={27}, ISSN={["2168-2208"]}, url={https://doi.org/10.1109/JBHI.2023.3311448}, DOI={10.1109/JBHI.2023.3311448}, abstractNote={Reconstructing and predicting 3D human walking poses in unconstrained measurement environments have the potential to be used in health monitoring systems for people with movement disabilities by assessing progression after treatments and providing information for assistive device controls. The latest pose estimation algorithms utilize motion capture systems, which capture data from IMU sensors and third-person view cameras. However, third-person views are not always possible for outpatients alone. Thus, we propose the wearable motion capture problem of reconstructing and predicting 3D human poses from the wearable IMU sensors and wearable cameras, which aids clinicians' diagnoses of patients outside the clinic. To solve this problem, we introduce a novel Attention-Oriented Recurrent Neural Network (AttRNet) that contains a sensor-wise attention-oriented recurrent encoder, a reconstruction module, and a dynamic temporal attention-oriented recurrent decoder, to reconstruct the 3D human pose over time and predict the 3D human poses at the following time steps. To evaluate our approach, we collected a new WearableMotionCapture dataset using wearable IMUs and wearable video cameras, along with the musculoskeletal joint angle ground truth. The proposed AttRNet shows high accuracy on the new lower-limb WearableMotionCapture dataset, and it also outperforms the state-of-the-art methods on two public full-body pose datasets: DIP-IMU and TotalCapture.}, number={11}, journal={IEEE JOURNAL OF BIOMEDICAL AND HEALTH INFORMATICS}, author={Moniruzzaman, Md and Yin, Zhaozheng and Bin Hossain, Md Sanzid and Choi, Hwan and Guo, Zhishan}, year={2023}, month={Nov}, pages={5345–5356} } @misc{reghenzani_guo_santinelli_fornaciari_2022, title={A Mixed-Criticality Approach to Fault Tolerance: Integrating Schedulability and Failure Requirements}, url={http://dx.doi.org/10.1109/rtas54340.2022.00011}, DOI={10.1109/rtas54340.2022.00011}, abstractNote={Mixed-Criticality (MC) systems have been widely studied in the past decade, largely due to their potential to consolidate applications with different criticality levels onto the same platform. In the original design proposed by Vestal, a target probability of failure per hour specified by certification requirements is assigned to each criticality level. These requirements have been mainly conceived for hardware faults. Software fault tolerance techniques are available to mitigate hardware faults, but their adaptation to real-time systems is challenging due to the introduced overhead. This paper proposes an extension to the traditional MC scheduling theory to implement fault tolerance strategies against transient faults, with the goal of complying with both failure and timing requirements. 
In particular, we introduce dropping relationships that generalize the concept of criticality and allow us, on the one hand, to improve the schedulability analysis and, on the other, to control the dependency between tasks while satisfying the certification requirements. The simulation study shows a schedulability ratio improvement of 20-30% compared to classical scheduling while maintaining compliance with failure requirements.}, journal={2022 IEEE 28th Real-Time and Embedded Technology and Applications Symposium (RTAS)}, publisher={IEEE}, author={Reghenzani, Federico and Guo, Zhishan and Santinelli, Luca and Fornaciari, William}, year={2022}, month={May} } @misc{farhangi_huang_guo_2022, title={A Novel Deep Learning Model For Hotel Demand and Revenue Prediction amid COVID-19}, ISSN={2572-6862}, url={http://dx.doi.org/10.24251/hicss.2022.217}, DOI={10.24251/hicss.2022.217}, abstractNote={The COVID-19 pandemic has had a substantial impact on the tourism and hospitality sector. Public policies such as travel restrictions and stay-at-home orders significantly affected tourist activities and service businesses' operations and profitability. It is essential to develop interpretable forecasting models to support managerial and organizational decision-making. We developed DemandNet, a novel deep learning framework for predicting time series data under the influence of the COVID-19 pandemic. The DemandNet framework has the following unique characteristics. First, it selects the top static and dynamic features embedded in the time series data. Second, it includes a nonlinear model which can provide interpretable insight into the previously seen data. Third, a novel prediction model is developed to leverage the above characteristics to make robust long-term forecasts. We evaluated DemandNet using daily hotel demand and revenue data from eight cities in the US between 2013 and 2020. Our findings reveal that DemandNet outperforms the state-of-the-art models and can accurately predict the effect of the COVID-19 pandemic on hotel demand and revenue.}, journal={Proceedings of the Annual Hawaii International Conference on System Sciences}, publisher={Hawaii International Conference on System Sciences}, author={Farhangi, Ashkan and Huang, Arthur and Guo, Zhishan}, year={2022} } @article{hossain_dranetz_choi_guo_2022, title={DeepBBWAE-Net: A CNN-RNN Based Deep SuperLearner for Estimating Lower Extremity Sagittal Plane Joint Kinematics Using Shoe-Mounted IMU Sensors in Daily Living}, volume={26}, url={https://doi.org/10.1109/JBHI.2022.3165383}, DOI={10.1109/JBHI.2022.3165383}, abstractNote={Measurement of human body movement is an essential step in biomechanical analysis. The current standard for human motion capture systems uses infrared cameras to track reflective markers placed on a subject. While these systems can accurately track joint kinematics, the analyses are spatially limited to the lab environment. Though Inertial Measurement Units (IMUs) can eliminate these spatial limitations, those systems are impractical for use in daily living due to the need for many sensors, typically one per body segment. Due to the need for practical and accurate estimation of joint kinematics, this study implements a reduced number of IMU sensors and employs a machine learning algorithm to map sensor data to joint angles. 
Our developed algorithm estimates hip, knee, and ankle angles in the sagittal plane using two shoe-mounted IMU sensors in different practical walking conditions: treadmill, overground, stair, and slope conditions. Specifically, we propose five deep learning networks that use combinations of Convolutional Neural Networks (CNN) and Gated Recurrent Unit (GRU) based Recurrent Neural Networks (RNN) as base learners for our framework. Using those five baseline models, we propose a novel framework, DeepBBWAE-Net, that implements ensemble techniques such as bagging, boosting, and weighted averaging to improve kinematic predictions. DeepBBWAE-Net predicts joint kinematics for the three joint angles for each of the walking conditions with a Root Mean Square Error (RMSE) 6.93-29.0% lower than the base models individually. This is the first study that uses a reduced number of IMU sensors to estimate kinematics in multiple walking environments.}, number={8}, journal={IEEE Journal of Biomedical and Health Informatics}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Hossain, Md Sanzid Bin and Dranetz, Joseph and Choi, Hwan and Guo, Zhishan}, year={2022}, month={Aug}, pages={3906–3917} } @article{vaidhun_guo_bian_xiong_das_2022, title={Dynamic Path Planning for Unmanned Aerial Vehicles Under Deadline and Sector Capacity Constraints}, volume={6}, url={https://doi.org/10.1109/TETCI.2021.3122743}, DOI={10.1109/TETCI.2021.3122743}, abstractNote={The US National Airspace System is currently operating at a level close to its maximum potential. The limitation comes from the workload demand on the air traffic controllers. Currently, the air traffic flow management is based on the flight path requests by the airline operators, whereas the minimum separation assurance between flights is handled strategically by air traffic control personnel. In this paper, we propose a scalable framework that allows path planning for a large number of unmanned aerial vehicles (UAVs) taking into account the deadline and weather constraints. Our proposed solution has a polynomial-time computational complexity that is also verified by measuring the runtime for typical workloads. We further demonstrate that the proposed framework is able to route 80% of the workloads while not exceeding the sector capacity constraints, even under dynamic weather conditions. Due to low computational complexity, our framework is suitable for a fleet of UAVs where decentralizing the routing process limits the workload demand on the air traffic personnel.}, number={4}, journal={IEEE Transactions on Emerging Topics in Computational Intelligence}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Vaidhun, Sudharsan and Guo, Zhishan and Bian, Jiang and Xiong, Haoyi and Das, Sajal}, year={2022}, month={Aug}, pages={839–851} } @misc{hossain_choi_guo_2022, title={Estimating lower extremity joint angles during gait using reduced number of sensors count via deep learning}, url={http://dx.doi.org/10.1117/12.2643786}, DOI={10.1117/12.2643786}, abstractNote={Estimating lower extremity joint angle during gait is essential for biomechanical analysis and clinical purposes. Traditionally infrared light-based motion capture systems are used to get the joint angle information. However, such an approach is restricted to the lab environment, limiting the applicability of the method in daily living. 
Inertial Measurement Units (IMU) sensors can solve this limitation but are needed in each body segment, causing discomfort and impracticality in everyday living. As a result, it is desirable to build a system that can measure joint angles in daily living while ensuring user comfort. For this reason, this paper uses deep learning to estimate joint angle during gait using only two IMU sensors mounted on participants' shoes under four different walking conditions, i.e., treadmill, overground, stair, and slope. Specifically, we leverage Gated Recurrent Unit (GRU), 1D, and 2D convolutional layers to create sub-networks and take their average to get a final model in an end-to-end manner. Extensive evaluations are done on the proposed method, which outperforms the baseline and improves the Root Mean Square Error (RMSE) of joint angle prediction by up to 32.96%.}, journal={Fourteenth International Conference on Digital Image Processing (ICDIP 2022)}, publisher={SPIE}, author={Hossain, Md Sanzid Bin and Choi, Hwan and Guo, Zhishan}, editor={Xie, Yi and Jiang, Xudong and Tao, Wenbing and Zeng, DezeEditors}, year={2022}, month={Oct} } @article{bin hossain_guo_choi_2022, title={Estimation of Hip, Knee, and Ankle Joint Moment Using a Single IMU Sensor on Foot Via Deep Learning}, DOI={10.1145/3551455.3559605}, journal={2022 IEEE/ACM CONFERENCE ON CONNECTED HEALTH: APPLICATIONS, SYSTEMS AND ENGINEERING TECHNOLOGIES (CHASE 2022)}, author={Bin Hossain, Md Sanzid and Guo, Zhishan and Choi, Hwan}, year={2022}, pages={25–33} } @article{bian_arafat_xiong_li_li_chen_wang_dou_guo_2022, title={Machine Learning in Real-Time Internet of Things (IoT) Systems: A Survey}, volume={9}, url={https://doi.org/10.1109/JIOT.2022.3161050}, DOI={10.1109/JIOT.2022.3161050}, abstractNote={Over the last decade, machine learning (ML) and deep learning (DL) algorithms have significantly evolved and been employed in diverse applications, such as computer vision, natural language processing, automated speech recognition, etc. Real-time safety-critical embedded and Internet of Things (IoT) systems, such as autonomous driving systems, UAVs, drones, security robots, etc., heavily rely on ML/DL-based technologies, accelerated with the improvement of hardware technologies. The cost of a deadline (required time constraint) missed by ML/DL algorithms would be catastrophic in these safety-critical systems. However, ML/DL algorithm-based applications have more concerns about accuracy than strict time requirements. Accordingly, researchers from the real-time systems (RTSs) community address the strict timing requirements of ML/DL technologies to include in RTSs. 
This article will rigorously explore the state-of-the-art results emphasizing the strengths and weaknesses in ML/DL-based scheduling techniques, accuracy versus execution time tradeoff policies of ML algorithms, and security and privacy of learning-based algorithms in real-time IoT systems.}, number={11}, journal={IEEE Internet of Things Journal}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Bian, Jiang and Arafat, Abdullah Al and Xiong, Haoyi and Li, Jing and Li, Li and Chen, Hongyang and Wang, Jun and Dou, Dejing and Guo, Zhishan}, year={2022}, month={Jun}, pages={8364–8386} } @article{guo_vaidhun_satinelli_arefin_wang_yang_2022, title={Mixed-Criticality Scheduling Upon Permitted Failure Probability and Dynamic Priority}, volume={41}, url={https://doi.org/10.1109/TCAD.2021.3053232}, DOI={10.1109/TCAD.2021.3053232}, abstractNote={Many safety-critical real-time systems are considered certified when they meet failure probability requirements with respect to the maximum permitted incidences of failure per hour. In this article, the mixed-criticality task model with multiple worst case execution time (WCET) estimations is extended to incorporate such system-level certification restrictions. A new parameter is added to each task, characterizing the distribution of WCET estimations—the likelihood of all jobs of a task finishing their executions within the less pessimistic WCET estimates. Efficient algorithms are derived for scheduling mixed-criticality systems represented using this model for both uniprocessor and multiprocessor platforms for independent tasks. Furthermore, a 0/1 covariance matrix is introduced to represent the failure dependency between tasks. An efficient algorithm is proposed to schedule such failure-dependent tasks. Experimental analyses show our new model and algorithm outperform current state-of-the-art mixed-criticality scheduling algorithms.}, number={1}, journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Guo, Zhishan and Vaidhun, Sudharsan and Satinelli, Luca and Arefin, Samsil and Wang, Jun and Yang, Kecheng}, year={2022}, month={Jan}, pages={62–75} } @misc{farhangi_sui_hua_bai_huang_guo_2022, title={Protoformer: Embedding Prototypes for Transformers}, ISBN={9783031059322 9783031059339}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-031-05933-9_35}, DOI={10.1007/978-3-031-05933-9_35}, abstractNote={Transformers have been widely applied in text classification. Unfortunately, real-world data contain anomalies and noisy labels that cause challenges for state-of-art Transformers. This paper proposes Protoformer, a novel self-learning framework for Transformers that can leverage problematic samples for text classification. Protoformer features a selection mechanism for embedding samples that allows us to efficiently extract and utilize anomalies prototypes and difficult class prototypes. We demonstrated such capabilities on datasets with diverse textual structures (e.g., Twitter, IMDB, ArXiv). We also applied the framework to several models. 
The results indicate that Protoformer can improve current Transformers in various empirical settings.}, journal={Advances in Knowledge Discovery and Data Mining}, publisher={Springer International Publishing}, author={Farhangi, Ashkan and Sui, Ning and Hua, Nan and Bai, Haiyan and Huang, Arthur and Guo, Zhishan}, year={2022}, pages={447–458} } @article{bi_he_sun_sun_guo_guan_tan_2022, title={Response Time Analysis for Prioritized DAG Task with Mutually Exclusive Vertices}, ISSN={["1052-8725"]}, DOI={10.1109/RTSS55097.2022.00046}, abstractNote={Directed acyclic graph (DAG) becomes a popular model for modern real-time embedded software. It is really a challenge to bound the worst-case response time (WCRT) of DAG task. Parallelism, dependencies and mutual exclusion become three of the most critical properties of real-time parallel tasks. Recent work applied prioritizing techniques to reduce DAG task's WCRT bound, which has well studied the first two properties, i.e., parallelism and dependencies, but leaves the mutually exclusive property as an open problem. This paper focuses on all the three properties of real-time parallel software, and investigates how to estimate the WCRT of the DAG task model with mutually exclusive vertices and under prioritized list scheduling algorithms. We derive a reasonable WCRT bound for such a complicated DAG task, and prove that the corresponding WCRT bound computation problem is strongly NP-hard. It means that there are no pseudo-polynomial time algorithms to compute the WCRT bound. For the prioritized DAG with a constant number of mutual exclusive vertices, we develop a dynamic programming algorithm that is able to estimate the WCRT bound within pseudo-polynomial time. Experiments are conducted to evaluate the performance of our analysis method implemented with different priority assignment policies against the state-of-the-art.}, journal={2022 IEEE 43RD REAL-TIME SYSTEMS SYMPOSIUM (RTSS 2022)}, author={Bi, Ran and He, Qingqiang and Sun, Jinghao and Sun, Zhenyu and Guo, Zhishan and Guan, Nan and Tan, Guozhen}, year={2022}, pages={460–473} } @misc{arafat_vaidhun_wilson_sun_guo_2022, title={Response time analysis for dynamic priority scheduling in ROS2}, url={http://dx.doi.org/10.1145/3489517.3530447}, DOI={10.1145/3489517.3530447}, abstractNote={Robot Operating System (ROS) is the most popular framework for developing robotics software. Typically, robotics software is safety-critical and employed in real-time systems requiring timing guarantees. Since the first generation of ROS provides no timing guarantee, the recent release of its second generation, ROS2, is necessary and timely, and has since received immense attention from practitioners and researchers. Unfortunately, the existing analysis of ROS2 showed the peculiar scheduling strategy of ROS2 executor, which severely affects the response time of ROS2 applications. This paper proposes a deadline-based scheduling strategy for the ROS2 executor. It further presents an analysis for an end-to-end response time of ROS2 workload (processing chain) and an evaluation of the proposed scheduling strategy for real workloads.}, journal={Proceedings of the 59th ACM/IEEE Design Automation Conference}, publisher={ACM}, author={Arafat, Abdullah Al and Vaidhun, Sudharsan and Wilson, Kurt M. 
and Sun, Jinghao and Guo, Zhishan}, year={2022}, month={Jul} } @inproceedings{farhangi_bian_huang_xiong_wang_guo_2022, title={Time Series Prediction with Anomaly-Aware Recurrent Neural Networks}, booktitle={Proceedings of European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECMLPKDD)}, author={Farhangi, Ashkan and Bian, Jiang and Huang, Arthur and Xiong, Haoyi and Wang, Jun and Guo, Zhishan}, year={2022} } @article{li_guan_jiang_guo_dong_lv_2022, title={Worst-Case Time Disparity Analysis of Message Synchronization in ROS}, ISSN={["1052-8725"]}, DOI={10.1109/RTSS55097.2022.00014}, abstractNote={Multi-sensor data fusion is essential in autonomous systems to support accurate perception and intelligent decisions. To perform meaningful data fusion, input data from different sensors must be sampled at time points in close propinquity to each other, otherwise the result cannot accurately reflect the status of the physical environment. ROS (Robotic Operating System), a popular software framework for autonomous systems, provides message synchronization mechanisms to address the above problem, by buffering messages carrying data from different sensors and grouping those with similar timestamps. Although message synchronization is widely used in applications developed based on ROS, little knowledge is known about its actual behavior and performance, so it is hard to guarantee the quality of data fusion. In this paper, we model the message synchronization policy in ROS and formally analyze its worst-case time disparity (maximal difference among the timestamps of the messages grouped into the same output set). We conduct experiments to evaluate the precision of the proposed time disparity upper bound against the maximal observed time disparity in real execution, and compare it with the synchronization policy in Apollo Cyber RT, another popular software framework for autonomous driving systems. Experiment results show that our analysis has good precision and ROS outperforms Apollo Cyber RT in terms of both observed worst-case time disparity and the theoretical bound.}, journal={2022 IEEE 43RD REAL-TIME SYSTEMS SYMPOSIUM (RTSS 2022)}, author={Li, Ruoxiang and Guan, Nan and Jiang, Xu and Guo, Zhishan and Dong, Zheng and Lv, Mingsong}, year={2022}, pages={40–52} } @misc{reghenzani_bhuiyan_fornaciari_guo_2021, title={A Multi-Level DPM Approach for Real-Time DAG Tasks in Heterogeneous Processors}, url={http://dx.doi.org/10.1109/rtss52674.2021.00014}, DOI={10.1109/rtss52674.2021.00014}, abstractNote={The modeling and analysis of real-time applications focus on the worst-case scenario because of their strict timing requirements. However, many real-time embedded systems include critical applications requiring not only timing constraints but also other system limitations, such as energy consumption. In this paper, we study the energy-aware real-time scheduling of Directed Acyclic Graph (DAG) tasks. We integrate the Dynamic Power Management (DPM) policy to reduce the Worst-Case Energy Consumption (WCEC), which is an essential requirement for energy-constrained systems. Besides, we extend our analysis with tasks' probabilistic information to improve the Average-Case Energy Consumption (ACEC), which is, instead, a common non-functional requirement of embedded systems. To verify the benefits of our approach in terms of reduced energy consumption, we finally conduct an extensive simulation, followed by an experimental study on an Odroid-H2 board. 
Compared to the state-of-the-art solution, our approach is able to reduce the power consumption up to 32.1%.}, journal={2021 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Reghenzani, Federico and Bhuiyan, Ashikahmed and Fornaciari, William and Guo, Zhishan}, year={2021}, month={Dec} } @article{huang_fisher_ding_guo_2021, title={A network analysis of cross-occupational skill transferability for the hospitality industry}, volume={33}, ISSN={0959-6119 0959-6119}, url={http://dx.doi.org/10.1108/ijchm-01-2021-0073}, DOI={10.1108/ijchm-01-2021-0073}, abstractNote={ Purpose This paper aims to examine transferable skills and viable career transition pathways for hospitality and tourism workers. Future career prospects are discussed, along with the importance of reskilling for low-wage hospitality workers. }, number={12}, journal={International Journal of Contemporary Hospitality Management}, publisher={Emerald}, author={Huang, Arthur Yan and Fisher, Tyler and Ding, Huiling and Guo, Zhishan}, year={2021}, month={Sep}, pages={4215–4236} } @misc{sanzid_dranetz_choi_guo_2021, title={An Ensemble Machine Learning Approach for the Estimation of Lower Extremity Kinematics Using Shoe-Mounted IMU Sensors}, url={http://dx.doi.org/10.52141/gcmas2021_172}, DOI={10.52141/gcmas2021_172}, abstractNote={INTRODUCTION Evaluating human body movement is an essential step for biomechanical analysis and assessing a disease's condition and progression. The infrared light motion capture system is a standard method for assessing joint kinematics. However, the motion capture system requires a specific setup in a confined area, leading to challenges with quantitative assessment of walking conditions found in daily living. Using multiple inertial measurement units (IMU) sensors allows for the evaluation of movement outside the lab. Still, they are too burdensome for daily living due to needing sensors in specific locations on each limb. There is a need for an accurate kinematics assessment with a reduced number of sensors---this paper focuses on the setting where only one sensor is mounted on each shoe. Machine learning has been used to estimate joint kinematics with a reduced number of IMU sensors [1], but the resultant joint angle errors are not minor enough for applying movement evaluation. This paper proposes a new machine learning algorithm that provides highly accurate and real-time hip, knee, and ankle joint angle estimations in the sagittal plane using two shoe-mounted IMU sensors. We adapted five deep learning networks by implementing various configurations of Convolutional Neural Networks (CNNs) or Long-Short Term Memory (LSTM) Networks and ensembled them all together to leverage the advantage of each model. 
Our ensemble technique provides high correlation between predicted joint kinematics and kinematics measured with an infrared light motion capture system.}, journal={Abstracts of the 26th Annual Meeting of the GCMAS}, publisher={GCMAS}, author={Sanzid, Md and Dranetz, Joseph and Choi, Hwan and Guo, Zhishan}, year={2021}, month={Sep} } @article{zhao_xiong_bian_guo_xu_dou_2021, title={COMO: Efficient Deep Neural Networks Expansion With COnvolutional MaxOut}, volume={23}, url={https://doi.org/10.1109/TMM.2020.3002614}, DOI={10.1109/TMM.2020.3002614}, abstractNote={In this paper, we extend the classic MaxOut strategy, originally designed for Multiple Layer Preceptors (MLPs), into COnvolutional MaxOut (COMO) — a new strategy making deep convolutional neural networks wider with parameter efficiency. Compared to the existing solutions, such as ResNeXt for ResNet or Inception for VGG-alikes, COMO works well on both linear architectures and the ones with skipped connections and residual blocks. More specifically, COMO adopts a novel split-transform-merge paradigm that extends the layers with spatial resolution reduction into multiple parallel splits. For the layer with COMO, each split passes the input feature maps through a 4D convolution operator with independent batch normalization operators for transformation, then merge into the aggregated output of the original sizes through max-pooling. Such a strategy is expected to tackle the potential classification accuracy degradation due to the spatial resolution reduction, by incorporating the multiple splits and max-pooling-based feature selection. Our experiment using a wide range of deep architectures shows that COMO can significantly improve the classification accuracy of ResNet/VGG-alike networks based on a large number of benchmark datasets. COMO further outperforms the existing solutions, e.g., Inceptions, ResNeXts, SE-ResNet, and Xception, that make networks wider, and it dominates in the comparison of accuracy versus parameter sizes.}, journal={IEEE Transactions on Multimedia}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Zhao, Baoxin and Xiong, Haoyi and Bian, Jiang and Guo, Zhishan and Xu, Cheng-Zhong and Dou, Dejing}, year={2021}, pages={1722–1730} } @article{bian_yang_xiong_wang_fu_sun_guo_2021, title={CRLEDD: Regularized Causalities Learning for Early Detection of Diseases Using Electronic Health Record (EHR) Data}, url={https://doi.org/10.1109/TETCI.2020.3010017}, DOI={10.1109/TETCI.2020.3010017}, abstractNote={The availability of Electronic Health Records (EHR) in health care settings has provided tremendous opportunities for early disease detection. While many supervised learning models have been adopted for EHR-based disease early detection, the ill-posed inverse problem in the parameter learning has imposed a significant challenge on improving the accuracy of these algorithms. In this paper, we propose CRLEDD – Causality-Regularized Learning for Early Detection of Disease, an algorithm to improve the performance of Linear Discriminant Analysis (LDA) on top of diagnosis-frequency vector data representation. While most existing regularization methods exploit sparsity regularization to improve detection performance, CRLEDD provides a unique perspective by ensuring positive semi-definiteness of the sparsified precision matrix used in LDA which is different from the regular regularization method (e.g., L2 regularization). 
To achieve this goal, CRLEDD employs Graphical Lasso to estimate the precision matrix in the ill-posed settings for enhanced accuracy of LDA classifiers. We perform extensive evaluation of CRLEDD using a large-scale real-world EHR dataset to predict mental health disorders (e.g., depression and anxiety) of college students from 10 universities in the U.S. We compare CRLEDD with other regularized LDA and downstream classifiers. The result shows that CRLEDD outperforms all baselines in terms of accuracy and F1 scores.}, journal={IEEE Transactions on Emerging Topics in Computational Intelligence}, author={Bian, Jiang and Yang, Sijia and Xiong, Haoyi and Wang, Licheng and Fu, Yanjie and Sun, Zeyi and Guo, Zhishan}, year={2021}, month={Aug} } @misc{sun_guan_guo_xue_he_tan_2021, title={Calculating Worst-Case Response Time Bounds for OpenMP Programs with Loop Structures}, url={http://dx.doi.org/10.1109/rtss52674.2021.00022}, DOI={10.1109/rtss52674.2021.00022}, abstractNote={OpenMP is a promising framework for developing parallel real-time software on multi-cores. Recently, many graph-based task models representing realistic features of OpenMP task systems have been proposed and analyzed. However, all previous studies did not model the loop structures, which are common in OpenMP task systems. In this paper, we formulate the workload of OpenMP task systems with loop structures as the cyclic graph model and study how to compute safe upper bounds for the worst-case response time (WCRT). The loop structures combined with the creation of tasks and conditional branches result in a large state space of possible execution flows, i.e., the actual paths taken in the program to generate workload at runtime. Simply unrolling the loop and/or enumerating all the possible execution flows would be computationally intractable. As the major technical contribution, we develop a linear-time dynamic programming algorithm to compute the WCRT bound without unrolling loops or explicitly enumerating the execution flows. Experiments with both synthetic task graphs and realistic OpenMP programs are conducted to evaluate the performance of our method.}, journal={2021 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Sun, Jinghao and Guan, Nan and Guo, Zhishan and Xue, Yekai and He, Jing and Tan, Guozhen}, year={2021}, month={Dec} } @article{bhuiyan_yang_arefin_saifullah_guan_guo_2021, title={Mixed-criticality real-time scheduling of gang task systems}, volume={57}, ISSN={0922-6443 1573-1383}, url={http://dx.doi.org/10.1007/s11241-021-09368-1}, DOI={10.1007/s11241-021-09368-1}, number={3}, journal={Real-Time Systems}, publisher={Springer Science and Business Media LLC}, author={Bhuiyan, Ashikahmed and Yang, Kecheng and Arefin, Samsil and Saifullah, Abusayeed and Guan, Nan and Guo, Zhishan}, year={2021}, month={May}, pages={268–301} } @article{liu_han_zhao_guo_2021, title={Narrowing the speedup factor gap of partitioned EDF}, volume={281}, url={https://doi.org/10.1016/j.ic.2021.104743}, DOI={10.1016/j.ic.2021.104743}, abstractNote={Schedulability is a fundamental problem in analyzing real-time systems, but it often has to be approximated because of the intrinsic computational hardness. Partitioned earliest deadline first (EDF) is one of the most popular polynomial-time and practical schedulers on multiprocessor platforms, and it was shown to have a speedup factor of at most 2.6322 − 1 / m . 
This paper further improves the factor to 2.5556 − 1 / m for both the constrained-deadline case and the arbitrary-deadline case, and it is very close to the known (non-tight) lower bound of 2.5 − 1 / m . The key ideas are that we develop a novel method to discretize and regularize sporadic task sets that are schedulable on uniprocessors, and we find that the ratio ( ρ ) of the approximate demand bound value to the machine capacity is upper-bounded by 1.5556 for the arbitrary-deadline case, which plays an important role in estimating the speed factor of partitioned EDF.}, journal={Information and Computation}, publisher={Elsevier BV}, author={Liu, Xingwu and Han, Xin and Zhao, Liang and Guo, Zhishan}, year={2021}, month={Dec}, pages={104743} } @article{wang_jiang_guan_guo_liu_yi_2021, title={Partitioning-Based Scheduling of OpenMP Task Systems With Tied Tasks}, volume={32}, url={https://doi.org/10.1109/TPDS.2020.3048373}, DOI={10.1109/TPDS.2020.3048373}, abstractNote={OpenMP is a popular programming framework in both general and high-performance computing and has recently drawn much interest in embedded and real-time computing. Although the execution semantics of OpenMP are similar to the DAG task model, the constraints posed by the OpenMP specification make them significantly more challenging to analyze. A tied task is an important feature in OpenMP that must execute on the same thread throughout its entire life cycle. A previous work [1] succeeded in analyzing the real-time scheduling of tied tasks by modifying the Task Scheduling Constraints (TSCs) in OpenMP specification. In this article, we also study the real-time scheduling of OpenMP task systems with tied tasks but without changing the original TSCs. In particular, we propose a partitioning-based algorithm, P-EDF-omp, by which the tied constraint can be automatically guaranteed as long as an OpenMP task system can be successfully partitioned to a multiprocessor platform. Furthermore, we conduct comprehensive experiments with both synthetic workloads and established OpenMP benchmarks to show that our approach consistently outperforms the work in [1] —even without modifying the TSCs.}, number={6}, journal={IEEE Transactions on Parallel and Distributed Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Wang, Yang and Jiang, Xu and Guan, Nan and Guo, Zhishan and Liu, Xue and Yi, Wang}, year={2021}, month={Jun}, pages={1322–1339} } @misc{she_vaidhun_gu_das_guo_yang_2021, title={Precise Scheduling of Mixed-Criticality Tasks on Varying-Speed Multiprocessors}, url={http://dx.doi.org/10.1145/3453417.3453428}, DOI={10.1145/3453417.3453428}, abstractNote={In conventional real-time systems analysis, each system parameter is specified by a single estimate, which must pessimistically cover the worst case. Mixed-criticality (MC) design has been proposed to mitigate such pessimism by providing a single system parameter with multiple estimates, which often lead to low-critical and high-critical modes. The majority of the works on MC scheduling is based on the approach that low-critical workloads are (fully or partially) sacrificed at the transition instant from low- to high-critical mode. Recently, another approach called precise MC scheduling has been investigated, where no low-critical workload is sacrificed at the mode switch, but instead a processor speed boosting is committed. In this paper, we extend the work on uniprocessor precise MC scheduling to multiprocessor platforms. 
To tackle this new scheduling problem, we propose two novel algorithms based on the virtual-deadline and fluid-scheduling approaches. For each approach, we present a sufficient schedulability test and prove its correctness. We also evaluate their effectiveness theoretically with speedup bounds and approximation factors as well as experimentally via randomly generated task sets.}, journal={29th International Conference on Real-Time Networks and Systems}, publisher={ACM}, author={She, Tianning and Vaidhun, Sudharsan and Gu, Qijun and Das, Sajal and Guo, Zhishan and Yang, Kecheng}, year={2021}, month={Apr} } @misc{she_guo_gu_yang_2021, title={Reserving Processors by Precise Scheduling of Mixed-Criticality Tasks}, url={http://dx.doi.org/10.1109/rtcsa52859.2021.00020}, DOI={10.1109/rtcsa52859.2021.00020}, abstractNote={Mixed-criticality (MC) scheduling has been proposed to mitigate the pessimism in real-time schedulability analysis that must provide guarantees for the worst case. In most existing work on MC scheduling, low-critical tasks are either dropped or degraded at the criticality mode switch in order to preserve the temporal guarantees for high-critical tasks. Recently, a different direction, called precise MC scheduling, has been investigated. In precise MC scheduling, no low-critical task should be dropped or degraded; instead, the platform processing capacity is augmented at mode switch to accommodate the additional workload by high-critical tasks. In contrast to prior work on this topic with respect to varying processor speed, this work investigates the precise scheduling problem of MC tasks when the number of available processors may vary at the mode switch. To address this new problem, we propose two alternative algorithms by adapting virtual-deadline-based EDF and by fluid scheduling, respectively, and provide a sufficient schedulability test for each. We also conduct schedulability experiments with randomly generated task sets to demonstrate the effectiveness of the proposed algorithms and the benefits of the new scheduling model.}, journal={2021 IEEE 27th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)}, publisher={IEEE}, author={She, Tianning and Guo, Zhishan and Gu, Qijun and Yang, Kecheng}, year={2021}, month={Aug} } @article{wang_xiong_bian_zhu_gao_guo_xu_huan_dou_2021, title={Sampling Sparse Representations with Randomized Measurement Langevin Dynamics}, volume={15}, ISSN={1556-4681 1556-472X}, url={http://dx.doi.org/10.1145/3427585}, DOI={10.1145/3427585}, abstractNote={ Stochastic Gradient Langevin Dynamics (SGLD) have been widely used for Bayesian sampling from certain probability distributions, incorporating derivatives of the log-posterior. With the derivative evaluation of the log-posterior distribution, SGLD methods generate samples from the distribution through performing as a thermostats dynamics that traverses over gradient flows of the log-posterior with certainly controllable perturbation. Even when the density is not known, existing solutions still can first learn the kernel density models from the given datasets, then produce new samples using the SGLD over the kernel density derivatives. In this work, instead of exploring new samples from kernel spaces, a novel SGLD sampler, namely, Randomized Measurement Langevin Dynamics (RMLD) is proposed to sample the high-dimensional sparse representations from the spectral domain of a given dataset. 
}, number={2}, journal={ACM Transactions on Knowledge Discovery from Data}, publisher={Association for Computing Machinery (ACM)}, author={Wang, Kafeng and Xiong, Haoyi and Bian, Jiang and Zhu, Zhanxing and Gao, Qian and Guo, Zhishan and Xu, Cheng-Zhong and Huan, Jun and Dou, Dejing}, year={2021}, month={Feb}, pages={1–21} } @misc{liu_chen_han_sun_guo_2021, title={Tighter Bounds of Speedup Factor of Partitioned EDF for Constrained-Deadline Sporadic Tasks}, url={http://dx.doi.org/10.1109/rtss52674.2021.00046}, DOI={10.1109/rtss52674.2021.00046}, abstractNote={Even though earliest-deadline-first (EDF) is optimal in terms of uniprocessor schedulability, it is co-NP-hard to precisely verify uniprocessor schedulability for constrained-deadline task sets. The most efficient way to solve this problem in polynomial time is via a partially linear approximation of the demand bound function. Such approximation leads to a simple uniprocessor schedulability testing with speedup factor ρ. Such a result further leads to Deadline-Monotonic Partitioned-EDF on multi-processors with speedup factor of 1 + ρ − 1/m (where m is the number of processors). The current state-of-the-art results indicate that ρ is within the range [1.5, 14/9]. In particular, it has been conjectured that ρ = 1.5. This paper improves the range of ρ to (1.5026, 1.5380). The improved lower bound disproves the conjecture of lower bound 1.5. A novel technique is to construct an auxiliary function that is larger than the approximate demand bound function but keeps the supremum ρ unchanged. It solves the dilemma that beating the lower bound 1.5 requires extremely large task sets, while the large size makes it difficult to check the schedulability. This technique not only enables us to disprove 1.5 by a task set of only eight tasks, but also sheds light on future work in transferring/downsizing task sets and deriving utilization bound based tests for various workload abstraction models, such as DAG tasks.}, journal={2021 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Liu, Xingwu and Chen, Zizhao and Han, Xin and Sun, Zhenyu and Guo, Zhishan}, year={2021}, month={Dec} } @inproceedings{sun_wang_duan_lu_ren_guo_tan_2021, title={Toward Real-Time Guaranteed Scheduling for Autonomous Driving Systems}, booktitle={Proceedings of the 42nd IEEE Real-Time Systems Symposium (RTSS 2021), Industry Challenge}, author={Sun, Jinghao and Wang, Tianyi and Duan, Kailu and Lu, Bin and Ren, Jiankang and Guo, Zhishan and Tan, Guozhen}, year={2021}, month={Dec} } @misc{arafat_guo_awad_2021, title={VR-Spy: A Side-Channel Attack on Virtual Key-Logging in VR Headsets}, url={http://dx.doi.org/10.1109/vr50410.2021.00081}, DOI={10.1109/vr50410.2021.00081}, abstractNote={In Virtual Reality (VR), users typically interact with the virtual world using a virtual keyboard to insert keywords, surf webpages, or type passwords to access online accounts. Hence, it becomes imperative to understand the security of virtual keystrokes. In this paper, we present VR-Spy, a virtual keystrokes recognition method using channel state information (CSI) of WiFi signals. To the best of our knowledge, this is the first work that uses WiFi signals to recognize virtual keystrokes in VR headsets. 
The key idea behind VR-Spy is that the side-channel information of fine-granular hand movements associated with each virtual keystroke has a unique gesture pattern in the CSI waveforms. Our novel pattern extraction algorithm leverages signal processing techniques to extract the patterns from the variations of CSI. We implement VR-Spy using two Commercially Off-The-Shelf (COTS) devices, a transmitter (WAVLINK router), and a receiver (Intel NUC with an IWL 5300 NIC). Finally, VR-Spy achieves a virtual keystroke recognition accuracy of 69.75% in comparison to techniques that assume very advanced adversary models with vision and motion sensors near the victim.}, journal={2021 IEEE Virtual Reality and 3D User Interfaces (VR)}, publisher={IEEE}, author={Arafat, Abdullah Al and Guo, Zhishan and Awad, Amro}, year={2021}, month={Mar}, pages={564–572} } @inproceedings{saifullah_fahmida_modekurthy_fisher_guo_2020, title={CPU Energy-Aware Parallel Real-Time Scheduling}, booktitle={Proceedings of the 32nd Euromicro Conference on Real-Time Systems (ECRTS)}, author={Saifullah, Abusayeed and Fahmida, Sezana and Modekurthy, Venkata and Fisher, Nathan and Guo, Zhishan}, year={2020}, month={Jun} } @article{sun_shi_wang_guan_guo_2020, title={Efficient Feasibility Analysis for Graph-Based Real-Time Task Systems}, volume={39}, url={http://dx.doi.org/10.1109/tcad.2020.3012174}, DOI={10.1109/tcad.2020.3012174}, abstractNote={The demand bound function (DBF) is a powerful abstraction to analyze the feasibility/schedulability of real-time tasks. Computing the DBF for expressive system models, such as graph-based tasks, is typically very expensive. In this article, we develop new techniques to drastically improve the DBF computation efficiency for a representative graph-based task model, digraph real-time tasks (DRT). First, we apply the well-known quick processor-demand analysis (QPA) technique, which was originally designed for simple sporadic tasks, to the analysis of DRT. The challenge is that existing analysis techniques of DRT have to compute the demand for each possible interval size, which is contradictory to the idea of QPA that aims to aggressively skip the computation for most interval sizes. To solve this problem, we develop a novel integer linear programming (ILP)-based analysis technique for DRT, to which we can apply QPA to significantly improve the analysis efficiency. Second, we improve the task utilization computation (a major step in DBF computation for DRT) efficiency from pseudo-polynomial complexity to polynomial complexity.
Experiments show that our approach can improve the analysis efficiency by dozens of times.}, number={11}, journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Sun, Jinghao and Shi, Rongxiao and Wang, Kexuan and Guan, Nan and Guo, Zhishan}, year={2020}, month={Nov}, pages={3385–3397} } @inproceedings{sun_shi_wang_guan_guo_2020, title={Efficient Feasibility Analysis for Graph-based Real-Time Task Systems}, booktitle={International Conference on Embedded Software (EMSOFT)}, author={Sun, Jinghao and Shi, Rongxiao and Wang, Kexuan and Guan, Nan and Guo, Zhishan}, year={2020}, month={Sep} } @article{bhuiyan_liu_khan_saifullah_guan_guo_2020, title={Energy-Efficient Parallel Real-Time Scheduling on Clustered Multi-Core}, volume={31}, url={https://doi.org/10.1109/TPDS.2020.2985701}, DOI={10.1109/TPDS.2020.2985701}, abstractNote={Energy-efficiency is a critical requirement for computation-intensive real-time applications on multi-core embedded systems. Multi-core processors enable intra-task parallelism, and in this work, we study energy-efficient real-time scheduling of constrained deadline sporadic parallel tasks, where each task is represented as a directed acyclic graph (DAG). We consider a clustered multi-core platform where processors within the same cluster run at the same speed at any given time. A new concept named speed-profile is proposed to model per-task and per-cluster energy-consumption variations during run-time to minimize the expected long-term energy consumption. To our knowledge, no existing work considers energy-aware real-time scheduling of DAG tasks with constrained deadlines, nor on a clustered multi-core platform. The proposed energy-aware real-time scheduler is implemented upon an ODROID XU-3 board to evaluate and demonstrate its feasibility and practicality. To complement our system experiments in large-scale, we have also conducted simulations that demonstrate a CPU energy saving of up to 67 percent through our proposed approach compared to existing methods.}, number={9}, journal={IEEE Transactions on Parallel and Distributed Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Bhuiyan, Ashikahmed and Liu, Di and Khan, Aamir and Saifullah, Abusayeed and Guan, Nan and Guo, Zhishan}, year={2020}, month={Sep}, pages={2097–2111} } @misc{yang_bhuiyan_guo_2020, title={F2VD}, url={http://dx.doi.org/10.1145/3400302.3415716}, DOI={10.1145/3400302.3415716}, abstractNote={Increasingly complex and integrated systems design has led to more timing uncertainty, which may result in pessimism in time-sensitive system design and analysis. To mitigate such pessimism, mixed-criticality (MC) design for real-time systems has been proposed, where highly critical tasks, often with extremely pessimistic execution time estimates, can share the processor with less critical ones in a manner that the latter is sacrificed, completely or partially, to guarantee temporal correctness to the former, when the extremely pessimistic scenario does happen. In contrast to such sacrifice of tasks, the precise MC scheduling model has recently been investigated, where all tasks, including less critical ones, must fully complete their execution in all circumstances. 
Meanwhile, the processor may operate at a degraded speed when the tasks' runtime behaviors are far from the extreme pessimistic estimates and would recover to the full processing speed once the extremely pessimistic scenario does happen. This paper presents a generalized fluid-scheduling-based solution to this problem, where feasible fluid-scheduling rates for each task are derived from an optimization problem. Furthermore, this paper proposes a novel algorithm F2VD for setting virtual deadlines from any feasible fluid rates, such that any fluid-scheduling-based solution can be converted to a deadline-based scheduling approach with no schedulability loss, where the latter is generally considered much more practical and easier to implement. Experimental studies based on randomly generated task sets are conducted to verify the theoretical results as well as the effectiveness of the proposed algorithms.}, journal={Proceedings of the 39th International Conference on Computer-Aided Design}, publisher={ACM}, author={Yang, Kecheng and Bhuiyan, Ashikahmed and Guo, Zhishan}, year={2020}, month={Nov} } @misc{zsiros_blalock_craig_vaidhun_wang_guo_2020, title={GARDS: Generalized Autonomous Robotic Delivery System}, url={http://dx.doi.org/10.1109/metrocad48866.2020.00013}, DOI={10.1109/metrocad48866.2020.00013}, abstractNote={In this demonstration, we present a generalized platform customized to suit the needs of a fast power-efficient and autonomous delivery system. As an application demonstration, we deployed a mapping and localization system based on a combination of sensor sources. An online navigation algorithm utilizes the map information to deliver to a destination in the mapped area.}, journal={2020 International Conference on Connected and Autonomous Driving (MetroCAD)}, publisher={IEEE}, author={Zsiros, Jade and Blalock, Brian and Craig, Darien and Vaidhun, Sudharsan and Wang, Alexander and Guo, Zhishan}, year={2020}, month={Feb} } @inproceedings{agrawal_baruah_guo_li_vaidhun_2020, title={Hard-Real-Time Routing in Probabilistic Graphs to Minimize Expected Delay}, url={http://dx.doi.org/10.1109/rtss49844.2020.00017}, DOI={10.1109/rtss49844.2020.00017}, abstractNote={This work studies the hard-real-time routing problem in graphs: one needs to travel from a given vertex to another within a hard deadline. For each edge in the network, the worst-case delay that may be encountered across that edge is bounded. As far as this given bound is trustworthy at a very high level of assurance, it must be guaranteed that one will meet the specified deadline. The actual delays across edges are uncertain and the goal is to minimize the total expected delay while meeting the deadline. We propose a comprehensive solution to this problem. Specifically, if the precise a priori estimates of the delay probability distributions are available, we develop an optimal table-driven algorithm that identifies the route with the minimum expected delay. If those estimates are not precise (i.e., unknown or dynamic), we develop an efficient Q-Learning approach that leverages the table-driven algorithm to track the true distributions rapidly, while ensuring to meet the specified hard deadline. 
The proposed solution suggests a promising direction towards incorporating probabilistic information and learning-based approaches into safety-critical systems without compromising safety guarantees, when it is not feasible to establish the trustworthiness of the probabilistic information at the high assurance levels required for verification purposes.}, booktitle={2020 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Agrawal, Kunal and Baruah, Sanjoy and Guo, Zhishan and Li, Jing and Vaidhun, Sudharsan}, year={2020}, month={Dec} } @misc{guo_yang_yao_awad_2020, title={Inter-task cache interference aware partitioned real-time scheduling}, url={http://dx.doi.org/10.1145/3341105.3374014}, DOI={10.1145/3341105.3374014}, abstractNote={With the increasing number of cores in processors, shared resources like caches are interfering task execution behaviours more heavily and often render global scheduling approaches infeasible in practice. While partitioned scheduling alleviates such interference, in most existing partitioned approaches, constant WCET, which potentially includes all possible interference, must be statically pre-determined prior to the partitioning processes. In this paper, we show that by taking inter-task interference into consideration when making scheduling decisions, resource efficiency can be significantly improved in both temporal and spatial domains for multi/many-core real-time systems. In particular, we propose the inter-task interference matrix (ITIM) to model the inter-task cache/memory interference in a pair-wise manner. Focusing on the problem of interference-aware partitioned scheduling with ITIM, we formalize it as a mixed integer linear program (MILP), which can be solved to achieve optimal solution at the cost of high computational complexity. Meanwhile, we also provide several polynomial-time algorithms to solve the problem approximately. We extensively profile a set of WCET benchmark programs on x86-based multiprocessor server to collect ITIM. The algorithms are evaluated comprehensively, and the evaluation results demonstrate the superior performance of the proposed approaches under various settings.}, journal={Proceedings of the 35th Annual ACM Symposium on Applied Computing}, publisher={ACM}, author={Guo, Zhishan and Yang, Kecheng and Yao, Fan and Awad, Amro}, year={2020}, month={Mar} } @article{bian_xiong_fu_huan_guo_2020, title={MP 2 SDA}, volume={14}, ISSN={1556-4681 1556-472X}, url={http://dx.doi.org/10.1145/3374919}, DOI={10.1145/3374919}, abstractNote={Sparse Discriminant Analysis (SDA) has been widely used to improve the performance of classical Fisher’s Linear Discriminant Analysis in supervised metric learning, feature selection, and classification. With the increasing needs of distributed data collection, storage, and processing, enabling the Sparse Discriminant Learning to embrace the multi-party distributed computing environments becomes an emerging research topic. This article proposes a novel multi-party SDA algorithm, which can learn SDA models effectively without sharing any raw data and basic statistics among machines. 
The proposed algorithm (1) leverages the direct estimation of SDA to derive a distributed loss function for the discriminant learning, (2) parameterizes the distributed loss function with local/global estimates through bootstrapping, and (3) approximates a global estimation of the linear discriminant projection vector by optimizing the “distributed bootstrapping loss function” with gossip-based stochastic gradient descent. Experimental results on both synthetic and real-world benchmark datasets show that our algorithm can compete with the aggregated SDA with similar performance, and significantly outperforms the most recent distributed SDA in terms of accuracy and F1-score.}, number={3}, journal={ACM Transactions on Knowledge Discovery from Data}, publisher={Association for Computing Machinery (ACM)}, author={Bian, Jiang and Xiong, Haoyi and Fu, Yanjie and Huan, Jun and Guo, Zhishan}, year={2020}, month={Mar}, pages={1–22} } @article{liu_guan_guo_yi_2020, title={MiniTEE—A Lightweight TrustZone-Assisted TEE for Real-Time Systems}, volume={9}, url={https://doi.org/10.3390/electronics9071130}, DOI={10.3390/electronics9071130}, abstractNote={While trusted execution environments (TEEs) provide industry standard security and isolation, TEE requests through secure monitor calls (SMCs) contribute to large time overheads and weakened temporal predictability. Moreover, as currently available TEE solutions are initially designed for Linux and/or Android, they encounter many constraints (e.g., incompatible driver libraries, large memory footprints, etc.) when integrated with low-end Real-Time Operating Systems (RTOSs). In this paper, we present MiniTEE to understand, evaluate and discuss the benefits and limitations when integrating TrustZone-assisted TEEs with RTOSs. We demonstrate how MiniTEE can be adequately exploited for meeting the real-time needs, while presenting a low performance overhead to the rich OSs (i.e., low-end RTOSs).}, number={7}, journal={Electronics}, publisher={MDPI AG}, author={Liu, Songran and Guan, Nan and Guo, Zhishan and Yi, Wang}, year={2020}, month={Jul}, pages={1130} } @inproceedings{singh_santinelli_reghenzani_bletsas_guo_2020, title={Mixed Criticality Scheduling of Probabilistic Real-Time Systems}, booktitle={Proceedings of the 10th European Congress on Embedded Real Time Software and Systems}, author={Singh, Jasdeep and Santinelli, Luca and Reghenzani, Federico and Bletsas, Konstantinos and Guo, Zhishan}, year={2020}, month={Jan} } @misc{sun_li_guan_zhu_xiang_guo_yi_2020, title={On Computing Exact WCRT for DAG Tasks}, url={http://dx.doi.org/10.1109/dac18072.2020.9218744}, DOI={10.1109/dac18072.2020.9218744}, abstractNote={Most current real-time parallel applications can be modeled as a directed acyclic graph (DAG) task. Existing worst-case response time (WCRT) bounds (e.g., Graham's bound) derived for DAGs may be very pessimistic. No one precisely knows the gap between the WCRT bound and the actual WCRT. In this paper, we aim to derive the exact WCRT of a DAG task under list scheduling upon multi-core platforms. We encode the WCRT analysis problem into a satisfiability modulo theories (SMT) formulation based on insights into the list scheduling algorithm, and prove that our SMT program can solve the WCRT precisely, providing an accurate baseline to measure the tightness of the existing WCRT bounds.
Experiments show that our method significantly improves the tightness of the WCRT bound, and is practically quite efficient, e.g., it can analyze DAGs with more than 40 vertices in a few seconds.}, journal={2020 57th ACM/IEEE Design Automation Conference (DAC)}, publisher={IEEE}, author={Sun, Jinghao and Li, Feng and Guan, Nan and Zhu, Wentao and Xiang, Minjie and Guo, Zhishan and Yi, Wang}, year={2020}, month={Jul} } @misc{sun_chi_xu_cao_guan_guo_yi_2020, title={On the Volume Calculation for Conditional DAG Tasks: Hardness and Algorithms}, url={http://dx.doi.org/10.23919/date48585.2020.9116559}, DOI={10.23919/date48585.2020.9116559}, abstractNote={The hardness of analyzing conditional directed acyclic graph (DAG) tasks remains unknown so far. For example, previous researches asserted that the conditional DAG's volume can be solved in polynomial time. However, these researches all assume well-nested structures that are recursively composed by single-source-single-sink parallel and conditional components. For conditional DAGs in general that do not comply with this assumption, the hardness and algorithms of volume computation are still open. In this paper, we construct counterexamples to show that previous work cannot provide a safe upper bound of the conditional DAG's volume in general. Moreover, we prove that the volume computation problem for conditional DAGs is strongly $\mathcal{N}\mathcal{P}$-hard. Finally, we propose an exact algorithm for computing the conditional DAG's volume. Experiments show that our method can significantly improve the accuracy of the conditional DAG's volume estimation.}, journal={2020 Design, Automation & Test in Europe Conference & Exhibition (DATE)}, publisher={IEEE}, author={Sun, Jinghao and Chi, Yaoyao and Xu, Tianfei and Cao, Lei and Guan, Nan and Guo, Zhishan and Yi, Wang}, year={2020}, month={Mar} } @article{bhuiyan_reghenzani_fornaciari_guo_2020, title={Optimizing Energy in Non-Preemptive Mixed-Criticality Scheduling by Exploiting Probabilistic Information}, volume={39}, ISSN={0278-0070 1937-4151}, url={http://dx.doi.org/10.1109/tcad.2020.3012231}, DOI={10.1109/tcad.2020.3012231}, abstractNote={The strict requirements on the timing correctness biased the modeling and analysis of real-time systems toward the worst-case performances. Such focus on the worst-case, however, does not provide enough information to effectively steer the resource/energy optimization. In this article, we integrate a probabilistic-based energy prediction strategy with the precise scheduling of mixed-criticality tasks, where the timing correctness must be met for all tasks at all scenarios. The dynamic voltage and frequency scaling (DVFS) is applied to this precise scheduling policy to enable energy minimization. We propose a probabilistic technique to derive an energy-efficient speed (for the processor) that minimizes the average energy consumption, while guaranteeing the (worst-case) timing correctness for all tasks, including LO-criticality ones, under any execution condition. We present a response time analysis for such systems under the nonpreemptive fixed-priority scheduling policy. 
Finally, we conduct an extensive simulation campaign based on randomly generated task sets to verify the effectiveness of our algorithm (with respect to energy savings) and it reports up to 46% energy-saving.}, number={11}, journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Bhuiyan, Ashikahmed and Reghenzani, Federico and Fornaciari, William and Guo, Zhishan}, year={2020}, month={Nov}, pages={3906–3917} } @inproceedings{bhuiyan_reghenzani_fornaciari_guo_2020, title={Optimizing Energy in Non-preemptive Mixed-Criticality Scheduling by Exploiting Probabilistic Information}, booktitle={International Conference on Embedded Software (EMSOFT)}, author={Bhuiyan, Ashikahmed and Reghenzani, Federico and Fornaciari, William and Guo, Zhishan}, year={2020} } @inproceedings{hossain_lee_hong_choi_guo_2020, title={Predicting lower limb 3D kinematics during gait using reduced number of wearable sensors via deep learning}, booktitle={Proceedings of the 44th Meetings of the American Society of Biomechanics (ASB)}, author={Hossain, Md Sanzid Bin and Lee, Youngho and Hong, Junghwa and Choi, Hwan and Guo, Zhishan}, year={2020} } @misc{vaidhun_guo_bian_xiong_das_2020, title={Priority-based Multi-Flight Path Planning with Uncertain Sector Capacities}, url={http://dx.doi.org/10.1109/icaci49185.2020.9177760}, DOI={10.1109/icaci49185.2020.9177760}, abstractNote={The United States National Airspace System is currently operating at a level close to its maximum potential. The workload on the system, however, is only going to increase with the influx of unmanned aerial vehicles and soon, commercial space transportation systems. The traffic flow management is currently managed based on the flight path requests by the airline operators; while the minimum separation assurance between flights is handled strategically by air traffic control personnel. A more tactical approach would be to plan for a longer time horizon which is non-trivial given the uncertainties in the airspace due to weather. In this work, we consider a simplified model of the airspace as a grid of sectors and the uncertainties in the airspace are modeled as blocked sectors. In the modeled airspace with uncertainties, we schedule multiple flights using a dynamic shortest path algorithm. A novel cost function based on potential energy fields is proposed for use in the path planning algorithm to handle blocked sectors. A priority-based contention resolution scheme is proposed to extend the solution to multiple flights. We then demonstrate the proposed framework using a simulated test case.}, journal={2020 12th International Conference on Advanced Computational Intelligence (ICACI)}, publisher={IEEE}, author={Vaidhun, Sudharsan and Guo, Zhishan and Bian, Jiang and Xiong, Haoyi and Das, Sajal K.}, year={2020}, month={Aug} } @misc{sun_li_guo_zou_zhang_agrawal_baruah_2020, title={Real-Time Scheduling upon a Host-Centric Acceleration Architecture with Data Offloading}, url={http://dx.doi.org/10.1109/rtas48715.2020.00-17}, DOI={10.1109/rtas48715.2020.00-17}, abstractNote={Challenging scheduling problems arise in the implementation of cyber-physical systems upon heterogeneous platforms with (serial) data offloading and (parallel) computation. In this paper, we adapt techniques from scheduling theory to model, analyze, and derive scheduling algorithms for real-time workloads on such platforms. 
We characterize the performance of the proposed algorithms, both analytically via the approximation ratio metric and experimentally through simulation experiments upon synthetic workloads that are justified via a case study on a CPU-GPU platform. The evaluation exposes some divergence between the analytical characterization and experimental one; recommendations that seek to balance such divergent characterizations are made regarding the choice of algorithmic approaches.}, journal={2020 IEEE Real-Time and Embedded Technology and Applications Symposium (RTAS)}, publisher={IEEE}, author={Sun, Jinghao and Li, Jing and Guo, Zhishan and Zou, An and Zhang, Xuan and Agrawal, Kunal and Baruah, Sanjoy}, year={2020}, month={Apr} } @misc{agrawal_baruah_guo_li_2020, title={The safe and effective application of probabilistic techniques in safety-critical systems}, url={http://dx.doi.org/10.1145/3400302.3415674}, DOI={10.1145/3400302.3415674}, abstractNote={The use of randomized algorithms in safety-critical systems is investigated. Under the vast majority of circumstances, randomized algorithms out-perform deterministic ones on average; however, it is not obvious how one goes about establishing the correctness of safety-critical systems that use such algorithms. The approach advocated in this work is to exploit the fact that many safety standards allow for small probabilities of failure of even the most critical functionalities. We explore the use of concentration bounds - probabilistic bounds on the likelihood of the performance of a randomized algorithm deviating from its expected performance - to bound the probability of failure of systems that incorporate randomized algorithms, thereby showing compliance with safety standards that allow for small probabilities of failure. We illustrate the use of the proposed approach on several examples that both explain how the approach is to be applied, and demonstrate the benefits of doing so.}, journal={Proceedings of the 39th International Conference on Computer-Aided Design}, publisher={ACM}, author={Agrawal, Kunal and Baruah, Sanjoy and Guo, Zhishan and Li, Jing}, year={2020}, month={Nov} } @article{huang_makridis_baker_medeiros_guo_2020, title={Understanding the Impact of COVID-19 Intervention Policies on the Labor Market of the Hospitality and Retail Industries}, ISSN={1556-5068}, url={http://dx.doi.org/10.2139/ssrn.3637766}, DOI={10.2139/ssrn.3637766}, abstractNote={Using new high-frequency data that covers a representative sample of small business owners, we investigate the effects of the COVID-19 pandemic and the resulting state policies on the retail, hospitality, food, and accommodation sectors. First, we find that business closure policies are associated with a 20-30% reduction of non-salaried workers in the hospitality industry and a 10-20% decline for the retail sector from March-April of 2020. Second, business reopening policies play a statistically significant role in reviving the labor market, though at a slower pace. Third, considerable differences exist in the impact of policies on the labor market by state. Fourth, the rise of new cases on a daily basis is associated with the continued deterioration of the labor market. 
We use these results to provide policy and managerial recommendations for restoring the labor market and curbing the transmission of COVID-19.}, journal={SSRN Electronic Journal}, publisher={Elsevier BV}, author={Huang, Arthur and Makridis, Christos and Baker, Mark and Medeiros, Marcos and Guo, Zhishan}, year={2020} } @article{huang_makridis_baker_medeiros_guo_2020, title={Understanding the impact of COVID-19 intervention policies on the hospitality labor market}, volume={91}, url={https://doi.org/10.1016/j.ijhm.2020.102660}, DOI={10.1016/j.ijhm.2020.102660}, abstractNote={Using new high-frequency data that covers a representative sample of small businesses in the United States, this study investigates the effects of the COVID-19 pandemic and the resulting state policies on the hospitality industry. First, business closure policies are associated with a 20-30% reduction of non-salaried workers in the food/drink and leisure/entertainment sectors during March-April of 2020. Second, business reopening policies play a statistically significant role in slowly reviving the labor market. Third, considerable differences exist in the impact of policies on the labor market by state. Fourth, the rise of new COVID-19 cases on a daily basis is associated with the continued deterioration of the labor market. Lastly, managerial, practical, and economic implications are described.}, journal={International Journal of Hospitality Management}, publisher={Elsevier BV}, author={Huang, Arthur and Makridis, Christos and Baker, Mark and Medeiros, Marcos and Guo, Zhishan}, year={2020}, month={Oct}, pages={102660} } @article{xiong_cheng_bian_hu_sun_guo_2019, title={ $\mathcal{DBSDA}$ : Lowering the Bound of Misclassification Rate for Sparse Linear Discriminant Analysis via Model Debiasing}, volume={30}, ISSN={2162-237X 2162-2388}, url={http://dx.doi.org/10.1109/tnnls.2018.2846783}, DOI={10.1109/tnnls.2018.2846783}, abstractNote={Linear discriminant analysis (LDA) is a well-known technique for linear classification, feature extraction, and dimension reduction. To improve the accuracy of LDA under the high dimension low sample size (HDLSS) settings, shrunken estimators, such as Graphical Lasso, can be used to strike a balance between biases and variances. Although the estimator with induced sparsity obtains a faster convergence rate, however, the introduced bias may also degrade the performance. In this paper, we theoretically analyze how the sparsity and the convergence rate of the precision matrix (also known as inverse covariance matrix) estimator would affect the classification accuracy by proposing an analytic model on the upper bound of an LDA misclassification rate. Guided by the model, we propose a novel classifier, $\mathcal {DBSDA}$ , which improves classification accuracy through debiasing. Theoretical analysis shows that $\mathcal {DBSDA}$ possesses a reduced upper bound of misclassification rate and better asymptotic properties than sparse LDA (SDA). 
We conduct experiments on both synthetic datasets and real application datasets to confirm the correctness of our theoretical analysis and demonstrate the superiority of $\mathcal {DBSDA}$ over LDA, SDA, and other downstream competitors under HDLSS settings.}, number={3}, journal={IEEE Transactions on Neural Networks and Learning Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Xiong, Haoyi and Cheng, Wei and Bian, Jiang and Hu, Wenqing and Sun, Zeyi and Guo, Zhishan}, year={2019}, month={Mar}, pages={707–717} } @article{zhao_liu_jiang_liu_xue_xie_yang_guo_2019, title={CASS: Criticality-Aware Standby-Sparing for real-time systems}, volume={100}, ISSN={1383-7621}, url={http://dx.doi.org/10.1016/j.sysarc.2019.101661}, DOI={10.1016/j.sysarc.2019.101661}, abstractNote={The standby-sparing (SS) is a promising technique which deploys the dual-processor platform, i.e., one primary processor and one spare processor, to achieve fault tolerance for real-time systems. In the existing SS framework, all applications have their backup copies on the spare processor, but, in practice, not all applications on a system are equally important to the system. Some low critical tasks may be traded off for other system objectives. Motivated by this, in this paper, we integrate the concept of criticality into the SS framework. Such integration enables the SS framework to further reduce energy consumption. We propose an offline approach to determine an energy-efficient frequency for the primary processor. Additionally, as the cluster systems are emerging as the mainstream computing platform, we consider the SS technique on the cluster/island systems and propose an algorithm to determine the energy-efficient algorithm for such systems. We evaluate the proposed approach on synthetic tasks and real-platforms. The experimental results demonstrate the effectiveness of our proposed framework in terms of energy efficiency.}, journal={Journal of Systems Architecture}, publisher={Elsevier BV}, author={Zhao, Mingxiong and Liu, Di and Jiang, Xu and Liu, Weichen and Xue, Gang and Xie, Cheng and Yang, Yun and Guo, Zhishan}, year={2019}, month={Nov}, pages={101661} } @article{yang_guo_xiong_ding_yin_wunsch_2019, title={Data-Driven Robust Control of Discrete-Time Uncertain Linear Systems via Off-Policy Reinforcement Learning}, volume={30}, url={https://doi.org/10.1109/TNNLS.2019.2897814}, DOI={10.1109/TNNLS.2019.2897814}, abstractNote={This paper presents a model-free solution to the robust stabilization problem of discrete-time linear dynamical systems with bounded and mismatched uncertainty. An optimal controller design method is derived to solve the robust control problem, which results in solving an algebraic Riccati equation (ARE). It is shown that the optimal controller obtained by solving the ARE can robustly stabilize the uncertain system. To develop a model-free solution to the translated ARE, off-policy reinforcement learning (RL) is employed to solve the problem in hand without the requirement of system dynamics. In addition, the comparisons between on- and off-policy RL methods are presented regarding the robustness to probing noise and the dependence on system dynamics. 
Finally, a simulation example is carried out to validate the efficacy of the presented off-policy RL approach.}, number={12}, journal={IEEE Transactions on Neural Networks and Learning Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Yang, Yongliang and Guo, Zhishan and Xiong, Haoyi and Ding, Da-Wei and Yin, Yixin and Wunsch, Donald C.}, year={2019}, month={Dec}, pages={3735–3747} } @misc{yang_guo_2019, title={EDF-Based Mixed-Criticality Scheduling with Graceful Degradation by Bounded Lateness}, url={http://dx.doi.org/10.1109/rtcsa.2019.8864559}, DOI={10.1109/rtcsa.2019.8864559}, abstractNote={Mixed-criticality (MC) scheduling has been proposed for embedded real-time systems to alleviate the dilemma between runtime resource utilization and worst-case temporal guarantees for critical functions. The approach of dropping all low-criticality tasks upon a mode switch has been criticized for potentially over-degraded performance. In this paper, we focus on the graceful degradation for MC scheduling by providing bounded lateness for certain low-critical tasks. We define MCQOS-schedulability that massages the required bounded lateness into the definition of conventional MC-schedulability. A virtual-deadline-based scheduler (EDF-VDS) is proposed with a utilization-based MCQOS-schedulability test and closed-form lateness bounds.}, journal={2019 IEEE 25th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)}, publisher={IEEE}, author={Yang, Kecheng and Guo, Zhishan}, year={2019}, month={Aug} } @misc{guo_bhuiyan_liu_khan_saifullah_guan_2019, title={Energy-Efficient Real-Time Scheduling of DAGs on Clustered Multi-Core Platforms}, url={http://dx.doi.org/10.1109/rtas.2019.00021}, DOI={10.1109/rtas.2019.00021}, abstractNote={With the growth of computation-intensive real-time applications on multi-core embedded systems, energy-efficient real-time scheduling becomes crucial. Multi-core processors enable intra-task parallelism, and there has been much progress on exploiting that, while there has been only a little progress on energy-efficient multi-core real-time scheduling as yet. In this work, we study energy-efficient real-time scheduling of constrained deadline sporadic parallel tasks, where each task is represented as a directed acyclic graph (DAG). We consider a clustered multi-core platform where processors within the same cluster run at the same speed at any given time. A new concept named speed-profile is proposed to model per-task and per-cluster energy-consumption variations during run-time to minimize the expected long-term energy consumption. To our knowledge, no existing work considers energy-aware real-time scheduling of DAG tasks with constrained deadlines, nor on a clustered multi-core platform. The proposed energy-aware real-time scheduler is implemented upon an ODROID XU-3 board to evaluate and demonstrate its feasibility and practicality.
To complement our system experiments in large-scale, we have also conducted simulations that demonstrate a CPU energy saving of up to 57% through our proposed approach compared to existing methods.}, journal={2019 IEEE Real-Time and Embedded Technology and Applications Symposium (RTAS)}, publisher={IEEE}, author={Guo, Zhishan and Bhuiyan, Ashikahmed and Liu, Di and Khan, Aamir and Saifullah, Abusayeed and Guan, Nan}, year={2019}, month={Apr} } @article{he_jiang_guan_guo_2019, title={Intra-Task Priority Assignment in Real-Time Scheduling of DAG Tasks on Multi-Cores}, volume={30}, url={https://doi.org/10.1109/TPDS.2019.2910525}, DOI={10.1109/TPDS.2019.2910525}, abstractNote={Real-time scheduling and analysis of parallel tasks modeled as directed acyclic graphs (DAG) have been intensively studied in recent years. However, no existing work has explored the execution order of eligible vertices within a DAG task. In this paper, we show that this intra-task vertex execution order has a large impact on system schedulability and propose to control the execution order by vertex-level priority assignment. We develop analysis techniques to bound the worst-case response time for the proposed scheduling strategy and design heuristics for proper priority assignment to improve system schedulability as much as possible. We further extend the proposed approach to the general setting of multiple recurrent DAG tasks. Experiments with both realistic parallel benchmark applications and randomly generated workload show that our method consistently outperforms state-of-the-art methods with different task graph structures and parameter configurations.}, number={10}, journal={IEEE Transactions on Parallel and Distributed Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={He, Qingqiang and Jiang, Xu and Guan, Nan and Guo, Zhishan}, year={2019}, month={Oct}, pages={2283–2295} } @misc{singh_santinelli_reghenzani_bletsas_doose_guo_2019, title={Mixed Criticality Scheduling of Probabilistic Real-Time Systems}, ISBN={9783030355395 9783030355401}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-030-35540-1_6}, DOI={10.1007/978-3-030-35540-1_6}, abstractNote={In this paper we approach the problem of Mixed Criticality (MC) for probabilistic real-time systems where tasks execution times are described with probabilistic distributions. In our analysis, the task enters high criticality mode if its response time exceeds a certain threshold, which is a slight deviation from a more classical approach in MC. We do this to obtain an application oriented MC system in which criticality mode changes depend on actual scheduled execution. This is in contrast to classical approaches which use task execution time to make criticality mode decisions, because execution time is not affected by scheduling while the response time is. We use a graph-based approach to seek for an optimal MC schedule by exploring every possible MC schedule the task set can have. The schedule we obtain minimizes the probability of the system entering high criticality mode. In turn, this aims at maximizing the resource efficiency by the means of scheduling without compromising the execution of the high criticality tasks and minimizing the loss of lower criticality functionality. The proposed approach is applied to test cases for validation purposes.}, journal={Dependable Software Engineering. 
Theories, Tools, and Applications}, publisher={Springer International Publishing}, author={Singh, Jasdeep and Santinelli, Luca and Reghenzani, Federico and Bletsas, Konstantinos and Doose, David and Guo, Zhishan}, year={2019}, pages={89–105} } @misc{bhuiyan_yang_arefin_saifullah_guan_guo_2019, title={Mixed-Criticality Multicore Scheduling of Real-Time Gang Task Systems}, url={http://dx.doi.org/10.1109/rtss46320.2019.00048}, DOI={10.1109/rtss46320.2019.00048}, abstractNote={Mixed-criticality (MC) scheduling of sequential tasks (with no intra-task parallelism) has been well-explored by the real-time systems community. However, till date, there has been little progress on MC scheduling of parallel tasks. MC scheduling of parallel tasks is highly challenging due to the requirement of various assurances under different criticality levels. In this work, we address the MC scheduling of parallel tasks of gang model that allows workloads to execute on multiple cores simultaneously. Such a workload model represents an efficient mode-based parallel processing scheme with many potential applications. To schedule such task sets, we propose a new technique GEDF-VD, which integrates Global Earliest Deadline First (GEDF) and Earliest Deadline First with Virtual Deadline (EDF-VD). We prove the correctness of GEDF-VD and provide a detailed quantitative evaluation in terms of speedup bound in both the MC and the non-MC cases. Specifically, we show that GEDF provides a speedup bound of 2 for non-MC gang tasks, while the speedup for GEDF-VD considering MC gang tasks is √5 + 1. Experiments on randomly generated gang task sets are conducted to validate our theoretical findings and to demonstrate the effectiveness of the proposed approach.}, journal={2019 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Bhuiyan, Ashik ahmed and Yang, Kecheng and Arefin, Samsil and Saifullah, Abusayeed and Guan, Nan and Guo, Zhishan}, year={2019}, month={Dec} } @misc{wang_yang_ding_yin_guo_wunsch_2019, title={Model-Free Temporal Difference Learning for Non-Zero-Sum Games}, url={http://dx.doi.org/10.1109/ijcnn.2019.8851866}, DOI={10.1109/ijcnn.2019.8851866}, abstractNote={In this paper, we consider the two-player nonzero-sum games problem for continuous-time linear dynamic systems. It is shown that the non-zero-sum games problem results in solving the coupled algebraic Riccati equations, which are nonlinear algebraic matrix equations. Compared with the algebraic Riccati equation of the linear dynamic systems with only one player, the coupled algebraic Riccati equations of nonzero-sum games with multi-player are more difficult to be solved directly. First, the policy iteration algorithm is introduced to find the Nash equilibrium of the non-zero-sum games, which is the sufficient and necessary condition to solve the coupled algebraic Riccati equations. However, the policy iteration algorithm is offline and requires complete knowledge of the system dynamics. To overcome the above issues, a novel online iterative algorithm, named integral temporal difference learning algorithm, is developed. Moreover, an equivalent compact form of the integral temporal difference learning algorithm is also presented. It is shown that the integral temporal difference learning algorithm can be implemented in an online fashion and requires only partial knowledge of the system dynamics. In addition, in each iteration step, the closed-loop stability using the integral temporal difference learning algorithm is analyzed. 
Finally, the simulation study shows the effectiveness of the presented algorithm.}, journal={2019 International Joint Conference on Neural Networks (IJCNN)}, publisher={IEEE}, author={Wang, Liming and Yang, Yongliang and Ding, Dawei and Yin, Yixin and Guo, Zhishan and Wunsch, Donald C.}, year={2019}, month={Jul} } @misc{bian_wang_zhang_wang_huang_guo_2019, title={On Generating Dominators of Customer Preferences}, url={http://dx.doi.org/10.1109/bigdata47090.2019.9006194}, DOI={10.1109/bigdata47090.2019.9006194}, abstractNote={Manufacturing decisions on how to design new products have tremendous impact on the profitability of the manufacturer. This problem has recently attracted extensive research interests and motivated highly productive activities in developing the microeconomic framework for data mining and finding skyline objects in high-dimensional data. In this paper, we investigate a basic designing problem: designing products that satisfy the preferences of all customers. We formalize this problem as generating dominators (products) that dominate the preference dataset. The problem is naturally related to the microeconomic framework of data mining and the problem of finding skyline objects. The designing problem can be optimized from either the manufacturer’s perspective or the customer’s perspective. Our framework integrates these two perspectives and achieves optimization in a single effort. We show that this problem is NP-complete and study its computational properties. A deterministic greedy algorithm and a randomized greedy algorithm are developed. Extensive experimental evaluation on both real and simulated datasets demonstrates the effectiveness and efficiency of the proposed algorithms.}, journal={2019 IEEE International Conference on Big Data (Big Data)}, publisher={IEEE}, author={Bian, Jiang and Wang, Weibo and Zhang, Xiang and Wang, Wei and Huang, Arthur and Guo, Zhishan}, year={2019}, month={Dec} } @misc{bhuiyan_sruti_guo_yang_2019, title={Precise scheduling of mixed-criticality tasks by varying processor speed}, url={http://dx.doi.org/10.1145/3356401.3356410}, DOI={10.1145/3356401.3356410}, abstractNote={In this paper, we extend the imprecise mixed-criticality (IMC) model to precise scheduling of tasks. We also integrate the IMC model with the dynamic voltage and frequency scaling (DVFS) technique to enable energy minimization. The challenge in precise scheduling of MC systems is to guarantee the timing correctness of all tasks under both pessimistic and optimistic assumptions simultaneously. To our knowledge, this is the first work to address the integration of DVFS energy-conserving techniques with precise scheduling of all tasks of the MC model. We present utilization-based schedulability tests and sufficient conditions for such systems under two well-known MC frameworks, EDF-VD and MCF. A quantitative study, in the form of a speedup bound and an approximation ratio, is derived for the unified model.
Empirical studies based on randomly generated task sets are conducted to verify the theoretical results as well as the effectiveness of the proposed algorithms.}, journal={Proceedings of the 27th International Conference on Real-Time Networks and Systems}, publisher={ACM}, author={Bhuiyan, Ashikahmed and Sruti, Sai and Guo, Zhishan and Yang, Kecheng}, year={2019}, month={Nov} } @misc{li_xiong_guo_wang_xu_2019, title={SmartPC: Hierarchical Pace Control in Real-Time Federated Learning System}, url={http://dx.doi.org/10.1109/rtss46320.2019.00043}, DOI={10.1109/rtss46320.2019.00043}, abstractNote={Federated Learning is a technique for learning AI models through the collaboration of a large number of resource-constrained mobile devices, while preserving data privacy. Instead of aggregating the training data from devices, Federated Learning uses multiple rounds of parameter aggregation to train a model, wherein the participating devices are coordinated to incrementally update a shared model with their own locally learned parameters. To efficiently deploy a Federated Learning system over mobile devices, several critical issues including real-timeliness and energy efficiency should be well addressed. This paper proposes SmartPC, a hierarchical online pace control framework for Federated Learning that balances the training time and model accuracy in an energy-efficient manner. SmartPC consists of two layers of pace control: global and local. Prior to every training round, the global controller first oversees the status (e.g., connectivity, availability, and remaining energy/resources) of every participating device, then selects qualified devices and assigns them a well-estimated virtual deadline for task completion. Within such a virtual deadline, a statistically significant proportion (e.g., 60%) of the devices are expected to complete one round of their local training and model updates, while the overall progress of the multi-round training procedure is kept up adaptively. On each device, a local pace controller then dynamically adjusts device settings such as CPU frequency so that the learning task is able to meet the deadline with the least amount of energy consumption. We performed extensive experiments to evaluate SmartPC on both Android smartphones and simulation platforms using well-known datasets. The experiment results show that SmartPC reduces energy consumption on mobile devices by up to 32.8% and achieves a speedup of 2.27 in training time without model accuracy degradation.}, journal={2019 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Li, Li and Xiong, Haoyi and Guo, Zhishan and Wang, Jun and Xu, Cheng-Zhong}, year={2019}, month={Dec} } @article{xiong_wang_bian_zhu_xu_guo_huan_2019, title={SpHMC: Spectral Hamiltonian Monte Carlo}, volume={33}, ISSN={2374-3468 2159-5399}, url={http://dx.doi.org/10.1609/aaai.v33i01.33015516}, DOI={10.1609/aaai.v33i01.33015516}, abstractNote={Stochastic Gradient Hamiltonian Monte Carlo (SGHMC) methods have been widely used to sample from certain probability distributions, incorporating (kernel) density derivatives and/or given datasets. Instead of exploring new samples from kernel spaces, this work proposes a novel SGHMC sampler, namely Spectral Hamiltonian Monte Carlo (SpHMC), that produces the high dimensional sparse representations of given datasets through sparse sensing and SGHMC.
Inspired by compressed sensing, we assume all given samples are low-dimensional measurements of certain high-dimensional sparse vectors, while a continuous probability distribution exists in such high-dimensional space. Specifically, given a dictionary for sparse coding, SpHMC first derives a novel likelihood evaluator of the probability distribution from the loss function of LASSO, then samples from the high-dimensional distribution using stochastic Langevin dynamics with derivatives of the logarithm likelihood and Metropolis–Hastings sampling. In addition, new samples in low-dimensional measuring spaces can be regenerated using the sampled high-dimensional vectors and the dictionary. Extensive experiments have been conducted to evaluate the proposed algorithm using real-world datasets. The performance comparisons on three real-world applications demonstrate the superior performance of SpHMC beyond baseline methods.}, number={01}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, publisher={Association for the Advancement of Artificial Intelligence (AAAI)}, author={Xiong, Haoyi and Wang, Kafeng and Bian, Jiang and Zhu, Zhanxing and Xu, Cheng-Zhong and Guo, Zhishan and Huan, Jun}, year={2019}, month={Jul}, pages={5516–5524} } @misc{farhangi_bian_wang_guo_2019, title={Work-in-Progress: A Deep Learning Strategy for I/O Scheduling in Storage Systems}, url={http://dx.doi.org/10.1109/rtss46320.2019.00066}, DOI={10.1109/rtss46320.2019.00066}, abstractNote={Under the big data era, there is a crucial need to improve the performance of storage systems for data-intensive applications. Data-intensive applications tend to behave in a predictable manner, which can be exploited for improving the performance of the storage system. At the storage level, we propose a deep recurrent neural network that learns the patterns of I/O requests and predicts the upcoming ones, such that memory contents can be pre-loaded at the right time to prevent cache/memory misses. Preliminary experimental results, on two real-world I/O logs of storage systems (from financial and web search), are reported-they partially demonstrate the effectiveness of the proposed method.}, journal={2019 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Farhangi, Ashkan and Bian, Jiang and Wang, Jun and Guo, Zhishan}, year={2019}, month={Dec} } @article{sun_guan_jiang_chang_guo_deng_yi_2018, title={A Capacity Augmentation Bound for Real-Time Constrained-Deadline Parallel Tasks Under GEDF}, volume={37}, url={https://doi.org/10.1109/TCAD.2018.2857362}, DOI={10.1109/TCAD.2018.2857362}, abstractNote={Capacity augmentation bound is a widely used quantitative metric in theoretical studies of schedulability analysis for directed acyclic graph (DAG) parallel real-time tasks, which not only quantifies the suboptimality of the scheduling algorithms, but also serves as a simple linear-time schedulability test. Earlier studies on capacity augmentation bounds of the sporadic DAG task model were either restricted to a single DAG task or a set of tasks with implicit deadlines. In this paper, we consider parallel tasks with constrained deadlines under global earliest deadline first policy. We first show that it is impossible to obtain a constant bound for our problem setting, and derive both lower and upper bounds of the capacity augmentation bound as a function with respect to the maximum ratio of task period to deadline. Our upper bound is at most 1.47 times larger than the optimal one. 
We conduct experiments to compare the acceptance ratio of our capacity augmentation bound with an existing schedulability test that also has linear-time complexity. The results show that our capacity augmentation bound significantly outperforms the existing linear-time schedulability test under different parameter settings.}, number={11}, journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Sun, Jinghao and Guan, Nan and Jiang, Xu and Chang, Shuangshuang and Guo, Zhishan and Deng, Qingxu and Yi, Wang}, year={2018}, month={Nov}, pages={2200–2211} } @inproceedings{sun_guan_jiang_chang_guo_deng_yi_2018, title={A Capacity Augmentation Bound for Real-Time Constrained-Deadline Parallel Tasks under GEDF}, booktitle={International Conference on Embedded Software (EMSOFT)}, author={Sun, Jinghao and Guan, Nan and Jiang, Xu and Chang, Shuangshuang and Guo, Zhishan and Deng, Qingxu and Yi, Wang}, year={2018}, month={Oct} } @misc{santinelli_guo_2018, title={A Sensitivity Analysis for Mixed Criticality: Trading Criticality with Computational Resource}, url={http://dx.doi.org/10.1109/etfa.2018.8502493}, DOI={10.1109/etfa.2018.8502493}, abstractNote={Mixing workloads with multiple criticality levels raises challenges both in timing analysis and schedulability analysis. The timing models have to characterize the different behaviors that real-time tasks can experience under the various criticality modes. Meanwhile, the schedulability analysis has to combine every task and task interaction, providing several guarantees depending on the criticality level demanded at runtime. With this work, we first propose representations to model every possible system criticality mode as a combination of task criticality modes. A set of bounding functions is obtained, a bound for each mode combination thus corresponding to a system criticality level. Secondly, we develop the schedulability analysis that applies such sets and derives schedulability conditions with mixed criticalities. The tasks are scheduled with fixed priority and earliest deadline first, and various levels of schedulability are defined from the mode combinations. Finally, we make use of sensitivity analysis to evaluate the impact that multi-mode task behaviors have on schedulability. Trade-offs between schedulability, criticality levels and resource availability are explored. A mixed-criticality real-time system case study validates the proposed framework.}, journal={2018 IEEE 23rd International Conference on Emerging Technologies and Factory Automation (ETFA)}, publisher={IEEE}, author={Santinelli, Luca and Guo, Zhishan}, year={2018}, month={Sep} } @book{silvestri_goss_guo_bhuiyan_2018, title={Algorithms CS2500}, publisher={Missouri University of Science and Technology Scholars' Mine Course Materials}, author={Silvestri, Simone and Goss, Ken and Guo, Zhishan and Bhuiyan, Ashikahmed}, year={2018} } @misc{han_zhao_guo_liu_2018, title={An Improved Speedup Factor for Sporadic Tasks with Constrained Deadlines Under Dynamic Priority Scheduling}, url={http://dx.doi.org/10.1109/rtss.2018.00058}, DOI={10.1109/rtss.2018.00058}, abstractNote={Schedulability is a fundamental problem in real-time scheduling, but it has to be approximated due to the intrinsic computational hardness.
As the most popular algorithm for deciding schedulability on multiprocessor platforms, the speedup factor of partitioned-EDF is challenging to analyze and is far from being determined. Partitioned-EDF was first proposed in 2005 by Baruah and Fisher [1], and was shown to have a speedup factor of at most 3-1/m, meaning that if the input of sporadic tasks is feasible on m processors with speed one, partitioned-EDF will always succeed on m processors with speed 3-1/m. In 2011, this upper bound was improved to 2.6322-1/m by Chen and Chakraborty [2], and no further improvements have appeared since then. In this paper, we develop a novel method to discretize and regularize sporadic tasks, which enables us to improve, in the case of constrained deadlines, the speedup factor of partitioned-EDF to 2.5556-1/m, very close to the asymptotic lower bound 2.5 in [2].}, journal={2018 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Han, Xin and Zhao, Liang and Guo, Zhishan and Liu, Xingwu}, year={2018}, month={Dec} } @misc{mao_green_wang_xiong_guo_2018, title={DRESS: Dynamic RESource-Reservation Scheme for Congested Data-Intensive Computing Platforms}, url={http://dx.doi.org/10.1109/cloud.2018.00095}, DOI={10.1109/cloud.2018.00095}, abstractNote={In the past few years, we have witnessed an increasing number of businesses driven by big data analytics, such as Amazon recommendations and Google Advertisements. At the back-end side, the businesses are powered by big data processing platforms to quickly extract information and make decisions. Running on top of a computing cluster, those platforms utilize scheduling algorithms to allocate resources. An efficient scheduler is crucial to the system performance due to limited resources, e.g. CPU and Memory, and a large number of user demands. However, besides requests from clients and the current status of the system, it has limited knowledge about the execution lengths of the running jobs and the incoming jobs' resource demands, which makes assigning resources a challenging task. If most of the resources are occupied by a long-running job, other jobs will have to keep waiting until it releases them. This paper presents a new scheduling strategy, named DRESS, which particularly aims to optimize the allocation among jobs with various demands. Specifically, it classifies the jobs into two categories based on their requests, reserves a portion of resources for each category, and dynamically adjusts the reserved ratio by monitoring the pending requests and estimating the release patterns of running jobs. The results demonstrate that DRESS significantly reduces the completion time for one category, by up to 76.1% in our experiments, and in the meantime maintains a stable overall system performance.}, journal={2018 IEEE 11th International Conference on Cloud Computing (CLOUD)}, publisher={IEEE}, author={Mao, Ying and Green, Victoria and Wang, Jiayin and Xiong, Haoyi and Guo, Zhishan}, year={2018}, month={Jul} } @misc{xiong_cheng_fu_hu_bian_guo_2018, title={De-biasing Covariance-Regularized Discriminant Analysis}, url={http://dx.doi.org/10.24963/ijcai.2018/401}, DOI={10.24963/ijcai.2018/401}, abstractNote={Fisher's Linear Discriminant Analysis (FLD) is a well-known technique for linear classification, feature extraction and dimension reduction. The empirical FLD relies on two key estimations from the data -- the mean vector for each class and the (inverse) covariance matrix.
To improve the accuracy of FLD under the High Dimension Low Sample Size (HDLSS) settings, Covariance-Regularized FLD (CRLD) has been proposed to use shrunken covariance estimators, such as Graphical Lasso, to strike a balance between biases and variances. Though CRLD could obtain better classification accuracy, it usually incurs bias and converges to the optimal result with a slower asymptotic rate. Inspired by the recent progress in de-biased Lasso, we propose a novel FLD classifier, DBLD, which improves classification accuracy of CRLD through de-biasing. Theoretical analysis shows that DBLD possesses better asymptotic properties than CRLD. We conduct experiments on both synthetic datasets and real application datasets to confirm the correctness of our theoretical analysis and demonstrate the superiority of DBLD over classical FLD, CRLD and other downstream competitors under HDLSS settings.}, journal={Proceedings of the Twenty-Seventh International Joint Conference on Artificial Intelligence}, publisher={International Joint Conferences on Artificial Intelligence Organization}, author={Xiong, Haoyi and Cheng, Wei and Fu, Yanjie and Hu, Wenqing and Bian, Jiang and Guo, Zhishan}, year={2018}, month={Jul} } @article{bhuiyan_guo_saifullah_guan_xiong_2018, title={Energy-Efficient Real-Time Scheduling of DAG Tasks}, volume={17}, ISSN={1539-9087 1558-3465}, url={http://dx.doi.org/10.1145/3241049}, DOI={10.1145/3241049}, abstractNote={This work studies energy-aware real-time scheduling of a set of sporadic Directed Acyclic Graph (DAG) tasks with implicit deadlines. While meeting all real-time constraints, we try to identify the best task allocation and execution pattern such that the average power consumption of the whole platform is minimized. To our knowledge, this is the first work that addresses the power consumption issue in scheduling multiple DAG tasks on multi-cores and allows intra-task processor sharing. First, we adapt the decomposition-based framework for federated scheduling and propose an energy-sub-optimal scheduler. Then, we derive an approximation algorithm to identify processors to be merged together for further improvements in energy-efficiency. The effectiveness of the proposed approach is evaluated both theoretically via approximation ratio bounds and also experimentally through simulation study. 
Experimental results on randomly generated workloads show that our algorithms achieve an energy saving of 60% to 68% compared to existing DAG task schedulers.}, number={5}, journal={ACM Transactions on Embedded Computing Systems}, publisher={Association for Computing Machinery (ACM)}, author={Bhuiyan, Ashikahmed and Guo, Zhishan and Saifullah, Abusayeed and Guan, Nan and Xiong, Haoyi}, year={2018}, month={Sep}, pages={1–25} } @misc{guo_baruah_2018, title={Mixed-Criticality Real-Time Systems}, ISBN={9783642544774 9783642544774}, url={http://dx.doi.org/10.1007/978-3-642-54477-4_6-2}, DOI={10.1007/978-3-642-54477-4_6-2}, journal={Cyber-Physical Systems: A Reference}, publisher={Springer Berlin Heidelberg}, author={Guo, Zhishan and Baruah, Sanjoy}, year={2018}, month={Oct}, pages={1–20} } @misc{guo_santinelli_yang_2018, title={Mixed-Criticality Scheduling with Limited HI-Criticality Behaviors}, ISBN={9783319999326 9783319999333}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-99933-3_13}, DOI={10.1007/978-3-319-99933-3_13}, abstractNote={Due to size, weight, and power considerations, there is an emerging trend in real-time embedded systems design towards implementing functionalities of different levels of importance upon a shared platform, or implementing Mixed-Criticality (MC) systems. Much existing work on MC scheduling focuses on the classic Vestal model, where upon a mode switch, it is pessimistically assumed that all tasks may simultaneously exceed their less pessimistic execution time estimations, or lo-WCETs. In this paper, a less pessimistic MC model is proposed for system designers to specify the maximum number of tasks that may simultaneously exceed their lo-WCETs. The applicability and schedulability of the classic EDF-VD scheduler under this newly proposed model are studied, and a new schedulability test is presented. Experiments demonstrate that, by applying the proposed model and new schedulability test, significantly better schedulability can be achieved.}, journal={Dependable Software Engineering. Theories, Tools, and Applications}, publisher={Springer International Publishing}, author={Guo, Zhishan and Santinelli, Luca and Yang, Kecheng}, year={2018}, pages={187–199} } @inproceedings{han_guo_2018, title={Resource Augmentation Bounds of EDF and Partitioned-EDF for Sporadic Tasks with Constrained Deadlines}, booktitle={Real-Time Scheduling Open Problems Seminar (RTSOPS)}, author={Han, Xin and Guo, Zhishan}, year={2018}, month={Jul} } @misc{guo_yang_vaidhun_arefin_das_xiong_2018, title={Uniprocessor Mixed-Criticality Scheduling with Graceful Degradation by Completion Rate}, url={http://dx.doi.org/10.1109/rtss.2018.00052}, DOI={10.1109/rtss.2018.00052}, abstractNote={The scheduling of mixed-criticality (MC) systems with graceful degradation is considered, where LO-criticality tasks are guaranteed some service in HI mode in the form of minimum cumulative completion rates. First, we present an easy to implement admission-control procedure to determine which LO-criticality jobs to complete in HI mode. Then, we propose a demand-bound-function-based MC schedulability test that runs in pseudo-polynomial time for such systems under EDF-VD scheduling, wherein two virtual deadline setting heuristics are considered. Furthermore, we discuss a mechanism for the system to switch back from HI to LO mode and quantify the maximum time duration such recovery process would take. 
Finally, we show the effectiveness of our proposed method by experimental evaluation in comparison to state-of-the-art MC schedulers.}, journal={2018 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Guo, Zhishan and Yang, Kecheng and Vaidhun, Sudharsan and Arefin, Samsil and Das, Sajal K. and Xiong, Haoyi}, year={2018}, month={Dec} } @inproceedings{singh_santinelli_guo_brunel_doose_infantes_2018, title={Use of probabilities and formal methods to control system criticality levels}, booktitle={Real-Time Scheduling Open Problems Seminar (RTSOPS)}, author={Singh, Jasdeep and Santinelli, Luca and Guo, Zhishan and Brunel, Julien and Doose, David and Infantes, Guillaume}, year={2018}, month={Jul} } @misc{sruti_bhuiyan_guo_2018, title={Work-in-Progress: Precise Scheduling of Mixed-Criticality Tasks by Varying Processor Speed}, url={http://dx.doi.org/10.1109/rtss.2018.00033}, DOI={10.1109/rtss.2018.00033}, abstractNote={The traditional mixed-criticality (MC) model does not allow less critical tasks to execute in the event of an error or exception. Recently, the imprecise MC (IMC) model has been proposed where, even for exceptional events, less critical tasks also receive some amount of (degraded) service, e.g., when a task overruns its execution demand. In this work, we present our ongoing effort to extend the IMC model to the precise scheduling of tasks and integrate it with the dynamic voltage and frequency scaling (DVFS) scheme to enable energy minimization. Precise scheduling of MC systems is highly challenging because of its requirement to simultaneously guarantee the timing correctness of all tasks under both pessimistic and less pessimistic assumptions. We propose a utilization-based schedulability test and sufficient schedulability conditions for such systems under the earliest deadline first with virtual deadline (EDF-VD) scheduling policy. For this unified model, we present a quantitative study in the form of a speedup bound and approximation ratio. Finally, both theoretical and experimental analyses will be conducted to prove the correctness of our algorithm and to demonstrate its effectiveness.}, journal={2018 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Sruti, Sai and Bhuiyan, Ashik Ahmed and Guo, Zhishan}, year={2018}, month={Dec} } @misc{zhang_wang_jiang_guo_2018, title={Work-in-Progress: RWS - A Roulette Wheel Scheduler for Preventing Execution Pattern Leakage}, url={http://dx.doi.org/10.1109/rtas.2018.00016}, DOI={10.1109/rtas.2018.00016}, abstractNote={Many real-time systems are safety-critical, where reliability is crucial. Under traditional scheduling mechanisms, the execution patterns of the tasks on such systems can be easily derived from side-channel attacks, such that attackers can launch short high-priority tasks at critical instants, which may cause deadline misses for highly critical tasks. In order to protect the system from such attacks, this paper proposes the roulette wheel scheduler (RWS) to randomize the task execution pattern. Under RWS, probabilities are assigned to each task at predefined scheduling points, and the choice for execution is randomized, such that the execution pattern is no longer fixed. We formalize the concept of schedule entropy as the additional safety provided by any randomized scheduler.
It is used to measure the amount of uncertainty introduced by the new scheduler.}, journal={2018 IEEE Real-Time and Embedded Technology and Applications Symposium (RTAS)}, publisher={IEEE}, author={Zhang, Ying and Wang, Lingxiang and Jiang, Wei and Guo, Zhishan}, year={2018}, month={Apr} } @misc{xiong_cheng_hu_bian_guo_2017, title={AWDA: An Adaptive Wishart Discriminant Analysis}, url={http://dx.doi.org/10.1109/icdm.2017.62}, DOI={10.1109/icdm.2017.62}, abstractNote={Linear Discriminant Analysis (LDA) is widely used for supervised dimension reduction and linear classification. Classical LDA, however, suffers from the ill-posed estimation problem on data with high dimension and low sample size (HDLSS). To cope with this problem, in this paper, we propose an Adaptive Wishart Discriminant Analysis (AWDA) for classification, which makes predictions in an ensemble way. Compared to existing approaches, AWDA has two advantages: 1) leveraging the Wishart distribution, AWDA ensembles multiple LDA classifiers parameterized by the sampled covariance matrices via a Bayesian Voting Scheme, which theoretically improves the robustness of classification, compared to LDA classifiers using a single (probably ill-posed) covariance matrix estimator; 2) AWDA updates the weights for voting optimally to adapt to the local information of each new input, so as to enable nonlinear classification. Theoretical analysis indicates that AWDA guarantees a close approximation to the optimal Bayesian inference and thus achieves robust performance on high-dimensional data. Extensive experiments on real-world datasets show that our approach outperforms state-of-the-art algorithms by a large margin.}, journal={2017 IEEE International Conference on Data Mining (ICDM)}, publisher={IEEE}, author={Xiong, Haoyi and Cheng, Wei and Hu, Wenqing and Bian, Jiang and Guo, Zhishan}, year={2017}, month={Nov} } @inproceedings{guo_bhuiyan_saifullah_guan_xiong_2017, title={Energy-Efficient Multi-Core Scheduling for Real-Time DAG Tasks}, booktitle={Proceedings of the 29th Euromicro Conference on Real-Time Systems (ECRTS)}, author={Guo, Zhishan and Bhuiyan, Ashikahmed and Saifullah, Abusayeed and Guan, Nan and Xiong, Haoyi}, year={2017}, month={Jun} } @book{guo_2017, title={Guaranteeing some service upon mode switch in mixed-criticality systems}, url={https://www.ece.ucf.edu/~zsguo/pubs/conference_workshop/Dagstuhl17131.pdf}, number={17131}, journal={Mixed Criticality on Multicore/Manycore Platforms}, author={Guo, Zhishan}, year={2017}, month={Mar} } @misc{yang_wunsch_guo_yin_2017, title={Hamiltonian-Driven Adaptive Dynamic Programming Based on Extreme Learning Machine}, ISBN={9783319590714 9783319590721}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-59072-1_24}, DOI={10.1007/978-3-319-59072-1_24}, abstractNote={In this paper, a novel framework of reinforcement learning for continuous-time dynamical systems is presented based on the Hamiltonian functional and extreme learning machine. The idea of solution search in optimization is introduced to find the optimal control policy in the optimal control problem. The optimal control search consists of three steps: evaluation, comparison, and improvement of an arbitrary admissible policy. The Hamiltonian functional plays an important role in the above framework, under which only one critic is required in the adaptive critic structure. The critic network is implemented by the extreme learning machine.
Finally, a simulation study is conducted to verify the effectiveness of the presented algorithm.}, journal={Advances in Neural Networks - ISNN 2017}, publisher={Springer International Publishing}, author={Yang, Yongliang and Wunsch, Donald and Guo, Zhishan and Yin, Yixin}, year={2017}, pages={197–205} } @misc{zhang_guo_koutsoukos_2017, title={Handling write backs in multi-level cache analysis for WCET estimation}, url={http://dx.doi.org/10.1145/3139258.3139269}, DOI={10.1145/3139258.3139269}, abstractNote={In this paper, we investigate how to soundly analyze multi-level caches that employ a write-back policy at each level for worst-case execution time (WCET) estimation. To the best of our knowledge, there is only one existing approach for dealing with write backs in multi-level cache analysis. However, as shown in the paper, this existing approach is not sound. In order to soundly handle write backs, at a cache level, we need to consider whether a memory block is potentially dirty and when such a potentially dirty block may be evicted from the cache. To this end, we introduce a dirty attribute into persistence analysis for tracking dirty blocks, and over-approximate a write back window for each possible write back. Based on the overestimated write back occurring times, we propose an approach that can soundly deal with write backs in the analysis of multi-level (unified) caches for WCET estimation. Possible write back costs are also integrated into path analysis. We evaluate the proposed approach on a set of benchmarks to demonstrate its effectiveness.}, journal={Proceedings of the 25th International Conference on Real-Time Networks and Systems}, publisher={ACM}, author={Zhang, Zhenkai and Guo, Zhishan and Koutsoukos, Xenofon}, year={2017}, month={Oct} } @misc{zhang_guo_wang_xiong_zhang_2017, title={Integrating Cache-Related Preemption Delay into GEDF Analysis for Multiprocessor Scheduling with On-chip Cache}, url={http://dx.doi.org/10.1109/trustcom/bigdatase/icess.2017.317}, DOI={10.1109/trustcom/bigdatase/icess.2017.317}, abstractNote={Most existing multiprocessor schedulability analyses assume zero cost for preemptions and migrations. In order for those analyses to be correct, execution time estimations are often inflated by a certain (pessimistic) factor, leading to severe waste of computing resources. In this paper, a novel Global Earliest Deadline First (GEDF) schedulability test is proposed, where Cache-Related Preemption Delay (CRPD) is separately modeled and integrated. Specifically, multiple analyses for estimating CRPD bounds are conducted based on the refined estimation of the maximal number of preemptions, leading to tighter GEDF schedulability tests. An experimental study is conducted to demonstrate the performance of the proposed methods.}, journal={2017 IEEE Trustcom/BigDataSE/ICESS}, publisher={IEEE}, author={Zhang, Ying and Guo, Zhishan and Wang, Lingxiang and Xiong, Haoyi and Zhang, Zhenkai}, year={2017}, month={Aug} } @misc{bian_xiong_cheng_hu_guo_fu_2017, title={Multi-party Sparse Discriminant Learning}, url={http://dx.doi.org/10.1109/icdm.2017.86}, DOI={10.1109/icdm.2017.86}, abstractNote={Sparse Discriminant Analysis (SDA) has been widely used to improve the performance of classical Fisher's Linear Discriminant Analysis in supervised metric learning, feature selection and classification.
With the increasing need for distributed data collection, storage, and processing, enabling Sparse Discriminant Learning to embrace Multi-Party distributed computing environments has become an emerging research topic. This paper proposes a novel Multi-Party SDA algorithm, which can learn SDA models effectively without sharing any raw data and basic statistics among machines. The proposed algorithm 1) leverages the direct estimation of SDA [1] to derive a distributed loss function for discriminant learning, 2) parameterizes the distributed loss function with local/global estimates through bootstrapping, and 3) approximates a global estimate of the linear discriminant projection vector by optimizing the "distributed bootstrapping loss function" with gossip-based stochastic gradient descent. Experimental results on both synthetic and real-world benchmark datasets show that our algorithm can compete with the centralized SDA with similar performance, and significantly outperforms the most recent distributed SDA [2] in terms of accuracy and F1-score.}, journal={2017 IEEE International Conference on Data Mining (ICDM)}, publisher={IEEE}, author={Bian, Jiang and Xiong, Haoyi and Cheng, Wei and Hu, Wenqing and Guo, Zhishan and Fu, Yanjie}, year={2017}, month={Nov} } @article{xiong_zhang_guo_chen_barnes_2017, title={Near-Optimal Incentive Allocation for Piggyback Crowdsensing}, volume={55}, ISSN={0163-6804}, url={http://dx.doi.org/10.1109/mcom.2017.1600748}, DOI={10.1109/mcom.2017.1600748}, abstractNote={Piggyback crowdsensing (PCS) is a novel energy-efficient mobile crowdsensing paradigm that reduces the energy consumption of crowdsensing tasks by leveraging smartphone app opportunities (SAOs). This article, based on several fundamental assumptions of incentive payment for PCS task participation and spatial-temporal coverage assessment for collected sensor data, first proposes two alternating data collection goals. Goal 1 is maximizing overall spatial-temporal coverage under a predefined incentive budget constraint; goal 2 is minimizing total incentive payment while ensuring predefined spatial-temporal coverage for collected sensor data, all on top of the PCS task model. With all of the above assumptions, settings, and models, we introduce CrowdMind -- a generic incentive allocation framework for the two optimal data collection goals, on top of the PCS model. We evaluated CrowdMind extensively using a large-scale real-world SAO dataset for the two incentive allocation problems. The results demonstrate that compared to baseline algorithms, CrowdMind achieves better spatial-temporal coverage under the same incentive budget constraint, while costing less in total incentive payments and ensuring the same spatial-temporal coverage, under various coverage/incentive settings.
Further, a short theoretical analysis is presented to analyze the performance of CrowdMind in terms of optimizing the total incentive cost and overall spatial-temporal coverage objectives/constraints.}, number={6}, journal={IEEE Communications Magazine}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Xiong, Haoyi and Zhang, Daqing and Guo, Zhishan and Chen, Guanling and Barnes, Laura E.}, year={2017}, pages={120–125} } @misc{yang_guo_wunsch_yin_2017, title={Off-policy reinforcement learning for robust control of discrete-time uncertain linear systems}, url={http://dx.doi.org/10.23919/chicc.2017.8027737}, DOI={10.23919/chicc.2017.8027737}, abstractNote={In this paper, an off-policy reinforcement learning method is developed for the robust stabilizing controller design of discrete-time uncertain linear systems. The proposed robust control design consists of two steps. First, the robust control problem is transformed into an optimal control problem. Second, the off-policy RL method is used to design the optimal control policy, which guarantees the robust stability of the original system with uncertainty. The condition for the equivalence between the robust control problem and the optimal control problem is discussed. The off-policy method does not require any knowledge of the system and efficiently utilizes the data collected online to improve the performance of the approximate optimal control policy in each iteration successively. Finally, a simulation example is carried out to verify the effectiveness of the presented algorithm for the robust control problem of discrete-time linear systems with uncertainty.}, journal={2017 36th Chinese Control Conference (CCC)}, publisher={IEEE}, author={Yang, Yongliang and Guo, Zhishan and Wunsch, Donald and Yin, Yixin}, year={2017}, month={Jul} } @misc{santinelli_guo_2017, title={On the Criticality of Probabilistic Worst-Case Execution Time Models}, ISBN={9783319694825 9783319694832}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-69483-2_4}, DOI={10.1007/978-3-319-69483-2_4}, abstractNote={Probabilistic approaches to timing analysis derive probability distributions to upper bound task execution time. The main purpose of using probability distributions instead of deterministic bounds is to have more flexible and less pessimistic worst-case models. However, in order to guarantee safe probabilistic worst-case models, every possible execution condition needs to be taken into account. In this work, we propose probabilistic representations which are able to model every task and system execution condition, including the worst cases. Combining probabilities and multiple conditions offers a flexible and accurate representation that can be applied with mixed-critical task models and fault effect characterizations on task executions. A case study with single- and multi-core real-time systems is provided to illustrate the completeness and versatility of the representation framework we provide.}, journal={Dependable Software Engineering.
Theories, Tools, and Applications}, publisher={Springer International Publishing}, author={Santinelli, Luca and Guo, Zhishan}, year={2017}, pages={59–74} } @book{guo_2017, title={Regarding the Optimality of Speedup Bounds of Mixed-Criticality Schedulability Tests}, number={17131}, journal={Mixed Criticality on Multicore/Manycore Platforms}, author={Guo, Zhishan}, year={2017}, month={Mar} } @misc{vaidhun_arefin_guo_xiong_das_2017, title={Response time in mixed-critical pervasive systems}, url={http://dx.doi.org/10.1109/uic-atc.2017.8397530}, DOI={10.1109/uic-atc.2017.8397530}, abstractNote={Pervasive computing systems at large scale rely on real-time scheduling on top of distributed and networked computing environments. From a user experience perspective, while the requirements on the response time for specific applications might be different, mixed-criticality in real-time scheduling, which provides diverse response time guarantees for applications, is often required. In this paper, we study the real-time scheduling problem in mixed-critical pervasive computing systems. We first analyze the response time requirements for common networked pervasive computing systems, and model the mixed-criticality using the minimum response time Quality-of-Service (QoS) that should be guaranteed even in the worst case. Then, we propose to leverage the Fixed-Priority Rate-Monotonic (FPRM) scheduler for real-time scheduling. We evaluate FPRM using synthetic workloads generated according to real-world pervasive computing systems. Both simulation experiments and worst-case analytical results show that, when sufficient resources are given, all pervasive computing tasks can be completed strictly subject to the response time requirements, with mixed-criticality guarantees ensured.}, journal={2017 IEEE SmartWorld, Ubiquitous Intelligence & Computing, Advanced & Trusted Computed, Scalable Computing & Communications, Cloud & Big Data Computing, Internet of People and Smart City Innovation (SmartWorld/SCALCOM/UIC/ATC/CBDCom/IOP/SCI)}, publisher={IEEE}, author={Vaidhun, Sudharsan and Arefin, Samsil and Guo, Zhishan and Xiong, Haoyi and Das, Sajal K.}, year={2017}, month={Aug} } @misc{guo_sruti_ward_baruah_2017, title={Sustainability in Mixed-Criticality Scheduling}, url={http://dx.doi.org/10.1109/rtss.2017.00010}, DOI={10.1109/rtss.2017.00010}, abstractNote={Sustainability is a formalization of the requirement for scheduling algorithms and schedulability tests that a system deemed to be correctly schedulable should remain so if its run-time behavior is better than anticipated. The notion of sustainability is extended to mixed-criticality systems, and sustainability properties are determined for a variety of widely-studied uniprocessor and multi-processor mixed-criticality scheduling algorithms.}, journal={2017 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Guo, Zhishan and Sruti, Sai and Ward, Bryan C. and Baruah, Sanjoy}, year={2017}, month={Dec} } @misc{guo_zhang_wang_zhang_2017, title={Work-in-Progress: Cache-Aware Partitioned EDF Scheduling for Multi-core Real-Time Systems}, url={http://dx.doi.org/10.1109/rtss.2017.00054}, DOI={10.1109/rtss.2017.00054}, abstractNote={As the number of cores and utilization of the system are increasing quickly, shared resources like caches are interfering with tasks' execution behaviors more heavily.
In order to achieve resource efficiency in both temporal and spatial domains for multi-core real-time systems, caches should be taken into consideration when performing partitions. In this paper, partitioned Earliest Deadline First (EDF) scheduling on a preemptive multi-core platform is considered. We propose a new system model that covers inter-task cache interference and describe some ongoing work in identifying proper partition schemes under such settings.}, journal={2017 IEEE Real-Time Systems Symposium (RTSS)}, publisher={IEEE}, author={Guo, Zhishan and Zhang, Ying and Wang, Lingxiang and Zhang, Zhenkai}, year={2017}, month={Dec} } @article{guo_baruah_2016, title={A Neurodynamic Approach for Real-Time Scheduling via Maximizing Piecewise Linear Utility}, volume={27}, ISSN={2162-237X 2162-2388}, url={http://dx.doi.org/10.1109/tnnls.2015.2466612}, DOI={10.1109/tnnls.2015.2466612}, abstractNote={In this paper, we study a set of real-time scheduling problems whose objectives can be expressed as piecewise linear utility functions. This model has very wide applications in scheduling-related problems, such as mixed criticality, response time minimization, and tardiness analysis. Approximation schemes and matrix vectorization techniques are applied to transform scheduling problems into linear constraint optimization with a piecewise linear and concave objective; thus, a neural network-based optimization method can be adopted to solve such scheduling problems efficiently. This neural network model has a parallel structure, and can also be implemented on circuits, on which the converging time can be significantly limited to meet real-time requirements. Examples are provided to illustrate how to solve the optimization problem and to form a schedule. An approximation ratio bound of 0.5 is further provided. Experimental studies on a large number of randomly generated sets suggest that our algorithm is optimal when the set is nonoverloaded, and outperforms existing typical scheduling strategies when there is overload. Moreover, the number of steps for finding an approximate solution remains at the same level when the size of the problem (number of jobs within a set) increases.}, number={2}, journal={IEEE Transactions on Neural Networks and Learning Systems}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Guo, Zhishan and Baruah, Sanjoy K.}, year={2016}, month={Feb}, pages={238–248} } @article{cheng_guo_zhang_wang_2016, title={CGC}, volume={10}, ISSN={1556-4681 1556-472X}, url={http://dx.doi.org/10.1145/2903147}, DOI={10.1145/2903147}, abstractNote={ Multi-view graph clustering aims to enhance clustering performance by integrating heterogeneous information collected in different domains. Each domain provides a different view of the data instances. Leveraging cross-domain information has been demonstrated an effective way to achieve better clustering results. Despite the previous success, existing multi-view graph clustering methods usually assume that different views are available for the same set of instances. Thus, instances in different domains can be treated as having strict one-to-one relationship. In many real-life applications, however, data instances in one domain may correspond to multiple instances in another domain. Moreover, relationships between instances in different domains may be associated with weights based on prior (partial) knowledge. 
In this article, we propose a flexible and robust framework, Co-regularized Graph Clustering (CGC), based on non-negative matrix factorization (NMF), to tackle these challenges. CGC has several advantages over the existing methods. First, it supports many-to-many cross-domain instance relationships. Second, it incorporates weights on cross-domain relationships. Third, it allows partial cross-domain mapping so that graphs in different domains may have different sizes. Finally, it provides users with the extent to which the cross-domain instance relationship violates the in-domain clustering structure, and thus enables users to re-evaluate the consistency of the relationship. We develop an efficient optimization method that guarantees to find the global optimal solution with a given confidence requirement. The proposed method can automatically identify noisy domains and assign smaller weights to them. This helps to obtain an optimal graph partition for the focused domain. Extensive experimental results on UCI benchmark datasets, newsgroup datasets, and biological interaction networks demonstrate the effectiveness of our approach.}, number={4}, journal={ACM Transactions on Knowledge Discovery from Data}, publisher={Association for Computing Machinery (ACM)}, author={Cheng, Wei and Guo, Zhishan and Zhang, Xiang and Wang, Wei}, year={2016}, month={May}, pages={1–27} } @misc{santinelli_guo_george_2016, title={Fault-aware sensitivity analysis for probabilistic real-time systems}, url={http://dx.doi.org/10.1109/dft.2016.7684072}, DOI={10.1109/dft.2016.7684072}, abstractNote={In probabilistic real-time modeling, diverse task execution conditions can be characterized with probabilistic distributions, where multiple execution time thresholds are represented, each with an exceeding probability. Compared to traditional deterministic real-time analysis, probabilistic approaches provide more flexibility in system behavior modeling, which may result in more precise schedulability analysis. With this work, we combine sensitivity analysis and probabilistic models of fault effects on task execution behaviors. The goal is to develop probabilistic schedulability analysis that is applicable to both faulty and non-faulty execution conditions. While the probabilities accurately characterize faults and fault effects on worst-case execution times, the probabilistic schedulability analysis both qualifies and quantifies fault impacts on system schedulability.}, journal={2016 IEEE International Symposium on Defect and Fault Tolerance in VLSI and Nanotechnology Systems (DFT)}, publisher={IEEE}, author={Santinelli, Luca and Guo, Zhishan and George, Laurent}, year={2016}, month={Sep} } @misc{guo_2016, title={Mixed-Criticality Scheduling on Varying-Speed Platforms with Bounded Performance Drop Rate}, url={http://dx.doi.org/10.1109/smartcomp.2016.7501705}, DOI={10.1109/smartcomp.2016.7501705}, abstractNote={As modern cyber-physical systems (CPS) are often mobile, their operating environments vary during run-time in an unpredictable way. Existing varying-speed platform models often take immediate performance change into consideration, which may be too pessimistic, as the aforementioned environmental changes (e.g., thermal) often result in mild performance drops. A more sophisticated varying-speed platform model is proposed to more precisely analyze the real-time schedulability of such systems, which takes the physical limitation of performance deceleration of the CPS into consideration.
A simple and efficient algorithm named EDF-VD is adapted to schedule workload with multiple importance levels upon such platforms, and corresponding schedulability tests are provided.}, journal={2016 IEEE International Conference on Smart Computing (SMARTCOMP)}, publisher={IEEE}, author={Guo, Zhishan}, year={2016}, month={May} } @inproceedings{baruah_easwaran_guo_2016, title={Mixed-criticality scheduling to minimize makespan}, booktitle={Proceedings of the 36th IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS)}, author={Baruah, Sanjoy and Easwaran, Arvind and Guo, Zhishan}, year={2016} } @phdthesis{guo_2016, title={Real-Time Scheduling of Mixed-Critical Workloads upon Platforms with Uncertainties}, school={University of North Carolina-Chapel Hill}, author={Guo, Zhishan}, year={2016} } @misc{baruah_burns_guo_2016, title={Scheduling Mixed-Criticality Systems to Guarantee Some Service under All Non-erroneous Behaviors}, url={http://dx.doi.org/10.1109/ecrts.2016.12}, DOI={10.1109/ecrts.2016.12}, abstractNote={Many reactive systems must be designed and analyzed prior to deployment in the presence of considerable epistemic uncertainty: the precise nature of the external environment the system will encounter, as well as the run-time behavior of the platform upon which it is implemented, cannot be predicted with complete certainty prior to deployment. The widely-studied Vestal model for mixed-criticality workloads addresses uncertainties in estimating the worst-case execution time (WCET) of real-time code. Different estimations, at different levels of assurance, are made about these WCET values, it is required that all functionalities execute correctly if the less conservative assumptions hold, while only the more critical functionalities are required to execute correctly in the (presumably less likely) event that the less conservative assumptions fail to hold but the more conservative assumptions do. A generalization of the Vestal model is considered here, in which a degraded (but non-zero) level of service is required for the less critical functionalities even in the event of only the more conservative assumptions holding. An algorithm is derived for scheduling dual-criticality implicit-deadline sporadic task systems specified in this more general model upon preemptive uniprocessor platforms, and proved to be speedup-optimal.}, journal={2016 28th Euromicro Conference on Real-Time Systems (ECRTS)}, publisher={IEEE}, author={Baruah, Sanjoy and Burns, Alan and Guo, Zhishan}, year={2016}, month={Jul} } @book{guo_liu_xu_yang_2015, title={A Survey of Real-Time Automotive Systems}, institution={University of North Carolina at Chapel Hill}, author={Guo, Zhishan and Liu, Rui and Xu, Xinghao and Yang, Kecheng}, year={2015} } @article{crowley_zhabotynsky_sun_huang_pakatci_kim_wang_morgan_calaway_aylor_et al._2015, title={Analyses of allele-specific gene expression in highly divergent mouse crosses identifies pervasive allelic imbalance}, volume={47}, ISSN={1061-4036 1546-1718}, url={http://dx.doi.org/10.1038/ng.3222}, DOI={10.1038/ng.3222}, abstractNote={Fernando Pardo-Manuel de Villena and colleagues generate a 3 × 3 diallel cross of three inbred mouse lines and examine gene expression in multiple tissues. They identify allelic imbalance favoring the expression of the paternal allele across the genome. Complex human traits are influenced by variation in regulatory DNA through mechanisms that are not fully understood. 
Because regulatory elements are conserved between humans and mice, a thorough annotation of cis regulatory variants in mice could aid in further characterizing these mechanisms. Here we provide a detailed portrait of mouse gene expression across multiple tissues in a three-way diallel. Greater than 80% of mouse genes have cis regulatory variation. Effects from these variants influence complex traits and usually extend to the human ortholog. Further, we estimate that at least one in every thousand SNPs creates a cis regulatory effect. We also observe two types of parent-of-origin effects, including classical imprinting and a new global allelic imbalance in expression favoring the paternal allele. We conclude that, as with humans, pervasive regulatory variation influences complex genetic traits in mice and provide a new resource toward understanding the genetic control of transcription in mammals.}, number={4}, journal={Nature Genetics}, publisher={Springer Science and Business Media LLC}, author={Crowley, James J and Zhabotynsky, Vasyl and Sun, Wei and Huang, Shunping and Pakatci, Isa Kemal and Kim, Yunjung and Wang, Jeremy R and Morgan, Andrew P and Calaway, John D and Aylor, David L and et al.}, year={2015}, month={Mar}, pages={353–360} } @misc{guo_santinelli_yang_2015, title={EDF Schedulability Analysis on Mixed-Criticality Systems with Permitted Failure Probability}, url={http://dx.doi.org/10.1109/rtcsa.2015.8}, DOI={10.1109/rtcsa.2015.8}, abstractNote={Many safety critical real-time systems are considered certified when they meet failure probability requirements with respect to the maximum permitted incidences of failure per hour. In this paper, the mixed-criticality task model with multiple worst case execution time (WCET) estimations is extended to incorporate such system-level certification restrictions. A new parameter is added to each task, characterizing the distribution of the WCET estimations -- the likelihood of all jobs of a task finishing their executions within the less pessimistic WCET estimate. An efficient algorithm named LFF-Clustering is derived for scheduling mixed-criticality systems represented by this model. Experimental analyses show our new model and algorithm out-perform current state-of-the-art mixed-criticality scheduling algorithms.}, journal={2015 IEEE 21st International Conference on Embedded and Real-Time Computing Systems and Applications}, publisher={IEEE}, author={Guo, Zhishan and Santinelli, Luca and Yang, Kecheng}, year={2015}, month={Aug} } @book{guo_2015, title={MC Scheduling on Varying-Speed Processors}, volume={5}, number={315121}, journal={Dagstuhl Reports}, author={Guo, Zhishan}, year={2015}, pages={128–129} } @misc{baruah_easwaran_guo_2015, title={MC-Fluid: Simplified and Optimally Quantified}, url={http://dx.doi.org/10.1109/rtss.2015.38}, DOI={10.1109/rtss.2015.38}, abstractNote={The fluid scheduling model allows for schedules in which an individual task may be assigned a fraction of a processor at each time instant. These assignments are subject to the constraints that no fraction exceeds one and the sum of all the assigned fractions do not exceed the sum of the computing capacities of all the processors at any instant. An algorithm, MC-Fluid, has recently been proposed for scheduling systems of mixed-criticality implicit-deadline sporadic tasks under the fluid scheduling model. MC-Fluid has been shown to have a speedup bound no worse than (1 + √5)/2 or ≈ 1.618 for scheduling dual-criticality systems. 
We derive here a simplified variant of MC-Fluid called MCF, which has run-time linear in the number of tasks. We prove that this simplified variant has a speedup bound no worse than 4/3 for dual-criticality systems, and show that this implies that MC-Fluid, too, has a speedup bound no worse than 4/3. We know from prior results in uniprocessor mixed-criticality scheduling that no algorithm may have a speedup bound smaller than 4/3, allowing us to conclude that MCF and MC-Fluid are in fact speedup-optimal for dual-criticality scheduling.}, journal={2015 IEEE Real-Time Systems Symposium}, publisher={IEEE}, author={Baruah, Sanjoy and Easwaran, Arvind and Guo, Zhishan}, year={2015}, month={Dec} } @inproceedings{baruah_guo_2015, title={Mixed-criticality job models: a comparison}, booktitle={Proceedings of the 36th IEEE Real-Time Systems Symposium (RTSS), Workshop on Mixed-Criticality Systems}, author={Baruah, Sanjoy and Guo, Zhishan}, year={2015}, month={Dec} } @misc{guo_baruah_2015, title={The concurrent consideration of uncertainty in WCETs and processor speeds in mixed-criticality systems}, url={http://dx.doi.org/10.1145/2834848.2834852}, DOI={10.1145/2834848.2834852}, abstractNote={Most prior work on mixed-criticality (MC) scheduling has focused on a model in which multiple WCET parameters are specified for each job, the interpretation being that the larger values represent "safer" estimates of the job's true WCET. More recently, a different MC model has been studied in which it is assumed that the precise speed of the processor upon which the system is implemented varies in an a priori unknown manner during runtime, and estimates must be made about how low the actual speed may fall. The research reported in this paper seeks to integrate the varying-speed MC model and the multi-WCET one into a unified framework. A general model is proposed in which each job may have multiple WCETs specified, and the precise speed of the processor upon which the system is implemented may vary during run-time. We reinterpret the key idea behind the table-driven MC scheduling scheme proposed in one of our recent works, and provide a more efficient algorithm named LE-EDF. This algorithm strictly generalizes algorithms that were previously separately proposed for MC scheduling of systems with multiple WCETs as well as for MC scheduling on variable-speed processors. It is shown that LE-EDF outperforms (via simulation) and/or dominates (under theoretical proof) existing algorithms. LE-EDF is also compared with the optimal clairvoyant algorithm using the speedup factor metric.}, journal={Proceedings of the 23rd International Conference on Real Time and Networks Systems}, publisher={ACM}, author={Guo, Zhishan and Baruah, Sanjoy}, year={2015}, month={Nov} } @misc{guo_baruah_2015, title={Uniprocessor EDF scheduling of AVR task systems}, url={http://dx.doi.org/10.1145/2735960.2735976}, DOI={10.1145/2735960.2735976}, abstractNote={The adaptive varying-rate (AVR) task model has been proposed as a means of modeling certain physically-derived constraints in CPS's in a manner that is more accurate (less pessimistic) than is possible using prior task models from real-time scheduling theory. Existing work on schedulability analysis of systems of AVR tasks is primarily restricted to fixed-priority scheduling; this paper establishes schedulability analysis results for systems of AVR and sporadic tasks under Earliest Deadline First (EDF) scheduling.
The proposed analysis techniques are evaluated both theoretically via the speedup factor metric, and experimentally via schedulability experiments on randomly-generated task systems.}, journal={Proceedings of the ACM/IEEE Sixth International Conference on Cyber-Physical Systems}, publisher={ACM}, author={Guo, Zhishan and Baruah, Sanjoy K.}, year={2015}, month={Apr} } @inproceedings{cheng_zhang_guo_shi_wang_2014, title={Graph Regularized Dual Lasso for Robust eQTL Mapping}, booktitle={Proceedings of the 22nd Annual International Conference on Intelligent Systems for Molecular Biology (ISMB)}, author={Cheng, Wei and Zhang, Xiang and Guo, Zhishan and Shi, Yu and Wang, Wei}, year={2014}, month={Jul} } @article{cheng_zhang_guo_shi_wang_2014, title={Graph-regularized dual Lasso for robust eQTL mapping}, volume={30}, ISSN={1367-4811 1367-4803}, url={http://dx.doi.org/10.1093/bioinformatics/btu293}, DOI={10.1093/bioinformatics/btu293}, abstractNote={Abstract}, number={12}, journal={Bioinformatics}, publisher={Oxford University Press (OUP)}, author={Cheng, Wei and Zhang, Xiang and Guo, Zhishan and Shi, Yu and Wang, Wei}, year={2014}, month={Jun}, pages={i139–i148} } @article{guo_baruah_2014, title={Implementing mixed-criticality systems upon a preemptive varying-speed processor}, volume={1}, DOI={10.4230/LITES-v001-i002-a003}, abstractNote={A mixed criticality (MC) workload consists of components of varying degrees of importance (or "criticalities"); the more critical components typically need to have their correctness validated to greater levels of assurance than the less critical ones. The problem of executing such a MC workload upon a preemptive processor whose effective speed may vary during run-time, in a manner that is not completely known prior to run-time, is considered. Such a processor is modeled as being characterized by several execution speeds: a normal speed and several levels of degraded speed. Under normal circumstances it will execute at or above its normal speed; conditions during run-time may cause it to execute slower. It is desired that all components of the MC workload execute correctly under normal circumstances. If the processor speed degrades, it should nevertheless remain the case that the more critical components execute correctly (although the less critical ones need not do so). In this work, we derive an optimal algorithm for scheduling MC workloads upon such platforms; achieving optimality does not require that the processor be able to monitor its own run-time speed. For the sub-case of the general problem where there are only two criticality levels defined, we additionally provide an implementation that is asymptotically optimal in terms of run-time efficiency.}, number={2}, journal={Leibniz Transactions on Embedded Systems (LITES)}, author={Guo, Zhishan and Baruah, Sanjoy}, year={2014}, pages={3:1–3:19} } @misc{guo_baruah_2014, title={Mixed-Criticality Scheduling upon Varying-Speed Multiprocessors}, url={http://dx.doi.org/10.1109/dasc.2014.50}, DOI={10.1109/dasc.2014.50}, abstractNote={An increasing trend in embedded computing is the moving towards mixed-criticality (MC) systems, in which functionalities of different importance degrees (criticalities) are implemented upon a common platform. Most previous work on MC scheduling focuses on the aspect that different timing analysis tools may result in multiple WCET estimations for each "job" (piece of code). Recently, a different MC model has been proposed, targeting systems with varying execution speeds. 
It is assumed that the precise speed of the processor upon which the system is implemented varies in an a priori unknown manner during runtime, and estimates must be made as to how low the actual speed may fall. Prior work has dealt with uniprocessor platforms of this kind; the research reported in this paper seeks to generalize this prior work to be applicable to multicore platforms. In our method, a linear program (LP) is constructed based on necessary and sufficient scheduling conditions, and according to its solution, jobs are executed using a processor-sharing-based method. Optimality of the algorithm is proved, and an example is constructed to show the necessity of processor sharing.}, journal={2014 IEEE 12th International Conference on Dependable, Autonomic and Secure Computing}, publisher={IEEE}, author={Guo, Zhishan and Baruah, Sanjoy}, year={2014}, month={Aug} } @misc{baruah_guo_2014, title={Scheduling Mixed-Criticality Implicit-Deadline Sporadic Task Systems upon a Varying-Speed Processor}, url={http://dx.doi.org/10.1109/rtss.2014.15}, DOI={10.1109/rtss.2014.15}, abstractNote={A mixed criticality (MC) workload consists of components of varying degrees of importance (or "criticalities"). The problem of executing an MC workload, modeled as a collection of independent implicit-deadline sporadic tasks executing upon a preemptive uniprocessor, is considered. Suitable scheduling strategies are devised for scheduling such systems despite uncertainty and unpredictability in both the amount of execution needed by the tasks, and the effective speed of the processor. These scheduling strategies allow for simultaneously making efficient use of platform resources and ensuring the correctness of the more critical workload components at greater levels of assurance.}, journal={2014 IEEE Real-Time Systems Symposium}, publisher={IEEE}, author={Baruah, Sanjoy and Guo, Zhishan}, year={2014}, month={Dec} } @misc{cheng_zhang_guo_wu_sullivan_wang_2013, title={Flexible and robust co-regularized multi-domain graph clustering}, url={http://dx.doi.org/10.1145/2487575.2487582}, DOI={10.1145/2487575.2487582}, abstractNote={Multi-view graph clustering aims to enhance clustering performance by integrating heterogeneous information collected in different domains. Each domain provides a different view of the data instances. Leveraging cross-domain information has been demonstrated to be an effective way to achieve better clustering results. Despite the previous success, existing multi-view graph clustering methods usually assume that different views are available for the same set of instances. Thus instances in different domains can be treated as having a strict one-to-one relationship. In many real-life applications, however, data instances in one domain may correspond to multiple instances in another domain. Moreover, relationships between instances in different domains may be associated with weights based on prior (partial) knowledge. In this paper, we propose a flexible and robust framework, CGC (Co-regularized Graph Clustering), based on non-negative matrix factorization (NMF), to tackle these challenges. CGC has several advantages over the existing methods. First, it supports many-to-many cross-domain instance relationships. Second, it incorporates weights on cross-domain relationships. Third, it allows partial cross-domain mapping so that graphs in different domains may have different sizes.
Finally, it provides users with the extent to which the cross-domain instance relationship violates the in-domain clustering structure, and thus enables users to re-evaluate the consistency of the relationship. Extensive experimental results on UCI benchmark data sets, newsgroup data sets and biological interaction networks demonstrate the effectiveness of our approach.}, journal={Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining}, publisher={ACM}, author={Cheng, Wei and Zhang, Xiang and Guo, Zhishan and Wu, Yubao and Sullivan, Patrick F. and Wang, Wei}, year={2013}, month={Aug} } @misc{baruah_guo_2013, title={Mixed-Criticality Scheduling upon Varying-Speed Processors}, url={http://dx.doi.org/10.1109/rtss.2013.15}, DOI={10.1109/rtss.2013.15}, abstractNote={A varying-speed processor is characterized by two execution speeds: a normal speed and a degraded speed. Under normal circumstances it will execute at its normal speed, conditions during run-time may cause it to execute more slowly (but no slower than at its degraded speed). The problem of executing an integrated workload, consisting of some more important components and some less important ones, upon such a varying-speed processor is considered. It is desired that all components execute correctly under normal circumstances, whereas the more important components should execute correctly (although the less important components need not) if the processor runs at any speed no slower than its specified degraded speed.}, journal={2013 IEEE 34th Real-Time Systems Symposium}, publisher={IEEE}, author={Baruah, Sanjoy and Guo, Zhishan}, year={2013}, month={Dec} } @misc{guo_baruah_2013, title={Mixed-criticality scheduling upon non-monitored varying-speed processors}, url={http://dx.doi.org/10.1109/sies.2013.6601488}, DOI={10.1109/sies.2013.6601488}, abstractNote={A varying-speed processor is characterized by two execution speeds: a normal speed and a degraded speed. Under normal circumstances it will execute at its normal speed; unexpected conditions may occur during run-time that cause it to execute slowly (but no slower than at its degraded speed). A processor that is self-monitoring immediately knows if its speed falls below its normal speed during run-time; by contrast, a non-monitored processor cannot detect such degradation in performance during run-time. The problem of executing an integrated workload, consisting of some more important components and some less important ones, upon a non-monitored varying-speed processor is considered. 
It is desired that all components execute correctly under normal circumstances, whereas the more important components should execute correctly (although the less important components need not) if the processor runs at any speed no slower than its specified degraded speed.}, journal={2013 8th IEEE International Symposium on Industrial Embedded Systems (SIES)}, publisher={IEEE}, author={Guo, Zhishan and Baruah, Sanjoy}, year={2013}, month={Jun} } @inproceedings{french_guo_baruah_2013, place={France}, title={Scheduling mixed-criticality workloads upon unreliable processors}, booktitle={The 11th Workshop on Models and Algorithms for Planning and Scheduling Problems (MAPSP)}, author={French, Alexandra and Guo, Zhishan and Baruah, Sanjoy}, year={2013}, month={Jun} } @article{liu_guo_wang_2012, title={A one-layer recurrent neural network for constrained pseudoconvex optimization and its application for dynamic portfolio optimization}, volume={26}, ISSN={0893-6080}, url={http://dx.doi.org/10.1016/j.neunet.2011.09.001}, DOI={10.1016/j.neunet.2011.09.001}, abstractNote={In this paper, a one-layer recurrent neural network is proposed for solving pseudoconvex optimization problems subject to linear equality and bound constraints. Compared with the existing neural networks for optimization (e.g., the projection neural networks), the proposed neural network is capable of solving more general pseudoconvex optimization problems with equality and bound constraints. Moreover, it is capable of solving constrained fractional programming problems as a special case. The convergence of the state variables of the proposed neural network to achieve solution optimality is guaranteed as long as the designed parameters in the model are larger than the derived lower bounds. Numerical examples with simulation results illustrate the effectiveness and characteristics of the proposed neural network. In addition, an application for dynamic portfolio optimization is discussed.}, journal={Neural Networks}, publisher={Elsevier BV}, author={Liu, Qingshan and Guo, Zhishan and Wang, Jun}, year={2012}, month={Feb}, pages={99–109} } @inbook{guo_sun_guo_2012, title={Control Allocation of Flying-Wing with Multi-Effectors Based on T-S Fuzzy Model}, url={http://dx.doi.org/10.1115/1.859810.paper38}, DOI={10.1115/1.859810.paper38}, booktitle={International Conference on Mechanical and Electrical Technology, 3rd, (ICMET-London 2011), Volumes 1–3}, publisher={ASME}, author={Guo, Yi and Sun, Fuchun and Guo, Zhishan}, year={2012}, month={Jan}, pages={231–235} } @misc{liu_guo_zhang_jojic_wang_2012, title={Metric Learning from Relative Comparisons by Minimizing Squared Residual}, url={http://dx.doi.org/10.1109/icdm.2012.38}, DOI={10.1109/icdm.2012.38}, abstractNote={Recent studies [1] -- [5] have suggested using constraints in the form of relative distance comparisons to represent domain knowledge: d(a, b) <; d(c, d) where d(·) is the distance function and a, b, c, d are data objects. Such constraints are readily available in many problems where pairwise constraints are not natural to obtain. In this paper we consider the problem of learning a Mahalanobis distance metric from supervision in the form of relative distance comparisons. We propose a simple, yet effective, algorithm that minimizes a convex objective function corresponding to the sum of squared residuals of constraints. We also extend our model and algorithm to promote sparsity in the learned metric matrix. 
Experimental results suggest that our method consistently outperforms existing methods in terms of clustering accuracy. Furthermore, the sparsity extension leads to more stable estimation when the dimension is high and only a small amount of supervision is given.}, journal={2012 IEEE 12th International Conference on Data Mining}, publisher={IEEE}, author={Liu, Eric Yi and Guo, Zhishan and Zhang, Xiang and Jojic, Vladimir and Wang, Wei}, year={2012}, month={Dec} } @article{guo_liu_wang_2011, title={A One-Layer Recurrent Neural Network for Pseudoconvex Optimization Subject to Linear Equality Constraints}, volume={22}, ISSN={1045-9227 1941-0093}, url={http://dx.doi.org/10.1109/tnn.2011.2169682}, DOI={10.1109/tnn.2011.2169682}, abstractNote={In this paper, a one-layer recurrent neural network is presented for solving pseudoconvex optimization problems subject to linear equality constraints. The global convergence of the neural network can be guaranteed even though the objective function is pseudoconvex. The finite-time state convergence to the feasible region defined by the equality constraints is also proved. In addition, global exponential convergence is proved when the objective function is strongly pseudoconvex on the feasible region. Simulation results on illustrative examples and application on chemical process data reconciliation are provided to demonstrate the effectiveness and characteristics of the neural network.}, number={12}, journal={IEEE Transactions on Neural Networks}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Guo, Zhishan and Liu, Qingshan and Wang, Jun}, year={2011}, month={Dec}, pages={1892–1900} } @misc{guo_wang_2011, title={Information retrieval from large data sets via multiple-winners-take-all}, url={http://dx.doi.org/10.1109/iscas.2011.5938154}, DOI={10.1109/iscas.2011.5938154}, abstractNote={Recently, a continuous-time k-winners-take-all (kWTA) network with a single state variable and a hard-limiting activation function and its discrete-time counterpart were developed. These kWTA networks have proven properties of finite-time global convergence and simple architectures. In this paper, the kWTA networks are applied for information retrieval, such as web search. The weights or scores of pages in two real-world data sets are calculated with the PageRank algorithm, based on which experimental results of kWTA networks are provided. The results show that the kWTA networks converge faster as the size of the problem grows, which renders them as a promising approach to large-scale data set information retrieval problems.}, journal={2011 IEEE International Symposium of Circuits and Systems (ISCAS)}, publisher={IEEE}, author={Guo, Zhishan and Wang, Jun}, year={2011}, month={May} } @misc{guo_wang_2010, title={A neurodynamic optimization approach to constrained sparsity maximization based on alternative objective functions}, url={http://dx.doi.org/10.1109/ijcnn.2010.5596553}, DOI={10.1109/ijcnn.2010.5596553}, abstractNote={In recent years, constrained sparsity maximization problems received tremendous attention in the context of compressive sensing. Because the formulated constrained L0 norm minimization problem is NP-hard, constrained L1 norm minimization is usually used to compute approximate sparse solutions. In this paper, we introduce several alternative objective functions, such as weighted L1 norm, Laplacian, hyperbolic secant, and Gaussian functions, as approximations of the L0 norm. 
A one-layer recurrent neural network is applied to compute the optimal solutions to the reformulated constrained minimization problems subject to equality constraints. Simulation results in terms of time responses, phase diagrams, and tabular data are provided to demonstrate the superior performance of the proposed neurodynamic optimization approach to constrained sparsity maximization based on the problem reformulations.}, journal={The 2010 International Joint Conference on Neural Networks (IJCNN)}, publisher={IEEE}, author={Guo, Zhishan and Wang, Jun}, year={2010}, month={Jul} } @inproceedings{wang_guo_2010, place={Berlin, Heidelberg}, series={Lecture Notes in Computer Science}, title={Parametric Sensitivity and Scalability of k-Winners-Take-All Networks with a Single State Variable and Infinity-Gain Activation Functions}, DOI={10.1007/978-3-642-13278-0_11}, booktitle={Advances in Neural Networks – ISNN 2010}, publisher={Springer}, author={Wang, Jun and Guo, Zhishan}, editor={Zhang, L. and Lu, B.L. and Kwok, J.}, year={2010}, month={May}, collection={Lecture Notes in Computer Science} } @misc{guo_lu_xi_sun_2009, place={Berlin, Heidelberg}, series={Lecture Notes in Computer Science}, title={An Effective Dimension Reduction Approach to Chinese Document Classification Using Genetic Algorithm}, ISBN={9783642015090 9783642015106}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-01510-6_55}, DOI={10.1007/978-3-642-01510-6_55}, abstractNote={Different kinds of methods have been proposed for Chinese document classification, while the high dimensionality of the feature vector is one of the most significant limitations of these methods. In this paper, an important difference is pointed out between Chinese document classification and English document classification. Then an efficient approach is proposed to reduce the dimension of the feature vector in Chinese document classification using a genetic algorithm. By selecting only the most “important” features, the proposed method significantly reduces the number of Chinese feature words.
Experiments, together with comparisons to several related studies, show that the proposed method is highly effective for dimension reduction with little loss in classification accuracy.}, journal={Advances in Neural Networks – ISNN 2009}, publisher={Springer Berlin Heidelberg}, author={Guo, Zhishan and Lu, Li and Xi, Shijia and Sun, Fuchun}, year={2009}, pages={480–489}, collection={Lecture Notes in Computer Science} } @phdthesis{guo_2009, title={Fault-Tolerant Control Allocation of Unmanned Flying-Wings Flight Vehicles}, school={Tsinghua University}, author={Guo, Zhishan}, year={2009} } @techreport{liang_liu_wang_li_cao_cao_dai_guo_li_luo_et al._2008, title={THU and ICRC at TRECVID 2008}, institution={National Institute of Standards and Technology}, author={Liang, Yingyu and Liu, Xiaobing and Wang, Zhikun and Li, Jiammin and Cao, Binbin and Cao, Zhichao and Dai, Zhenlong and Guo, Zhishan and Li, Wen and Luo, Leigang and et al.}, year={2008} } @techreport{yuan_guo_lv_wan_zhang_wang_liu_liu_zhu_wang_et al._2007, title={THU and ICRC at TRECVID 2007}, institution={National Institute of Standards and Technology}, author={Yuan, Jinhui and Guo, Zhishan and Lv, Li and Wan, Wei and Zhang, Teng and Wang, Dong and Liu, Xiaobing and Liu, Cailiang and Zhu, Shengqi and Wang, Duanpeng and et al.}, year={2007} } @article{xu_guo_2004, title={Three-failure match}, volume={34}, number={11}, journal={Mathematics in Practice and Theory}, author={Xu, Wenbing and Guo, Zhishan}, year={2004}, pages={14–19} } @phdthesis{guo, title={A Neurodynamic Optimization Approach to Constrained Pseudoconvex Optimization}, school={Chinese University of Hong Kong}, author={Guo, Zhishan} }