@article{dubey_cammarota_varna_kumar_aysu_2023, title={Hardware-Software Co-design for Side-Channel Protected Neural Network Inference}, ISSN={2835-5709}, DOI={10.1109/HOST55118.2023.10133716}, abstractNote={Physical side-channel attacks are a major threat to stealing confidential data from devices. There has been a recent surge in such attacks on edge machine learning (ML) hardware to extract the model parameters. Consequently, there has also been work, although limited, on building corresponding defenses against such attacks. Current solutions take either fully software- or fully hardware-centric approaches, which are limited in performance and flexibility, respectively. In this paper, we propose the first hardware-software co-design solution for building side-channel-protected ML hardware. Our solution targets edge devices and addresses both performance and flexibility needs. To that end, we develop a secure RISC-V-based coprocessor design that can execute a neural network implemented in C/C++. Our coprocessor uses masking to execute various neural network operations like weighted summations, activation functions, and output layer computation in a side-channel-secure fashion. We extend the original RV32I instruction set with custom instructions to control the masking gadgets inside the secure coprocessor. We further use the custom instructions to implement easy-to-use APIs that are exposed to the end-user as a shared library. Finally, we demonstrate the empirical side-channel security of the design up to 1M traces.}, journal={2023 IEEE INTERNATIONAL SYMPOSIUM ON HARDWARE ORIENTED SECURITY AND TRUST (HOST)}, author={Dubey, Anuj and Cammarota, Rosario and Varna, Avinash and Kumar, Raghavan and Aysu, Aydin}, year={2023}, pages={155–166}}

@article{dubey_cammarota_suresh_aysu_2022, title={Guarding Machine Learning Hardware Against Physical Side-channel Attacks}, volume={18}, ISSN={1550-4840}, DOI={10.1145/3465377}, abstractNote={Machine learning (ML) models can be trade secrets due to their development cost. Hence, they need protection against malicious forms of reverse engineering (e.g., in IP piracy). With a growing shift of ML to the edge devices, in part for performance and in part for privacy benefits, the models have become susceptible to the so-called physical side-channel attacks.}, number={3}, journal={ACM JOURNAL ON EMERGING TECHNOLOGIES IN COMPUTING SYSTEMS}, author={Dubey, Anuj and Cammarota, Rosario and Suresh, Vikram and Aysu, Aydin}, year={2022}, month={Jul}}

@article{dubey_karabulut_awad_aysu_2022, title={High-Fidelity Model Extraction Attacks via Remote Power Monitors}, DOI={10.1109/AICAS54282.2022.9869973}, abstractNote={This paper shows the first side-channel attack on neural network (NN) IPs through a remote power monitor. We demonstrate that a remote monitor implemented with time-to-digital converters can be exploited to steal the weights from a hardware implementation of NN inference. Such an attack alleviates the need to have physical access to the target device and thus expands the attack vector to multi-tenant cloud FPGA platforms. Our results quantify the effectiveness of the attack on an FPGA implementation of NN inference and compare it to an attack with physical access. We demonstrate that it is indeed possible to extract the weights using DPA with 25,000 traces if the SNR is sufficient. The paper, therefore, motivates secure virtualization: to protect the confidentiality of high-valued NN model IPs in multi-tenant execution environments, platform developers need to employ strong countermeasures against physical side-channel attacks.}, journal={2022 IEEE INTERNATIONAL CONFERENCE ON ARTIFICIAL INTELLIGENCE CIRCUITS AND SYSTEMS (AICAS 2022): INTELLIGENT TECHNOLOGY IN THE POST-PANDEMIC ERA}, author={Dubey, Anuj and Karabulut, Emre and Awad, Amro and Aysu, Aydin}, year={2022}, pages={328–331}}

@article{dubey_cammarota_aysu_2020, title={BoMaNet: Boolean Masking of an Entire Neural Network}, ISSN={1933-7760}, DOI={10.1145/3400302.3415649}, abstractNote={Recent work on stealing machine learning (ML) models from inference engines with physical side-channel attacks warrants an urgent need for effective side-channel defenses. This work proposes the first fully-masked neural network inference engine design. Masking uses secure multi-party computation to split the secrets into random shares and to decorrelate the statistical relation of secret-dependent computations to side-channels (e.g., the power draw). In this work, we construct secure hardware primitives to mask all the linear and non-linear operations in a neural network. We address the challenge of masking integer addition by converting each addition into a sequence of XOR and AND gates and by augmenting Trichina's secure Boolean masking style. We improve the traditional Trichina AND gates by adding pipelining elements for better glitch-resistance, and we architect the whole design to sustain a throughput of 1 masked addition per cycle. We implement the proposed secure inference engine on a Xilinx Spartan-6 (XC6SLX75) FPGA. The results show that masking incurs an overhead of 3.5% in latency and 5.9× in area. Finally, we demonstrate the security of the masked design with 2M traces.}, journal={2020 IEEE/ACM INTERNATIONAL CONFERENCE ON COMPUTER-AIDED DESIGN (ICCAD)}, author={Dubey, Anuj and Cammarota, Rosario and Aysu, Aydin}, year={2020}}

@article{dubey_gupta_cho_2020, title={Characterization of Limit State for Seismic Fragility Assessment of T-Joints in Piping System}, volume={142}, ISSN={1528-8978}, DOI={10.1115/1.4047041}, number={5}, journal={JOURNAL OF PRESSURE VESSEL TECHNOLOGY-TRANSACTIONS OF THE ASME}, author={Dubey, Ankit R. and Gupta, Abhinav and Cho, Sung Gook}, year={2020}, month={Oct}}