% Normalized from a raw reference-manager export:
% - conference papers retyped @article -> @inproceedings (venue moved journal -> booktitle)
% - ALL-CAPS venue names converted to title case; acronyms brace-protected
% - Unicode en dashes in page ranges replaced with "--"
% - JSON residue stripped from ISSN; titles brace-protected against sentence-casing styles
% Citation keys are unchanged, so existing \cite commands are unaffected.
% The nonstandard abstractNote field (ignored by standard styles) is retained as data.

@inproceedings{arefeen_li_uddin_das_2023,
  author       = {Arefeen, Md Adnan and Li, Zhouyu and Uddin, Md Yusuf Sarwar and Das, Anupam},
  title        = {{MetaMorphosis}: Task-oriented Privacy Cognizant Feature Generation for Multi-task Learning},
  booktitle    = {Proceedings of the 8th {ACM/IEEE} Conference on Internet of Things Design and Implementation ({IoTDI} 2023)},
  year         = {2023},
  pages        = {288--300},
  doi          = {10.1145/3576842.3582372},
  abstractNote = {With the growth of computer vision applications, deep learning, and edge computing contribute to ensuring practical collaborative intelligence (CI) by distributing the workload among edge devices and the cloud. However, running separate single-task models on edge devices is inefficient regarding the required computational resource and time. In this context, multi-task learning allows leveraging a single deep learning model for performing multiple tasks, such as semantic segmentation and depth estimation on incoming video frames. This single processing pipeline generates common deep features that are shared among multi-task modules. However, in a collaborative intelligence scenario, generating common deep features has two major issues. First, the deep features may inadvertently contain input information exposed to the downstream modules (violating input privacy). Second, the generated universal features expose a piece of collective information than what is intended for a certain task, in which features for one task can be utilized to perform another task (violating task privacy). This paper proposes a novel deep learning-based privacy-cognizant feature generation process called “MetaMorphosis” that limits inference capability to specific tasks at hand. To achieve this, we propose a channel squeeze-excitation based feature metamorphosis module, Cross-SEC, to achieve distinct attention of all tasks and a de-correlation loss function with differential-privacy to train a deep learning model that produces distinct privacy-aware features as an output for the respective tasks. With extensive experimentation on four datasets consisting of diverse images related to scene understanding and facial attributes, we show that MetaMorphosis outperforms recent adversarial learning and universal feature generation methods by guaranteeing privacy requirements in an efficient way for image and video analytics.},
}

@inproceedings{zhang_sabir_das_2023,
  author       = {Zhang, Shaohu and Sabir, Aafaq and Das, Anupam},
  title        = {Speaker Orientation-Aware Privacy Control to Thwart Misactivation of Voice Assistants},
  booktitle    = {2023 53rd Annual {IEEE/IFIP} International Conference on Dependable Systems and Networks ({DSN})},
  year         = {2023},
  pages        = {597--610},
  doi          = {10.1109/DSN58367.2023.00061},
  issn         = {1530-0889},
  abstractNote = {Smart home voice assistants (VAs) such as Amazon Echo and Google Home have become popular because of the convenience they provide through voice commands. VAs continuously listen to detect the wake command and send the subsequent audio data to the manufacturer-owned cloud service for processing to identify actionable commands. However, research has shown that VAs are prone to replay attack and accidental activations when the wake words are spoken in the background (either by a human or played through a mechanical speaker). Existing privacy controls are not effective in preventing such misactivations. This raises privacy and security concerns for the users as their conversations can be recorded and relayed to the cloud without their knowledge. Recent studies have shown that the visual gaze plays an important role when interacting with conservation agents such as VAs, and users tend to turn their heads or body toward the VA when invoking it. In this paper, we propose a device-free, non-obtrusive acoustic sensing system called HeadTalk to thwart the misactivation of VAs. The proposed system leverages the user's head direction information and verifies that a human generates the sound to minimize accidental activations. Our extensive evaluation shows that HeadTalk can accurately infer a speaker's head orientation with an average accuracy of 96.14% and distinguish human voice from a mechanical speaker with an equal error rate of 2.58%. We also conduct a user interaction study to assess how users perceive our proposed approach compared to existing privacy controls. Our results suggest that HeadTalk can not only enhance the security and privacy controls for VAs but do so in a usable way without requiring any additional hardware.},
}

@inproceedings{zhang_li_das_2023,
  author       = {Zhang, Shaohu and Li, Zhouyu and Das, Anupam},
  title        = {{VoicePM}: A Robust Privacy Measurement on Voice Anonymity},
  booktitle    = {Proceedings of the 16th {ACM} Conference on Security and Privacy in Wireless and Mobile Networks ({WiSec} 2023)},
  year         = {2023},
  pages        = {215--226},
  doi          = {10.1145/3558482.3590175},
  abstractNote = {Voice-based human-computer interaction has become pervasive in laptops, smartphones, home voice assistants, and Internet of Thing (IoT) devices. However, voice interaction comes with security and privacy risks. Numerous privacy-preserving measures have been proposed for hiding the speaker's identity while maintaining speech intelligibility. However, existing works do not consider the overall tradeoff between speech utility, speaker verification, and inference of voice attributes, including emotional state, age, accent, and gender. In this study, we first develop a tradeoff metric to capture voice biometrics as well as different voice attributes. We then propose VoicePM, a robust Voice Privacy Measurement framework, to study the feasibility of applying different state-of-the-art voice anonymization solutions to achieve the optimum tradeoff between privacy and utility. We conduct extensive experiments using anonymization approaches covering signal processing, voice synthesis, voice conversion, and adversarial techniques on three speech datasets that include both English and Chinese speakers to showcase the effectiveness and feasibility of VoicePM.},
}

@inproceedings{sabir_lafontaine_das_2022,
  author       = {Sabir, Aafaq and Lafontaine, Evan and Das, Anupam},
  title        = {Hey {Alexa}, Who Am I Talking to?: Analyzing Users' Perception and Awareness Regarding Third-party {Alexa} Skills},
  booktitle    = {Proceedings of the 2022 {CHI} Conference on Human Factors in Computing Systems ({CHI} '22)},
  year         = {2022},
  doi          = {10.1145/3491102.3517510},
  abstractNote = {The Amazon Alexa voice assistant provides convenience through automation and control of smart home appliances using voice commands. Amazon allows third-party applications known as skills to run on top of Alexa to further extend Alexa’s capability. However, as multiple skills can share the same invocation phrase and request access to sensitive user data, growing security and privacy concerns surround third-party skills. In this paper, we study the availability and effectiveness of existing security indicators or a lack thereof to help users properly comprehend the risk of interacting with different types of skills. We conduct an interactive user study (inviting active users of Amazon Alexa) where participants listen to and interact with real-world skills using the official Alexa app. We find that most participants fail to identify the skill developer correctly (i.e., they assume Amazon also develops the third-party skills) and cannot correctly determine which skills will be automatically activated through the voice interface. We also propose and evaluate a few voice-based skill type indicators, showcasing how users would benefit from such voice-based indicators.},
}

@inproceedings{lentzsch_shah_andow_degeling_das_enck_2021,
  author       = {Lentzsch, Christopher and Shah, Sheel Jayesh and Andow, Benjamin and Degeling, Martin and Das, Anupam and Enck, William},
  title        = {Hey {Alexa}, is this Skill Safe?: Taking a Closer Look at the {Alexa} Skill Ecosystem},
  booktitle    = {28th Annual Network and Distributed System Security Symposium ({NDSS} 2021)},
  year         = {2021},
  doi          = {10.14722/ndss.2021.23111},
  abstractNote = {Amazon’s voice-based assistant, Alexa, enables users to directly interact with various web services through natural language dialogues. It provides developers with the option to create third-party applications (known as Skills ) to run on top of Alexa. While such applications ease users’ interaction with smart devices and bolster a number of additional services, they also raise security and privacy concerns due to the personal setting they operate in. This paper aims to perform a systematic analysis of the Alexa skill ecosystem. We perform the first large-scale analysis of Alexa skills, obtained from seven different skill stores totaling to 90,194 unique skills. Our analysis reveals several limitations that exist in the current skill vetting process. We show that not only can a malicious user publish a skill under any arbitrary developer/company name, but she can also make backend code changes after approval to coax users into revealing unwanted information. We, next, formalize the different skill-squatting techniques and evaluate the efficacy of such techniques. We find that while certain approaches are more favorable than others, there is no substantial abuse of skill squatting in the real world. Lastly, we study the prevalence of privacy policies across different categories of skill, and more importantly the policy content of skills that use the Alexa permission model to access sensitive user data. We find that around 23.3% of such skills do not fully disclose the data types associated with the permissions requested. We conclude by providing some suggestions for strengthening the overall ecosystem, and thereby enhance transparency for end-users.},
}

@inproceedings{lafontaine_sabir_das_2021,
  author       = {Lafontaine, Evan and Sabir, Aafaq and Das, Anupam},
  title        = {Understanding People's Attitude and Concerns towards Adopting {IoT} Devices},
  booktitle    = {Extended Abstracts of the 2021 {CHI} Conference on Human Factors in Computing Systems ({CHI} '21)},
  year         = {2021},
  doi          = {10.1145/3411763.3451633},
  abstractNote = {The proliferation of the Internet of Things (IoT) has started transforming our lifestyle through automation of home appliances. However, there are users who are hesitant to adopt IoT devices due to various privacy and security concerns. In this paper, we elicit peoples’ attitude and concerns towards adopting IoT devices. We conduct an online survey and collect responses from 232 participants from three different geographic regions (United States, Europe, and India); the participants consist of both adopters and non-adopters of IoT devices. Through data analysis, we determine that there are both similarities and differences in perceptions and concerns between adopters and non-adopters. For example, even though IoT and non-IoT users share similar security and privacy concerns, IoT users are more comfortable using IoT devices in private settings compared to non-IoT users. Furthermore, when comparing users’ attitude and concerns across different geographic regions, we found similarities between participants from the US and Europe, yet participants from India showcased contrasting behavior. For instance, we found that participants from India were more trusting in their government to properly protect consumer data and were more comfortable using IoT devices in a variety of public settings, compared to participants from the US and Europe. Based on our findings, we provide recommendations to reduce users’ concerns in adopting IoT devices, and thereby enhance user trust towards adopting IoT devices.},
}

@inproceedings{das_acar_borisov_pradeep_2018,
  author       = {Das, Anupam and Acar, Gunes and Borisov, Nikita and Pradeep, Amogh},
  title        = {The {Web's} Sixth Sense: A Study of Scripts Accessing Smartphone Sensors},
  booktitle    = {Proceedings of the 2018 {ACM} {SIGSAC} Conference on Computer and Communications Security ({CCS} '18)},
  year         = {2018},
  pages        = {1515--1532},
  doi          = {10.1145/3243734.3243860},
  abstractNote = {We present the first large-scale measurement of smartphone sensor API usage and stateless tracking on the mobile web. We extend the OpenWPM web privacy measurement tool to develop OpenWPM-Mobile, adding the ability to emulate plausible sensor values for different smartphone sensors such as motion, orientation, proximity and light. Using OpenWPM-Mobile we find that one or more sensor APIs are accessed on 3695 of the top 100K websites by scripts originating from 603 distinct domains. We also detect fingerprinting attempts on mobile platforms, using techniques previously applied in the desktop setting. We find significant overlap between fingerprinting scripts and scripts accessing sensor data. For example, 63% of the scripts that access motion sensors also engage in browser fingerprinting. To better understand the real-world uses of sensor APIs, we cluster JavaScript programs that access device sensors and then perform automated code comparison and manual analysis. We find a significant disparity between the actual and intended use cases of device sensor as drafted by W3C. While some scripts access sensor data to enhance user experience, such as orientation detection and gesture recognition, tracking and analytics are the most common use cases among the scripts we analyzed. We automated the detection of sensor data exfiltration and observed that the raw readings are frequently sent to remote servers for further analysis. Finally, we evaluate available countermeasures against the misuse of sensor APIs. We find that popular tracking protection lists such as EasyList and Disconnect commonly fail to block most tracking scripts that misuse sensors. Studying nine popular mobile browsers we find that even privacy-focused browsers, such as Brave and Firefox Focus, fail to implement mitigations suggested by W3C, which includes limiting sensor access from insecure contexts and cross-origin iframes. We have reported these issues to the browser vendors.},
}