@article{vasiliadis_karampelas_shevtsov_papadopoulos_ioannidis_kapravelos_2024, title={WRIT: Web Request Integrity and Attestation Against Malicious Browser Extensions}, volume={21}, ISSN={1941-0018}, DOI={10.1109/TDSC.2023.3322516}, abstractNote={The powerful capabilities of modern browsers have pushed the web application logic to the user side, in order to minimize latency, increase the scalability of the service, and improve users' quality of experience. What is more, browsers provide a rich toolchest that enables browser extensions to offer additional functionality, but at the same time allows them to become a powerful vehicle for malicious actors. Such actors may spy on, phish, or defraud users, thus making the user's browser untrusted from the web servers' perspective. In this paper, we present WRIT, a practical framework that enables websites to protect critical functionality from abuse in the presence of malicious extensions. In WRIT, the integrity of outgoing web requests is attested and verified to ensure they were triggered by a user's action and not automatically generated by a malicious browser extension. WRIT is immediately applicable by leveraging existing HTML5 and other native browser features and does not require any modification of the browser. Performance results of our prototype show that it adds a negligible 7.29 ms latency to sensitive user-triggered actions (e.g., posting a message).}, number={4}, journal={IEEE TRANSACTIONS ON DEPENDABLE AND SECURE COMPUTING}, author={Vasiliadis, Giorgos and Karampelas, Apostolos and Shevtsov, Alexandros and Papadopoulos, Panagiotis and Ioannidis, Sotiris and Kapravelos, Alexandros}, year={2024}, pages={3082–3095} } @article{jueckstock_snyder_sarker_kapravelos_livshits_2022, title={Measuring the Privacy vs. Compatibility Trade-off in Preventing Third-Party Stateful Tracking}, DOI={10.1145/3485447.3512231}, abstractNote={Despite active privacy research on sophisticated web tracking techniques (e.g., fingerprinting, cache collusion, bounce tracking, CNAME cloaking), most tracking on the web is basic “stateful” tracking enabled by classical browser storage policies sharing per-site storage across all HTTP contexts. Alternative, privacy-preserving storage policies, especially for third-party contexts, have been proposed and even deployed, but these can break websites that presume traditional, non-partitioned storage. Such breakage discourages privacy-preserving experimentation, cementing the dismal status quo. Our work measures the privacy vs. compatibility trade-offs of representative third-party storage policies to enable the design of browsers that are both compatible and privacy-respecting. Our contributions include web-scale measurements of page behaviors under multiple third-party storage policies inspired by production browsers. We define metrics for measuring aggregate effects on web privacy and compatibility, including a novel system for quantitatively estimating aggregate website breakage under different policies. We find that partitioning third-party storage by first party, and scoping storage lifetimes to the site session, achieves the best privacy and compatibility trade-off. 
We provide complete measurement datasets and storage policy implementations.}, journal={PROCEEDINGS OF THE ACM WEB CONFERENCE 2022 (WWW'22)}, author={Jueckstock, Jordan and Snyder, Peter and Sarker, Shaown and Kapravelos, Alexandros and Livshits, Benjamin}, year={2022}, pages={710–720} } @article{subramani_jueckstock_kapravelos_perdisci_2022, title={SoK: Workerounds - Categorizing Service Worker Attacks and Mitigations}, DOI={10.1109/EuroSP53844.2022.00041}, abstractNote={Service Workers (SWs) are a powerful feature at the core of Progressive Web Apps, namely web applications that can continue to function when the user's device is offline and that have access to device sensors and capabilities previously accessible only by native applications. During the past few years, researchers have found a number of ways in which SWs may be abused to achieve different malicious purposes. For instance, SWs may be abused to build a web-based botnet, launch DDoS attacks, or perform cryptomining; they may be hijacked to create persistent cross-site scripting (XSS) attacks; they may be leveraged in the context of side-channel attacks to compromise users' privacy; or they may be abused for phishing or social engineering attacks using web push notification-based malvertising. In this paper, we reproduce and analyze known attack vectors related to SWs and explore new abuse paths that have not previously been considered. We systematize the attacks into different categories, and then analyze whether and how these attacks have been mitigated by different browser vendors, and estimate when they were published and mitigated. Then, we discuss a number of open SW security problems that are currently unmitigated, and propose SW behavior monitoring approaches and new browser policies that we believe should be implemented by browsers to further improve SW security. Furthermore, we implement a proof-of-concept version of several policies in the Chromium code base, and measure the behavior of SWs used by highly popular web applications with respect to these new policies. Our measurements show that it should be feasible to implement and enforce stricter SW security policies without a significant impact on most legitimate production SWs.}, journal={2022 IEEE 7TH EUROPEAN SYMPOSIUM ON SECURITY AND PRIVACY (EUROS&P 2022)}, author={Subramani, Karthika and Jueckstock, Jordan and Kapravelos, Alexandros and Perdisci, Roberto}, year={2022}, pages={555–571} } @article{ajmani_koishybayev_kapravelos_2022, title={yoU aRe a Liar://A Unified Framework for Cross-Testing URL Parsers}, ISSN={2770-8411}, DOI={10.1109/SPW54247.2022.9833883}, abstractNote={A variety of attacks, including phishing, remote-code execution, server-side request forgery, and hostname redirection, are delivered to users over the web. Most web exploits begin with an innocent-looking URL. Malformed or misinterpreted URLs can lead to remote code execution attacks as well. The IETF and WHATWG standards organizations define the components of a URL and act as an implementation guide for URL parsers. They state which characters are allowed in each portion of the URL and loosely suggest what to do when an undefined character is present in the URL. The existence of two standards is the first concern, and the addition of server-side request forgery to the latest version of the OWASP Top 10 suggests that neither of these standards is being followed accurately and consistently. 
Moreover, neither of these specifications describes an exact implementation standard, causing inconsistencies in the way the various parsers interpret the same URL. For example, malicious users can find ways to craft URLs that look like they point to one resource but actually direct the user to a different one. This problem is worsened when one application uses two separate parsers for validation and resource fetching. In this paper, we design a framework that unifies the testing suites of 8 URL parsers from popular web-related projects and highlights the inconsistencies between them. We examine in depth the URL parser implementations of the most popular libraries, browsers, and command-line tools, and discover many open areas for exploitation. Our findings include identifying categories of inconsistencies, developing proof-of-concept exploits, and highlighting the need for a comprehensive implementation standard to be developed and enforced as soon as possible.}, journal={2022 43RD IEEE SYMPOSIUM ON SECURITY AND PRIVACY WORKSHOPS (SPW 2022)}, author={Ajmani, Dashmeet Kaur and Koishybayev, Igibek and Kapravelos, Alexandros}, year={2022}, pages={51–58} } @article{akhavani_jueckstock_su_kapravelos_kirda_lu_2021, title={Browserprint: An Analysis of the Impact of Browser Features on Fingerprintability and Web Privacy}, volume={13118}, ISBN={978-3-030-91355-7}, ISSN={1611-3349}, DOI={10.1007/978-3-030-91356-4_9}, abstractNote={Web browsers are indispensable applications in our daily lives. Millions of people use web browsers for a wide range of activities such as social media, online shopping, email, or surfing the web. The evolution of increasingly complicated web applications relies on browsers constantly adding and removing features. At the same time, some of these web services use browser fingerprinting to track and profile their users with clear disregard for their web privacy. In this paper, we perform an empirical analysis of the evolution of browser features and aim to evaluate browser fingerprintability. By analyzing 33 Google Chrome, 31 Mozilla Firefox, and 33 Opera major browser versions released from 2016 to 2020, we discover that all of these browsers have unique feature sets that make them different from each other. By comparing these features to the fingerprinting APIs presented in the literature, we conclude that all of these browser versions are uniquely fingerprintable. Our results show an alarming trend: browsers are becoming more fingerprintable over time, as newer versions contain more fingerprintable APIs than older ones.}, journal={INFORMATION SECURITY (ISC 2021)}, author={Akhavani, Seyed Ali and Jueckstock, Jordan and Su, Junhua and Kapravelos, Alexandros and Kirda, Engin and Lu, Long}, year={2021}, pages={161–176} } @article{chen_ilia_polychronakis_kapravelos_2021, title={Cookie Swap Party: Abusing First-Party Cookies for Web Tracking}, DOI={10.1145/3442381.3449837}, abstractNote={As a step towards protecting user privacy, most web browsers perform some form of third-party HTTP cookie blocking or periodic deletion by default, while users typically have the option to select even stricter blocking policies. As a result, web trackers have shifted their efforts to work around these restrictions and retain or even improve the extent of their tracking capability. 
In this paper, we shed light on the increasingly common practice of relying on first-party cookies that are set by third-party JavaScript code to implement user tracking and other potentially unwanted capabilities. Although, unlike third-party cookies, first-party cookies are not sent automatically by the browser to third parties on HTTP requests, this tracking is possible because any included third-party code runs in the context of the parent page, and thus can freely set or read existing first-party cookies—which it can then leak to the same or other third parties. Previous works that survey user privacy on the web in relation to cookies, third-party or otherwise, have not fully explored this mechanism. To address this gap, we propose a dynamic data flow tracking system based on Chromium to track the leakage of first-party cookies to third parties, and use it to conduct a large-scale study of the Alexa top 10K websites. In total, we found that 97.72% of the websites have first-party cookies that are set by third-party JavaScript, and that on 57.66% of these websites there is at least one such cookie that contains a unique user identifier that is diffused to multiple third parties. Our results highlight the privacy-intrusive capabilities of first-party cookies, even when a privacy-savvy user has taken mitigative measures such as blocking third-party cookies or employing popular crowd-sourced filter lists such as EasyList/EasyPrivacy and the Disconnect list.}, journal={PROCEEDINGS OF THE WORLD WIDE WEB CONFERENCE 2021 (WWW 2021)}, author={Chen, Quan and Ilia, Panagiotis and Polychronakis, Michalis and Kapravelos, Alexandros}, year={2021}, pages={2117–2129} } @article{zhang_oest_cho_sun_johnson_wardman_sarker_kapravelos_bao_wang_et al._2021, title={CrawlPhish: Large-Scale Analysis of Client-Side Cloaking Techniques in Phishing}, ISSN={1558-4046}, DOI={10.1109/MSEC.2021.3129992}, abstractNote={Phishing websites with advanced evasion techniques are a critical threat to Internet users because they delay detection by current antiphishing systems. We present CrawlPhish, a framework for automatically detecting and categorizing the client-side (e.g., JavaScript) evasion techniques used by phishing websites.}, journal={IEEE SECURITY & PRIVACY}, author={Zhang, Penghui and Oest, Adam and Cho, Haehyun and Sun, Zhibo and Johnson, R. C. and Wardman, Brad and Sarker, Shaown and Kapravelos, Alexandros and Bao, Tiffany and Wang, Ruoyu and et al.}, year={2021}, month={Dec} } @article{dinh_cho_martin_oest_zeng_kapravelos_ahn_bao_wang_doupe_et al._2021, title={Favocado: Fuzzing the Binding Code of JavaScript Engines Using Semantically Correct Test Cases}, DOI={10.14722/ndss.2021.24224}, abstractNote={JavaScript runtime systems include some specialized programming interfaces, called binding layers. Binding layers translate data representations between JavaScript and unsafe low-level languages, such as C and C++, by converting data between different types. Due to the wide adoption of JavaScript (and JavaScript engines) in the entire computing ecosystem, discovering bugs in JavaScript binding layers is critical. Nonetheless, existing JavaScript fuzzers cannot adequately fuzz binding layers due to two major challenges: generating syntactically and semantically correct test cases, and reducing the size of the input space for fuzzing. In this paper, we propose Favocado, a novel fuzzing approach that focuses on fuzzing the binding layers of JavaScript runtime systems. 
Favocado can generate syntactically and semantically correct JavaScript test cases through the use of extracted semantic information and careful maintenance of execution states. This way, test cases that Favocado generates do not raise unintended runtime exceptions, which substantially increases the chance of triggering binding code. Additionally, exploiting a unique feature (relative isolation) of binding layers, Favocado significantly reduces the size of the fuzzing input space by splitting DOM objects into equivalence classes and focusing fuzzing within each equivalence class. We demonstrate the effectiveness of Favocado in our experiments and show that Favocado outperforms a state-of-the-art DOM fuzzer. Finally, during the evaluation, we find 61 previously unknown bugs in four JavaScript runtime systems (Adobe Acrobat Reader, Foxit PDF Reader, Chromium, and WebKit). Of these, 33 are security vulnerabilities.}, journal={28TH ANNUAL NETWORK AND DISTRIBUTED SYSTEM SECURITY SYMPOSIUM (NDSS 2021)}, author={Dinh, Sung Ta and Cho, Haehyun and Martin, Kyle and Oest, Adam and Zeng, Kyle and Kapravelos, Alexandros and Ahn, Gail-Joon and Bao, Tiffany and Wang, Ruoyu and Doupe, Adam and et al.}, year={2021} } @article{jueckstock_sarker_snyder_beggs_papadopoulos_varvello_livshits_kapravelos_2021, title={Towards Realistic and Reproducible Web Crawl Measurements}, DOI={10.1145/3442381.3450050}, abstractNote={Accurate web measurement is critical for understanding and improving security and privacy online. Such measurements implicitly assume that automated crawls generalize to the typical web user experience. But anecdotal evidence suggests that, for various reasons, the web behaves differently when seen via well-known measurement endpoints or measurement automation frameworks. Our work improves the state of web privacy and security by investigating how key measurements differ when using naive crawling tool defaults vs. careful attempts to match “real” users across the Tranco top 25k web domains. We find that web privacy and security measurements are significantly affected by vantage point and browser configuration. We conclude that unless researchers ensure their web measurement tools match real-world user experience, the research community is likely missing important signals systematically. For example, we find that browser configuration alone causes shifts in 19% of known ad and tracking domains encountered and alters the loading frequency of up to 10% of distinct JavaScript code units executed. Network vantage point has similar, though less dramatic, effects on the same web metrics. To ensure reproducibility, we carefully document our methodology and publish both our code and collected data.}, journal={PROCEEDINGS OF THE WORLD WIDE WEB CONFERENCE 2021 (WWW 2021)}, author={Jueckstock, Jordan and Sarker, Shaown and Snyder, Peter and Beggs, Aidan and Papadopoulos, Panagiotis and Varvello, Matteo and Livshits, Benjamin and Kapravelos, Alexandros}, year={2021}, pages={80–91} } @article{starov_laperdrix_kapravelos_nikiforakis_2019, title={Unnecessarily Identifiable: Quantifying the fingerprintability of browser extensions due to bloat}, DOI={10.1145/3308558.3313458}, abstractNote={In this paper, we investigate to what extent the page modifications that make browser extensions fingerprintable are necessary for their operation. We characterize page modifications that are completely unnecessary for the extension's functionality as extension bloat. 
By analyzing 58,034 extensions from the Google Chrome store, we discovered that 5.7% of them were unnecessarily identifiable because of extension bloat. To protect users against unnecessary extension fingerprinting due to bloat, we describe the design and implementation of an in-browser mechanism that provides coarse-grained access control for extensions on all websites. The proposed mechanism and its built-in policies not only protect users from fingerprinting, but also offer additional protection against malicious extensions exfiltrating user data from sensitive websites.}, journal={WEB CONFERENCE 2019: PROCEEDINGS OF THE WORLD WIDE WEB CONFERENCE (WWW 2019)}, author={Starov, Oleksii and Laperdrix, Pierre and Kapravelos, Alexandros and Nikiforakis, Nick}, year={2019}, pages={3244–3250} } @article{jueckstock_kapravelos_2019, title={VisibleV8: In-browser Monitoring of JavaScript in the Wild}, DOI={10.1145/3355369.3355599}, abstractNote={Modern web security and privacy research depends on accurate measurement of an often evasive and hostile web. No longer just a network of static, hyperlinked documents, the modern web is alive with JavaScript (JS) loaded from third parties of unknown trustworthiness. Dynamic analysis of potentially hostile JS currently presents a cruel dilemma: use heavyweight in-browser solutions that prove impossible to maintain, or use lightweight inline JS solutions that are detectable by evasive JS and cannot match the scope of coverage provided by in-browser systems. We present VisibleV8, a dynamic analysis framework hosted inside V8, the JS engine of the Chrome browser, that logs native function or property accesses during any JS execution. At less than 600 lines (only 67 of which modify V8's existing behavior), our patches are lightweight and have been maintained from Chrome versions 63 through 72 without difficulty. VV8 consistently outperforms equivalent inline instrumentation, and it intercepts accesses impossible to instrument inline. This comprehensive coverage allows us to isolate and identify 46 JavaScript namespace artifacts used by JS code in the wild to detect automated browsing platforms, and to discover that 29% of the Alexa top 50k sites load content that actively probes these artifacts.}, journal={IMC'19: PROCEEDINGS OF THE 2019 ACM INTERNET MEASUREMENT CONFERENCE}, author={Jueckstock, Jordan and Kapravelos, Alexandros}, year={2019}, pages={393–405} } @article{beggs_kapravelos_2019, title={Wild Extensions: Discovering and Analyzing Unlisted Chrome Extensions}, volume={11543}, ISBN={978-3-030-22037-2}, ISSN={1611-3349}, DOI={10.1007/978-3-030-22038-9_1}, abstractNote={With browsers being a ubiquitous, if not required, method to access the web, they represent a unique and universal threat vector. Browsers can run third-party extensions virtually invisibly in the background after a quick install. In this paper, we explore the abuse of browser extensions that achieve installations via suspicious methods. We scan the web for links to extension installations by crawling the Alexa top 10,000 websites with a recursive sub-page depth of 4, and we leverage other tools to search for artifacts in the source code of webpages. We discover pages that have links to both listed and unlisted extensions, many times pointing to multiple different extensions that share the same name. 
Using this data, we were able to find 1,097 unlisted browser extensions, ranging from internal directory lookup tools to hidden Google Docs extensions, which pose a serious threat to their 127 million users.}, journal={DETECTION OF INTRUSIONS AND MALWARE, AND VULNERABILITY ASSESSMENT (DIMVA 2019)}, author={Beggs, Aidan and Kapravelos, Alexandros}, year={2019}, pages={3–22} } @article{chen_kapravelos_2018, title={Mystique: Uncovering Information Leakage from Browser Extensions}, DOI={10.1145/3243734.3243823}, abstractNote={Browser extensions are small JavaScript, CSS and HTML programs that run inside the browser with special privileges. These programs, often written by third parties, operate on the pages that the browser is visiting, giving the user a programmatic way to configure the browser. The privacy implications that arise from allowing privileged third-party code to execute inside the users' browser are not well understood. In this paper, we develop a taint analysis framework for browser extensions and use it to perform a large-scale study of extensions with regard to their privacy practices. We first present a hybrid approach to traditional taint analysis: by leveraging the fact that extension source code is available to the runtime JavaScript engine, we implement as well as enhance traditional taint analysis using information gathered from static data-flow and control-flow analysis of the JavaScript source code. Based on this, we further modify the Chromium browser to support taint tracking for extensions. We analyzed 178,893 extensions crawled from the Chrome Web Store between September 2016 and March 2018, as well as a separate set of all available extensions (2,790 in total) for the Opera browser at the time of analysis. From these, our analysis flagged 3,868 (2.13%) extensions as potentially leaking privacy-sensitive information. The top 10 most popular Chrome extensions that we confirmed to be leaking privacy-sensitive information have more than 60 million users combined. We ran the analysis on a local Kubernetes cluster and were able to finish within a month, demonstrating the feasibility of our approach for large-scale analysis of browser extensions. At the same time, our results emphasize the threat browser extensions pose to user privacy, and the need for countermeasures to safeguard against misbehaving extensions that abuse their privileges.}, journal={PROCEEDINGS OF THE 2018 ACM SIGSAC CONFERENCE ON COMPUTER AND COMMUNICATIONS SECURITY (CCS'18)}, author={Chen, Quan and Kapravelos, Alexandros}, year={2018}, pages={1687–1700} } @article{invernizzi_thomas_kapravelos_comanescu_picod_bursztein_2016, title={Cloak of Visibility: Detecting When Machines Browse A Different Web}, ISSN={1081-6011}, DOI={10.1109/sp.2016.50}, abstractNote={The contentious battle between web services and miscreants involved in blackhat search engine optimization and malicious advertisements has driven the underground to develop increasingly sophisticated techniques that hide the true nature of malicious sites. These web cloaking techniques hinder the effectiveness of security crawlers and potentially expose Internet users to harmful content. In this work, we study the spectrum of blackhat cloaking techniques that target browser, network, or contextual cues to detect organic visitors. As a starting point, we investigate the capabilities of ten prominent cloaking services marketed within the underground. 
This includes a first look at multiple IP blacklists that contain over 50 million addresses tied to the top five search engines and tens of anti-virus and security crawlers. We use our findings to develop an anti-cloaking system that detects split-view content returned to two or more distinct browsing profiles, with an accuracy of 95.5% and a false positive rate of 0.9% when tested on a labeled dataset of 94,946 URLs. We apply our system to an unlabeled set of 135,577 search and advertisement URLs keyed on high-risk terms (e.g., luxury products, weight loss supplements) to characterize the prevalence of threats in the wild and expose variations in cloaking techniques across traffic sources. Our study provides the first broad perspective of cloaking as it affects Google Search and Google Ads, and underscores the minimum capabilities necessary for security crawlers to bypass the state of the art in mobile, rDNS, and IP cloaking.}, journal={2016 IEEE SYMPOSIUM ON SECURITY AND PRIVACY (SP)}, author={Invernizzi, Luca and Thomas, Kurt and Kapravelos, Alexandros and Comanescu, Oxana and Picod, Jean-Michel and Bursztein, Elie}, year={2016}, pages={743–758} } @inbook{de maio_kapravelos_shoshitaishvili_kruegel_vigna_2014, title={PExy: The Other Side of Exploit Kits}, ISBN={9783319085081 9783319085098}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-319-08509-8_8}, DOI={10.1007/978-3-319-08509-8_8}, abstractNote={The drive-by download scene has changed dramatically in the last few years. What was once a disorganized, ad-hoc generation of malicious pages by individuals has evolved into sophisticated, easily extensible frameworks that incorporate multiple exploits at the same time and are highly configurable. We are now dealing with exploit kits. In this paper we focus on the server-side part of drive-by downloads by automatically analyzing the source code of multiple exploit kits. Through static analysis, we discover what checks exploit-kit authors perform on the server to decide which exploit is served to which client, and we automatically generate the configurations to extract all possible exploits from every exploit kit. We also examine the source code of exploit kits and look for interesting coding practices, their detection-mitigation techniques, the similarities between them, and the rise of Exploit-as-a-Service through a highly customizable design. Our results indicate that, even with a perfect drive-by download analyzer, it is not trivial to trigger the expected behavior from an exploit kit so that it is classified appropriately as malicious.}, booktitle={Detection of Intrusions and Malware, and Vulnerability Assessment}, publisher={Springer International Publishing}, author={De Maio, Giancarlo and Kapravelos, Alexandros and Shoshitaishvili, Yan and Kruegel, Christopher and Vigna, Giovanni}, year={2014}, pages={132–151} } @inbook{kapravelos_cova_kruegel_vigna_2011, title={Escape from Monkey Island: Evading High-Interaction Honeyclients}, ISBN={9783642224232 9783642224249}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-22424-9_8}, DOI={10.1007/978-3-642-22424-9_8}, abstractNote={High-interaction honeyclients are the tools of choice to detect malicious web pages that launch drive-by-download attacks. 
Unfortunately, the approach used by these tools, which, in most cases, is to identify the side effects of a successful attack rather than the attack itself, leaves open the possibility for malicious pages to employ evasion techniques that allow them to execute an attack without detection or to behave in a benign way while being analyzed. In this paper, we examine the security model that high-interaction honeyclients use and evaluate their weaknesses in practice. We introduce and discuss a number of possible attacks, and we test them against several popular, well-known high-interaction honeyclients. Our attacks evade the detection of these tools, while successfully attacking regular visitors of malicious web pages.}, booktitle={Detection of Intrusions and Malware, and Vulnerability Assessment}, publisher={Springer Berlin Heidelberg}, author={Kapravelos, Alexandros and Cova, Marco and Kruegel, Christopher and Vigna, Giovanni}, year={2011}, pages={124–143} } @inbook{kapravelos_polakis_athanasopoulos_ioannidis_markatos_2010, title={D(e|i)aling with VoIP: Robust Prevention of DIAL Attacks}, ISBN={9783642154966 9783642154973}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-15497-3_40}, DOI={10.1007/978-3-642-15497-3_40}, abstractNote={We carry out attacks using Internet services that aim to keep telephone devices busy, hindering legitimate callers from gaining access. We use the term DIAL (Digitally Initiated Abuse of teLephones), or, in the simple form, Dial attack, to refer to this behavior. We develop a simulation environment for modeling a Dial attack in order to quantify its full potential and measure the effect of attack parameters. Based on the simulation's results, we perform the attack in the real world. By using a Voice over IP (VoIP) provider as the attack medium, we manage to keep an existing landline device busy for 85% of the attack duration by issuing only 3 calls per second, thus rendering the device unusable. The attack has zero financial cost, requires negligible computational resources, and cannot be traced back to the attacker. Furthermore, the nature of the attack is such that anyone can launch a Dial attack towards any telephone device. Our investigation of existing countermeasures in VoIP providers shows that they follow an all-or-nothing approach, but most importantly, that their anomaly detection systems react slowly to our attacks: we managed to issue tens of thousands of calls before getting spotted. To cope with this, we propose a flexible anomaly detection system for VoIP calls, which promotes fairness for callers. 
With our system in place, it is hard for an adversary to keep the device busy for more than 5% of the duration of the attack.}, booktitle={Computer Security – ESORICS 2010}, publisher={Springer Berlin Heidelberg}, author={Kapravelos, Alexandros and Polakis, Iasonas and Athanasopoulos, Elias and Ioannidis, Sotiris and Markatos, Evangelos P.}, year={2010}, pages={663–678} } @inbook{armenatzoglou_marketakis_kriara_apostolopoulos_papavasiliou_kampas_kapravelos_kartsonakis_linardakis_nikitaki_et al._2009, title={FleXConf: A Flexible Conference Assistant Using Context-Aware Notification Services}, ISBN={9783642052897 9783642052903}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-05290-3_20}, DOI={10.1007/978-3-642-05290-3_20}, abstractNote={Integrating context-aware notification services into ubiquitous computing systems aims at the provision of the right information to the right users, at the right time, in the right place, and on the right device, and constitutes a significant step towards the realization of the Ambient Intelligence vision. In this paper, we present FleXConf, a semantics-based system that supports location-based, personalized notification services for the assistance of conference attendees. Its special features include an ontology-based representation model, rule-based context-aware reasoning, and a novel positioning system for indoor environments.}, booktitle={Lecture Notes in Computer Science}, publisher={Springer Berlin Heidelberg}, author={Armenatzoglou, Nikos and Marketakis, Yannis and Kriara, Lito and Apostolopoulos, Elias and Papavasiliou, Vicky and Kampas, Dimitris and Kapravelos, Alexandros and Kartsonakis, Eythimis and Linardakis, Giorgos and Nikitaki, Sofia and et al.}, year={2009}, pages={108–117} } @inbook{friedl_ubik_kapravelos_polychronakis_markatos_2009, title={Realistic Passive Packet Loss Measurement for High-Speed Networks}, ISBN={9783642016448 9783642016455}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-01645-5_1}, DOI={10.1007/978-3-642-01645-5_1}, abstractNote={Realistic and accurate packet loss measurement of production traffic has been challenging, since the frequently used active monitoring approaches using probe packets cannot capture the packet loss experienced by the traffic of individual user applications. In this paper, we present a new approach for the accurate measurement of the packet loss rate faced by actual production traffic, based on passive network monitoring. In contrast to previous work, our method is able to pinpoint the packet loss rate experienced by the individual traffic flows of concurrently running applications. Experimental results suggest that our approach measures packet loss with 100% accuracy for network speeds as high as 12 Gbit/s, while traditional ICMP-based approaches were usually much less accurate. We also report experiences from a real-world deployment of our method in several 10 Gbit/s links of European research networks, where it has been successfully operational for several months.}, booktitle={Traffic Monitoring and Analysis}, publisher={Springer Berlin Heidelberg}, author={Friedl, Aleš and Ubik, Sven and Kapravelos, Alexandros and Polychronakis, Michalis and Markatos, Evangelos P.}, year={2009}, pages={1–7} }