@inproceedings{lima_costa_souza_teixeira_fonseca_d'amorim_ribeiro_miranda_2023, title={Do you see any problem? On the Developers Perceptions in Test Smells Detection}, url={https://doi.org/10.1145/3629479.3629485}, DOI={10.1145/3629479.3629485}, abstractNote={Developers are continuously implementing changes to meet demands coming from users. In the context of test-driven development, before any new code is added, a test case should be written to make sure new changes do not introduce bugs. During this process, developers and testers might adopt bad design choices, which may lead to the introduction of the so-called Test Smells in the code. Test Smells are bad solutions for implementing or designing test code. We perform a broader study to investigate the participants’ perceptions about the presence of Test Smells. We analyze whether certain factors related to the participant’ profiles concerning background and experience may influence their perception of Test Smells. Also, we analyze if the heuristics adopted by developers influence their perceptions about the existence of Test Smells. We analyze commits of open source projects to identify the introduction of Test Smells. Then, we conduct an empirical study with 25 participants that evaluate instances of 10 different smell types. For each Test Smell type, we analyze the agreement among participants, and we assess the influence of different factors on the participants’ evaluations. Altogether, more than 1250 evaluations were made. The results indicate that participants present a low agreement on detecting all 10 Test Smells types analyzed in our study. The results also suggest that factors related to background and experience do not have a consistent effect on the agreement among the participants. On the other hand, the results indicate that the agreement is consistently influenced by specific heuristics employed by participants. 
Our findings reveal that the participants detect Test Smells in significantly different ways. As a consequence, these findings introduce some questions concerning the results of previous studies that do not consider the different perceptions of participants on detecting Test Smells.}, author={Lima, Rodrigo and Costa, Keila and Souza, Jairo and Teixeira, Leopoldo and Fonseca, Baldoino and D'Amorim, Marcelo and Ribeiro, Márcio and Miranda, Breno}, year={2023}, month={Nov} } @article{reid_d'amorim_wagner_treude_2023, title={NCQ: Code Reuse Support for Node.js Developers}, volume={49}, ISSN={["1939-3520"]}, url={https://doi.org/10.1109/TSE.2023.3248113}, DOI={10.1109/TSE.2023.3248113}, abstractNote={Code reuse is an important part of software development. The adoption of code reuse practices is especially common among Node.js developers. The Node.js package manager, NPM, indexes over 1 Million packages and developers often seek out packages to solve programming tasks. Due to the vast number of packages, selecting the right package is difficult and time consuming. With the goal of improving productivity of developers that heavily reuse code through third-party packages, we present Node Code Query (NCQ), a Read-Eval-Print-Loop environment that allows developers to 1) search for NPM packages using natural language queries, 2) search for code snippets related to those packages, 3) automatically correct errors in these code snippets, 4) quickly setup new environments for testing those snippets, and 5) transition between search and editing modes. In two user studies with a total of 20 participants, we find that participants begin programming faster and conclude tasks faster with NCQ than with baseline approaches, and that they like, among other features, the search for code snippets and packages. 
Our results suggest that NCQ makes Node.js developers more efficient in reusing code.}, number={5}, journal={IEEE TRANSACTIONS ON SOFTWARE ENGINEERING}, author={Reid, Brittany and d'Amorim, Marcelo and Wagner, Markus and Treude, Christoph}, year={2023}, month={May}, pages={3205–3225} } @article{torres_costa_amaral_pastro_bonifacio_d'amorim_legunsen_bodden_dias_canedo_2023, title={Runtime Verification of Crypto APIs: An Empirical Study}, volume={49}, ISSN={["1939-3520"]}, url={https://doi.org/10.1109/TSE.2023.3301660}, DOI={10.1109/TSE.2023.3301660}, abstractNote={Misuse of cryptographic (crypto) APIs is a noteworthy cause of security vulnerabilities. For this reason, static analyzers were recently proposed for detecting crypto API misuses. They differ in strengths and weaknesses, and they might miss bugs. Motivated by the inherent limitations of static analyzers, this article reports on a study of runtime verification (RV) as a dynamic-analysis-based alternative for crypto API misuse detection. RV monitors program runs against formal specifications; it was shown to be effective and efficient for amplifying the bug-finding ability of software tests. We focus on the popular JCA crypto API and write 22 RV specifications based on expert-validated rules in a static analyzer. We monitor these specifications while running tests in five benchmarks. Lastly, we compare the accuracy of our RV-based approach, RVSec, with those of three state-of-the-art crypto API misuses detectors: CogniCrypt, CryptoGuard, and CryLogger. Results show that RVSec has higher accuracy in four benchmarks and is on par with CryptoGuard in the fifth. Overall, RVSec achieves an average $F_1$ measure of 95%, compared with 83%, 78%, and 86% for CogniCrypt, CryptoGuard, and CryLogger, respectively. We highlight the strengths and limitations of these tools and show that RV is effective for detecting crypto API misuses. 
We also discuss how static and dynamic analysis can complement each other for detecting crypto API misuses.}, number={10}, journal={IEEE TRANSACTIONS ON SOFTWARE ENGINEERING}, author={Torres, Adriano and Costa, Pedro and Amaral, Luis and Pastro, Jonata and Bonifacio, Rodrigo and d'Amorim, Marcelo and Legunsen, Owolabi and Bodden, Eric and Dias Canedo, Edna}, year={2023}, month={Oct}, pages={4510–4525} } @article{molina_d'amorim_aguirre_2023, title={SpecFuzzer: A Tool for Inferring Class Specifications via Grammar-based Fuzzing}, ISSN={["1527-1366"]}, DOI={10.1109/ASE56229.2023.00024}, abstractNote={In object-oriented design, class specifications are primarily used to express properties describing the intended behavior of the class methods and constraints on class' objects. Although the presence of these specifications is important for various software engineering tasks such as test generation, bug finding and automated debugging, developers rarely write them. In this tool demo we present the details of SpecFuzzer, a tool that aims at alleviating the problem of writing class specifications by using a combination of grammar-based fuzzing, dynamic invariant detection and mutation analysis to automatically infer specifications for Java classes. Given a class under analysis, SpecFuzzer uses (i) a generator of candidate assertions derived from a grammar automatically extracted from the class; (ii) a dynamic invariant detector -Daikon- in order to discard the assertions invalidated by a test suite; and (iii) a mutation-based mechanism to cluster and rank assertions, so that similar constraints are grouped together and the stronger assertions are prioritized. 
The tool is available on GitHub at https://github.com/facumolina/specfuzzer, and the demo video can be found on YouTube: https://youtu.be/IfakNCbzOUg.}, journal={2023 38TH IEEE/ACM INTERNATIONAL CONFERENCE ON AUTOMATED SOFTWARE ENGINEERING, ASE}, author={Molina, Facundo and d'Amorim, Marcelo and Aguirre, Nazareno}, year={2023}, pages={2094–2097} } @article{barbosa_ferreira_pinto_d'amorim_miranda_2023, title={Test Flakiness Across Programming Languages}, url={https://doi.org/10.1109/TSE.2022.3208864}, DOI={10.1109/TSE.2022.3208864}, abstractNote={Regression Testing (RT) is a quality-assurance practice commonly adopted in the software industry to check if functionality remains intact after code changes. Test flakiness is a serious problem for RT. A test is said to be flaky when it non-deterministically passes or fails on a fixed environment. Prior work studied test flakiness primarily on Java programs. It is unclear, however, how problematic is test flakiness for software written in other programming languages. This paper reports on a study focusing on three central aspects of test flakiness: concentration, similarity, and cost. Considering concentration, our results show that, for any given programming language that we studied (C, Go, Java, JS, and Python), most issues could be explained by a small fraction of root causes (5/13 root causes cover 78.07% of the issues) and could be fixed by a relatively small fraction of fix strategies (10/23 fix strategies cover 85.20% of the issues). Considering similarity, although there were commonalities in root causes and fixes across languages (e.g., concurrency and async wait are common causes of flakiness in most languages), we also found important differences (e.g., flakiness due to improper release of resources are more common in C), suggesting that there is opportunity to fine tuning analysis tools. 
Considering cost, we found that issues related to flaky tests are resolved either very early once they are posted ($<$10 days), suggesting relevance, or very late ($>$100 days), suggesting irrelevance.}, journal={IEEE Transactions on Software Engineering}, author={Barbosa, Keila and Ferreira, Ronivaldo and Pinto, Gustavo and d'Amorim, Marcelo and Miranda, Breno}, year={2023}, month={Apr} } @inproceedings{reis_abreu_d'amorim_fortunato_2022, title={Leveraging Practitioners’ Feedback to Improve a Security Linter}, url={https://doi.org/10.1145/3551349.3560419}, DOI={10.1145/3551349.3560419}, abstractNote={Infrastructure-as-Code (IaC) is a technology that enables the management and distribution of infrastructure through code instead of manual processes. In 2020, Palo Alto Network’s Unit 42 announced the discovery of over 199K vulnerable IaC templates through their “Cloud Threat” Report. This report highlights the importance of tools to prevent vulnerabilities from reaching production. Unfortunately, we observed through a comprehensive study that a security linter for IaC scripts is not reliable yet—high false positive rates. Our approach to tackling this problem was to leverage community expertise to improve the precision of this tool. More precisely, we interviewed professional developers to collect their feedback on the root causes of imprecision of the state-of-the-art security linter for Puppet. From that feedback, we developed a linter adjusting 7 rules of an existing linter ruleset and adding 3 new rules. We conducted a new study with 131 practitioners, which helped us improve the tool’s precision significantly and achieve a final precision of . 
An important takeaway from this paper is that obtaining professional feedback is fundamental to improving the rules’ precision and extending the rulesets, which is critical for the usefulness and adoption of lightweight tools, such as IaC security linters.}, author={Reis, Sofia and Abreu, Rui and d'Amorim, Marcelo and Fortunato, Daniel}, year={2022}, month={Oct} } @article{cabral_miranda_lima_d'amorim_2022, title={RVprio: A tool for prioritizing runtime verification violations}, url={https://doi.org/10.1002/stvr.1813}, DOI={10.1002/stvr.1813}, abstractNote={Summary}, journal={Software Testing, Verification and Reliability}, author={Cabral, Lucas and Miranda, Breno and Lima, Igor and d'Amorim, Marcelo}, year={2022}, month={Aug} } @article{alcantara_padilha_abreu_d'amorim_2022, title={Syrius: Synthesis of Rules for Intrusion Detectors}, url={https://doi.org/10.1109/TR.2021.3061297}, DOI={10.1109/TR.2021.3061297}, abstractNote={Network intrusion detection systems (NIDS) are popular tools to defend local networks against attacks. These systems monitor the network traffic and flag suspicious behavior. Rule-based NIDS do that by checking the network traffic against a set of rules, which become obsolete as attackers learn new strategies to circumvent existing defenses. This article proposes synthesis of suricata rules (Syrius), a novel approach to synthesize rules for rule-based NIDS. Syrius leverages malicious (positive) and benign (negative) traffic to create rules for new attacks. Syrius is organized as a pipeline of three components to 1) create an overspecified seed rule, 2) derive plausible rules from the seed, and 3) rank plausible rules. We evaluated Syrius against a set of 21 network attacks with various characteristics. 
Syrius was capable of generating the correct rule among the top-3 and top-1 rules of the ranking, respectively, in 80.1% and 47.6% of the cases.}, journal={IEEE Transactions on Reliability}, author={Alcantara, Lucas and Padilha, Guilherme and Abreu, Rui and d'Amorim, Marcelo}, year={2022}, month={Mar} } @inproceedings{stocco_nunes_d'amorim_tonella_2022, title={ThirdEye: Attention Maps for Safe Autonomous Driving Systems}, url={https://doi.org/10.1145/3551349.3556968}, DOI={10.1145/3551349.3556968}, abstractNote={Automated online recognition of unexpected conditions is an indispensable component of autonomous vehicles to ensure safety even in unknown and uncertain situations. In this paper we propose a runtime monitoring technique rooted in the attention maps computed by explainable artificial intelligence techniques. Our approach, implemented in a tool called ThirdEye, turns attention maps into confidence scores that are used to discriminate safe from unsafe driving behaviours. The intuition is that uncommon attention maps are associated with unexpected runtime conditions. In our empirical study, we evaluated the effectiveness of different configurations of ThirdEye at predicting simulation-based injected failures induced by both unknown conditions (adverse weather and lighting) and unsafe/uncertain conditions created with mutation testing. Results show that, overall, ThirdEye can predict 98% misbehaviours, up to three seconds in advance, outperforming a state-of-the-art failure predictor for autonomous vehicles.}, author={Stocco, Andrea and Nunes, Paulo J. 
and D'Amorim, Marcelo and Tonella, Paolo}, year={2022}, month={Oct} } @article{lima_silva_miranda_pinto_d’amorim_2021, title={Exposing bugs in JavaScript engines through test transplantation and differential testing}, url={https://doi.org/10.1007/s11219-020-09537-8}, DOI={10.1007/s11219-020-09537-8}, abstractNote={JavaScript is a popular programming language today with several implementations competing for market dominance. Although a specification document and a conformance test suite exist to guide engine development, bugs occur and have important practical consequences. Implementing correct engines is challenging because the spec is intentionally incomplete and evolves frequently. This paper investigates the use of test transplantation and differential testing for revealing functional bugs in JavaScript engines. The former technique runs the regression test suite of a given engine on another engine. The latter technique fuzzes existing inputs and then compares the output produced by different engines with a differential oracle. We conducted experiments with engines from five major players—Apple, Facebook, Google, Microsoft, and Mozilla—to assess the effectiveness of test transplantation and differential testing. Our results indicate that both techniques revealed several bugs, many of which are confirmed by developers. We reported 35 bugs with test transplantation (23 of these bugs confirmed and 19 fixed) and reported 24 bugs with differential testing (17 of these confirmed and 10 fixed). Results indicate that most of these bugs affected two engines—Apple’s JSC and Microsoft’s ChakraCore (24 and 26 bugs, respectively). 
To summarize, our results show that test transplantation and differential testing are easy to apply and very effective in finding bugs in complex software, such as JavaScript engines.}, journal={Software Quality Journal}, author={Lima, Igor and Silva, Jefferson and Miranda, Breno and Pinto, Gustavo and d’Amorim, Marcelo}, year={2021}, month={Mar} } @article{melo_wiese_dramorim_2021, title={Using Docker to Assist Q\&A Forum Users}, url={https://doi.org/10.1109/TSE.2019.2956919}, DOI={10.1109/TSE.2019.2956919}, abstractNote={Q&A forums are today a valuable tool to assist developers in programming tasks. Unfortunately, contributions to these forums are often unclear and incomplete. Docker is a container solution that enables software developers to encapsulate an operating environment and could help address reproducibility issues. This article reports on a feasibility study to evaluate if Docker can help improve reproducibility in Stack Overflow. We started surveying Stack Overflow users to understand their perceptions on the proposal of using Docker to reproduce Stack Overflow posts. Participants were critical and mentioned two important aspects: cost and need. To validate their criticism, we conducted an exploratory study focused on understanding how costly the task of creating containers for posts is for developers. Overall, results indicate that the cost of creating containers is not high, especially due to the fact that dockerfiles are highly similar and small. Based on these findings we developed a tool, dubbed Frisk, to assist developers in creating containers for those posts. We then conducted a user study to evaluate interest of Stack Overflow developers on the tool. We found that, on average, users spent nearly ten minutes interacting with Frisk and that 45.3% of the 563 Frisk sessions we created for existing posts resulted in a successful access to the corresponding web service by the owners of the post. 
Overall, this article provides early evidence that the use of Docker in Q&A forums should be encouraged for configuration-related posts.}, journal={IEEE Transactions on Software Engineering}, author={Melo, Luis and Wiese, Igor and d'Amorim, Marcelo}, year={2021}, month={Nov} } @article{lima_cândido_d’amorim_2020, title={Practical detection of CMS plugin conflicts in large plugin sets}, url={https://doi.org/10.1016/j.infsof.2019.106212}, DOI={10.1016/j.infsof.2019.106212}, abstractNote={Content Management Systems (CMS), such as WordPress, are a very popular category of software for creating web sites and blogs. These systems typically build on top of plugin architectures. Unfortunately, it is not uncommon that the combined activation of multiple plugins in a CMS web site will produce unexpected behavior. Conflict-detection techniques exist but they do not scale. This paper proposes Pena, a technique to detect conflicts in large sets of plugins as those present in plugin market places. Pena takes on input a configuration, consisting of a potentially large set of plugins, and reports on output the offending plugin combinations. Pena uses an iterative divide-and-conquer search to explore the large space of plugin combinations and a staged filtering process to eliminate false alarms. We evaluated Pena with plugins selected from the WordPress official repository and compared its efficiency and accuracy against the technique that checks conflicts in all pairs of plugins. 
Results show that Pena is 12.4x to 19.6x more efficient than the comparison baseline and can find as many conflicts as it.}, journal={Information and Software Technology}, author={Lima, Igor and Cândido, Jeanderson and d’Amorim, Marcelo}, year={2020}, month={Feb} } @inproceedings{icst_2017_proceedings, title={2017 IEEE International Conference on Software Testing, Verification and Validation, ICST 2017, Tokyo, Japan, March 13-17, 2017}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7922464}, publisher={IEEE Computer Society}, year={2017} } @inproceedings{souto_d’amorim_gheyi_2017, title={Balancing soundness and efficiency for practical testing of configurable systems}, url={https://doi.org/10.1109/ICSE.2017.64}, DOI={10.1109/ICSE.2017.64}, abstractNote={Testing configurable systems is important and challenging due to the enormous space of configurations where errors can hide. Existing approaches to test these systems are often costly or unreliable. This paper proposes S-SPLat, a technique that combines heuristic sampling with symbolic search to obtain both breadth and depth in the exploration of the configuration space. S-SPLat builds on SPLat, our previously developed technique, that explores all reachable configurations from tests. In contrast to its predecessor, S-SPLat sacrifices soundness in favor of efficiency. We evaluated our technique on eight software product lines of various sizes and on a large configurable system – GCC. Considering the results for GCC, S-SPLat was able to reproduce all five bugs that we previously found in a previous study with SPLat but much faster and it was able to find two new bugs in a recent release of GCC. Results suggest that it is preferable to use a combination of simple heuristics to drive the symbolic search as opposed to a single heuristic. 
S-SPLat and our experimental infrastructure are publicly available.}, booktitle={Proceedings of the 39th International Conference on Software Engineering, ICSE 2017, Buenos Aires, Argentina, May 20-28, 2017}, author={Souto, Sabrina and d’Amorim, Marcelo and Gheyi, Rohit}, year={2017}, pages={632–642} } @article{xie_cai_liu_wang_acharya_d’amorim_ma_2017, title={Preface}, volume={32}, url={https://doi.org/10.1007/s11390-017-1782-3}, DOI={10.1007/s11390-017-1782-3}, number={6}, journal={J. Comput. Sci. Technol.}, author={Xie, Tao and Cai, Yuanfang and Liu, Xuanzhe and Wang, Xiaoyin and Acharya, Mithun P. and d’Amorim, Marcelo and Ma, Xiaoxing}, year={2017}, pages={1057–1059} } @inproceedings{perez_abreu_d’amorim_2017, title={Prevalence of Single-Fault Fixes and Its Impact on Fault Localization}, url={https://doi.org/10.1109/ICST.2017.9}, DOI={10.1109/ICST.2017.9}, abstractNote={Several fault predictors were proposed in the context of Spectrum-based Fault Localization approaches to rank software components in order of suspiciousness of being the root-cause of observed failures. Previous work has also shown that some of the fault predictors (near-)optimally rank software components, provided that there is one fault in the system. Despite this, further work is being spent on creating more complex, computationally expensive, model-based techniques that can handle multiple-faulted scenarios accurately. However, our hypothesis is that when software is being developed, bugs arise one-at-a-time and therefore can be considered as single-faulted scenarios. We describe an approach to mine repositories, find bug-fixes, and catalog them according to the number of faults they fix, to assess the prevalence of single-fault fixes. Our empirical study using 279 open-source projects reveals that there is a prevalence of single-fault fixes, with over 82% of all fixes only eliminating one bug from the system, enabling the use of simpler, (near-)optimal, fault predictors. 
Moreover, we draw on the practical implications of our findings to influence and set direction for future research.}, booktitle={2017 IEEE International Conference on Software Testing, Verification and Validation, ICST 2017, Tokyo, Japan, March 13-17, 2017}, author={Perez, Alexandre and Abreu, Rui and d’Amorim, Marcelo}, year={2017}, pages={12–22} } @inproceedings{rosu_penta_nguyen_2017, title={Proceedings of the 32nd IEEE/ACM International Conference on Automated Software Engineering, ASE 2017, Urbana, IL, USA, October 30 - November 03, 2017}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=8106906}, publisher={IEEE Computer Society}, year={2017} } @inproceedings{icse_2017_proceedings, title={Proceedings of the 39th International Conference on Software Engineering, ICSE 2017, Buenos Aires, Argentina, May 20-28, 2017}, url={http://dl.acm.org/citation.cfm?id=3097368}, publisher={IEEE / ACM}, editor={Uchitel, Sebasti{\'a}n and Orso, Alessandro and Robillard, Martin P.}, year={2017} } @inproceedings{test_suite_parallelization_2017, title={Test Suite Parallelization in Open-Source Projects: A Study on Its Usage and Impact}, url={https://doi.org/10.1109/ASE.2017.8115695}, DOI={10.1109/ASE.2017.8115695}, abstractNote={Dealing with high testing costs remains an important problem in Software Engineering. Test suite parallelization is an important approach to address this problem. This paper reports our findings on the usage and impact of test suite parallelization in open-source projects. It provides recommendations to practitioners and tool developers to speed up test execution. Considering a set of 468 popular Java projects we analyzed, we found that 24% of the projects contain costly test suites but parallelization features still seem underutilized in practice — only 19.1% of costly projects use parallelization. The main reported reason for adoption resistance was the concern to deal with concurrency issues. 
Results suggest that, on average, developers prefer high predictability than high performance in running tests.}, booktitle={Proceedings of the 32nd IEEE/ACM International Conference on Automated Software Engineering, ASE 2017, Urbana, IL, USA, October 30 - November 03, 2017}, year={2017}, pages={838–848} } @article{souto_d’amorim_2017, title={Time-Space Efficient Regression Testing for Configurable Systems}, volume={abs/1702.03457}, url={http://arxiv.org/abs/1702.03457}, journal={CoRR}, author={Souto, Sabrina and d’Amorim, Marcelo}, year={2017} } @inproceedings{bloem_arbel_2016, title={Hardware and Software: Verification and Testing - 12th International Haifa Verification Conference, HVC 2016, Haifa, Israel, November 14-17, 2016, Proceedings}, volume={10028}, url={https://doi.org/10.1007/978-3-319-49052-6}, DOI={10.1007/978-3-319-49052-6}, abstractNote={This book constitutes the refereed proceedings of the 12th International Haifa Verification Conference, HVC 2016, held in Haifa, Israel in November 2016. The 13 revised full papers and one tool paper}, year={2016} } @inproceedings{li_d’amorim_orso_2016, title={Iterative User-Driven Fault Localization}, url={https://doi.org/10.1007/978-3-319-49052-6_6}, DOI={10.1007/978-3-319-49052-6_6}, abstractNote={Because debugging is a notoriously expensive activity, numerous automated debugging techniques have been proposed in the literature. In the last ten years, statistical fault localization emerged as the most popular approach to automated debugging. One problem with statistical fault localization techniques is that they tend to make strong assumptions on how developers behave during debugging. These assumptions are often unrealistic, which considerably limits the practical applicability and effectiveness of these techniques. To mitigate this issue, we propose Swift, an iterative user-driven technique designed to support developers during debugging. 
Swift (1) leverages statistical fault localization to identify suspicious methods, (2) generates high-level queries to the developer about the correctness of specific executions of the most suspicious methods, (3) uses the feedback from the developer to improve the localization results, and (4) repeats this cycle until the fault has been localized. Our empirical evaluation of Swift, performed on 26 faults in 5 programs, produced promising results; on average, Swift required less than 10 user queries to identify the fault. Most importantly, these queries were only about input/output relationships for specific executions of the methods, which developers should be able to answer quickly and without having to look at the code. We believe that Swift is a first important step towards defining fault localization techniques that account for the presence of humans in the loop and are practically applicable.}, booktitle={Hardware and Software: Verification and Testing - 12th International Haifa Verification Conference, HVC 2016, Haifa, Israel, November 14-17, 2016, Proceedings}, author={Li, Xiangyu and d’Amorim, Marcelo and Orso, Alessandro}, year={2016}, pages={82–98} } @inproceedings{cohen_grunske_whalen_2015, title={30th IEEE/ACM International Conference on Automated Software Engineering, ASE 2015, Lincoln, NE, USA, November 9-13, 2015}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7371449}, publisher={IEEE Computer Society}, year={2015} } @inproceedings{faster_bug_detection_2015, title={Faster Bug Detection for Software Product Lines with Incomplete Feature Models}, url={http://doi.acm.org/10.1145/2791060.2791093}, DOI={10.1145/2791060.2791093}, abstractNote={A software product line (SPL) is a family of programs that are differentiated by features --- increments in functionality. Systematically testing an SPL is challenging because it requires running each test of a test suite against a combinatorial number of programs. 
Feature models capture dependencies among features and can (1) reduce the space of programs to test and (2) enable accurate categorization of failing tests as failures of programs or the tests themselves, not as failures due to illegal combinations of features. In practice, sadly, feature models are not always available. We introduce SPLif, the first approach for testing SPLs that does not require the a priori availability of feature models. Our insight is to use a profile of passing and failing test runs to quickly identify failures that are indicative of real problems in test or code rather than specious failures due to illegal feature combinations. Experimental results on five SPLs and one large configurable system (GCC) demonstrate the effectiveness of our approach. SPLif enabled the discovery of five new bugs in GCC, three of which have already been fixed.}, booktitle={Proceedings of the 19th International Conference on Software Product Line, SPLC 2015, Nashville, TN, USA, July 20-24, 2015}, year={2015}, pages={151–160} } @inproceedings{borges_filieri_d’amorim_pasareanu_2015, title={Iterative distribution-aware sampling for probabilistic symbolic execution}, url={http://doi.acm.org/10.1145/2786805.2786832}, DOI={10.1145/2786805.2786832}, abstractNote={Probabilistic symbolic execution aims at quantifying the probability of reaching program events of interest assuming that program inputs follow given probabilistic distributions. The technique collects constraints on the inputs that lead to the target events and analyzes them to quantify how likely it is for an input to satisfy the constraints. Current techniques either handle only linear constraints or only support continuous distributions using a “discretization” of the input domain, leading to imprecise and costly results. We propose an iterative distribution-aware sampling approach to support probabilistic symbolic execution for arbitrarily complex mathematical constraints and continuous input distributions. 
We follow a compositional approach, where the symbolic constraints are decomposed into sub-problems whose solution can be solved independently. At each iteration the convergence rate of the computation is increased by automatically refocusing the analysis on estimating the sub-problems that mostly affect the accuracy of the results, as guided by three different ranking strategies. Experiments on publicly available benchmarks show that the proposed technique improves on previous approaches in terms of scalability and accuracy of the results.}, booktitle={Proceedings of the 2015 10th Joint Meeting on Foundations of Software Engineering, ESEC/FSE 2015, Bergamo, Italy, August 30 - September 4, 2015}, author={Borges, Mateus and Filieri, Antonio and d’Amorim, Marcelo and Pasareanu, Corina S.}, year={2015}, pages={866–877} } @inproceedings{schmidt_2015, title={Proceedings of the 19th International Conference on Software Product Line, SPLC 2015, Nashville, TN, USA, July 20-24, 2015}, url={http://dl.acm.org/citation.cfm?id=2791060}, publisher={ACM}, year={2015} } @inproceedings{nitto_harman_heymans_2015, title={Proceedings of the 2015 10th Joint Meeting on Foundations of Software Engineering, ESEC/FSE 2015, Bergamo, Italy, August 30 - September 4, 2015}, url={http://doi.acm.org/10.1145/2786805}, DOI={10.1145/2786805}, publisher={ACM}, year={2015} } @inproceedings{barros_just_millstein_vines_dietl_d’amorim_ernst_2015, title={Static Analysis of Implicit Control Flow: Resolving Java Reflection and Android Intents (T)}, url={https://doi.org/10.1109/ASE.2015.69}, DOI={10.1109/ASE.2015.69}, abstractNote={Implicit or indirect control flow is a transfer of control between procedures using some mechanism other than an explicit procedure call. Implicit control flow is a staple design pattern that adds flexibility to system design. However, it is challenging for a static analysis to compute or verify properties about a system that uses implicit control flow. 
This paper presents static analyses for two types of implicit control flow that frequently appear in Android apps: Java reflection and Android intents. Our analyses help to resolve where control flows and what data is passed. This information improves the precision of downstream analyses, which no longer need to make conservative assumptions about implicit control flow. We have implemented our techniques for Java. We enhanced an existing security analysis with a more precise treatment of reflection and intents. In a case study involving ten real-world Android apps that use both intents and reflection, the precision of the security analysis was increased on average by two orders of magnitude. The precision of two other downstream analyses was also improved.}, booktitle={30th IEEE/ACM International Conference on Automated Software Engineering, ASE 2015, Lincoln, NE, USA, November 9-13, 2015}, author={Barros, Paulo and Just, René and Millstein, Suzanne and Vines, Paul and Dietl, Werner and d’Amorim, Marcelo and Ernst, Michael D.}, year={2015}, pages={669–679} } @inproceedings{rungta_tkachuk_2014, title={2014 International Symposium on Model Checking of Software, SPIN 2014, Proceedings, San Jose, CA, USA, July 21-23, 2014}, url={http://dl.acm.org/citation.cfm?id=2632362}, publisher={ACM}, year={2014} } @inproceedings{liu_araújo_d’amorim_taghdiri_2014, title={A Comparative Study of Incremental Constraint Solving Approaches in Symbolic Execution}, url={https://doi.org/10.1007/978-3-319-13338-6_21}, DOI={10.1007/978-3-319-13338-6_21}, abstractNote={Constraint solving is a major source of cost in Symbolic Execution (SE). This paper presents a study to assess the importance of some sensible options for solving constraints in SE. The main observation is that stack-based approaches to incremental solving is often much faster compared to cache-based approaches, which are more popular. 
Considering all 96 C programs from the KLEE benchmark that we analyzed, the median speedup obtained with a (non-optimized) stack-based approach was of 5x. Results suggest that tools should take advantage of incremental solving support from modern SMT solvers and researchers should look for ways to combine stack- and cache-based approaches to reduce execution cost even further. Instructions to reproduce results are available online: http://asa.iti.kit.edu/130_392.php}, booktitle={Hardware and Software: Verification and Testing - 10th International Haifa Verification Conference, HVC 2014, Haifa, Israel, November 18-20, 2014. Proceedings}, author={Liu, Tianhai and Araújo, Mateus and d’Amorim, Marcelo and Taghdiri, Mana}, year={2014}, pages={284–299} } @inproceedings{o’boyle_pingali_2014, title={ACM SIGPLAN Conference on Programming Language Design and Implementation, PLDI '14, Edinburgh, United Kingdom - June 09 - 11, 2014}, url={http://dl.acm.org/citation.cfm?id=2594291}, publisher={ACM}, year={2014} } @inproceedings{borges_filieri_d’amorim_pasareanu_visser_2014, title={Compositional solution space quantification for probabilistic software analysis}, url={http://doi.acm.org/10.1145/2594291.2594329}, DOI={10.1145/2594291.2594329}, abstractNote={Probabilistic software analysis aims at quantifying how likely a target event is to occur during program execution. Current approaches rely on symbolic execution to identify the conditions to reach the target event and try to quantify the fraction of the input domain satisfying these conditions. Precise quantification is usually limited to linear constraints, while only approximate solutions can be provided in general through statistical approaches. However, statistical approaches may fail to converge to an acceptable accuracy within a reasonable time. We present a compositional statistical approach for the efficient quantification of solution spaces for arbitrarily complex constraints over bounded floating-point domains. 
The approach leverages interval constraint propagation to improve the accuracy of the estimation by focusing the sampling on the regions of the input domain containing the sought solutions. Preliminary experiments show significant improvement on previous approaches both in results accuracy and analysis time.}, booktitle={ACM SIGPLAN Conference on Programming Language Design and Implementation, PLDI '14, Edinburgh, United Kingdom - June 09 - 11, 2014}, author={Borges, Mateus and Filieri, Antonio and d’Amorim, Marcelo and Pasareanu, Corina S. and Visser, Willem}, year={2014}, pages={123–132} } @article{rimsa_d’amorim_pereira_silva bigonha_2014, title={Efficient static checker for tainted variable attacks}, volume={80}, url={https://doi.org/10.1016/j.scico.2013.03.012}, DOI={10.1016/j.scico.2013.03.012}, abstractNote={Tainted flow attacks originate from program inputs maliciously crafted to exploit software vulnerabilities. These attacks are common in server-side scripting languages, such as PHP. In 1997, Ørbæk and Palsberg formalized the problem of detecting these exploits as an instance of type-checking, and gave an O(V3) algorithm to solve it, where V is the number of program variables. A similar algorithm was, ten years later, implemented on the Pixy tool. In this paper we give an O(V2) solution to the same problem. Our solution uses Bodik et al.’s extended Static Single Assignment (e-SSA) program representation. The e-SSA form can be efficiently computed and it enables us to solve the problem via a sparse dataflow analysis. Using the same infrastructure, we compared a state-of-the-art dataflow solution with our technique. Both approaches have detected 36 vulnerabilities in well known PHP programs. Our results show that our approach tends to outperform the dataflow algorithm for larger inputs. We have reported the new bugs that we found, and an implementation of our algorithm is publicly available at https://github.com/rimsa/tainted-phc.git.}, journal={Sci. 
Comput. Program.}, author={Rimsa, Andrei and d’Amorim, Marcelo and Pereira, Fernando Magno Quintão and Silva Bigonha, Roberto}, year={2014}, pages={91–105} } @inproceedings{yahav_2014, title={Hardware and Software: Verification and Testing - 10th International Haifa Verification Conference, HVC 2014, Haifa, Israel, November 18-20, 2014. Proceedings}, volume={8855}, url={https://doi.org/10.1007/978-3-319-13338-6}, DOI={10.1007/978-3-319-13338-6}, abstractNote={This book constitutes the refereed proceedings of the 10th International Haifa Verification Conference, HVC 2014, held in Haifa, Israel, in November 2014. The 17 revised full papers and 4 short papers}, publisher={Springer}, year={2014} } @inproceedings{phan_malacaria_pasareanu_d’amorim_2014, title={Quantifying information leaks using reliability analysis}, url={http://doi.acm.org/10.1145/2632362.2632367}, DOI={10.1145/2632362.2632367}, abstractNote={We report on our work-in-progress into the use of reliability analysis to quantify information leaks. In recent work we have proposed a software reliability analysis technique that uses symbolic execution and model counting to quantify the probability of reaching designated program states, e.g. assert violations, under uncertainty conditions in the environment. The technique has many applications beyond reliability analysis, ranging from program understanding and debugging to analysis of cyber-physical systems. In this paper we report on a novel application of the technique, namely Quantitative Information Flow analysis (QIF). The goal of QIF is to measure information leakage of a program by using information-theoretic metrics such as Shannon entropy or Renyi entropy. We exploit the model counting engine of the reliability analyzer over symbolic program paths, to compute an upper bound of the maximum leakage over all possible distributions of the confidential data. 
We have implemented our approach into a prototype tool, called QILURA, and explore its effectiveness on a number of case studies.}, booktitle={2014 International Symposium on Model Checking of Software, SPIN 2014, Proceedings, San Jose, CA, USA, July 21-23, 2014}, author={Phan, Quoc-Sang and Malacaria, Pasquale and Pasareanu, Corina S. and d’Amorim, Marcelo}, year={2014}, pages={105–108} } @inproceedings{denney_bultan_zeller_2013, title={2013 28th IEEE/ACM International Conference on Automated Software Engineering, ASE 2013, Silicon Valley, CA, USA, November 11-15, 2013}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6684409}, publisher={IEEE}, year={2013} } @inproceedings{campos_abreu_fraser_d’amorim_2013, title={Entropy-based test generation for improved fault localization}, url={https://doi.org/10.1109/ASE.2013.6693085}, DOI={10.1109/ASE.2013.6693085}, abstractNote={Spectrum-based Bayesian reasoning can effectively rank candidate fault locations based on passing/failing test cases, but the diagnostic quality highly depends on the size and diversity of the underlying test suite. As test suites in practice often do not exhibit the necessary properties, we present a technique to extend existing test suites with new test cases that optimize the diagnostic quality. We apply probability theory concepts to guide test case generation using entropy, such that the amount of uncertainty in the diagnostic ranking is minimized. Our ENTBUG prototype extends the search-based test generation tool EVOSUITE to use entropy in the fitness function of its underlying genetic algorithm, and we applied it to seven real faults. 
Empirical results show that our approach reduces the entropy of the diagnostic ranking by 49% on average (compared to using the original test suite), leading to a 91% average reduction of diagnosis candidates needed to inspect to find the true faulty one.}, booktitle={2013 28th IEEE/ACM International Conference on Automated Software Engineering, ASE 2013, Silicon Valley, CA, USA, November 11-15, 2013}, author={Campos, José and Abreu, Rui and Fraser, Gordon and d’Amorim, Marcelo}, year={2013}, pages={257–267} } @inproceedings{meyer_baresi_mezini_2013, title={Joint Meeting of the European Software Engineering Conference and the ACM SIGSOFT Symposium on the Foundations of Software Engineering, ESEC/FSE'13, Saint Petersburg, Russian Federation, August 18-26, 2013}, url={http://dl.acm.org/citation.cfm?id=2491411}, publisher={ACM}, year={2013} } @inproceedings{splat: lightweight dynamic analysis for reducing combinatorics in testing configurable systems_2013, url={http://doi.acm.org/10.1145/2491411.2491459}, DOI={10.1145/2491411.2491459}, abstractNote={Many programs can be configured through dynamic and/or static selection of configuration variables. A software product line (SPL), for example, specifies a family of programs where each program is defined by a unique combination of features. Systematically testing SPL programs is expensive as it can require running each test against a combinatorial number of configurations. Fortunately, a test is often independent of many configuration variables and need not be run against every combination. Configurations that are not required for a test can be pruned from execution. This paper presents SPLat, a new way to dynamically prune irrelevant configurations: the configurations to run for a test can be determined during test execution by monitoring accesses to configuration variables. 
SPLat achieves an optimal reduction in the number of configurations and is lightweight compared to prior work that used static analysis and heavyweight dynamic execution. Experimental results on 10 SPLs written in Java show that SPLat substantially reduces the total test execution time in many cases. Moreover, we demonstrate the scalability of SPLat by applying it to a large industrial code base written in Ruby on Rails.}, booktitle={Joint Meeting of the European Software Engineering Conference and the ACM SIGSOFT Symposium on the Foundations of Software Engineering, ESEC/FSE'13, Saint Petersburg, Russian Federation, August 18-26, 2013}, year={2013}, pages={257–267} } @inproceedings{antoniol_bertolino_labiche_2012, title={Fifth IEEE International Conference on Software Testing, Verification and Validation, ICST 2012, Montreal, QC, Canada, April 17-21, 2012}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6200016}, publisher={IEEE Computer Society}, year={2012} } @inproceedings{borges_d’amorim_anand_bushnell_pasareanu_2012, title={Symbolic Execution with Interval Solving and Meta-heuristic Search}, url={https://doi.org/10.1109/ICST.2012.91}, DOI={10.1109/ICST.2012.91}, abstractNote={A challenging problem in symbolic execution is to solve complex mathematical constraints such as constraints that include floating-point variables and transcendental functions. The inability to solve such constraints limit the application scope of symbolic execution. In this paper, we present a new method to solve such complex math constraints. Our method combines two existing: meta-heuristic search and interval solving. Conceptually, the combination explores the synergy of the individual methods to improve constraint solving. We implemented the new method in the CORAL constraint-solving infrastructure, and evaluated its effectiveness on a set of publicly-available software from the aerospace domain. 
Results indicate that the new method can solve significantly more complex mathematical constraints than previous techniques.}, booktitle={Fifth IEEE International Conference on Software Testing, Verification and Validation, ICST 2012, Montreal, QC, Canada, April 17-21, 2012}, author={Borges, Mateus and d’Amorim, Marcelo and Anand, Saswat and Bushnell, David H. and Pasareanu, Corina S.}, year={2012}, pages={111–120} } @inproceedings{alexander_pasareanu_hosking_2011, title={26th IEEE/ACM International Conference on Automated Software Engineering (ASE 2011), Lawrence, KS, USA, November 6-10, 2011}, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6093623}, publisher={IEEE Computer Society}, year={2011} } @inproceedings{souza_borges_d’amorim_pasareanu_2011, title={CORAL: Solving Complex Constraints for Symbolic PathFinder}, url={https://doi.org/10.1007/978-3-642-20398-5_26}, DOI={10.1007/978-3-642-20398-5_26}, abstractNote={Symbolic execution is a powerful automated technique for generating test cases. Its goal is to achieve high coverage of software. One major obstacle in adopting the technique in practice is its inability to handle complex mathematical constraints. To address the problem, we have integrated CORAL’s heuristic solvers into NASA Ames’ Symbolic PathFinder symbolic execution tool. CORAL’s solvers have been designed to deal with mathematical constraints and their heuristics have been improved based on examples from the aerospace domain. This integration significantly broadens the application of Symbolic PathFinder at NASA and in industry.}, booktitle={NASA Formal Methods - Third International Symposium, NFM 2011, Pasadena, CA, USA, April 18-20, 2011. 
Proceedings}, author={Souza, Matheus and Borges, Mateus and d’Amorim, Marcelo and Pasareanu, Corina S.}, year={2011}, pages={359–374} } @inproceedings{knoop_2011, title={Compiler Construction - 20th International Conference, CC 2011, Held as Part of the Joint European Conferences on Theory and Practice of Software, ETAPS 2011, Saarbrücken, Germany, March 26-April 3, 2011. Proceedings}, volume={6601}, url={https://doi.org/10.1007/978-3-642-19861-8}, DOI={10.1007/978-3-642-19861-8}, publisher={Springer}, year={2011} } @inproceedings{fault-localization using dynamic slicing and change impact analysis_2011, url={https://doi.org/10.1109/ASE.2011.6100114}, DOI={10.1109/ASE.2011.6100114}, abstractNote={Spectrum-based fault-localization tools, such as Tarantula, have been developed to help guide developers towards faulty statements in a system under test. These tools report statements ranked in order of suspiciousness. Unfortunately, the reported statements can often be unrelated to the error. This paper evaluates the impact of several approaches to ignoring such unrelated statements in order to improve the effectiveness of fault-localization tools.}, booktitle={26th IEEE/ACM International Conference on Automated Software Engineering (ASE 2011), Lawrence, KS, USA, November 6-10, 2011}, year={2011}, pages={520–523} } @inproceedings{bobaru_havelund_holzmann_joshi_2011, title={NASA Formal Methods - Third International Symposium, NFM 2011, Pasadena, CA, USA, April 18-20, 2011. Proceedings}, volume={6617}, url={https://doi.org/10.1007/978-3-642-20398-5}, DOI={10.1007/978-3-642-20398-5}, abstractNote={This book constitutes the refereed proceedings of the Third International Symposium on NASA Formal Methods, NFM 2011, held in Pasadena, CA, USA, in April 2011. 
The 26 revised full papers presented tog}, publisher={Springer}, year={2011} } @inproceedings{rimsa_d’amorim_pereira_2011, title={Tainted Flow Analysis on e-SSA-Form Programs}, url={https://doi.org/10.1007/978-3-642-19861-8_8}, DOI={10.1007/978-3-642-19861-8_8}, abstractNote={Tainted flow attacks originate from program inputs maliciously crafted to exploit software vulnerabilities. These attacks are common in server-side scripting languages, such as PHP. In 1997, Ørbæk and Palsberg formalized the problem of detecting these exploits as an instance of type-checking, and gave an O(V 3) algorithm to solve it, where V is the number of program variables. A similar algorithm was, ten years later, implemented on the Pixy tool. In this paper we give an O(V 2) solution to the same problem. Our solution uses Bodik et al.’s extended Static Single Assignment (e-SSA) program representation. The e-SSA form can be efficiently computed and it enables us to solve the problem via a sparse data-flow analysis. Using the same infrastructure, we compared a state-of-the-art data-flow solution with our technique. Both approaches have detected 36 vulnerabilities in well known PHP programs. Our results show that our approach tends to outperform the data-flow algorithm for bigger inputs. We have reported the bugs that we found, and an implementation of our algorithm is now publicly available.}, booktitle={Compiler Construction - 20th International Conference, CC 2011, Held as Part of the Joint European Conferences on Theory and Practice of Software, ETAPS 2011, Saarbrücken, Germany, March 26-April 3, 2011. 
Proceedings}, author={Rimsa, Andrei and d’Amorim, Marcelo and Pereira, Fernando Magno Quintão}, year={2011}, pages={124–143} } @article{assertion checking in j-sim simulation models of network protocols_2010, volume={86}, url={https://doi.org/10.1177/0037549709349326}, DOI={10.1177/0037549709349326}, abstractNote={ Verification and validation (V&V) is a critically important phase in the development life cycle of a simulation model. In the context of network simulation, traditional network simulators perform well in using a simulation model for evaluating the performance of a network protocol but lack the capability to check the “correctness” of the simulation model being used. To address this problem, we have extended J-Sim—an open-source component-based network simulator written entirely in Java—with a state space exploration (SSE) capability that explores the state space created by a network simulation model, up to a configurable maximum depth, in order to find an execution (if any) that violates an assertion, i.e. a property specifying an invariant that must always hold true in all states. In this paper, we elaborate on the SSE framework in J-Sim and present one of our fairly complex case studies, namely verifying the simulation model of the Ad-hoc On-demand Distance Vector (AODV) routing protocol for wireless ad-hoc networks. The SSE framework makes use of protocol-specific properties along two orthogonal dimensions: state similarity and state ranking. State similarity determines whether a state is “similar to” another in order to enable the implementation of stateful search. State ranking determines whether a state is “better than” another in order to enable the implementation of best-first search (BeFS). Specifically, we develop protocol-specific search heuristics to guide SSE towards finding assertion violations in less time. 
We evaluate the efficiency of our SSE framework by comparing its performance with that of a state-of-the-art model checker for Java programs, namely Java PathFinder (JPF). The results of the comparison show that the time needed to find an assertion violation by our SSE framework in J-Sim can be significantly less than that in JPF unless a substantial amount of programming effort is spent in JPF to make its performance close to that of our SSE framework. }, number={11}, journal={Simulation}, year={2010}, pages={651–673} } @article{takaki_cavalcanti_gheyi_iyoda_d’amorim_prudêncio_2010, title={Randomized constraint solvers: a comparative study}, volume={6}, url={https://doi.org/10.1007/s11334-010-0124-1}, DOI={10.1007/s11334-010-0124-1}, number={3}, journal={ISSE}, author={Takaki, Mitsuo and Cavalcanti, Diego and Gheyi, Rohit and Iyoda, Juliano and d’Amorim, Marcelo and Prudêncio, Ricardo Bastos Cavalcante}, year={2010}, pages={243–253} } @inproceedings{takaki_cavalcanti_gheyi_iyoda_d’amorim_prudêncio_2009, title={A Comparative Study of Randomized Constraint Solvers for Random-Symbolic Testing}, booktitle={First NASA Formal Methods Symposium - NFM 2009, Moffett Field, California, USA, April 6-8, 2009.}, author={Takaki, Mitsuo and Cavalcanti, Diego and Gheyi, Rohit and Iyoda, Juliano and d’Amorim, Marcelo and Prudêncio, Ricardo Bastos Cavalcante}, year={2009}, pages={56–65} } @inproceedings{bertolini_peres_d’amorim_mota_2009, title={An Empirical Evaluation of Automated Black Box Testing Techniques for Crashing GUIs}, url={https://doi.org/10.1109/ICST.2009.27}, DOI={10.1109/ICST.2009.27}, abstractNote={This paper reports an empirical evaluation of four black-box testing techniques for crashing programs through their GUI interface: SH, AF, DH, and BxT. The techniques vary in their level of automation and the results they offer. 
The experiments we conducted quantify execution time and the capability of finding a crash for each technique on 8 different cellular phone configurations with historical (real) errors. The results show that AF and BxT offered better precision (i.e., the fraction of runs that end in a crash out of the total number of runs) than SH and DH (AF and BxT found crashes in all 8 configurations), and BxT crashes the application the fastest more often (5 out of 8 cases). The experiments reveal that the selection of the random seed to AF and BxT results in a high variance of execution time (i.e., the time the technique takes to either crash the application or timeout in 40h): the mean (across 8 phone configurations) of the standard deviation of execution times (for 10 runs per each phone configuration) is 7.79h for AF and 5.21h for BxT. Despite this fact, AF and BxT could crash the application consistently: the mean of the precision (fraction of the 10 runs that results in a crash) is 74% for AF and 69% for BxT.}, booktitle={Second International Conference on Software Testing Verification and Validation, ICST 2009, Denver, Colorado, USA, April 1-4, 2009}, author={Bertolini, Cristiano and Peres, Glaucia and d’Amorim, Marcelo and Mota, Alexandre}, year={2009}, pages={21–30} } @inproceedings{denney_giannakopoulou_pasareanu_2009, title={First NASA Formal Methods Symposium - NFM 2009, Moffett Field, California, USA, April 6-8, 2009}, volume={NASA/CP-2009-215407}, year={2009} } @inproceedings{second international conference on software testing verification and validation, icst 2009, denver, colorado, usa, april 1-4, 2009_2009, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=4815321}, publisher={IEEE Computer Society}, year={2009} } @inproceedings{sch\"a_2008, title={30th International Conference on Software Engineering (ICSE 2008), Leipzig, Germany, May 10-18, 2008}, publisher={ACM}, year={2008} } @article{d’amorim_lauterburg_marinov_2008, title={Delta Execution 
for Efficient State-Space Exploration of Object-Oriented Programs}, volume={34}, url={https://doi.org/10.1109/TSE.2008.37}, DOI={10.1109/TSE.2008.37}, abstractNote={We present Delta execution, a technique that speeds up state-space exploration of object-oriented programs. State-space exploration is the essence of model checking and an increasingly popular approach for automating test generation. A key issue in exploration of object-oriented programs is handling the program state, in particular the heap. We exploit the fact that many execution paths in state-space exploration partially overlap. Delta execution simultaneously operates on several states/heaps and shares the common parts across the executions, separately executing only the "deltas" where the executions differ. We implemented Delta execution in two model checkers: JPF, a popular general-purpose model checker for Java programs, and BOX, a specialized model checker that we developed for efficient exploration of sequential Java programs. The results for bounded-exhaustive exploration of ten basic subject programs and one larger case study show that Delta execution reduces exploration time from 1.06x to 126.80x (with median 5.60x) in JPF and from 0.58x to 4.16x (with median 2.23x) in BOX. The results for a non-exhaustive exploration in JPF show that Delta execution reduces exploration time from 0.92x to 6.28x (with median 4.52x).}, number={5}, journal={IEEE Trans. Software Eng.}, author={d’Amorim, Marcelo and Lauterburg, Steven and Marinov, Darko}, year={2008}, pages={597–613} } @inproceedings{gvero_gligoric_lauterburg_d’amorim_marinov_khurshid_2008, title={State extensions for java pathfinder}, url={http://doi.acm.org/10.1145/1368088.1368224}, DOI={10.1145/1368088.1368224}, abstractNote={Java PathFinder (JPF) is an explicit-state model checker for Java programs. JPF implements a backtrackable Java Virtual Machine (JVM) that provides non-deterministic choices and control over thread scheduling. 
JPF is itself implemented in Java and runs on top of a host JVM. JPF represents the JVM state of the program being checked and performs three main operations on this state representation: bytecode execution, state backtracking, and state comparison. This paper summarizes four extensions that we have developed to the JPF state representation and operations. One extension provides a new functionality to JPF, and three extensions improve performance of JPF in various scenarios. Some of our code has already been included in publicly available JPF.}, booktitle={30th International Conference on Software Engineering (ICSE 2008), Leipzig, Germany, May 10-18, 2008}, author={Gvero, Tihomir and Gligoric, Milos and Lauterburg, Steven and d’Amorim, Marcelo and Marinov, Darko and Khurshid, Sarfraz}, year={2008}, pages={863–866} } @inproceedings{d’amorim_lauterburg_marinov_2007, title={Delta execution for efficient state-space exploration of object-oriented programs}, url={http://doi.acm.org/10.1145/1273463.1273472}, DOI={10.1145/1273463.1273472}, abstractNote={We present Delta execution, a technique that speeds up state-space exploration of object-oriented programs. State-space exploration is the essence of model checking and an increasingly popular approach for automating test generation. A key issue in exploration of object-oriented programs is handling the program state, in particular the heap. We exploit the fact that many execution paths in state-space exploration partially overlap. Delta execution simultaneously operates on several states/heaps and shares the common parts across the executions, separately executing only the "deltas" where the executions differ. We implemented Delta execution in two model checkers: JPF, a popular general-purpose model checker for Java programs, and BOX, a specialized model checker that we developed for efficient exploration of sequential Java programs. 
The results for bounded-exhaustive exploration of ten basic subject programs and one larger case study show that Delta execution reduces exploration time from 1.06x to 126.80x (with median 5.60x) in JPF and from 0.58x to 4.16x (with median 2.23x) in BOX. The results for a non-exhaustive exploration in JPF show that Delta execution reduces exploration time from 0.92x to 6.28x (with median 4.52x).}, booktitle={Proceedings of the ACM/SIGSOFT International Symposium on Software Testing and Analysis, ISSTA 2007, London, UK, July 9-12, 2007}, author={d’Amorim, Marcelo and Lauterburg, Steven and Marinov, Darko}, year={2007}, pages={50–60} } @inproceedings{rosenblum_elbaum_2007, title={Proceedings of the ACM/SIGSOFT International Symposium on Software Testing and Analysis, ISSTA 2007, London, UK, July 9-12, 2007}, url={http://doi.acm.org/10.1145/1273463}, DOI={10.1145/1273463}, publisher={ACM}, year={2007} } @inproceedings{21st ieee/acm international conference on automated software engineering (ase 2006), 18-22 september 2006, tokyo, japan_2006, url={http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=4019543}, publisher={IEEE Computer Society}, year={2006} } @inproceedings{d’amorim_pacheco_xie_marinov_ernst_2006, title={An Empirical Comparison of Automated Generation and Classification Techniques for Object-Oriented Unit Testing}, url={https://doi.org/10.1109/ASE.2006.13}, DOI={10.1109/ASE.2006.13}, abstractNote={Testing involves two major activities: generating test inputs and determining whether they reveal faults. Automated test generation techniques include random generation and symbolic execution. 
Automated test classification techniques include ones based on uncaught exceptions and violations of operational models inferred from manually provided tests. Previous research on unit testing for object-oriented programs developed three pairs of these techniques: model-based random testing, exception-based random testing, and exception-based symbolic testing. We develop a novel pair, model-based symbolic testing. We also empirically compare all four pairs of these generation and classification techniques. The results show that the pairs are complementary (i.e., reveal faults differently), with their respective strengths and weaknesses}, booktitle={21st IEEE/ACM International Conference on Automated Software Engineering (ASE 2006), 18-22 September 2006, Tokyo, Japan}, author={d’Amorim, Marcelo and Pacheco, Carlos and Xie, Tao and Marinov, Darko and Ernst, Michael D.}, year={2006}, pages={59–68} } @article{checking and correcting behaviors of java programs at runtime with java-mop_2006, volume={144}, url={https://doi.org/10.1016/j.entcs.2006.02.002}, DOI={10.1016/j.entcs.2006.02.002}, abstractNote={Monitoring-oriented programming (MOP) is a software development and analysis technique in which monitoring plays a fundamental role. MOP users can add their favorite or domain-specific requirements specification formalisms into the framework by means of logic plug-ins, which essentially comprise monitor synthesis algorithms for properties expressed as formulae. The properties are specified together with declarations stating where and how to automatically integrate the corresponding monitor into the system, as well as what to do if the property is violated or validated. In this paper we present Java-MOP, an MOP environment for developing robust Java applications. 
Based upon a carefully designed specification schema and upon several logic plug-ins, Java-MOP allows users to specify and monitor properties which can refer not only to the current program state, but also to the entire execution trace of a program, including past and future behaviors.}, number={4}, journal={Electr. Notes Theor. Comput. Sci.}, year={2006}, pages={3–20} } @article{d’amorim_rosu_2005, title={An Equational Specification for the Scheme Language}, volume={11}, url={https://doi.org/10.3217/jucs-011-07-1327}, DOI={10.3217/jucs-011-07-1327}, abstractNote={This work describes the formal semantics of Scheme 3 as an equational theory in the Maude rewriting system. The semantics is based on continuations and is highly modular. We briefly investigate the relationship between our methodology for defining programming languages and other semantic formalisms. We conclude by show- ing some performance results of the interpreter obtained for free from the executable specification.}, number={7}, journal={J. UCS}, author={d’Amorim, Marcelo and Rosu, Grigore}, year={2005}, pages={1327–1348} } @inproceedings{etessami_rajamani_2005, title={Computer Aided Verification, 17th International Conference, CAV 2005, Edinburgh, Scotland, UK, July 6-10, 2005, Proceedings}, volume={3576}, url={https://doi.org/10.1007/b138445}, DOI={10.1007/b138445}, abstractNote={This volume contains the proceedings of the International Conference on Computer Aided Veri?cation (CAV), held in Edinburgh, Scotland, July 6–10, 2005. CAV 2005 was the seventeenth in a series of conf}, publisher={Springer}, year={2005} } @inproceedings{d’amorim_rosu_2005, title={Efficient Monitoring of omega-Languages}, url={https://doi.org/10.1007/11513988_36}, DOI={10.1007/11513988_36}, abstractNote={We present a technique for generating efficient monitors for ω-regular-languages. 
We show how Büchi automata can be reduced in size and transformed into special, statistically optimal nondeterministic finite state machines, called binary transition tree finite state machines (BTT-FSMs), which recognize precisely the minimal bad prefixes of the original ω-regular-language. The presented technique is implemented as part of a larger monitoring framework and is available for download.}, booktitle={Computer Aided Verification, 17th International Conference, CAV 2005, Edinburgh, Scotland, UK, July 6-10, 2005, Proceedings}, author={d'Amorim, Marcelo and Rosu, Grigore}, year={2005}, pages={364--378} } @article{damorim_havelund_2005, title={Event-based runtime verification of {Java} programs}, volume={30}, url={http://doi.acm.org/10.1145/1082983.1083249}, DOI={10.1145/1082983.1083249}, abstractNote={We introduce the temporal logic HAWK and its supporting tool for runtime verification of Java programs. A monitor for a HAWK formula checks if a finite trace of program events satisfies the formula. HAWK is a programming-oriented extension of the rule-based EAGLE logic that has been shown capable of defining and implementing a range of finite trace monitoring logics, including future and past time temporal logic, metric (real-time) temporal logics, interval logics, forms of quantified temporal logics, extended regular expressions, state machines, and others. Monitoring is achieved on a state-by-state basis avoiding any need to store the input trace. HAWK extends EAGLE with constructs for capturing parameterized program events such as method calls and method returns. Parameters can be executing thread, the objects that methods are called upon, arguments to methods, and return values. HAWK allows one to refer to these in formulae. 
The tool synthesizes monitors from formulae and automates program instrumentation.}, number={4}, journal={ACM SIGSOFT Software Engineering Notes}, author={d'Amorim, Marcelo and Havelund, Klaus}, year={2005}, pages={1--7} } @inproceedings{formal_monitoring_framework_2004, title={A Formal Monitoring-Based Framework for Software Development and Analysis}, url={https://doi.org/10.1007/978-3-540-30482-1_31}, DOI={10.1007/978-3-540-30482-1_31}, abstractNote={A formal framework for software development and analysis is presented, which aims at reducing the gap between formal specification and implementation by integrating the two and allowing them together to form a system. It is called monitoring-oriented programming (MOP), since runtime monitoring is supported and encouraged as a fundamental principle. Monitors are automatically synthesized from formal specifications and integrated at appropriate places in the program, according to user-configurable attributes. Violations and/or validations of specifications can trigger user-defined code at any points in the program, in particular recovery code, outputting/sending messages, or raising exceptions. The major novelty of MOP is its generality w.r.t. logical formalisms: it allows users to insert their favorite or domain-specific specification formalisms via logic plug-in modules. A WWW repository has been created, allowing MOP users to download and upload logic plug-ins. 
An experimental prototype tool, called Java-MOP, is also discussed, which currently supports most but not all of the desired MOP features.}, booktitle={Formal Methods and Software Engineering, 6th International Conference on Formal Engineering Methods, ICFEM 2004, Seattle, WA, USA, November 8-12, 2004, Proceedings}, year={2004}, pages={357--372} } @proceedings{davies_schulte_barnett_2004, title={Formal Methods and Software Engineering, 6th International Conference on Formal Engineering Methods, ICFEM 2004, Seattle, WA, USA, November 8-12, 2004, Proceedings}, editor={Davies, Jim and Schulte, Wolfram and Barnett, Mike}, volume={3308}, url={https://doi.org/10.1007/b102837}, DOI={10.1007/b102837}, abstractNote={Formal engineering methods are changing the way that software systems are developed. With language and tool support, they are being used for automatic code generation, and for the automatic abstraction and checki}, publisher={Springer}, year={2004} } @inproceedings{jtrader_design_2001, title={A Design for {JTrader}, an {Internet} Trading Service}, url={https://doi.org/10.1007/3-540-48206-7_14}, DOI={10.1007/3-540-48206-7_14}, abstractNote={Over the last few years, Service Discovery Protocols (SDP) like SLP, Jini, UPnP and Bluetooth demand special interest on scientific community and industry since they promise a new engagement opportunity between services and clients. Despite the fact that these protocols were first concerned with connecting devices, they also play an important role on information services trading which is very relevant considering Internet's large scale. 
This paper presents the design of JTrader, an Internet Trading Service based on Jini Technology whose main purpose is to address root problems around service trading on the Internet.}, booktitle={Innovative Internet Computing Systems, International Workshop IICS 2001, Ilmenau, Germany, June 21-22, 2001, Proceedings}, year={2001}, pages={159--166} } @proceedings{boehme_unger_2001, title={Innovative Internet Computing Systems, International Workshop IICS 2001, Ilmenau, Germany, June 21-22, 2001, Proceedings}, editor={B{\"o}hme, Thomas and Unger, Herwig}, volume={2060}, url={https://doi.org/10.1007/3-540-48206-7}, DOI={10.1007/3-540-48206-7}, abstractNote={Nowadays, the Internet is the most commonly used medium for the exchange of data in different forms. Presently, over 60 million machines have access to the Internet and to its resources. However, the I}, publisher={Springer}, year={2001} }