@article{tsiatis_davidian_2022, title={Group sequential methods for interim monitoring of randomized clinical trials with time-lagged outcome}, volume={9}, ISSN={["1097-0258"]}, DOI={10.1002/sim.9580}, abstractNote={The primary analysis in two‐arm clinical trials usually involves inference on a scalar treatment effect parameter; for example, depending on the outcome, the difference of treatment‐specific means, risk difference, risk ratio, or odds ratio. Most clinical trials are monitored for the possibility of early stopping. Because ordinarily the outcome on any given subject can be ascertained only after some time lag, at the time of an interim analysis, among the subjects already enrolled, the outcome is known for only a subset and is effectively censored for those who have not been enrolled sufficiently long for it to be observed. Typically, the interim analysis is based only on the data from subjects for whom the outcome has been ascertained. A goal of an interim analysis is to stop the trial as soon as the evidence is strong enough to do so, suggesting that the analysis ideally should make the most efficient use of all available data, thus including information on censoring as well as other baseline and time‐dependent covariates in a principled way. A general group sequential framework is proposed for clinical trials with a time‐lagged outcome. Treatment effect estimators that take account of censoring and incorporate covariate information at an interim analysis are derived using semiparametric theory and are demonstrated to lead to stronger evidence for early stopping than standard approaches. The associated test statistics are shown to have the independent increments structure, so that standard software can be used to obtain stopping boundaries.}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A. 
and Davidian, Marie}, year={2022}, month={Sep} } @misc{joffe_babiker_ellenberg_fix_griffin_hunsberger_kalil_levine_makgoba_moore_etal_2021, title={Data and Safety Monitoring of COVID-19 Vaccine Clinical Trials}, volume={224}, ISSN={["1537-6613"]}, DOI={10.1093/infdis/jiab263}, abstractNote={Abstract}, number={12}, journal={JOURNAL OF INFECTIOUS DISEASES}, author={Joffe, Steven and Babiker, Abdel and Ellenberg, Susan S. and Fix, Alan and Griffin, Marie R. and Hunsberger, Sally and Kalil, Jorge and Levine, Myron M. and Makgoba, Malegapuru W. and Moore, Renee H. and et al.}, year={2021}, month={Dec}, pages={1995–2000} } @article{tsiatis_davidian_2021, title={Estimating vaccine efficacy over time after a randomized study is unblinded}, volume={8}, ISSN={["1541-0420"]}, DOI={10.1111/biom.13509}, abstractNote={Abstract}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. and Davidian, Marie}, year={2021}, month={Aug} } @article{tsiatis_davidian_holloway_2021, title={Estimation of the odds ratio in a proportional odds model with censored time-lagged outcome in a randomized clinical trial}, volume={12}, ISSN={["1541-0420"]}, url={https://doi.org/10.1111/biom.13603}, DOI={10.1111/biom.13603}, abstractNote={Abstract}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. and Davidian, Marie and Holloway, Shannon T.}, year={2021}, month={Dec} } @article{tsiatis_davidian_2021a, title={Rejoinder: Estimating vaccine efficacy over time after a randomized study is unblinded}, volume={8}, ISSN={["1541-0420"]}, DOI={10.1111/biom.13539}, abstractNote={We are honored to have our work critiqued by such distinguished, internationally recognized authorities on vaccine efficacy and vaccine trials. When the first author (AAT) was appointed to the Data and Safety Monitoring Board for the U.S. 
government-sponsored COVID-19 vaccine trials, we were embarrassingly unacquainted with even the basic concepts in this area, starting with the definition of vaccine efficacy (VE), and it was to the fundamental work of these researchers we turned to get up to speed. Responding to the points they raise has enhanced our understanding of the area and the role of our work within it. We comment on the issues raised in each discussion in turn; because all note challenges posed by viral variants, we address this point separately at the end.}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. and Davidian, Marie}, year={2021}, month={Aug} } @article{cools_johnson_camm_bassand_verheugt_yang_tsiatis_fitzmaurice_goldhaber_kayani_etal_2021, title={Risks associated with discontinuation of oral anticoagulation in newly diagnosed patients with atrial fibrillation: Results from the GARFIELD-AF Registry}, volume={7}, ISSN={["1538-7836"]}, DOI={10.1111/jth.15415}, abstractNote={Oral anticoagulation (OAC) in atrial fibrillation (AF) reduces the risk of stroke/systemic embolism (SE). The impact of OAC discontinuation is less well documented.}, journal={JOURNAL OF THROMBOSIS AND HAEMOSTASIS}, author={Cools, Frank and Johnson, Dana and Camm, Alan J. and Bassand, Jean-Pierre and Verheugt, Freek W. A. and Yang, Shu and Tsiatis, Anastasios and Fitzmaurice, David A. and Goldhaber, Samuel Z. 
and Kayani, Gloria and et al.}, year={2021}, month={Jul} } @misc{kim_tsiatis_2020, title={Independent increments in group sequential tests: a review}, volume={44}, ISSN={["2013-8830"]}, DOI={10.2436/20.8080.02.101}, number={2}, journal={SORT-STATISTICS AND OPERATIONS RESEARCH TRANSACTIONS}, author={Kim, Kyung Mann and Tsiatis, Anastasios A.}, year={2020}, pages={223–264} } @misc{ruppert_yin_davidian_tsiatis_byrd_woyach_mandrekar_2019, title={Application of a sequential multiple assignment randomized trial (SMART) design in older patients with chronic lymphocytic leukemia}, volume={30}, ISSN={["1569-8041"]}, DOI={10.1093/annonc/mdz053}, abstractNote={BACKGROUND Ibrutinib therapy is safe and effective in patients with chronic lymphocytic leukemia (CLL). Currently, ibrutinib is administered continuously until disease progression. Combination regimens with ibrutinib are being developed to deepen response which could allow for ibrutinib maintenance (IM) discontinuation. Among untreated older patients with CLL, clinical investigators had the following questions: (i) does ibrutinib + venetoclax + obinutuzumab (IVO) with IM have superior progression-free survival (PFS) compared with ibrutinib + obinutuzumab (IO) with IM, and (ii) does the treatment strategy of IVO + IM for patients without minimal residual disease complete response (MRD- CR) or IVO + IM discontinuation for patients with MRD- CR have superior PFS compared with IO + IM. DESIGN Conventional designs randomize patients to IO with IM or IVO with IM to address the first objective, or randomize patients to each treatment strategy to address the second objective. A sequential multiple assignment randomized trial (SMART) design and analysis is proposed to address both objectives. RESULTS A SMART design strategy is appropriate when comparing adaptive interventions, which are defined by an individual's sequence of treatment decisions and guided by intermediate outcomes, such as response to therapy. 
A review of common applications of SMART design strategies is provided. Specific to the SMART design previously considered for Alliance study A041702, the general structure of the SMART is presented, an approach to sample size and power calculations when comparing adaptive interventions embedded in the SMART with a time-to-event end point is fully described, and analyses plans are outlined. CONCLUSION SMART design strategies can be used in cancer clinical trials with adaptive interventions to identify optimal treatment strategies. Further, standard software exists to provide sample size, power calculations, and data analysis for a SMART design.}, number={4}, journal={ANNALS OF ONCOLOGY}, author={Ruppert, A. S. and Yin, J. and Davidian, M. and Tsiatis, A. A. and Byrd, J. C. and Woyach, J. A. and Mandrekar, S. J.}, year={2019}, month={Apr}, pages={542–550} } @book{tsiatis_davidian_holloway_laber_2019, title={Dynamic Treatment Regimes}, ISBN={9780429192692}, url={http://dx.doi.org/10.1201/9780429192692}, DOI={10.1201/9780429192692}, publisher={Chapman and Hall/CRC}, author={Tsiatis, Anastasios A. and Davidian, Marie and Holloway, Shannon T. and Laber, Eric B.}, year={2019}, month={Dec} } @article{triolo_fouts_pyle_yu_gottlieb_steck_greenbaum_atkinson_baidal_battaglia_etal_2019, title={Identical and Nonidentical Twins: Risk and Factors Involved in Development of Islet Autoimmunity and Type 1 Diabetes}, volume={42}, ISSN={["1935-5548"]}, DOI={10.2337/dc18-0288}, abstractNote={ OBJECTIVE There are variable reports of risk of concordance for progression to islet autoantibodies and type 1 diabetes in identical twins after one twin is diagnosed. We examined development of positive autoantibodies and type 1 diabetes and the effects of genetic factors and common environment on autoantibody positivity in identical twins, nonidentical twins, and full siblings. }, number={2}, journal={DIABETES CARE}, author={Triolo, Taylor M. 
and Fouts, Alexandra and Pyle, Laura and Yu, Liping and Gottlieb, Peter A. and Steck, Andrea K. and Greenbaum, C. J. and Atkinson, M. and Baidal, D. and Battaglia, M. and et al.}, year={2019}, month={Feb}, pages={192–199} } @article{zhang_laber_davidian_tsiatis_2018, title={Interpretable Dynamic Treatment Regimes}, volume={113}, ISSN={["1537-274X"]}, DOI={10.1080/01621459.2017.1345743}, abstractNote={ABSTRACT Precision medicine is currently a topic of great interest in clinical and intervention science.  A key component of precision medicine is that it is evidence-based, that is, data-driven, and consequently there has been tremendous interest in estimation of precision medicine strategies using observational or randomized study data. One way to formalize precision medicine is through a treatment regime, which is a sequence of decision rules, one per stage of clinical intervention, that map up-to-date patient information to a recommended treatment. An optimal treatment regime is defined as maximizing the mean of some cumulative clinical outcome if applied to a population of interest. It is well-known that even under simple generative models an optimal treatment regime can be a highly nonlinear function of patient information. Consequently, a focal point of recent methodological research has been the development of flexible models for estimating optimal treatment regimes. However, in many settings, estimation of an optimal treatment regime is an exploratory analysis intended to generate new hypotheses for subsequent research and not to directly dictate treatment to new patients. In such settings, an estimated treatment regime that is interpretable in a domain context may be of greater value than an unintelligible treatment regime built using “black-box” estimation methods. 
We propose an estimator of an optimal treatment regime composed of a sequence of decision rules, each expressible as a list of “if-then” statements that can be presented as either a paragraph or as a simple flowchart that is immediately interpretable to domain experts. The discreteness of these lists precludes smooth, that is, gradient-based, methods of estimation and leads to nonstandard asymptotics. Nevertheless, we provide a computationally efficient estimation algorithm, prove consistency of the proposed estimator, and derive rates of convergence. We illustrate the proposed methods using a series of simulation examples and application to data from a sequential clinical trial on bipolar disorder. Supplementary materials for this article are available online.}, number={524}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Zhang, Yichi and Laber, Eric B. and Davidian, Marie and Tsiatis, Anastasios A.}, year={2018}, pages={1541–1549} } @article{yang_tsiatis_blazing_2018, title={Modeling survival distribution as a function of time to treatment discontinuation: A dynamic treatment regime approach}, volume={74}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12845}, abstractNote={Summary}, number={3}, journal={BIOMETRICS}, author={Yang, Shu and Tsiatis, Anastasios A. and Blazing, Michael}, year={2018}, month={Sep}, pages={900–909} } @article{hager_tsiatis_davidian_2018, title={Optimal two-stage dynamic treatment regimes from a classification perspective with censored survival data}, volume={74}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12894}, abstractNote={Summary}, number={4}, journal={BIOMETRICS}, author={Hager, Rebecca and Tsiatis, Anastasios A. 
and Davidian, Marie}, year={2018}, month={Dec}, pages={1180–1192} } @article{greenbaum_atkinson_baidal_battaglia_bingley_bosi_buckner_clements_colman_dimeglio_etal_2017, title={Effect of oral insulin on prevention of diabetes in relatives of patients with type 1 diabetes a randomized clinical trial}, volume={318}, number={19}, journal={Journal of the American Medical Association}, author={Greenbaum, C. and Atkinson, M. and Baidal, D. and Battaglia, M. and Bingley, P. and Bosi, E. and Buckner, J. and Clements, M. and Colman, P. and DiMeglio, L. and et al.}, year={2017}, pages={1891–1902} } @article{bai_tsiatis_lu_song_2017, title={Optimal treatment regimes for survival endpoints using locally-efficient doubly-robust estimator from a classification perspective}, volume={23}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-016-9376-x}, abstractNote={A treatment regime at a single decision point is a rule that assigns a treatment, among the available options, to a patient based on the patient’s baseline characteristics. The value of a treatment regime is the average outcome of a population of patients if they were all treated in accordance to the treatment regime, where large values are desirable. The optimal treatment regime is a regime which results in the greatest value. Typically, the optimal treatment regime is estimated by positing a regression relationship for the outcome of interest as a function of treatment and baseline characteristics. However, this can lead to suboptimal treatment regimes when the regression model is misspecified. We instead consider value search estimators for the optimal treatment regime where we directly estimate the value for any treatment regime and then maximize this estimator over a class of regimes. For many studies the primary outcome of interest is survival time which is often censored. 
We derive a locally efficient, doubly robust, augmented inverse probability weighted complete case estimator for the value function with censored survival data and study the large sample properties of this estimator. The optimization is realized from a weighted classification perspective that allows us to use available off the shelf software. In some studies one treatment may have greater toxicity or side effects, thus we also consider estimating a quality adjusted optimal treatment regime that allows a patient to trade some additional risk of death in order to avoid the more invasive treatment.}, number={4}, journal={Lifetime Data Analysis}, author={Bai, X. and Tsiatis, A. and Lu, W. and Song, R.}, year={2017}, pages={585–604} } @article{vock_durheim_tsuang_copeland_tsiatis_davidian_neely_lederer_palmer_2017, title={Survival benefit of lung transplantation in the modern era of lung allocation}, volume={14}, number={2}, journal={Annals of the American Thoracic Society}, author={Vock, D. M. and Durheim, M. T. and Tsuang, W. M. and Copeland, C. A. F. and Tsiatis, A. A. and Davidian, M. and Neely, M. L. and Lederer, D. J. and Palmer, S. M.}, year={2017}, pages={172–181} } @article{bai_tsiatis_2016, title={A log rank type test in observational survival studies with stratified sampling}, volume={22}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-015-9331-2}, abstractNote={In randomized clinical trials, the log rank test is often used to test the null hypothesis of the equality of treatment-specific survival distributions. In observational studies, however, the ordinary log rank test is no longer guaranteed to be valid. In such studies we must be cautious about potential confounders; that is, the covariates that affect both the treatment assignment and the survival distribution. 
In this paper, two cases were considered: the first is when it is believed that all the potential confounders are captured in the primary database, and the second case where a substudy is conducted to capture additional confounding covariates. We generalize the augmented inverse probability weighted complete case estimators for treatment-specific survival distribution proposed in Bai et al. (Biometrics 69:830–839, 2013) and develop the log rank type test in both cases. The consistency and double robustness of the proposed test statistics are shown in simulation studies. These statistics are then applied to the data from the observational study that motivated this research.}, number={2}, journal={LIFETIME DATA ANALYSIS}, author={Bai, Xiaofei and Tsiatis, Anastasios A.}, year={2016}, month={Apr}, pages={280–298} } @article{zhang_tsiatis_davidian_zhang_laber_2016, title={Estimating optimal treatment regimes from a classification perspective (vol 1, pg 103, 2012)}, volume={5}, ISSN={["2049-1573"]}, DOI={10.1002/sta4.124}, abstractNote={StatVolume 5, Issue 1 p. 278-278 Erratum Estimating optimal treatment regimes from a classification perspective Baqun Zhang, Corresponding Author Baqun Zhang baqun.zhang@northwestern.edu Department of Preventive Medicine, Northwestern University, Chicago, IL, 60611 USAE-mail: baqun.zhang@northwestern.eduSearch for more papers by this authorAnastasios A. Tsiatis, Anastasios A. 
Tsiatis Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMin Zhang, Min Zhang Department of Biotatistics, University of Michigan, Ann Arbor, MI, 48109-2029 USASearch for more papers by this authorEric Laber, Eric Laber Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this author Baqun Zhang, Corresponding Author Baqun Zhang baqun.zhang@northwestern.edu Department of Preventive Medicine, Northwestern University, Chicago, IL, 60611 USAE-mail: baqun.zhang@northwestern.eduSearch for more papers by this authorAnastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMin Zhang, Min Zhang Department of Biotatistics, University of Michigan, Ann Arbor, MI, 48109-2029 USASearch for more papers by this authorEric Laber, Eric Laber Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this author First published: 04 November 2016 https://doi.org/10.1002/sta4.124Read the full textAboutPDF ToolsRequest permissionExport citationAdd to favoritesTrack citation ShareShare Give accessShare full text accessShare full-text accessPlease review our Terms and Conditions of Use and check box below to share full-text version of article.I have read and accept the Wiley Online Library Terms and Conditions of UseShareable LinkUse the link below to share a full-text version of this article with your friends and colleagues. 
Learn more.Copy URL Share a linkShare onFacebookTwitterLinkedInRedditWechat No abstract is available for this article. Volume5, Issue12016Pages 278-278 RelatedInformation}, number={1}, journal={STAT}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Laber, Eric}, year={2016}, pages={278–278} } @article{milanzi_molenberghs_alonso_kenward_tsiatis_davidian_verbeke_2015, title={Estimation After a Group Sequential Trial}, volume={7}, ISSN={["1867-1772"]}, DOI={10.1007/s12561-014-9112-6}, abstractNote={Group sequential trials are one important instance of studies for which the sample size is not fixed a priori but rather takes one of a finite set of pre-specified values, dependent on the observed data. Much work has been devoted to the inferential consequences of this design feature. Molenberghs et al. (Statistical Methods in Medical Research, 2012) and Milanzi et al. (Properties of estimators in exponential family settings with observation-based stopping rules, 2012) reviewed and extended the existing literature, focusing on a collection of seemingly disparate, but related, settings, namely completely random sample sizes, group sequential studies with deterministic and random stopping rules, incomplete data, and random cluster sizes. They showed that the ordinary sample average is a viable option for estimation following a group sequential trial, for a wide class of stopping rules and for random outcomes with a distribution in the exponential family. Their results are somewhat surprising in the sense that the sample average is not optimal, and further, there does not exist an optimal, or even, unbiased linear estimator. However, the sample average is asymptotically unbiased, both conditionally upon the observed sample size as well as marginalized over it. By exploiting ignorability they showed that the sample average is the conventional maximum likelihood estimator. 
They also showed that a conditional maximum likelihood estimator is finite sample unbiased, but is less efficient than the sample average and has the larger mean squared error. Asymptotically, the sample average and the conditional maximum likelihood estimator are equivalent. This previous work is restricted, however, to the situation in which the the random sample size can take only two values, $$N=n$$ or $$N=2n$$ . In this paper, we consider the more practically useful setting of sample sizes in a the finite set $$\{n_1,n_2,\dots ,n_L\}$$ . It is shown that the sample average is then a justifiable estimator , in the sense that it follows from joint likelihood estimation, and it is consistent and asymptotically unbiased. We also show why simulations can give the false impression of bias in the sample average when considered conditional upon the sample size. The consequence is that no corrections need to be made to estimators following sequential trials. When small-sample bias is of concern, the conditional likelihood estimator (CLE) provides a relatively straightforward modification to the sample average. Finally, it is shown that classical likelihood-based standard errors and confidence intervals can be applied, obviating the need for technical corrections.}, number={2}, journal={STATISTICS IN BIOSCIENCES}, author={Milanzi, Elasma and Molenberghs, Geert and Alonso, Ariel and Kenward, Michael G. and Tsiatis, Anastasios A. and Davidian, Marie and Verbeke, Geert}, year={2015}, month={Oct}, pages={187–205} } @article{zhang_laber_tsiatis_davidian_2015, title={Using Decision Lists to Construct Interpretable and Parsimonious Treatment Regimes}, volume={71}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12354}, abstractNote={Summary}, number={4}, journal={BIOMETRICS}, author={Zhang, Yichi and Laber, Eric B. 
and Tsiatis, Anastasios and Davidian, Marie}, year={2015}, month={Dec}, pages={895–904} } @article{laber_zhao_regh_davidian_tsiatis_stanford_zeng_song_kosorok_2015, title={Using pilot data to size a two-arm randomized trial to find a nearly optimal personalized treatment strategy}, volume={35}, ISSN={0277-6715}, url={http://dx.doi.org/10.1002/SIM.6783}, DOI={10.1002/SIM.6783}, abstractNote={A personalized treatment strategy formalizes evidence‐based treatment selection by mapping patient information to a recommended treatment. Personalized treatment strategies can produce better patient outcomes while reducing cost and treatment burden. Thus, among clinical and intervention scientists, there is a growing interest in conducting randomized clinical trials when one of the primary aims is estimation of a personalized treatment strategy. However, at present, there are no appropriate sample size formulae to assist in the design of such a trial. Furthermore, because the sampling distribution of the estimated outcome under an estimated optimal treatment strategy can be highly sensitive to small perturbations in the underlying generative model, sample size calculations based on standard (uncorrected) asymptotic approximations or computer simulations may not be reliable. We offer a simple and robust method for powering a single stage, two‐armed randomized clinical trial when the primary aim is estimating the optimal single stage personalized treatment strategy. The proposed method is based on inverting a plugin projection confidence interval and is thereby regular and robust to small perturbations of the underlying generative model. The proposed method requires elicitation of two clinically meaningful parameters from clinical scientists and uses data from a small pilot study to estimate nuisance parameters, which are not easily elicited. 
The method performs well in simulated experiments and is illustrated using data from a pilot study of time to conception and fertility awareness. Copyright © 2015 John Wiley & Sons, Ltd.}, number={8}, journal={Statistics in Medicine}, publisher={Wiley}, author={Laber, Eric B. and Zhao, Ying-Qi and Regh, Todd and Davidian, Marie and Tsiatis, Anastasios and Stanford, Joseph B. and Zeng, Donglin and Song, Rui and Kosorok, Michael R.}, year={2015}, month={Oct}, pages={1245–1256} } @article{laber_tsiatis_davidian_holloway_2014, title={Combining Biomarkers to Optimize Patient Treatment Recommendations Discussions}, volume={70}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12187}, abstractNote={We congratulate the Kang, Janes, and Huang (hereafter KJH) on an interesting and powerful new method for estimating an optimal treatment rule, also referred to as an optimal treatment regime. Their proposed method relies on having a high-quality estimator for the regression of outcome on biomarkers and treatment, which the authors obtain using a novel boosting algorithm. Methods for constructing treatment rules/regimes that rely on outcome models are sometimes called indirect or regression-based methods because the treatment rule is inferred from the outcome model (Barto and Dieterich, 1988). Regression-based methods are appealing because they can be used to make prognostic predictions as well as treatment recommendations. While it is common practice to use parametric or semiparametric models in regression-based approaches (Robins, 2004; Chakraborty and Moodie, 2013; Laber et al., 2014; Schulte et al., 2014), there is growing interest in using nonparametric methods to avoid model misspecification (Zhao et al., 2011; Moodie et al., 2013). 
In contrast, direct estimation methods, also known as policy-search methods, try to weaken or eliminate dependence on correct outcome models and instead attempt to search for the best treatment rule within a pre-specified class of rules (Orellana, Rotnitzky, and Robins, 2010; Zhang et al., 2012a,b; Zhao et al., 2012; Zhang et al., 2013). Direct estimation methods make fewer assumptions about the outcome model, which may make them more robust to model misspecification but potentially more variable. We derive a direct estimation analog to the method of KJH, which we term value boosting. The method is based on recasting the problem of estimating an optimal treatment rule as a weighted classification problem (Zhang et al., 2012a; Zhao et al., 2012). We show how the method of KJH can be used with existing policy-search methods to construct a treatment rule that is interpretable, logistically feasible, parsimonious, or otherwise appealing.}, number={3}, journal={BIOMETRICS}, author={Laber, Eric B. and Tsiatis, Anastasios A. and Davidian, Marie and Holloway, Shannon T.}, year={2014}, month={Sep}, pages={707–710} } @article{molenberghs_kenward_aerts_verbeke_tsiatis_davidian_rizopoulos_2014, title={On random sample size, ignorability, ancillarity, completeness, separability, and degeneracy: Sequential trials, random sample sizes, and missing data}, volume={23}, ISSN={["1477-0334"]}, DOI={10.1177/0962280212445801}, abstractNote={ The vast majority of settings for which frequentist statistical properties are derived assume a fixed, a priori known sample size. Familiar properties then follow, such as, for example, the consistency, asymptotic normality, and efficiency of the sample average for the mean parameter, under a wide range of conditions. We are concerned here with the alternative situation in which the sample size is itself a random variable which may depend on the data being collected. Further, the rule governing this may be deterministic or probabilistic. 
There are many important practical examples of such settings, including missing data, sequential trials, and informative cluster size. It is well known that special issues can arise when evaluating the properties of statistical procedures under such sampling schemes, and much has been written about specific areas (Grambsch P. Sequential sampling based on the observed Fisher information to guarantee the accuracy of the maximum likelihood estimator. Ann Stat 1983; 11: 68–77; Barndorff-Nielsen O and Cox DR. The effect of sampling rules on likelihood statistics. Int Stat Rev 1984; 52: 309–326). Our aim is to place these various related examples into a single framework derived from the joint modeling of the outcomes and sampling process and so derive generic results that in turn provide insight, and in some cases practical consequences, for different settings. It is shown that, even in the simplest case of estimating a mean, some of the results appear counterintuitive. In many examples, the sample average may exhibit small sample bias and, even when it is unbiased, may not be optimal. Indeed, there may be no minimum variance unbiased estimator for the mean. Such results follow directly from key attributes such as non-ancillarity of the sample size and incompleteness of the minimal sufficient statistic of the sample size and sample sum. Although our results have direct and obvious implications for estimation following group sequential trials, there are also ramifications for a range of other settings, such as random cluster sizes, censored time-to-event data, and the joint modeling of longitudinal and time-to-event data. Here, we use the simplest group sequential setting to develop and explicate the main results. Some implications for random sample sizes and missing data are also considered. Consequences for other related settings will be considered elsewhere. 
}, number={1}, journal={STATISTICAL METHODS IN MEDICAL RESEARCH}, author={Molenberghs, Geert and Kenward, Michael G. and Aerts, Marc and Verbeke, Geert and Tsiatis, Anastasios A. and Davidian, Marie and Rizopoulos, Dimitris}, year={2014}, month={Feb}, pages={11–41} } @article{schulte_tsiatis_laber_davidian_2014, title={Q- and A-Learning Methods for Estimating Optimal Dynamic Treatment Regimes}, volume={29}, ISSN={["0883-4237"]}, DOI={10.1214/13-sts450}, abstractNote={In clinical practice, physicians make a series of treatment decisions over the course of a patient's disease based on his/her baseline and evolving characteristics. A dynamic treatment regime is a set of sequential decision rules that operationalizes this process. Each rule corresponds to a decision point and dictates the next treatment action based on the accrued information. Using existing data, a key goal is estimating the optimal regime, that, if followed by the patient population, would yield the most favorable outcome on average. Q- and A-learning are two main approaches for this purpose. We provide a detailed account of these methods, study their performance, and illustrate them using data from a depression study.}, number={4}, journal={STATISTICAL SCIENCE}, author={Schulte, Phillip J. and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2014}, month={Nov}, pages={640–661} } @article{vock_tsiatis_davidian_laber_tsuang_copeland_palmer_2013, title={Assessing the Causal Effect of Organ Transplantation on the Distribution of Residual Lifetime}, volume={69}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12084}, abstractNote={Summary}, number={4}, journal={BIOMETRICS}, author={Vock, David M. and Tsiatis, Anastasios A. and Davidian, Marie and Laber, Eric B. and Tsuang, Wayne M. and Copeland, C. Ashley Finlen and Palmer, Scott M.}, year={2013}, month={Dec}, pages={820–829} } @article{bai_tsiatis_obrien_2013,
title={Doubly-Robust Estimators of Treatment-Specific Survival Distributions in Observational Studies with Stratified Sampling}, volume={69}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12076}, abstractNote={Summary}, number={4}, journal={BIOMETRICS}, author={Bai, Xiaofei and Tsiatis, Anastasios A. and O'Brien, Sean M.}, year={2013}, month={Dec}, pages={830–839} } @article{daniel_tsiatis_2013, title={Efficient estimation of the distribution of time to composite endpoint when some endpoints are only partially observed}, volume={19}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-013-9261-9}, abstractNote={Two common features of clinical trials, and other longitudinal studies, are (1) a primary interest in composite endpoints, and (2) the problem of subjects withdrawing prematurely from the study. In some settings, withdrawal may only affect observation of some components of the composite endpoint, for example when another component is death, information on which may be available from a national registry. In this paper, we use the theory of augmented inverse probability weighted estimating equations to show how such partial information on the composite endpoint for subjects who withdraw from the study can be incorporated in a principled way into the estimation of the distribution of time to composite endpoint, typically leading to increased efficiency without relying on additional assumptions above those that would be made by standard approaches. We describe our proposed approach theoretically, and demonstrate its properties in a simulation study.}, number={4}, journal={LIFETIME DATA ANALYSIS}, author={Daniel, Rhian M. 
and Tsiatis, Anastasios A.}, year={2013}, month={Oct}, pages={513–546} } @article{zhang_tsiatis_laber_davidian_2013, title={Robust estimation of optimal dynamic treatment regimes for sequential treatment decisions}, volume={100}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/ast014}, abstractNote={A dynamic treatment regime is a list of sequential decision rules for assigning treatment based on a patient's history. Q- and A-learning are two main approaches for estimating the optimal regime, i.e., that yielding the most beneficial outcome in the patient population, using data from a clinical trial or observational study. Q-learning requires postulated regression models for the outcome, while A-learning involves models for that part of the outcome regression representing treatment contrasts and for treatment assignment. We propose an alternative to Q- and A-learning that maximizes a doubly robust augmented inverse probability weighted estimator for population mean outcome over a restricted class of regimes. Simulations demonstrate the method's performance and robustness to model misspecification, which is a key concern.}, number={3}, journal={BIOMETRIKA}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2013}, month={Sep}, pages={681–694} } @article{zhang_tsiatis_laber_davidian_2012, title={A Robust Method for Estimating Optimal Treatment Regimes}, volume={68}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2012.01763.x}, abstractNote={Summary A treatment regime is a rule that assigns a treatment, among a set of possible treatments, to a patient as a function of his/her observed characteristics, hence “personalizing” treatment to the patient. The goal is to identify the optimal treatment regime that, if followed by the entire population of patients, would lead to the best outcome on average. 
Given data from a clinical trial or observational study, for a single treatment decision, the optimal regime can be found by assuming a regression model for the expected outcome conditional on treatment and covariates, where, for a given set of covariates, the optimal treatment is the one that yields the most favorable expected outcome. However, treatment assignment via such a regime is suspect if the regression model is incorrectly specified. Recognizing that, even if misspecified, such a regression model defines a class of regimes, we instead consider finding the optimal regime within such a class by finding the regime that optimizes an estimator of overall population mean outcome. To take into account possible confounding in an observational study and to increase precision, we use a doubly robust augmented inverse probability weighted estimator for this purpose. Simulations and application to data from a breast cancer clinical trial demonstrate the performance of the method.}, number={4}, journal={BIOMETRICS}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2012}, month={Dec}, pages={1010–1018} } @article{zhang_tsiatis_davidian_zhang_laber_2012, title={Estimating optimal treatment regimes from a classification perspective}, volume={1}, ISSN={2049-1573}, url={http://dx.doi.org/10.1002/sta.411}, DOI={10.1002/sta.411}, abstractNote={A treatment regime maps observed patient characteristics to a recommended treatment. Recent technological advances have increased the quality, accessibility, and volume of patient‐level data; consequently, there is a growing need for powerful and flexible estimators of an optimal treatment regime that can be used with either observational or randomized clinical trial data. We propose a novel and general framework that transforms the problem of estimating an optimal treatment regime into a classification problem wherein the optimal classifier corresponds to the optimal treatment regime. 
We show that commonly employed parametric and semi‐parametric regression estimators, as well as recently proposed robust estimators of an optimal treatment regime can be represented as special cases within our framework. Furthermore, our approach allows any classification procedure that can accommodate case weights to be used without modification to estimate an optimal treatment regime. This introduces a wealth of new and powerful learning algorithms for use in estimating treatment regimes. We illustrate our approach using data from a breast cancer clinical trial. Copyright © 2012 John Wiley & Sons, Ltd.}, number={1}, journal={Stat}, publisher={Wiley}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Laber, Eric}, year={2012}, month={Oct}, pages={103–114} } @article{vock_davidian_tsiatis_muir_2012, title={Mixed model analysis of censored longitudinal data with flexible random-effects density}, volume={13}, ISSN={["1468-4357"]}, DOI={10.1093/biostatistics/kxr026}, abstractNote={Mixed models are commonly used to represent longitudinal or repeated measures data. An additional complication arises when the response is censored, for example, due to limits of quantification of the assay used. While Gaussian random effects are routinely assumed, little work has characterized the consequences of misspecifying the random-effects distribution nor has a more flexible distribution been studied for censored longitudinal data. We show that, in general, maximum likelihood estimators will not be consistent when the random-effects density is misspecified, and the effect of misspecification is likely to be greatest when the true random-effects density deviates substantially from normality and the number of noncensored observations on each subject is small. 
We develop a mixed model framework for censored longitudinal data in which the random effects are represented by the flexible seminonparametric density and show how to obtain estimates in SAS procedure NLMIXED. Simulations show that this approach can lead to reduction in bias and increase in efficiency relative to assuming Gaussian random effects. The methods are demonstrated on data from a study of hepatitis C virus.}, number={1}, journal={BIOSTATISTICS}, author={Vock, David M. and Davidian, Marie and Tsiatis, Anastasios A. and Muir, Andrew J.}, year={2012}, month={Jan}, pages={61–73} } @article{tsiatis_davidian_2011, title={Connections between survey calibration estimators and semiparametric models for incomplete data discussion}, volume={79}, number={2}, journal={International Statistical Review}, author={Tsiatis, A. A. and Davidian, M.}, year={2011}, pages={221–223} } @article{tsiatis_davidian_cao_2011, title={Improved Doubly Robust Estimation When Data Are Monotonely Coarsened, with Application to Longitudinal Studies with Dropout}, volume={67}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2010.01476.x}, abstractNote={Summary A routine challenge is that of making inference on parameters in a statistical model of interest from longitudinal data subject to dropout, which are a special case of the more general setting of monotonely coarsened data. Considerable recent attention has focused on doubly robust (DR) estimators, which in this context involve positing models for both the missingness (more generally, coarsening) mechanism and aspects of the distribution of the full data, that have the appealing property of yielding consistent inferences if only one of these models is correctly specified. DR estimators have been criticized for potentially disastrous performance when both of these models are even only mildly misspecified. 
We propose a DR estimator applicable in general monotone coarsening problems that achieves comparable or improved performance relative to existing DR methods, which we demonstrate via simulation studies and by application to data from an AIDS clinical trial.}, number={2}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. and Davidian, Marie and Cao, Weihua}, year={2011}, month={Jun}, pages={536–545} } @article{zhang_tsiatis_davidian_pieper_mahaffey_2011, title={Inference on treatment effects from a randomized clinical trial in the presence of premature treatment discontinuation: the SYNERGY trial}, volume={12}, ISSN={["1465-4644"]}, DOI={10.1093/biostatistics/kxq054}, abstractNote={The Superior Yield of the New Strategy of Enoxaparin, Revascularization, and GlYcoprotein IIb/IIIa inhibitors (SYNERGY) was a randomized, open-label, multicenter clinical trial comparing 2 anticoagulant drugs on the basis of time-to-event endpoints. In contrast to other studies of these agents, the primary, intent-to-treat analysis did not find evidence of a difference, leading to speculation that premature discontinuation of the study agents by some subjects may have attenuated the apparent treatment effect and thus to interest in inference on the difference in survival distributions were all subjects in the population to follow the assigned regimens, with no discontinuation. Such inference is often attempted via ad hoc analyses that are not based on a formal definition of this treatment effect. We use SYNERGY as a context in which to describe how this effect may be conceptualized and to present a statistical framework in which it may be precisely identified, which leads naturally to inferential methods based on inverse probability weighting.}, number={2}, journal={BIOSTATISTICS}, author={Zhang, Min and Tsiatis, Anastasios A. and Davidian, Marie and Pieper, Karen S. 
and Mahaffey, Kenneth W.}, year={2011}, month={Apr}, pages={258–269} } @article{lu_tsiatis_2011, title={Semiparametric estimation of treatment effect with time-lagged response in the presence of informative censoring}, volume={17}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-011-9199-8}, abstractNote={In many randomized clinical trials, the primary response variable, for example, the survival time, is not observed directly after the patients enroll in the study but rather observed after some period of time (lag time). It is often the case that such a response variable is missing for some patients due to censoring that occurs when the study ends before the patient’s response is observed or when the patients drop out of the study. It is often assumed that censoring occurs at random which is referred to as noninformative censoring; however, in many cases such an assumption may not be reasonable. If the missing data are not analyzed properly, the estimator or test for the treatment effect may be biased. In this paper, we use semiparametric theory to derive a class of consistent and asymptotically normal estimators for the treatment effect parameter which are applicable when the response variable is right censored. The baseline auxiliary covariates and post-treatment auxiliary covariates, which may be time-dependent, are also considered in our semiparametric model. 
These auxiliary covariates are used to derive estimators that both account for informative censoring and are more efficient than the estimators which do not consider the auxiliary covariates.}, number={4}, journal={LIFETIME DATA ANALYSIS}, author={Lu, Xiaomin and Tsiatis, Anastasios A.}, year={2011}, month={Oct}, pages={566–593} } @article{brinkley_tsiatis_anstrom_2010, title={A Generalized Estimator of the Attributable Benefit of an Optimal Treatment Regime}, volume={66}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2009.01282.x}, abstractNote={Summary For many diseases where there are several treatment options often there is no consensus on the best treatment to give individual patients. In such cases, it may be necessary to define a strategy for treatment assignment; that is, an algorithm that dictates the treatment an individual should receive based on their measured characteristics. Such a strategy or algorithm is also referred to as a treatment regime. The optimal treatment regime is the strategy that would provide the most public health benefit by minimizing as many poor outcomes as possible. Using a measure that is a generalization of attributable risk (AR) and notions of potential outcomes, we derive an estimator for the proportion of events that could have been prevented had the optimal treatment regime been implemented. Traditional AR studies look at the added risk that can be attributed to exposure of some contaminant; here we will instead study the benefit that can be attributed to using the optimal treatment strategy. We will show how regression models can be used to estimate the optimal treatment strategy and the attributable benefit of that strategy. We also derive the large sample properties of this estimator. 
As a motivating example, we will apply our methods to an observational study of 3856 patients treated at the Duke University Medical Center with prior coronary artery bypass graft surgery and further heart‐related problems requiring a catheterization. The patients may be treated with either medical therapy alone or a combination of medical therapy and percutaneous coronary intervention without a general consensus on which is the best treatment for individual patients.}, number={2}, journal={BIOMETRICS}, author={Brinkley, Jason and Tsiatis, Anastasios and Anstrom, Kevin J.}, year={2010}, month={Jun}, pages={512–522} } @article{lu_jiang_tsiatis_2010, title={Multiple Imputation Approaches for the Analysis of Dichotomized Responses in Longitudinal Studies with Missing Data}, volume={66}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2010.01405.x}, abstractNote={Summary Often a binary variable is generated by dichotomizing an underlying continuous variable measured at a specific time point according to a prespecified threshold value. In the event that the underlying continuous measurements are from a longitudinal study, one can use the repeated‐measures model to impute missing data on responder status as a result of subject dropout and apply the logistic regression model on the observed or otherwise imputed responder status. Standard Bayesian multiple imputation techniques (Rubin, 1987, in Multiple Imputation for Nonresponse in Surveys) that draw the parameters for the imputation model from the posterior distribution and construct the variance of parameter estimates for the analysis model as a combination of within‐ and between‐imputation variances are found to be conservative. 
The frequentist multiple imputation approach that fixes the parameters for the imputation model at the maximum likelihood estimates and construct the variance of parameter estimates for the analysis model using the results of Robins and Wang (2000, Biometrika 87, 113–124) is shown to be more efficient. We propose to apply (Kenward and Roger, 1997, Biometrics 53, 983–997) degrees of freedom to account for the uncertainty associated with variance–covariance parameter estimates for the repeated measures model.}, number={4}, journal={BIOMETRICS}, author={Lu, Kaifeng and Jiang, Liqiu and Tsiatis, Anastasios A.}, year={2010}, month={Dec}, pages={1202–1208} } @article{cao_tsiatis_davidian_2009, title={Improving efficiency and robustness of the doubly robust estimator for a population mean with incomplete data}, volume={96}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/asp033}, abstractNote={Abstract}, number={3}, journal={BIOMETRIKA}, author={Cao, Weihua and Tsiatis, Anastasios A. and Davidian, Marie}, year={2009}, month={Sep}, pages={723–734} } @article{tsiatis_davidian_zhang_lu_2008, title={Covariate adjustment for two-sample treatment comparisons in randomized clinical trials: A principled yet flexible approach}, volume={27}, ISSN={["1097-0258"]}, DOI={10.1002/sim.3113}, abstractNote={Abstract}, number={23}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Lu, Xiaomin}, year={2008}, month={Oct}, pages={4658–4677} } @article{nelson_sun_tsiatis_mark_2008, title={Empirical estimation of life expectancy from large clinical trials: Use of left-truncated, right-censored survival analysis methodology}, volume={27}, ISSN={["1097-0258"]}, DOI={10.1002/sim.3355}, abstractNote={Abstract}, number={26}, journal={STATISTICS IN MEDICINE}, author={Nelson, Charlotte L. and Sun, Jie L. and Tsiatis, Anastasios A. 
and Mark, Daniel B.}, year={2008}, month={Nov}, pages={5525–5555} } @article{zhang_tsiatis_davidian_2008, title={Improving efficiency of inferences in randomized clinical trials using auxiliary covariates}, volume={64}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2007.00976.x}, abstractNote={Summary The primary goal of a randomized clinical trial is to make comparisons among two or more treatments. For example, in a two‐arm trial with continuous response, the focus may be on the difference in treatment means; with more than two treatments, the comparison may be based on pairwise differences. With binary outcomes, pairwise odds ratios or log odds ratios may be used. In general, comparisons may be based on meaningful parameters in a relevant statistical model. Standard analyses for estimation and testing in this context typically are based on the data collected on response and treatment assignment only. In many trials, auxiliary baseline covariate information may also be available, and it is of interest to exploit these data to improve the efficiency of inferences. Taking a semiparametric theory perspective, we propose a broadly applicable approach to adjustment for auxiliary covariates to achieve more efficient estimators and tests for treatment parameters in the analysis of randomized clinical trials. Simulations and applications demonstrate the performance of the methods.}, number={3}, journal={BIOMETRICS}, author={Zhang, Min and Tsiatis, Anastasios A. and Davidian, Marie}, year={2008}, month={Sep}, pages={707–715} } @article{lu_tsiatis_2008, title={Improving the efficiency of the log-rank test using auxiliary covariates}, volume={95}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/asn003}, abstractNote={Under the assumption of proportional hazards, the log-rank test is optimal for testing the null hypothesis , where denotes the logarithm of the hazard ratio. 
However, if there are additional covariates that correlate with survival times, making use of their information will increase the efficiency of the log-rank test. We apply the theory of semiparametrics to characterize a class of regular and asymptotically linear estimators for when auxiliary covariates are incorporated into the model, and derive estimators that are more efficient. The Wald tests induced by these estimators are shown to be more powerful than the log-rank test. Simulation studies are used to illustrate the gains in efficiency. Copyright 2008, Oxford University Press.}, number={3}, journal={BIOMETRIKA}, author={Lu, Xiaomin and Tsiatis, Anastasios A.}, year={2008}, month={Sep}, pages={679–694} } @article{lokhnygina_tsiatis_2008, title={Optimal two-stage group-sequential designs}, volume={138}, ISSN={["0378-3758"]}, DOI={10.1016/j.jspi.2007.06.011}, abstractNote={We derive optimal two-stage adaptive group-sequential designs for normally distributed data which achieve the minimum of a mixture of expected sample sizes at the range of plausible values of a normal mean. Unlike standard group-sequential tests, our method is adaptive in that it allows the group size at the second look to be a function of the observed test statistic at the first look. Using optimality criteria, we construct two-stage designs which we show have advantage over other popular adaptive methods. 
The employed computational method is a modification of the backward induction algorithm applied to a Bayesian decision problem.}, number={2}, journal={JOURNAL OF STATISTICAL PLANNING AND INFERENCE}, author={Lokhnygina, Yuliya and Tsiatis, Anastasios A.}, year={2008}, month={Feb}, pages={489–499} } @article{mark_anstrom_sun_clapp-channing_tsiatis_davidson-ray_lee_bardy_2008, title={Quality of life with defibrillator therapy or amiodarone in heart failure}, volume={359}, ISSN={["0028-4793"]}, DOI={10.1056/NEJMoa0706719}, abstractNote={BACKGROUND Implantable cardioverter-defibrillator (ICD) therapy significantly prolongs life in patients at increased risk for sudden death from depressed left ventricular function. However, whether this increased longevity is accompanied by deterioration in the quality of life is unclear. METHODS In a randomized trial, we compared ICD therapy or amiodarone with state-of-the-art medical therapy alone in 2521 patients who had stable heart failure with depressed left ventricular function. We prospectively measured quality of life at baseline and at months 3, 12, and 30; data collection was 93 to 98% complete. The Duke Activity Status Index (which measures cardiac physical functioning) and the Medical Outcomes Study 36-Item Short-Form Mental Health Inventory 5 (which measures psychological well-being) were prespecified primary outcomes. Multiple additional quality-of-life outcomes were also examined. RESULTS Psychological well-being in the ICD group, as compared with medical therapy alone, was significantly improved at 3 months (P=0.01) and at 12 months (P=0.003) but not at 30 months. No clinically or statistically significant differences in physical functioning among the study groups were observed. Additional quality-of-life measures were improved in the ICD group at 3 months, 12 months, or both, but there was no significant difference at 30 months. 
ICD shocks in the month preceding a scheduled assessment were associated with a decreased quality of life in multiple domains. The use of amiodarone had no significant effects on the primary quality-of-life outcomes. CONCLUSIONS In a large primary-prevention population with moderately symptomatic heart failure, single-lead ICD therapy was not associated with any detectable adverse quality-of-life effects during 30 months of follow-up.}, number={10}, journal={NEW ENGLAND JOURNAL OF MEDICINE}, author={Mark, Daniel B. and Anstrom, Kevin J. and Sun, Jie L. and Clapp-Channing, Nancy E. and Tsiatis, Anastasios A. and Davidson-Ray, Linda and Lee, Kerry L. and Bardy, Gust H.}, year={2008}, month={Sep}, pages={999–1008} } @article{banerjee_tsiatis_2006, title={Adaptive two-stage designs in phase II clinical trials}, volume={25}, ISSN={["1097-0258"]}, DOI={10.1002/sim.2501}, abstractNote={Two‐stage designs have been widely used in phase II clinical trials. Such designs are desirable because they allow a decision to be made on whether a treatment is effective or not after the accumulation of the data at the end of each stage. Optimal fixed two‐stage designs, where the sample size at each stage is fixed in advance, were proposed by Simon when the primary outcome is a binary response. This paper proposes an adaptive two‐stage design which allows the sample size at the second stage to depend on the results at the first stage. Using a Bayesian decision‐theoretic construct, we derive optimal adaptive two‐stage designs; the optimality criterion being minimum expected sample size under the null hypothesis. Comparisons are made between Simon's two‐stage fixed design and the new design with respect to this optimality criterion. 
Copyright © 2006 John Wiley & Sons, Ltd.}, number={19}, journal={STATISTICS IN MEDICINE}, author={Banerjee, Anindita and Tsiatis, Anastasios A.}, year={2006}, month={Oct}, pages={3382–3395} } @article{mark_nelson_anstrom_al-khatib_tsiatis_cowper_clapp-channing_davidson-ray_poole_johnson_et_al_2006, title={Cost-effectiveness of defibrillator therapy or amiodarone in chronic stable heart failure - Results from the Sudden Cardiac Death in Heart Failure Trial (SCD-HeFT)}, volume={114}, ISSN={["0009-7322"]}, DOI={10.1161/circulationaha.105.581884}, abstractNote={ Background— In the Sudden Cardiac Death in Heart Failure Trial (SCD-HeFT), implantable cardioverter-defibrillator (ICD) therapy significantly reduced all-cause mortality rates compared with medical therapy alone in patients with stable, moderately symptomatic heart failure, whereas amiodarone had no benefit on mortality rates. We examined long-term economic implications of these results. }, number={2}, journal={CIRCULATION}, author={Mark, Daniel B. and Nelson, Charlotte L. and Anstrom, Kevin J. and Al-Khatib, Sana M. and Tsiatis, Anastasios A. and Cowper, Patricia A. and Clapp-Channing, Nancy E. and Davidson-Ray, Linda and Poole, Jeanne E. and Johnson, George and et al.}, year={2006}, month={Jul}, pages={135–142} } @article{tsiatis_2006, title={Information-based monitoring of clinical trials}, volume={25}, ISSN={["0277-6715"]}, DOI={10.1002/sim.2625}, abstractNote={When designing a clinical trial to compare the effect of different treatments on response, a key issue facing the statistician is to determine how large a study is necessary to detect a clinically important difference with sufficient power. This is the case whether the study will be analysed only once (single‐analysis) or whether it will be monitored periodically with the possibility of early stopping (group‐sequential). 
Standard sample size calculations are based on both the magnitude of difference that is considered clinically important as well as values for the nuisance parameters in the statistical model. For planning purposes, best guesses are made for the value of the nuisance parameters and these are used to determine the sample size. However, if these guesses are incorrect this will affect the subsequent power to detect the clinically important difference. It is argued in this paper that statistical precision is directly related to Statistical Information and that the study should continue until the requisite statistical information is obtained. This is referred to as information‐based design and analysis of clinical trials. We also argue that this type of methodology is best suited with group‐sequential trials which monitor the data periodically and allow for estimation of the statistical information as the study progresses. Copyright © 2006 John Wiley & Sons, Ltd.}, number={19}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A.}, year={2006}, month={Oct}, pages={3236–3244} } @article{ma_tsiatis_2006, title={On closed form semiparametric estimators for measurement error models}, volume={16}, number={1}, journal={Statistica Sinica}, author={Ma, Y. Y. and Tsiatis, A. A.}, year={2006}, pages={183–193} } @article{wahed_tsiatis_2006, title={Semiparametric efficient estimation of survival distributions in two-stage randomisation designs in clinical trials with censored data}, volume={93}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/93.1.163}, abstractNote={Two-stage randomisation designs are useful in the evaluation of combination therapies where patients are initially randomised to an induction therapy and then, depending upon their response and consent, are randomised to a maintenance therapy. In this paper we derive the best regular asymptotically linear estimator for the survival distribution and related quantities of treatment regimes. 
We propose an estimator which is easily computable and is more efficient than existing estimators. Large-sample properties of the proposed estimator are derived and comparisons with other estimators are made using simulation. Copyright 2006, Oxford University Press.}, number={1}, journal={BIOMETRIKA}, author={Wahed, A. S. and Tsiatis, A. A.}, year={2006}, month={Mar}, pages={163–177} } @book{tsiatis_2006_book, title={Semiparametric theory and missing data}, ISBN={0387324488}, publisher={Springer}, address={New York}, author={Tsiatis, A. A.}, year={2006} } @article{lu_tsiatis_2006, title={Semiparametric transformation models for the case-cohort study}, volume={93}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/93.1.207}, abstractNote={A general class of semiparametric transformation models is studied for analysing survival data from the case-cohort design, which was introduced by Prentice (1986). Weighted estimating equations are proposed for simultaneous estimation of the regression parameters and the transformation function. It is shown that the resulting regression estimators are asymptotically normal, with variance-covariance matrix that has a closed form and can be consistently estimated by the usual plug-in method. Simulation studies show that the proposed approach is appropriate for practical use. An application to a case-cohort dataset from the Atherosclerosis Risk in Communities study is also given to illustrate the methodology. Copyright 2006, Oxford University Press.}, number={1}, journal={BIOMETRIKA}, author={Lu, W. B. and Tsiatis, A. A.}, year={2006}, month={Mar}, pages={207–214} } @article{lu_tsiatis_2005, title={Comparison between two partial likelihood approaches for the competing risks model with missing cause of failure}, volume={11}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-004-5638-0}, abstractNote={In many clinical studies where time to failure is of primary interest, patients may fail or die from one of many causes where failure time can be right censored. 
In some circumstances, it might also be the case that patients are known to die but the cause of death information is not available for some patients. Under the assumption that cause of death is missing at random, we compare the Goetghebeur and Ryan (1995, Biometrika, 82, 821-833) partial likelihood approach with the Dewanji (1992, Biometrika, 79, 855-857) partial likelihood approach. We show that the estimator for the regression coefficients based on the Dewanji partial likelihood is not only consistent and asymptotically normal, but also semiparametric efficient. While the Goetghebeur and Ryan estimator is more robust than the Dewanji partial likelihood estimator against misspecification of proportional baseline hazards, the Dewanji partial likelihood estimator allows the probability of missing cause of failure to depend on covariate information without the need to model the missingness mechanism. Tests for proportional baseline hazards are also suggested and a robust variance estimator is derived.}, number={1}, journal={LIFETIME DATA ANALYSIS}, author={Lu, K. F. and Tsiatis, A. A.}, year={2005}, month={Mar}, pages={29–40} } @article{reed_anstrom_bakhai_briggs_califf_cohen_drummond_glick_gnanasakthy_hlatky_et_al_2005, title={Conducting economic evaluations alongside multinational clinical trials: Toward a research consensus}, volume={149}, ISSN={["1097-5330"]}, DOI={10.1016/j.ahj.2004.11.001}, abstractNote={Demand for economic evaluations in multinational clinical trials is increasing, but there is little consensus about how such studies should be conducted and reported. At a workshop in Durham, North Carolina, we sought to identify areas of agreement about how the primary findings of economic evaluations in multinational clinical trials should be generated and presented. 
In this paper, we propose a framework for classifying multinational economic evaluations according to (a) the sources of an analyst's estimates of resource use and clinical effectiveness and (b) the analyst's method of estimating costs. We review existing studies in the cardiology literature in the context of the proposed framework. We then describe important methodological and practical considerations in conducting multinational economic evaluations and summarize the advantages and disadvantages of each approach. Finally, we describe opportunities for future research. Delineation of the various approaches to multinational economic evaluation may assist researchers, peer reviewers, journal editors, and decision makers in evaluating the strengths and limitations of particular studies.}, number={3}, journal={AMERICAN HEART JOURNAL}, author={Reed, SD and Anstrom, KJ and Bakhai, A and Briggs, AH and Califf, RM and Cohen, DJ and Drummond, MF and Glick, HA and Gnanasakthy, A and Hlatky, MA and et al.}, year={2005}, month={Mar}, pages={434–443} } @article{ma_genton_tsiatis_2005, title={Locally efficient semiparametric estimators for generalized skew-elliptical distributions}, volume={100}, ISSN={["0162-1459"]}, DOI={10.1198/016214505000000079}, abstractNote={We consider a class of generalized skew-normal distributions that is useful for selection modeling and robustness analysis and derive a class of semiparametric estimators for the location and scale parameters of the central part of the model. We show that these estimators are consistent and asymptotically normal. We present the semiparametric efficiency bound and derive the locally efficient estimator that achieves this bound if the model for the skewing function is correctly specified. The estimators that we propose are consistent and asymptotically normal even if the model for the skewing function is misspecified, and we compute the loss of efficiency in such cases. 
We conduct a simulation study and provide an illustrative example. Our method is applicable to generalized skew-elliptical distributions.}, number={471}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Ma, YY and Genton, MG and Tsiatis, AA}, year={2005}, month={Sep}, pages={980–989} } @article{allen_satten_tsiatis_2005, title={Locally-efficient robust estimation of haplotype-disease association in family-based studies}, volume={92}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/92.3.559}, abstractNote={Modelling human genetic variation is critical to understanding the genetic basis of complex disease. The Human Genome Project has discovered millions of binary DNA sequence variants, called single nucleotide polymorphisms, and millions more may exist. As coding for proteins takes place along chromosomes, organisation of polymorphisms along each chromosome, the haplotype phase structure, may prove to be most important in discovering genetic variants associated with disease. As haplotype phase is often uncertain, procedures that model the distribution of parental haplotypes can, if this distribution is misspecified, lead to substantial bias in parameter estimates even when complete genotype information is available. Using a geometric approach to estimation in the presence of nuisance parameters, we address this problem and develop locally-efficient estimators of the effect of haplotypes on disease that are robust to incorrect estimates of haplotype frequencies. The methods are demonstrated with a simulation study of a case-parent design. Copyright 2005, Oxford University Press.}, number={3}, journal={BIOMETRIKA}, author={Allen, AS and Satten, GA and Tsiatis, AA}, year={2005}, month={Sep}, pages={559–571} } @article{davidian_tsiatis_leon_2005, title={Semiparametric estimation of treatment effect in a pretest-posttest study with missing data}, volume={20}, number={3}, journal={Statistical Science}, author={Davidian, M. and Tsiatis, A. A. 
and Leon, S.}, year={2005}, pages={261–282} } @article{gao_tsiatis_2005, title={Semiparametric estimators for the regression coefficients in the linear transformation competing risks model with missing cause of failure}, volume={92}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/92.4.875}, abstractNote={SUMMARY We consider the problem of estimating the regression coefficients in a competing risks model, where the relationship between the cause-specific hazard for the cause of interest and covariates is described using linear transformation models and when cause of failure is missing at random for a subset of individuals. Using the theory of Robins et al. (1994) for missing data problems and the approach of Chen et al. (2002) for estimating regression coefficients for linear transformation models, we derive augmented inverse probability weighted complete-case estimators for the regression coefficients that are doubly robust. Simulations demonstrate the relevance of the theory in finite samples.}, number={4}, journal={BIOMETRIKA}, author={Gao, GZ and Tsiatis, AA}, year={2005}, month={Dec}, pages={875–891} } @article{johnson_tsiatis_2005, title={Semiparametric inference in observational duration-response studies, with duration possibly right-censored}, volume={92}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/92.3.605}, abstractNote={Once treatment is found to be effective in clinical studies, attention often focuses on optimum or efficacious treatment delivery. In treatment duration-response studies, the optimum treatment delivery refers to the treatment length that optimises the mean response. In many studies, the treatment length is often left to the discretion of an attending investigator or physician but may be abruptly terminated because of treatment-terminating events. 
Thus, a recommended treatment length often delineates a 'treatment duration policy' which prescribes that treatment be given for a specified length of time or until a treatment-terminating event occurs, whichever comes first. Estimating a functional relationship between the response and a treatment duration policy, continuously in time, is the focus of this paper. Copyright 2005, Oxford University Press.}, number={3}, journal={BIOMETRIKA}, author={Johnson, BA and Tsiatis, AA}, year={2005}, month={Sep}, pages={605–618} } @article{tsiatis_davidian_2005, title={Statistical issues arising in the Women's Health Initiative - Discussion}, volume={61}, ISSN={["1541-0420"]}, DOI={10.1111/j.0006-341X.2005.454_9.x}, abstractNote={Summary. A brief overview of the design of the Women’s Health Initiative (WHI) clinical trial and observational study is provided along with a summary of results from the postmenopausal hormone therapy clinical trial components. Since its inception in 1992, the WHI has encountered a number of statistical issues where further methodology developments are needed. These include measurement error modeling and analysis procedures for dietary and physical activity assessment; clinical trial monitoring methods when treatments may affect multiple clinical outcomes, either beneficially or adversely; study design and analysis procedures for high-dimensional genomic and proteomic data; and failure time data analysis procedures when treatment group hazard ratios are time dependent. This final topic seems important in resolving the discrepancy between WHI clinical trial and observational study results on postmenopausal hormone therapy and cardiovascular disease. Consistent evidence from over 40 epidemiologic studies demonstrates that postmenopausal women who use estrogen therapy after the menopause have significantly lower rates of heart disease than women who do not take estrogen ... 
the evidence clearly supports a clinically important protection against heart disease for postmenopausal women who use estrogen.}, number={4}, journal={BIOMETRICS}, author={Tsiatis, AA and Davidian, M}, year={2005}, month={Dec}, pages={933–935} } @article{pieper_tsiatis_davidian_hasselblad_kleiman_boersma_chang_griffin_armstrong_califf_et al._2004, title={Differential Treatment Benefit of Platelet Glycoprotein IIb/IIIa Inhibition With Percutaneous Coronary Intervention Versus Medical Therapy for Acute Coronary Syndromes}, volume={109}, ISSN={0009-7322 1524-4539}, url={http://dx.doi.org/10.1161/01.cir.0000112570.97220.89}, DOI={10.1161/01.cir.0000112570.97220.89}, abstractNote={ Background— Although many believe that platelet glycoprotein IIb/IIIa inhibitors should be used only in acute coronary syndrome patients undergoing percutaneous coronary intervention, supporting data from randomized clinical trials are tenuous. The assumption that these agents are useful only in conjunction with percutaneous coronary intervention is based primarily on inappropriate subgroup analyses performed across the glycoprotein IIb/IIIa inhibitor trials. }, number={5}, journal={Circulation}, publisher={Ovid Technologies (Wolters Kluwer Health)}, author={Pieper, Karen S. and Tsiatis, Anastasios A. and Davidian, Marie and Hasselblad, Vic and Kleiman, Neal S. and Boersma, Eric and Chang, Wei-Ching and Griffin, Jeffrey and Armstrong, Paul W. and Califf, Robert M. and et al.}, year={2004}, month={Feb}, pages={641–646} } @article{johnson_tsiatis_2004, title={Estimating mean response as a function of treatment duration in an observational study, where duration may be informatively censored}, volume={60}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2004.00175.x}, abstractNote={Summary.  After a treatment is found to be effective in a clinical study, attention often focuses on the effect of treatment duration on outcome. 
Such an analysis facilitates recommendations on the most beneficial treatment duration. In many studies, the treatment duration, within certain limits, is left to the discretion of the investigators. It is often the case that treatment must be terminated prematurely due to an adverse event, in which case a recommended treatment duration is part of a policy that treats patients for a specified length of time or until a treatment‐censoring event occurs, whichever comes first. Evaluating mean response for a particular treatment‐duration policy from observational data is difficult due to censoring and the fact that it may not be reasonable to assume patients are prognostically similar across all treatment strategies. We propose an estimator for mean response as a function of treatment‐duration policy under these conditions. The method uses potential outcomes and embodies assumptions that allow consistent estimation of the mean response. The estimator is evaluated through simulation studies and demonstrated by application to the ESPRIT infusion trial coordinated at Duke University Medical Center.}, number={2}, journal={BIOMETRICS}, author={Johnson, BA and Tsiatis, AA}, year={2004}, month={Jun}, pages={315–323} } @article{tsiatis_davidian_2004, title={Joint modeling of longitudinal and time-to-event data: An overview}, volume={14}, number={3}, journal={Statistica Sinica}, author={Tsiatis, A. A. and Davidian, M.}, year={2004}, pages={809–834} } @article{tsiatis_ma_2004, title={Locally efficient semiparametric estimators for functional measurement error models}, volume={91}, number={4}, journal={Biometrika}, author={Tsiatis, A. A. and Ma, Y. 
Y.}, year={2004}, pages={835–848} } @article{bodnar_davidian_siega-riz_tsiatis_2004, title={Marginal structural models for analyzing causal effects of time-dependent treatments: An application in perinatal epidemiology}, volume={159}, ISSN={["1476-6256"]}, DOI={10.1093/aje/kwh131}, abstractNote={Marginal structural models (MSMs) are causal models designed to adjust for time-dependent confounding in observational studies of time-varying treatments. MSMs are powerful tools for assessing causality with complicated, longitudinal data sets but have not been widely used by practitioners. The objective of this paper is to illustrate the fitting of an MSM for the causal effect of iron supplement use during pregnancy (time-varying treatment) on odds of anemia at delivery in the presence of time-dependent confounding. Data from pregnant women enrolled in the Iron Supplementation Study (Raleigh, North Carolina, 1997-1999) were used. The authors highlight complexities of MSMs and key issues epidemiologists should recognize before and while undertaking an analysis with these methods and show how such methods can be readily interpreted in existing software packages, including SAS and Stata. 
The authors emphasize that if a data set with rich information on confounders is available, MSMs can be used straightforwardly to make robust inferences about causal effects of time-dependent treatments/exposures in epidemiologic research.}, number={10}, journal={AMERICAN JOURNAL OF EPIDEMIOLOGY}, author={Bodnar, LM and Davidian, M and Siega-Riz, AM and Tsiatis, AA}, year={2004}, month={May}, pages={926–934} } @article{rebeiz_dery_tsiatis_oshea_johnson_hellkamp_pieper_gilchrist_slater_muhlestein_et al._2004, title={Optimal duration of eptifibatide infusion in percutaneous coronary intervention (an ESPRIT Substudy)}, volume={94}, ISSN={["1879-1913"]}, DOI={10.1016/j.amjcard.2004.06.030}, abstractNote={Although randomized trials have clearly demonstrated the clinical efficacy with regimens of platelet glycoprotein IIb/IIIa antagonists that result in >80% inhibition of baseline platelet aggregation in percutaneous coronary intervention (PCI), there are no data available concerning the optimal duration of infusion of these agents. In an era when the length of hospitalization has a major impact on health care costs, the determination of the optimal duration of the infusion of these drugs after PCI is of great relevance. 
The investigators therefore sought to determine the optimal length of the infusion of eptifibatide after PCI by analyzing the outcomes of patients enrolled in the Enhanced Suppression of the Platelet IIb/IIIa Receptor With Integrilin Therapy trial who were randomized to treatment with eptifibatide.}, number={7}, journal={AMERICAN JOURNAL OF CARDIOLOGY}, author={Rebeiz, AG and Dery, JP and Tsiatis, AA and O'Shea, JC and Johnson, BA and Hellkamp, AS and Pieper, KS and Gilchrist, IC and Slater, J and Muhlestein, JB and et al.}, year={2004}, month={Oct}, pages={926–929} } @article{wahed_tsiatis_2004, title={Optimal estimator for the survival distribution and related quantities for treatment policies in two-stage randomization designs in clinical trials}, volume={60}, ISSN={["1541-0420"]}, DOI={10.1111/j.0006-341X.2004.00160.x}, abstractNote={Summary.  Two‐stage designs, where patients are initially randomized to an induction therapy and then depending upon their response and consent, are randomized to a maintenance therapy, are common in cancer and other clinical trials. The goal is to compare different combinations of primary and maintenance therapies to find the combination that is most beneficial. In practice, the analysis is usually conducted in two separate stages which does not directly address the major objective of finding the best combination. Recently Lunceford, Davidian, and Tsiatis (2002, Biometrics58, 48–57) introduced ad hoc estimators for the survival distribution and mean restricted survival time under different treatment policies. These estimators are consistent but not efficient, and do not include information from auxiliary covariates. In this article we derive estimators that are easy to compute and are more efficient than previous estimators. We also show how to improve efficiency further by taking into account additional information from auxiliary variables. 
Large sample properties of these estimators are derived and comparisons with other estimators are made using simulation. We apply our estimators to a leukemia clinical trial data set that motivated this study.}, number={1}, journal={BIOMETRICS}, author={Wahed, AS and Tsiatis, AA}, year={2004}, month={Mar}, pages={124–133} } @article{sachdev_sun_tsiatis_2004, title={The prognostic importance of comorbidity for mortality in patients with stable coronary artery disease}, volume={13}, ISSN={1062-1458}, url={http://dx.doi.org/10.1016/j.accreview.2004.03.077}, DOI={10.1016/j.accreview.2004.03.077}, number={4}, journal={ACC Current Journal Review}, publisher={Elsevier BV}, author={Sachdev, M. and Sun, J.L. and Tsiatis, A.A.}, year={2004}, month={Apr}, pages={11–12} } @article{sachdev_sun_tsiatis_nelson_mark_jollis_2004, title={The prognostic importance of comorbidity for mortality in patients with stable coronary artery disease}, volume={43}, ISSN={0735-1097}, url={http://dx.doi.org/10.1016/j.jacc.2003.10.031}, DOI={10.1016/j.jacc.2003.10.031}, abstractNote={To identify the prevalent and prognostically important coexisting illnesses among single coronary artery disease (CAD) patients.As the population ages, physicians are increasingly required to make decisions concerning patients with multiple co-existing illnesses (comorbidity). Many trials of CAD therapy have excluded patients with significant comorbidity, such that there are limited data to guide the management of those patients.To consider the long-term prognostic importance of comorbid illness, we examined a cohort of 1471 patients with CAD who underwent cardiac catheterization between 1985 and 1989 and were followed up through 2000 in the Duke Databank for Cardiovascular Diseases. Weights were assigned to individual diseases according to their prognostic significance in Cox proportional hazards models, thus creating a new CAD-specific index. 
The new index was compared with the widely used Charlson index, according to prevalence of conditions, individual and overall associations with survival, and agreement.The Charlson index and the CAD-specific index were highly associated with long-term survival and almost equivalent to left ventricular ejection fraction. When considering the components of the Charlson index, diabetes, renal insufficiency, chronic obstructive pulmonary disease, and peripheral vascular disease had greater prognostic significance among CAD patients, whereas peptic ulcer disease, connective tissue disease, and lymphoma were less significant. Hemiplegia, leukemia, lymphoma, severe liver disease, and acquired immunodeficiency syndrome were rarely identified among patients undergoing coronary angiography.Comorbid disease is strongly associated with long-term survival in patients with CAD. These data suggest co-existing illnesses should be measured and considered in clinical trials, disease registries, quality comparisons, and counseling of individual patients.}, number={4}, journal={Journal of the American College of Cardiology}, publisher={Elsevier BV}, author={Sachdev, Molly and Sun, Jie Lena and Tsiatis, Anastasios A. and Nelson, Charlotte L. and Mark, Daniel B. and Jollis, James G.}, year={2004}, month={Feb}, pages={576–582} } @article{tsiatis_mehta_2003, title={On the inefficiency of the adaptive design for monitoring clinical trials}, volume={90}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/90.2.367}, abstractNote={Adaptive designs, which allow the sample size to be modified based on sequentially computed observed treatment differences, have been advocated recently for monitoring clinical trials. Although such methods have a great deal of appeal on the surface, we show that such methods are inefficient and that one can improve uniformly on such adaptive designs using standard group-sequential tests based on the sequentially computed likelihood ratio test statistic. 
Copyright Biometrika Trust 2003, Oxford University Press.}, number={2}, journal={BIOMETRIKA}, author={Tsiatis, AA and Mehta, C}, year={2003}, month={Jun}, pages={367–378} } @article{leon_tsiatis_davidian_2003, title={Semiparametric estimation of treatment effect in a pretest-posttest study}, volume={59}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2003.00120.x}, abstractNote={Summary.  Inference on treatment effects in a pretest‐posttest study is a routine objective in medicine, public health, and other fields. A number of approaches have been advocated. We take a semiparametric perspective, making no assumptions about the distributions of baseline and posttest responses. By representing the situation in terms of counterfactual random variables, we exploit recent developments in the literature on missing data and causal inference, to derive the class of all consistent treatment effect estimators, identify the most efficient such estimator, and outline strategies for implementation of estimators that may improve on popular methods. We demonstrate the methods and their properties via simulation and by application to a data set from an HIV clinical trial.}, number={4}, journal={BIOMETRICS}, author={Leon, S and Tsiatis, AA and Davidian, M}, year={2003}, month={Dec}, pages={1046–1055} } @article{song_davidian_tsiatis_2002, title={A semiparametric likelihood approach to joint modeling of longitudinal and time-to-event data}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00742.x}, abstractNote={Summary. Joint models for a time‐to‐event (e.g., survival) and a longitudinal response have generated considerable recent interest. The longitudinal data are assumed to follow a mixed effects model, and a proportional hazards model depending on the longitudinal random effects and other covariates is assumed for the survival endpoint. 
Interest may focus on inference on the longitudinal data process, which is informatively censored, or on the hazard relationship. Several methods for fitting such models have been proposed, most requiring a parametric distributional assumption (normality) on the random effects. A natural concern is sensitivity to violation of this assumption; moreover, a restrictive distributional assumption may obscure key features in the data. We investigate these issues through our proposal of a likelihood‐based approach that requires only the assumption that the random effects have a smooth density. Implementation via the EM algorithm is described, and performance and the benefits for uncovering noteworthy features are illustrated by application to data from an HIV clinical trial and by simulation.}, number={4}, journal={BIOMETRICS}, author={Song, X and Davidian, M and Tsiatis, AA}, year={2002}, month={Dec}, pages={742–753} } @article{song_davidian_tsiatis_2002a, title={An estimator for the proportional hazards model with multiple longitudinal covariates measured with error}, volume={3}, number={4}, journal={Biostatistics (Oxford, England)}, author={Song, X. A. and Davidian, M. and Tsiatis, A. A.}, year={2002}, pages={511–528} } @article{skyler_brown_chase_collier_cowie_eisenbarth_fradkin_grave_greenbaum_jackson_et al._2002, title={Effects of insulin in relatives of patients with type 1 diabetes mellitus}, volume={346}, number={22}, journal={New England Journal of Medicine}, author={Skyler, J. S. and Brown, D. and Chase, H. P. and Collier, E. and Cowie, C. and Eisenbarth, G. S. and Fradkin, J. and Grave, G. and Greenbaum, C. and Jackson, R. A. and et al.}, year={2002}, pages={1685–1691} } @article{lunceford_davidian_tsiatis_2002, title={Estimation of survival distributions of treatment policies in two-stage randomization designs in clinical trials}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00048.x}, abstractNote={Summary. 
Some clinical trials follow a design where patients are randomized to a primary therapy at entry followed by another randomization to maintenance therapy contingent upon disease remission. Ideally, analysis would allow different treatment policies, i.e., combinations of primary and maintenance therapy if specified up‐front, to be compared. Standard practice is to conduct separate analyses for the primary and follow‐up treatments, which does not address this issue directly. We propose consistent estimators for the survival distribution and mean restricted survival time for each treatment policy in such two‐stage studies and derive large‐sample properties. The methods are demonstrated on a leukemia clinical trial data set and through simulation.}, number={1}, journal={BIOMETRICS}, author={Lunceford, JK and Davidian, M and Tsiatis, AA}, year={2002}, month={Mar}, pages={48–57} } @article{bang_tsiatis_2002, title={Median regression with censored cost data}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00643.x}, abstractNote={Summary. Because of the skewness of the distribution of medical costs, we consider modeling the median as well as other quantiles when establishing regression relationships to covariates. In many applications, the medical cost data are also right censored. In this article, we propose semiparametric procedures for estimating the parameters in median regression models based on weighted estimating equations when censoring is present. Numerical studies are conducted to show that our estimators perform well with small samples and the resulting inference is reliable in circumstances of practical importance. 
The methods are applied to a dataset for medical costs of patients with colorectal cancer.}, number={3}, journal={BIOMETRICS}, author={Bang, H and Tsiatis, AA}, year={2002}, month={Sep}, pages={643–649} } @article{tsiatis_davidian_mcneney_2002, title={Multiple imputation methods for testing treatment differences in survival distributions with missing cause of failure}, volume={89}, number={1}, journal={Biometrika}, author={Tsiatis, A. A. and Davidian, M. and Mcneney, B.}, year={2002}, pages={238–244} } @article{tsiatis_davidian_2001, title={A semiparametric estimator for the proportional hazards model with longitudinal covariates measured with error}, volume={88}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/88.2.447}, abstractNote={SUMMARY A common objective in longitudinal studies is to characterise the relationship between a failure time process and time-independent and time-dependent covariates. Timedependent covariates are generally available as longitudinal data collected periodically during the course of the study. We assume that these data follow a linear mixed effects model with normal measurement error and that the hazard of failure depends both on the underlying random effects describing the covariate process and other time-independent covariates through a proportional hazards relationship. A routine assumption is that the random effects are normally distributed; however, this need not hold in practice. Within this framework, we develop a simple method for estimating the proportional hazards model parameters that requires no assumption on the distribution of the random effects. 
Large-sample properties are discussed, and finite-sample performance is assessed and compared to competing methods via simulation.}, number={2}, journal={BIOMETRIKA}, author={Tsiatis, AA and Davidian, M}, year={2001}, month={Jun}, pages={447–458} } @article{chen_tsiatis_2001, title={Causal inference on the difference of the restricted mean lifetime between two groups}, volume={57}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2001.01030.x}, abstractNote={Summary. When comparing survival times between two treatment groups, it may be more appropriate to compare the restricted mean lifetime, i.e., the expectation of lifetime restricted to a time L, rather than mean lifetime in order to accommodate censoring. When the treatments are not assigned to patients randomly, as in observational studies, we also need to account for treatment imbalances in confounding factors. In this article, we propose estimators for the difference of the restricted mean lifetime between two groups that account for treatment imbalances in prognostic factors assuming a proportional hazards relationship. Large‐sample properties of our estimators based on martingale theory for counting processes are also derived. Simulation studies were conducted to compare these estimators and to assess the adequacy of the large‐sample approximations. 
Our methods are also applied to an observational database of acute coronary syndrome patients from Duke University Medical Center to estimate the treatment effect on the restricted mean lifetime over 5 years.}, number={4}, journal={BIOMETRICS}, author={Chen, PY and Tsiatis, AA}, year={2001}, month={Dec}, pages={1030–1038} } @article{betensky_rabinowitz_tsiatis_2001, title={Computationally simple accelerated failure time regression for interval censored data}, volume={88}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/88.3.703}, abstractNote={SUMMARY An approach is presented for fitting the accelerated failure time model to interval censored data that does not involve computing the nonparametric maximum likelihood estimate of the distribution function at the residuals. The approach involves estimating equations computed with the examination times from the same individual treated as if they had actually been obtained from different individuals. The dependence between different measurements obtained from the same individual is then accounted for in the calculation of the standard error of the regression coefficients. The approach is applicable to interval censored data in settings in which examinations continue to occur regardless of whether the failure time has occurred. 
Simulations are presented to assess the behaviour of the approach, and the methodology is illustrated through an application to data from an AIDS clinical trial.}, number={3}, journal={BIOMETRIKA}, author={Betensky, RA and Rabinowitz, D and Tsiatis, AA}, year={2001}, month={Sep}, pages={703–711} } @article{yang_tsiatis_2001, title={Efficiency study of estimators for a treatment effect in a pretest-posttest trial}, volume={55}, ISSN={["0003-1305"]}, DOI={10.1198/000313001753272466}, abstractNote={Several possible methods used to evaluate treatment effects in a randomized pretest–posttest trial with two treatment groups are the two-sample t test, the paired t test, analysis of covariance I (ANCOVA I), the analysis of covariance II (ANCOVA II), and generalized estimating equations (GEE). The ANCOVA I includes treatment and baseline response as covariates in a linear model and ANCOVA II additionally includes an interaction term between the baseline response and treatment indicator as a covariate. The parameters in the ANCOVAI and ANCOVAII models are generally estimated using ordinary least squares. In this article, a semiparametric model, which makes no assumptions about the response distributions, is used. The asymptotic properties of the estimators derived from these five methods and their relative efficiencies are discussed under this semiparametric model. We show that all these methods yield consistent estimators for the treatment effect which have asymptotically normal distributions under the semiparametric model. The GEE and the ANCOVA II estimators are asymptotically equivalent and the most efficient. 
The estimators from other three methods are less efficient except under some special conditions which are outlined in the article.}, number={4}, journal={AMERICAN STATISTICIAN}, author={Yang, L and Tsiatis, AA}, year={2001}, month={Nov}, pages={314–321} } @article{mehta_tsiatis_2001, title={Flexible sample size considerations using information-based interim monitoring}, volume={35}, ISSN={["0092-8615"]}, DOI={10.1177/009286150103500407}, abstractNote={At the design phase of a clinical trial the total number of participants needed to detect a clinically important treatment difference with sufficient precision frequently depends on nuisance parameters such as variance, baseline response rate, or regression coefficients other than the main effect. In practical applications, nuisance parameter values are often unreliable guesses founded on little or no available past history. Sample size calculations based on these initial guesses may, therefore, lead to under- or over-powered studies. In this paper, we argue that the precision with which a treatment effect is estimated is directly related to the statistical information in the data. In general, statistical information is a complicated function of sample size and nuisance parameters. However, the amount of information necessary to answer the scientific question concerning treatment difference is easily calculated a priori and applies to almost any statistical model for a large variety of endpoints. It is thus possible to be flexible on sample size but rather continue collecting data until we have achieved the desired information. Such a strategy is well suited to being adopted in conjunction with a group sequential clinical trial where the data are monitored routinely anyway. 
We present several scenarios and examples of how group sequential information-based design and monitoring can be carried out and demonstrate through simulations that this type of strategy will indeed give us the desired operating characteristics.}, number={4}, journal={DRUG INFORMATION JOURNAL}, author={Mehta, CR and Tsiatis, AA}, year={2001}, pages={1095--1112} } @article{pampallona_tsiatis_kim_2001, title={Interim monitoring of group sequential trials using spending functions for the type I and type II error probabilities}, volume={35}, ISSN={0092-8615}, DOI={10.1177/009286150103500408}, abstractNote={Lan and DeMets (1) introduced a flexible procedure for the analysis of sequential trials based on the discretization of the Brownian motion. In this paper, we consider an extension of this strategy that preserves both the desired significance level and the power of any group sequential trial. We propose a procedure that allows for any number and timing of interim analyses. This entails the derivation of boundaries at the monitoring stage by means of two spending functions, one for the type I and one for the type II error probabilities, as well as the adjustment of the target maximum information as the trial progresses. The general solution to the problem is provided together with a discussion of implementation strategies. The procedure is intended for group sequential designs that allow early stopping in favor of both the null and the alternative hypotheses, and an example is presented for this case. 
However, its application is also easily extended for designs where there is no early stopping in favor of the null.}, number={4}, journal={DRUG INFORMATION JOURNAL}, author={Pampallona, S and Tsiatis, A and Kim, K}, year={2001}, pages={1113--1121} } @article{lu_tsiatis_2001, title={Multiple imputation methods for estimating regression coefficients in the competing risks model with missing cause of failure}, volume={57}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2001.01191.x}, abstractNote={Summary. We propose a method to estimate the regression coefficients in a competing risks model where the cause‐specific hazard for the cause of interest is related to covariates through a proportional hazards relationship and when cause of failure is missing for some individuals. We use multiple imputation procedures to impute missing cause of failure, where the probability that a missing cause is the cause of interest may depend on auxiliary covariates, and combine the maximum partial likelihood estimators computed from several imputed data sets into an estimator that is consistent and asymptotically normal. A consistent estimator for the asymptotic variance is also derived. Simulation results suggest the relevance of the theory in finite samples. Results are also illustrated with data from a breast cancer study.}, number={4}, journal={BIOMETRICS}, author={Lu, KF and Tsiatis, AA}, year={2001}, month=dec, pages={1191--1197} } @article{zhao_tsiatis_2001, title={Testing equality of survival functions of quality-adjusted lifetime}, volume={57}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2001.00861.x}, abstractNote={Summary. We present a method for comparing the survival functions of quality‐adjusted lifetime from two treatments. This test statistic becomes the ordinary log‐rank test when quality‐adjusted lifetime is the same as the survival time. 
Simulation experiments are conducted to examine the behavior of our proposed test statistic under both null and alternative hypotheses. In addition, we apply our method to a breast cancer trial for comparing the distribution of quality‐adjusted lifetime between two treatment regimes.}, number={3}, journal={BIOMETRICS}, author={Zhao, HW and Tsiatis, AA}, year={2001}, month=sep, pages={861--867} } @article{murray_tsiatis_2001, title={Using auxiliary time-dependent covariates to recover information in nonparametric testing with censored data}, volume={7}, ISSN={1380-7870}, DOI={10.1023/A:1011392622173}, abstractNote={Murray and Tsiatis (1996) described a weighted survival estimate that incorporates prognostic time-dependent covariate information to increase the efficiency of estimation. We propose a test statistic based on the statistic of Pepe and Fleming (1989, 1991) that incorporates these weighted survival estimates. As in Pepe and Fleming, the test is an integrated weighted difference of two estimated survival curves. This test has been shown to be effective at detecting survival differences in crossing hazards settings where the logrank test performs poorly. This method uses stratified longitudinal covariate information to get more precise estimates of the underlying survival curves when there is censored information and this leads to more powerful tests. Another important feature of the test is that it remains valid when informative censoring is captured by the incorporated covariate. In this case, the Pepe-Fleming statistic is known to be biased and should not be used. 
These methods could be useful in clinical trials with heavy censoring that include collection over time of covariates, such as laboratory measurements, that are prognostic of subsequent survival or capture information related to censoring.}, number={2}, journal={LIFETIME DATA ANALYSIS}, author={Murray, S and Tsiatis, AA}, year={2001}, pages={125--141} } @article{anstrom_tsiatis_2001, title={Utilizing propensity scores to estimate causal treatment effects with censored time-lagged data}, volume={57}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2001.01207.x}, abstractNote={Summary. Observational studies frequently are conducted to compare long‐term effects of treatments. Without randomization, patients receiving one treatment are not guaranteed to be prognostically comparable to those receiving another treatment. Furthermore, the response of interest may be right‐censored because of incomplete follow‐up. Statistical methods that do not account for censoring and confounding may lead to biased estimates. This article presents a method for estimating treatment effects in nonrandomized studies with right‐censored responses. We review the assumptions required to estimate average causal effects and derive an estimator for comparing two treatments by applying inverse weights to the complete cases. The weights are determined according to the estimated probability of receiving treatment conditional on covariates and the estimated treatment‐specific censoring distribution. By utilizing martingale representations, the estimator is shown to be asymptotically normal and an estimator for the asymptotic variance is derived. Simulation results are presented to evaluate the properties of the estimator. 
These methods are applied to an observational data set of acute coronary syndrome patients from Duke University Medical Center to estimate the effect of a treatment strategy on the mean 5‐year medical cost.}, number={4}, journal={BIOMETRICS}, author={Anstrom, KJ and Tsiatis, AA}, year={2001}, month=dec, pages={1207--1218} } @article{mark_harrington_lincoff_califf_nelson_tsiatis_buell_mahaffey_davidson-ray_topol_2000, title={Cost-effectiveness of platelet glycoprotein IIb/IIIa inhibition with eptifibatide in patients with non-ST-elevation acute coronary syndromes}, volume={101}, ISSN={0009-7322}, DOI={10.1161/01.cir.101.4.366}, abstractNote={ Background —In the PURSUIT trial, eptifibatide significantly reduced the 30-day incidence of death and myocardial infarction relative to placebo in 9461 patients with an acute coronary syndrome (unstable angina or non–Q-wave myocardial infarction). }, number={4}, journal={CIRCULATION}, author={Mark, DB and Harrington, RA and Lincoff, AM and Califf, RM and Nelson, CL and Tsiatis, AA and Buell, H and Mahaffey, KW and Davidson-Ray, L and Topol, EJ}, year={2000}, month=feb, pages={366--371} } @article{bang_tsiatis_2000, title={Estimating medical costs with censored data}, volume={87}, ISSN={0006-3444}, DOI={10.1093/biomet/87.2.329}, abstractNote={Incompleteness of follow-up data is a common problem in estimating medical costs. Naive analysis using summary statistics on the collected data can result in severely misleading statistical inference. This paper focuses on the problem of estimating the mean medical cost from a sample of individuals whose medical costs may be right censored. A class of weighted estimators which account appropriately for censoring are introduced. Our estimators are shown to be consistent and asymptotically normal with easily estimated variances. The efficiency of these estimators is studied with the goal of finding as efficient an estimator for the mean medical cost as is feasible. 
Extensive simulation studies are used to show that our estimators perform well in finite samples, even with heavily censored data, for a variety of circumstances. The methods are applied to a set of cost data from a cardiology trial conducted by the Duke University Medical Center. Extensions to other censored data problems are also discussed.}, number={2}, journal={BIOMETRIKA}, author={Bang, H and Tsiatis, AA}, year={2000}, month=jun, pages={329--343} } @article{tsiatis_2000, title={Estimating the distribution of quality-adjusted life with censored data}, volume={139}, ISSN={0002-8703}, DOI={10.1016/S0002-8703(00)90068-1}, number={4}, journal={AMERICAN HEART JOURNAL}, author={Tsiatis, AA}, year={2000}, month=apr, pages={S177--S181} } @article{babiker_bartlett_breckenridge_collins_coombs_cooper_creagh_cross_daniels_darbyshire_et_al._2000, title={Human immunodeficiency virus type 1 RNA level and CD4 count as prognostic markers and surrogate end points: A meta-analysis}, volume={16}, number={12}, journal={AIDS Research and Human Retroviruses}, author={Babiker, A. and Bartlett, J. and Breckenridge, A. and Collins, G. and Coombs, R. and Cooper, D. and Creagh, T. and Cross, A. and Daniels, M. and Darbyshire, J. and others}, year={2000}, pages={1123--1133} } @article{fine_tsiatis_2000, title={Testing for differences in survival with delayed ascertainment}, volume={56}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2000.00145.x}, abstractNote={Summary. During the interim stages of most large‐scale clinical trials, knowledge that a patient is alive or dead is usually not up‐to‐date. This is due to the pattern of patient visits to hospitals as well as the administrative set‐up used by the study to obtain information on vital status. On a two‐armed study, if the process of ascertaining vital status is not the same in both treatment groups, then the standard method of testing based on the logrank statistic may not be applicable. 
Instead, an ad hoc modification to the logrank test, which artificially truncates follow‐up prior to the time of analysis, is often used. These approaches have not been formally addressed in the literature. In the early stages of a clinical trial, severe bias or loss of power may result. For this situation, we propose a class of test statistics that extends the usual class of U statistics. Asymptotic normality is derived by reformulating the statistics in terms of counting processes and employing the theory of U statistics along with martingale techniques. For early interim analyses, a numerical study indicates that the new tests can be more powerful than the current practice when differential ascertainment is present. To illustrate the potential loss of information when lagging follow‐up to control for ascertainment delays, we reanalyze an AIDS clinical trial with the truncated logrank and the new statistics.}, number={1}, journal={BIOMETRICS}, author={Fine, JP and Tsiatis, AA}, year={2000}, month=mar, pages={145--153} } @article{rabinowitz_betensky_tsiatis_2000, title={Using conditional logistic regression to fit proportional odds models to interval censored data}, volume={56}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2000.00511.x}, abstractNote={Summary. An easily implemented approach to fitting the proportional odds regression model to interval‐censored data is presented. The approach is based on using conditional logistic regression routines in standard statistical packages. Using conditional logistic regression allows the practitioner to sidestep complications that attend estimation of the baseline odds ratio function. The approach is applicable both for interval‐censored data in settings in which examinations continue regardless of whether the event of interest has occurred and for current status data. 
The methodology is illustrated through an application to data from an AIDS study of the effect of treatment with ZDV + ddC versus ZDV alone on 50% drop in CD4 cell count from baseline level. Simulations are presented to assess the accuracy of the procedure.}, number={2}, journal={BIOMETRICS}, author={Rabinowitz, D and Betensky, RA and Tsiatis, AA}, year={2000}, month=jun, pages={511--518} } @article{zhao_tsiatis_1999, title={Efficient estimation of the distribution of quality-adjusted survival time}, volume={55}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.1999.01101.x}, abstractNote={Summary. Quality of life is an important aspect in evaluation of clinical trials of chronic diseases, such as cancer and AIDS. Quality‐adjusted survival analysis is a method that combines both the quantity and quality of a patient's life into one single measure. In this paper, we discuss the efficiency of weighted estimators for the distribution of quality‐adjusted survival time. Using the general representation theorem for missing data processes, we are able to derive an estimator that is more efficient than the one proposed in Zhao and Tsiatis (1997, Biometrika 84, 339–348). Simulation experiments are conducted to assess the small sample properties of this estimator and to compare it with the semiparametric efficiency bound. The value of this estimator is demonstrated from an application of the method to a data set obtained from a breast cancer clinical trial.}, number={4}, journal={BIOMETRICS}, author={Zhao, HW and Tsiatis, AA}, year={1999}, month=dec, pages={1101--1107} } @article{hu_tsiatis_davidian_1998, title={Estimating the parameters in the Cox model when covariate variables are measured with error}, volume={54}, ISSN={0006-341X}, DOI={10.2307/2533667}, abstractNote={The Cox proportional hazards model is commonly used to model survival data as a function of covariates. 
Because of the measuring mechanism or the nature of the environment, covariates are often measured with error and are not directly observable. A naive approach is to use the observed values of the covariates in the Cox model, which usually produces biased estimates of the true association of interest. An alternative strategy is to take into account the error in measurement, which may be carried out for the Cox model in a number of ways. We examine several such approaches and compare and contrast them through several simulation studies. We introduce a likelihood-based approach, which we refer to as the semiparametric method, and show that this method is an appealing alternative. The methods are applied to analyze the relationship between survival and CD4 count in patients with AIDS.}, number={4}, journal={BIOMETRICS}, author={Hu, P and Tsiatis, AA and Davidian, M}, year={1998}, month=dec, pages={1407--1419} } @article{scharfstein_tsiatis_gilbert_1998, title={Semiparametric efficient estimation in the generalized odds-rate class of regression models for right-censored time-to-event data}, volume={4}, DOI={10.1023/A:1009634103154}, abstractNote={The generalized odds-rate class of regression models for time to event data is indexed by a non-negative constant rho and assumes that [formula: see text] where g: rho(s) = log(rho-1(s-rho - 1)) for rho > 0, g0(s) = log(-logs), S(t[symbol: see text]Z) is the survival function of the time to event for an individual with q x 1 covariate vector Z, beta is a q x 1 vector of unknown regression parameters, and alpha(t) is some arbitrary increasing function of t. When rho = 0, this model is equivalent to the proportional hazards model and when rho = 1, this model reduces to the proportional odds model. In the presence of right censoring, we construct estimators for beta and exp(alpha(t)) and show that they are consistent and asymptotically normal. 
In addition, we show that the estimator for beta is semiparametric efficient in the sense that it attains the semiparametric variance bound.}, number={4}, journal={Lifetime Data Analysis}, author={Scharfstein, D. O. and Tsiatis, A. A. and Gilbert, P. B.}, year={1998}, pages={355--391} } @article{scharfstein_tsiatis_1998, title={Use of simulation and bootstrap in information-based group sequential studies}, volume={17}, DOI={10.1002/(SICI)1097-0258(19980115)17:1<75::AID-SIM731>3.0.CO;2-N}, abstractNote={In this paper, we present an information-based design and monitoring procedure which applies to any type of model for any type of group sequential study provided there is a unique parameter of interest one can estimate efficiently. Simulation techniques are described to handle the design phase of this procedure. Since designs depend on potentially unreliable guesses of nuisance parameters, we propose a bootstrap method that uses the information available at the interim analysis times to generate projections and prediction intervals for the time at which the study will be fully powered. A monitoring board can use this information to decide whether a redesign of the trial is warranted. We also show how to use simulation to redesign studies in progress. We illustrate all of these techniques with data from AIDS Clinical Trial Group Protocol 021.}, number={1}, journal={Statistics in Medicine}, author={Scharfstein, D. O. and Tsiatis, A. A.}, year={1998}, pages={75--87} } @article{zhao_tsiatis_1997, title={A consistent estimator for the distribution of quality adjusted survival time}, volume={84}, ISSN={0006-3444}, DOI={10.1093/biomet/84.2.339}, abstractNote={SUMMARY Quality adjusted survival analysis is a new approach to therapy evaluation in clinical trials. It has received much attention recently because of its ability to take patients' quality of life into consideration. 
In this paper, we present a method that enables us to calculate the survival distribution of quality adjusted lifetime. Using martingale theory for counting processes, we can show that our estimator is asymptotically consistent, normally distributed, and its asymptotic variance estimate can be obtained analytically. Simulation experiments are conducted to compare our estimator with the true underlying distribution for two cases that are of practical importance.}, number={2}, journal={BIOMETRIKA}, author={Zhao, HW and Tsiatis, AA}, year={1997}, month=jun, pages={339--348} } @article{wulfsohn_tsiatis_1997, title={A joint model for survival and longitudinal data measured with error}, volume={53}, ISSN={0006-341X}, DOI={10.2307/2533118}, abstractNote={The relationship between a longitudinal covariate and a failure time process can be assessed using the Cox proportional hazards regression model. We consider the problem of estimating the parameters in the Cox model when the longitudinal covariate is measured infrequently and with measurement error. We assume a repeated measures random effects model for the covariate process. Estimates of the parameters are obtained by maximizing the joint likelihood for the covariate process and the failure time process. This approach uses the available information optimally because we use both the covariate and survival data simultaneously. Parameters are estimated using the expectation-maximization algorithm. We argue that such a method is superior to naive methods where one maximizes the partial likelihood of the Cox model using the observed covariate values. 
It also improves on two-stage methods where, in the first stage, empirical Bayes estimates of the covariate process are computed and then used as time-dependent covariates in a second stage to find the parameters in the Cox model that maximize the partial likelihood.}, number={1}, journal={BIOMETRICS}, author={Wulfsohn, MS and Tsiatis, AA}, year={1997}, month=mar, pages={330--339} } @article{scharfstein_tsiatis_robins_1997, title={Semiparametric efficiency and its implication on the design and analysis of group-sequential studies}, volume={92}, ISSN={0162-1459}, DOI={10.2307/2965404}, abstractNote={Abstract Authors have shown that the time-sequential joint distributions of many statistics used to analyze data arising from group-sequential time-to-event and longitudinal studies are multivariate normal with an independent increments covariance structure. In Theorem 1 of this article, we demonstrate that this limiting distribution arises naturally when one uses an efficient test statistic to test a single parameter in a semiparametric or parametric model. Because we are able to think of many of the statistics in the literature in this fashion, the limiting distribution under investigation is just a special case of Theorem 1. Using this general structure, we then develop an information-based design and monitoring procedure that can be applied to any type of model for any type of group-sequential study provided that there is a unique parameter of interest that can be efficiently tested.}, number={440}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Scharfstein, DO and Tsiatis, AA and Robins, JM}, year={1997}, month=dec, pages={1342--1350} }