@article{tsiatis_davidian_2024, title={A generalized logrank-type test for comparison of treatment regimes in sequential multiple assignment randomized trials}, volume={80}, ISSN={["1541-0420"]}, DOI={10.1093/biomtc/ujae139}, abstractNote={ABSTRACT The sequential multiple assignment randomized trial (SMART) is the ideal study design for the evaluation of multistage treatment regimes, which comprise sequential decision rules that recommend treatments for a patient at each of a series of decision points based on their evolving characteristics. A common goal is to compare the set of so-called embedded regimes represented in the design on the basis of a primary outcome of interest. In the study of chronic diseases and disorders, this outcome is often a time to an event, and a goal is to compare the distributions of the time-to-event outcome associated with each regime in the set. We present a general statistical framework in which we develop a logrank-type test for comparison of the survival distributions associated with regimes within a specified set based on the data from a SMART with an arbitrary number of stages that allows incorporation of covariate information to enhance efficiency and can also be used with data from an observational study. The framework provides clarification of the assumptions required to yield a principled test procedure, and the proposed test subsumes or offers an improved alternative to existing methods. We demonstrate performance of the methods in a suite of simulation studies. The methods are applied to a SMART in patients with acute promyelocytic leukemia.}, number={4}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. 
and Davidian, Marie}, year={2024}, month={Nov} } @article{tsiatis_davidian_2022, title={Group sequential methods for interim monitoring of randomized clinical trials with time-lagged outcome}, volume={9}, ISSN={["1097-0258"]}, DOI={10.1002/sim.9580}, abstractNote={The primary analysis in two‐arm clinical trials usually involves inference on a scalar treatment effect parameter; for example, depending on the outcome, the difference of treatment‐specific means, risk difference, risk ratio, or odds ratio. Most clinical trials are monitored for the possibility of early stopping. Because ordinarily the outcome on any given subject can be ascertained only after some time lag, at the time of an interim analysis, among the subjects already enrolled, the outcome is known for only a subset and is effectively censored for those who have not been enrolled sufficiently long for it to be observed. Typically, the interim analysis is based only on the data from subjects for whom the outcome has been ascertained. A goal of an interim analysis is to stop the trial as soon as the evidence is strong enough to do so, suggesting that the analysis ideally should make the most efficient use of all available data, thus including information on censoring as well as other baseline and time‐dependent covariates in a principled way. A general group sequential framework is proposed for clinical trials with a time‐lagged outcome. Treatment effect estimators that take account of censoring and incorporate covariate information at an interim analysis are derived using semiparametric theory and are demonstrated to lead to stronger evidence for early stopping than standard approaches. The associated test statistics are shown to have the independent increments structure, so that standard software can be used to obtain stopping boundaries.}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A. 
and Davidian, Marie}, year={2022}, month=sep } @article{joffe_babiker_ellenberg_fix_griffin_hunsberger_kalil_levine_makgoba_moore_etal_2021, title={Data and Safety Monitoring of COVID-19 Vaccine Clinical Trials}, volume={224}, ISSN={1537-6613}, DOI={10.1093/infdis/jiab263}, abstractNote={To speed the development of vaccines against SARS-CoV-2, the United States Federal Government has funded multiple phase 3 trials of candidate vaccines. A single 11-member data and safety monitoring board (DSMB) monitors all government-funded trials to ensure coordinated oversight, promote harmonized designs, and allow shared insights related to safety across trials. DSMB reviews encompass 3 domains: (1) the conduct of trials, including overall and subgroup accrual and data quality and completeness; (2) safety, including individual events of concern and comparisons by randomized group; and (3) interim analyses of efficacy when event-driven milestones are met. Challenges have included the scale and pace of the trials, the frequency of safety events related to the combined enrollment of over 100 000 participants, many of whom are older adults or have comorbid conditions that place them at independent risk of serious health events, and the politicized environment in which the trials have taken place.}, number={12}, journal={JOURNAL OF INFECTIOUS DISEASES}, author={Joffe, Steven and Babiker, Abdel and Ellenberg, Susan S. and Fix, Alan and Griffin, Marie R. and Hunsberger, Sally and Kalil, Jorge and Levine, Myron M. and Makgoba, Malegapuru W. and Moore, Renee H.
and others}, year={2021}, month=dec, pages={1995--2000} } @article{tsiatis_davidian_2021, title={Estimating vaccine efficacy over time after a randomized study is unblinded}, volume={8}, ISSN={1541-0420}, DOI={10.1111/biom.13509}, abstractNote={The COVID-19 pandemic due to the novel coronavirus SARS CoV-2 has inspired remarkable breakthroughs in the development of vaccines against the virus and the launch of several phase 3 vaccine trials in Summer 2020 to evaluate vaccine efficacy (VE). Trials of vaccine candidates using mRNA delivery systems developed by Pfizer-BioNTech and Moderna have shown substantial VEs of 94-95%, leading the US Food and Drug Administration to issue Emergency Use Authorizations and subsequent widespread administration of the vaccines. As the trials continue, a key issue is the possibility that VE may wane over time. Ethical considerations dictate that trial participants be unblinded and those randomized to placebo be offered study vaccine, leading to trial protocol amendments specifying unblinding strategies. Crossover of placebo subjects to vaccine complicates inference on waning of VE. We focus on the particular features of the Moderna trial and propose a statistical framework based on a potential outcomes formulation within which we develop methods for inference on potential waning of VE over time and estimation of VE at any postvaccination time. The framework clarifies assumptions made regarding individual- and population-level phenomena and acknowledges the possibility that subjects who are more or less likely to become infected may be crossed over to vaccine differentially over time. The principles of the framework can be adapted straightforwardly to other trials.}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A.
and Davidian, Marie}, year={2021}, month={Aug} } @article{tsiatis_davidian_holloway_2021, title={Estimation of the odds ratio in a proportional odds model with censored time-lagged outcome in a randomized clinical trial}, volume={12}, ISSN={["1541-0420"]}, url={https://doi.org/10.1111/biom.13603}, DOI={10.1111/biom.13603}, abstractNote={In many randomized clinical trials of therapeutics for COVID-19, the primary outcome is an ordinal categorical variable, and interest focuses on the odds ratio (OR; active agent vs control) under the assumption of a proportional odds model. Although at the final analysis the outcome will be determined for all subjects, at an interim analysis, the status of some participants may not yet be determined, for example, because ascertainment of the outcome may not be possible until some prespecified follow-up time. Accordingly, the outcome from these subjects can be viewed as censored. A valid interim analysis can be based on data only from those subjects with full follow-up; however, this approach is inefficient, as it does not exploit additional information that may be available on those for whom the outcome is not yet available at the time of the interim analysis. Appealing to the theory of semiparametrics, we propose an estimator for the OR in a proportional odds model with censored, time-lagged categorical outcome that incorporates additional baseline and time-dependent covariate information and demonstrate that it can result in considerable gains in efficiency relative to simpler approaches. A byproduct of the approach is a covariate-adjusted estimator for the OR based on the full data that would be available at a final analysis.}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. 
and Davidian, Marie and Holloway, Shannon T.}, year={2021}, month=dec } @article{tsiatis_davidian_2021a, title={Rejoinder: Estimating vaccine efficacy over time after a randomized study is unblinded}, volume={8}, ISSN={1541-0420}, DOI={10.1111/biom.13539}, abstractNote={We are honored to have our work critiqued by such distinguished, internationally recognized authorities on vaccine efficacy and vaccine trials. When the first author (AAT) was appointed to the Data and Safety Monitoring Board for the U.S. government-sponsored COVID-19 vaccine trials, we were embarrassingly unacquainted with even the basic concepts in this area, starting with the definition of vaccine efficacy (VE), and it was to the fundamental work of these researchers we turned to get up to speed. Responding to the points they raise has enhanced our understanding of the area and the role of our work within it. We comment on the issues raised in each discussion in turn; because all note challenges posed by viral variants, we address this point separately at the end. Heterogeneity of vaccine efficacy (HVE) and bias. We thank Drs. Janes, Gao, and Luedtke (JGL henceforth) for raising this issue, as they inspired us to think more deeply about the role of heterogeneity. As JGL note, our assumption (ii), E { π 1 ( t , τ ) | X } / E { π 0 ( t ) | X } = q ( τ ) , precludes HVE, as we demonstrate shortly. Our assumption of no HVE embodied in (ii) was based on the emerging evidence in the vaccine trials suggesting little variation in VE across subgroups defined by baseline characteristics X, which, as cited by JGL, persists to the present, as well as scant information on viral variants available at the time. JGL conjecture that the result in our simulations that the methods with stabilized weights equal to 1 yield unbiased inference similar to that obtained with the inverse probability weighted methods, which adjust for possible confounding, could be a consequence of the no-HVE assumption.
Their comments are relevant without reference to VE waning, so, for simplicity, we discuss them in the case π 1 ( t , τ ) = π 1 ( t ) , so that VE does not depend on time since vaccination τ. Here, our assumption (ii) of no HVE becomes E { π 1 ( t ) | X } / E { π 0 ( t ) | X } = q , a constant. In our framework, X comprises individual-specific covariates, such as age, gender, and so on; thus, just as c b ( t ) , c 0 u ( t ) , c 01 ℓ u ( t ) , c 1 u ( t ) , π 1 ( t ) , π 0 ( t ) are inherent, individual-specific characteristics of trial participants (albeit unobservable), so are the components of X. In contrast, viral variants are external forces to which individuals are exposed. We focus here on HVE due to variation in X and discuss heterogeneity in VE across variants in Section 5. As in most clinical trials, interest focuses on population-averaged inference, so on marginal VE in the overall population, which here, from (2) of our article, is V E ( t ) = 1 − R b ( t ) = 1 − E { p ( t , S ) c b ( t ) π 1 ( t ) } / E { p ( t , S ) c b ( t ) π 0 ( t ) } . Analogously, V E ( t , X ) = 1 − R b ( t , X ) = 1 − E { p ( t , S ) c b ( t ) π 1 ( t ) | X } / E { p ( t , S ) c b ( t ) π 0 ( t ) X } is VE in the subpopulation defined by X, if dependent on X implies HVE. Under our assumptions (i) and (ii), V E ( t , X ) = V E ( t ) = 1 − q , so there is no HVE, and VE is constant over time. If (ii) is relaxed to E { π 1 ( t ) | X } / E { π 0 ( t ) | X } = q ( X ) , then under (i) and this version of (ii), V E ( t , X ) = 1 − q ( X ) (HVE), and it is straightforward that the marginal VE is V E ( t ) = 1 − R b ( t ) = 1 − E { w ( t , X ) q ( X ) } / E { w ( t , X ) } = E { w ( t , X ) V E ( t , X ) } / E { w ( t , X ) } , where w ( t , X ) = E { p ( t , S ) c b ( t ) | X } E { π 0 ( t ) | X } , so can be viewed as a weighted average of X-specific VEs. Thus, HVE introduces the complication that marginal VE is time dependent. 
Inspection of the weights w ( t , X ) suggests that they may not vary substantially over time; for example, it may be reasonable to assume that p ( t , S ) is independent of all other individual-specific quantities and thus factors out of R b ( t ) , and that the ratio of c b ( t ) for two randomly chosen individuals might stay in roughly constant proportion over time, as some are inherently risk-averse and others not. If w ( t , X ) do not vary substantially, then neither does marginal VE. Accordingly, we discuss JGL's conjecture under this scenario, as we believe they implicitly intended, by considering estimation of V E ( t ) , equivalently of R b ( t ) , at a specific time t. Taking infection rates and hazard rates to be equivalent as in Section 4.3 of our article, R b ( t ) = E { w ( t , X ) q ( X ) } / E { w ( t , X ) } is approximated by the ratio of the marginal hazard rates for potential infection times under vaccine and placebo and is the estimand of interest. JGL consider the situation where X contains age, V E ( t , X ) is lower for older individuals (HVE), and such individuals have a higher probability of being unblinded earlier. They assert that the “standard” analysis based on Cox models, which estimates the marginal hazard ratio by the usual partial likelihood estimator and is roughly equivalent to our approach with stability weights equal to 1, will yield positively biased inference on the marginal VE with HVE but consistent inference under no HVE. To gain insight, we consider the implications of the relationship between HVE and the unblinding process for estimation of marginal VE. It is straightforward to show that, if there were no unblinding at all, or if the unblinding probability does not depend on X, then the standard analysis leads to a consistent estimator for V E ( t ) whether or not there is HVE. 
If the unblinding probability is X-dependent, but there is no HVE, JGL contend that the standard analysis and our method with stabilized weights equal to 1 also lead to consistent inference on marginal VE, which could explain our simulation results. We can show, however, that if the dependence of unblinding probability on X is different for vaccine and placebo, then even with no HVE ( q ( X ) = q ), bias can arise if E { π 0 ( t ) | X } depends on X. This is the configuration in our simulations for unblinding in the interval [ T P , T U ) , suggesting the potential for bias; we speculate that the negligible bias seen in our simulations is partially due to the shortness (1 week) of this interval. We can also show that, with both HVE and unblinding probability depending on X, bias results and is positive when both q ( X ) and unblinding probability decrease with X, as noted by JGL. Our method with estimated stability weights based on correct models for unblinding depending on X leads to consistent estimation of marginal VE whether or not HVE holds. The foregoing developments are for a fixed t. It is well known that, if the proportional hazards assumption is violated, the standard partial likelihood estimator for the assumed constant hazard ratio estimates a weighted average over time of the time-dependent hazard ratio, R b ( t ) in our case. If the w ( t , X ) do not vary substantially with t, as above, neither will R b ( t ) and V E ( t ) , and this weighted average may have public health relevance. Here, the results above still apply; under no HVE, marginal VE is constant and consistently estimated, and under HVE and X-dependent unblinding, the standard analysis will be biased while our methods consistently estimate this weighted average. If instead the w ( t , X ) and thus R b ( t ) and V E ( t ) do vary nontrivially with t, it may be possible to incorporate estimation of the vaccine and placebo hazard rates via nonparametric smoothing. 
Potential contact rates as potential outcomes. Dr. Halloran raises the subtlety of referring to the individual-specific contact rates as “potential outcomes.” In the causal inference literature, ordinarily, a potential outcome is a characteristic that is potentially observable, as for a clinical outcome if an individual were to receive placebo or active treatment. In contrast, the contact rates are conceptual, unobservable quantities, as are the transmission probabilities. Accordingly, { c b ( t ) , c 0 u ( t ) , c 01 ℓ u ( t ) , c 1 u ( t ) t > 0 , π 0 ( t ) , π 1 ( t , τ ) , τ ≥ 0 } are similar to unobservable random effects or frailties that characterize heterogeneity across individuals. Model for VE waning. The model g ( u ; θ 1 ) = θ 1 I ( u > v ) we used in the simulations is admittedly simplistic, and we chose it to simplify interpretation of the results. Dr. Halloran rightly notes that the analyst must select the change point v at which efficacy is thought to shift, and clearly inference on waning is predicated on this choice. Such a model is most likely a considerable simplification of a more complex truth under which waning of VE occurs smoothly over time, but it could be a useful tool for preliminary exploratory analysis: one could estimate θ1 over a range of v to gain insight, then adopt a linear or cubic spline representation with knot selection informed by these preliminary analyses to obtain a more nuanced approximation to smoothly continuous waning (these choices are built-in options in our R package VEwaning). As Dr. Follmann suggests in his discussion, it may be possible to prove that the VE as a function of τ is nonparametrically recoverable from the data, although sample size considerations may limit the complexity of how g ( u ; θ 1 ) is represented. Symptomatic viral infection. Dr. 
Halloran points out that the primary endpoint in the Moderna trial is symptomatic COVID-19 infection, but our presentation is admittedly unclear regarding the meaning of “infection” in our potential outcomes formulation. We tacitly take π 0 ( t ) and π 1 ( t , τ ) to be the individual-specific probabilities of transmission per contact leading to symptomatic infection and thus lump asymptomatic infection with no infection without comment. We thus do not acknowledge explicitly that symptomatic infection results from transmission that, with some probability, results in symptomatic disease. Dr. Halloran rightly raises the issue of how the formulation should be modified to acknowledge this reality. Let ρ 0 ( t ) and ρ 1 ( t , τ ) be the individual-specific probabilities of transmission per contact and s 0 ( t ) and s 1 ( t , τ ) be individual-specific conditional probabilities of becoming symptomatic given transmission, that is, pathogenicity, under placebo and vaccine. Then π 0 ( t ) = ρ 0 ( t ) s 0 ( t ) and π 1 ( t , τ ) = ρ 1 ( t , τ ) s 1 ( t , τ ) . Because pathogenicity is a biological characteristic, (i) can be modified reasonably to { ρ 1 ( t , τ ) , ρ 0 ( t ) , s 1 ( t , τ ) , s 0 ( t ) } ⊥ { S , c b ( t ) } | X and { ρ 1 ( t , τ ) , ρ 0 ( t ) , s 1 ( t , τ ) , s 0 ( t ) } ⊥ { S , c 01 ℓ u ( t ) , c 1 u ( t ) } | X . Assumption (ii) is equivalent to E { ρ 1 ( t , τ ) s 1 ( t , τ ) | X } / E { ρ 0 ( t ) s 0 ( t ) | X } = q ( τ ) . To modify (ii) to incorporate pathogenicity, one can assume that (ii)(a) s 1 ( t , τ ) ⊥ ρ 1 ( t , τ ) | X and s 0 ( t ) ⊥ ρ 0 ( t ) | X , and (ii)(b) E { ρ 1 ( t , τ ) | X } / E { ρ 0 ( t ) | X } and E { s 1 ( t , τ ) | X } / E { s 0 ( t ) | X } do not depend on t or X so are functions only of τ. Assumption (ii)(b) can be viewed as Dr. Halloran's speculated constant of proportionality. 
If one is willing to assume that there is no effect of vaccine on pathogenicity, then s 1 ( t , τ ) = s 0 ( t ) at any t regardless of τ, and (ii)(b) is unnecessary. Dr. Follmann provides an excellent example that clarifies the challenges of estimating VE and waning of VE after unblinding and how differential unblinding can lead to biased inference on waning. This example and Dr. Follmann's nice summary of the main principles underlying our approach in his Section 3 strongly complement our account of the methodology by making the key issues more accessible. We comment on two main points raised by Dr. Follmann. Celebratory bias. In our formulation, unblinded placebo participants who receive study vaccine engage in behavior represented by c 01 ℓ u ( t ) prior to reaching full efficacy after an interval of length ℓ and then adopt behavior c 1 u ( t ) , whereas unblinded vaccine participants adopt c 1 u ( t ) immediately. Dr. Follmann suggests that, while unblinded placebo participants now on study vaccine will behave as c 01 ℓ u ( t ) for the efficacy lag interval, unblinded vaccine participants may experience a celebratory interval of length C during which they engage in more risky behavior, which we could represent in our framework by c 1 c u ( t ) , say. We agree with Dr. Follmann that it is prudent to remove these individuals from the risk sets during the celebratory interval, just as we remove unblinded placebo participants behaving as c 01 ℓ u ( t ) during the efficacy lag interval. Given that C would be unknown, a possible sensitivity analysis would involve specifying a range of values for C and examining the stability of the results. Time-dependent covariate information. Dr. Follmann raises the possibility of exploiting time-dependent, post-randomization covariate information to account for potential confounding, and he provides interesting examples of such covariates. Our methodology readily incorporates time-dependent covariates. 
Values of such covariates up to time r could be included in the specifications of models for the unblinding hazard functions λ R , j ( r | X , A , E ) , j = 1 , 2 ; similarly, such information could be incorporated in the model for pr ( Ψ = 1 | X , E , Γ , R ) . All discussants note the potential for variability in VE across emerging new variants of the SARS-CoV-2 virus. Dr. Follmann sketches how, given data on viral genotypes from infected trial participants, variant-specific analyses of VE waning can be carried out. We briefly outline how our framework can be modified to allow for such variant-specific inference, using ν = 1 , … , V to index V variants of interest. As noted in Section 2, variants are external forces to which individuals are exposed. Thus, prevalence of infection can differ by variant, represented by defining p ( t , s , ν ) to be the prevalence for variant ν at time t at site s. Likewise, in accordance with emerging evidence (e.g., the delta variant), it is natural to take individual-specific transmission probabilities per contact at time t to differ by variant, denoted by π 0 ( t , ν ) and π 1 ( t , τ , ν ) for variant ν under placebo and vaccination with study vaccine for τ > 0 time units. We take the contact rates reflecting individual-specific behavior c b ( t ) , c 0 u ( t ) , c 01 ℓ u ( t ) , c 1 u ( t ) to remain unchanged. The infection rates in the study population at time t for variant ν if all individuals were to receive placebo or vaccine at time t − τ are then I 0 b ( t , ν ) = E { p ( t , S , ν ) c b ( t ) π 0 ( t , ν ) } and I 1 b ( t , τ , ν ) = E { p ( t , S , ν ) c b ( t ) π 1 ( t , τ , ν ) } , and, analogous to (2) of our article, define VE for variant ν at time t after vaccination at t − τ as V E ( t , τ , ν ) = 1 − R b ( t , τ , ν ) = 1 − I 1 b ( t , τ , ν ) / I 0 b ( t , ν ) . 
Assumption (i) is generalized to { π 1 ( t , τ , ν ) , π 0 ( t , ν ) } ⊥ { S , c b ( t ) } | X , and { π 1 ( t , τ , ν ) , π 0 ( t , ν ) } ⊥ { S , c 01 ℓ u ( t ) , c 1 u ( t ) } | X similarly. We modify (ii) to reflect the belief that, while VE can vary by variant, within variants, there is no additional HVE associated with components of X; namely, (ii) becomes E { π 1 ( t , τ , ν ) | X } / E { π 0 ( t , ν ) | X } = q ( τ , ν ) . Under (i) and (ii), V E ( t , τ , ν ) = V E ( τ , ν ) = 1 − R b ( τ , ν ) = 1 − q ( τ , ν ) is the VE for variant ν = 1 , … , V . Moreover, with I 1 u ( t , τ , ν ) = E { p ( t , S , ν ) c 1 u ( t ) π 1 ( t , τ , ν ) } , τ ≥ ℓ , (4) of our article becomes I 1 u ( t , τ , ν ) = I 1 u ( t , ν ) R b ( τ , ν ) / R b ( ℓ , ν ) , τ ≥ ℓ . Representing the variant-specific infection rate ratio R b ( τ , ν ) = exp { ζ ν ( τ ) } I ( τ < ℓ ) + exp { θ 0 ν + g ν ( τ − ℓ ; θ 1 ν ) } I ( τ ≥ ℓ ) , where now ζ ν ( · ) and g ν ( · ; · ) are variant-specific, we have for τ ≥ ℓ , analogous to (8), I 1 b ( t , τ , ν ) = I 1 b ( t , ν ) exp { θ 0 ν + g ν ( τ − ℓ ; θ 1 ν ) } , I 1 u ( t , τ , ν ) = I 1 u ( t , ν ) exp { g ν ( τ − ℓ ; θ 1 ν ) } , and thus V E ( τ , ν ) = 1 − exp { θ 0 ν + g ν ( τ − ℓ ; θ 1 ν ) } , ν = 1 , … , V , as given by Dr. Follmann. With the observed data as in (1) of our article, redefine Δ so Δ = 0 if U > L and Δ = 1 , … , V according to the infection variant otherwise, and define d N ( t , ν ) = I ( U = t , Δ = ν ) , ν = 1 , … , V . Then, under obvious modifications of the consistency assumptions (16) and (17)–(20), results analogous to (21)–(24) hold, and observed data-estimating functions analogous to those in Section 4.4 can be formulated. 
Defining d N ∼ b ( t , ν ) , Y ∼ b ( t , ν ) , d N ∼ u ( t , ν ) , Y ∼ u ( t , ν ) , Z b ( t , ν ) , and Z u ( t , ν ) , ν = 1 , … , V , as d N ∼ b ( t ) , Y ∼ b ( t ) , d N ∼ u ( t ) , Y ∼ u ( t ) , Z b ( t ) , and Z u ( t ) in Section 4.4 with d N ( t ) replaced by d N ( t , ν ) , g ( v , θ 1 ) by g ν ( v , θ 1 ν ) , and ( θ 0 , θ 1 ) by ( θ 0 ν , θ 1 ν ) , one is led to an estimating equation of the form (30), solution of which in ( θ 0 ν , θ 1 ν ) , ν = 1 , … , V , reduces to solving separate equations in ( θ 0 ν , θ 1 ν ) for each ν. Elaborating on Dr. Follmann's final key point, information on a given variant will be available only during time intervals when it was/is in circulation. If these intervals traverse blinded and unblinded periods of the trial, estimation of both θ 0 ν and θ 1 ν is possible, whereas, as Dr. Follmann notes, if the intervals are primarily within the unblinded phase, only θ 1 ν will be estimable, but will still provide evidence of possibly waning for variant ν.}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A. and Davidian, Marie}, year={2021}, month=aug } @article{cools_johnson_camm_bassand_verheugt_yang_tsiatis_fitzmaurice_goldhaber_kayani_etal_2021, title={Risks associated with discontinuation of oral anticoagulation in newly diagnosed patients with atrial fibrillation: Results from the GARFIELD-AF Registry}, volume={7}, ISSN={1538-7836}, DOI={10.1111/jth.15415}, abstractNote={Oral anticoagulation (OAC) in atrial fibrillation (AF) reduces the risk of stroke/systemic embolism (SE). The impact of OAC discontinuation is less well documented.Investigate outcomes of patients prospectively enrolled in the Global Anticoagulant Registry in the Field-Atrial Fibrillation study who discontinued OAC.Oral anticoagulation discontinuation was defined as cessation of treatment for ≥7 consecutive days.
Adjusted outcome risks were assessed in 23 882 patients with 511 days of median follow-up after discontinuation.Patients who discontinued (n = 3114, 13.0%) had a higher risk (hazard ratio [95% CI]) of all-cause death (1.62 [1.25-2.09]), stroke/systemic embolism (SE) (2.21 [1.42-3.44]) and myocardial infarction (MI) (1.85 [1.09-3.13]) than patients who did not, whether OAC was restarted or not. This higher risk of outcomes after discontinuation was similar for patients treated with vitamin K antagonists (VKAs) and direct oral anticoagulants (DOACs) (p for interactions range = 0.145-0.778). Bleeding history (1.43 [1.14-1.80]), paroxysmal vs. persistent AF (1.15 [1.02-1.29]), emergency room care setting vs. office (1.37 [1.18-1.59]), major, clinically relevant nonmajor, and minor bleeding (10.02 [7.19-13.98], 2.70 [2.24-3.25] and 1.90 [1.61-2.23]), stroke/SE (4.09 [2.55-6.56]), MI (2.74 [1.69-4.43]), and left atrial appendage procedures (4.99 [1.82-13.70]) were predictors of discontinuation. Age (0.84 [0.81-0.88], per 10-year increase), history of stroke/transient ischemic attack (0.81 [0.71-0.93]), diabetes (0.88 [0.80-0.97]), weeks from AF onset to treatment (0.96 [0.93-0.99] per week), and permanent vs. persistent AF (0.73 [0.63-0.86]) were predictors of lower discontinuation rates.In GARFIELD-AF, the rate of discontinuation was 13.0%. Discontinuation for ≥7 consecutive days was associated with significantly higher all-cause mortality, stroke/SE, and MI risk. Caution should be exerted when considering any OAC discontinuation beyond 7 days.}, journal={JOURNAL OF THROMBOSIS AND HAEMOSTASIS}, author={Cools, Frank and Johnson, Dana and Camm, Alan J. and Bassand, Jean-Pierre and Verheugt, Freek W. A. and Yang, Shu and Tsiatis, Anastasios and Fitzmaurice, David A. and Goldhaber, Samuel Z. 
and Kayani, Gloria and others}, year={2021}, month=jul } @article{kim_tsiatis_2020, title={Independent increments in group sequential tests: a review}, volume={44}, ISSN={2013-8830}, DOI={10.2436/20.8080.02.101}, number={2}, journal={SORT-STATISTICS AND OPERATIONS RESEARCH TRANSACTIONS}, author={Kim, Kyung Mann and Tsiatis, Anastasios A.}, year={2020}, pages={223--264} } @article{ruppert_yin_davidian_tsiatis_byrd_woyach_mandrekar_2019, title={Application of a sequential multiple assignment randomized trial (SMART) design in older patients with chronic lymphocytic leukemia}, volume={30}, ISSN={1569-8041}, DOI={10.1093/annonc/mdz053}, abstractNote={Ibrutinib therapy is safe and effective in patients with chronic lymphocytic leukemia (CLL). Currently, ibrutinib is administered continuously until disease progression. Combination regimens with ibrutinib are being developed to deepen response which could allow for ibrutinib maintenance (IM) discontinuation. Among untreated older patients with CLL, clinical investigators had the following questions: (i) does ibrutinib + venetoclax + obinutuzumab (IVO) with IM have superior progression-free survival (PFS) compared with ibrutinib + obinutuzumab (IO) with IM, and (ii) does the treatment strategy of IVO + IM for patients without minimal residual disease complete response (MRD- CR) or IVO + IM discontinuation for patients with MRD- CR have superior PFS compared with IO + IM.}, number={4}, journal={ANNALS OF ONCOLOGY}, author={Ruppert, A. S. and Yin, J. and Davidian, M. and Tsiatis, A. A. and Byrd, J. C. and Woyach, J. A. and Mandrekar, S.
J.}, year={2019}, month={Apr}, pages={542–550} } @book{tsiatis_davidian_holloway_laber_2019, title={Dynamic Treatment Regimes}, ISBN={9780429192692}, url={http://dx.doi.org/10.1201/9780429192692}, DOI={10.1201/9780429192692}, abstractNote={Dynamic Treatment Regimes: Statistical Methods for Precision Medicine provides a comprehensive introduction to statistical methodology for the evaluation and discovery of dynamic treatment regimes from data. Researchers and graduate students in statistics, data science, and related quantitative disciplines with a background in probability and statistical inference and popular statistical modeling techniques will be prepared for further study of this rapidly evolving field. A dynamic treatment regime is a set of sequential decision rules, each corresponding to a key decision point in a disease or disorder process, where each rule takes as input patient information and returns the treatment option he or she should receive. Thus, a treatment regime formalizes how a clinician synthesizes patient information and selects treatments in practice. Treatment regimes are of obvious relevance to precision medicine, which involves tailoring treatment selection to patient characteristics in an evidence-based way. Of critical importance to precision medicine is estimation of an optimal treatment regime, one that, if used to select treatments for the patient population, would lead to the most beneficial outcome on average. Key methods for estimation of an optimal treatment regime from data are motivated and described in detail. A dedicated companion website presents full accounts of application of the methods using a comprehensive R package developed by the authors. The authors' website www.dtr-book.com includes updates, corrections, new papers, and links to useful websites.}, publisher={Chapman and Hall/CRC}, author={Tsiatis, Anastasios A. and Davidian, Marie and Holloway, Shannon T. 
and Laber, Eric B.}, year={2019}, month={Dec} } @article{triolo_fouts_pyle_yu_gottlieb_steck_greenbaum_atkinson_baidal_battaglia_et al._2019, title={Identical and Nonidentical Twins: Risk and Factors Involved in Development of Islet Autoimmunity and Type 1 Diabetes}, volume={42}, ISSN={["1935-5548"]}, DOI={10.2337/dc18-0288}, abstractNote={OBJECTIVE There are variable reports of risk of concordance for progression to islet autoantibodies and type 1 diabetes in identical twins after one twin is diagnosed. We examined development of positive autoantibodies and type 1 diabetes and the effects of genetic factors and common environment on autoantibody positivity in identical twins, nonidentical twins, and full siblings. RESEARCH DESIGN AND METHODS Subjects from the TrialNet Pathway to Prevention Study (N = 48,026) were screened from 2004 to 2015 for islet autoantibodies (GAD antibody [GADA], insulinoma-associated antigen 2 [IA-2A], and autoantibodies against insulin [IAA]). Of these subjects, 17,226 (157 identical twins, 283 nonidentical twins, and 16,786 full siblings) were followed for autoantibody positivity or type 1 diabetes for a median of 2.1 years. RESULTS At screening, identical twins were more likely to have positive GADA, IA-2A, and IAA than nonidentical twins or full siblings (all P < 0.0001). Younger age, male sex, and genetic factors were significant factors for expression of IA-2A, IAA, one or more positive autoantibodies, and two or more positive autoantibodies (all P ≤ 0.03). Initially autoantibody-positive identical twins had a 69% risk of diabetes by 3 years compared with 1.5% for initially autoantibody-negative identical twins. In nonidentical twins, type 1 diabetes risk by 3 years was 72% for initially multiple autoantibody–positive, 13% for single autoantibody–positive, and 0% for initially autoantibody-negative nonidentical twins. 
Full siblings had a 3-year type 1 diabetes risk of 47% for multiple autoantibody–positive, 12% for single autoantibody–positive, and 0.5% for initially autoantibody-negative subjects. CONCLUSIONS Risk of type 1 diabetes at 3 years is high for initially multiple and single autoantibody–positive identical twins and multiple autoantibody–positive nonidentical twins. Genetic predisposition, age, and male sex are significant risk factors for development of positive autoantibodies in twins.}, number={2}, journal={DIABETES CARE}, author={Triolo, Taylor M. and Fouts, Alexandra and Pyle, Laura and Yu, Liping and Gottlieb, Peter A. and Steck, Andrea K. and Greenbaum, C. J. and Atkinson, M. and Baidal, D. and Battaglia, M. and et al.}, year={2019}, month={Feb}, pages={192–199} } @article{zhang_laber_davidian_tsiatis_2018, title={Interpretable Dynamic Treatment Regimes}, volume={113}, ISSN={["1537-274X"]}, DOI={10.1080/01621459.2017.1345743}, abstractNote={Precision medicine is currently a topic of great interest in clinical and intervention science. A key component of precision medicine is that it is evidence-based, that is, data-driven, and consequently there has been tremendous interest in estimation of precision medicine strategies using observational or randomized study data. One way to formalize precision medicine is through a treatment regime, which is a sequence of decision rules, one per stage of clinical intervention, that map up-to-date patient information to a recommended treatment. An optimal treatment regime is defined as maximizing the mean of some cumulative clinical outcome if applied to a population of interest. It is well-known that even under simple generative models an optimal treatment regime can be a highly nonlinear function of patient information. Consequently, a focal point of recent methodological research has been the development of flexible models for estimating optimal treatment regimes. 
However, in many settings, estimation of an optimal treatment regime is an exploratory analysis intended to generate new hypotheses for subsequent research and not to directly dictate treatment to new patients. In such settings, an estimated treatment regime that is interpretable in a domain context may be of greater value than an unintelligible treatment regime built using "black-box" estimation methods. We propose an estimator of an optimal treatment regime composed of a sequence of decision rules, each expressible as a list of "if-then" statements that can be presented as either a paragraph or as a simple flowchart that is immediately interpretable to domain experts. The discreteness of these lists precludes smooth, that is, gradient-based, methods of estimation and leads to nonstandard asymptotics. Nevertheless, we provide a computationally efficient estimation algorithm, prove consistency of the proposed estimator, and derive rates of convergence. We illustrate the proposed methods using a series of simulation examples and application to data from a sequential clinical trial on bipolar disorder. Supplementary materials for this article are available online.}, number={524}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Zhang, Yichi and Laber, Eric B. and Davidian, Marie and Tsiatis, Anastasios A.}, year={2018}, pages={1541–1549} } @article{yang_tsiatis_blazing_2018, title={Modeling survival distribution as a function of time to treatment discontinuation: A dynamic treatment regime approach}, volume={74}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12845}, abstractNote={Summary We consider estimating the effect that discontinuing a beneficial treatment will have on the distribution of a time to event clinical outcome, and in particular assessing whether there is a period of time over which the beneficial effect may continue after discontinuation. There are two major challenges. 
The first is to make a distinction between mandatory discontinuation, where by necessity treatment has to be terminated and optional discontinuation which is decided by the preference of the patient or physician. The innovation in this article is to cast the intervention in the form of a dynamic regime “terminate treatment optionally at time v unless a mandatory treatment-terminating event occurs prior to v” and consider estimating the distribution of time to event as a function of treatment regime v. The second challenge arises from biases associated with the nonrandom assignment of treatment regimes, because, naturally, optional treatment discontinuation is left to the patient and physician, and so time to discontinuation may depend on the patient's disease status. To address this issue, we develop dynamic-regime Marginal Structural Models and use inverse probability of treatment weighting to estimate the impact of time to treatment discontinuation on a time to event outcome, compared to the effect of not discontinuing treatment. We illustrate our methods using the IMPROVE-IT data on cardiovascular disease.}, number={3}, journal={BIOMETRICS}, author={Yang, Shu and Tsiatis, Anastasios A. and Blazing, Michael}, year={2018}, month={Sep}, pages={900–909} } @article{hager_tsiatis_davidian_2018, title={Optimal two-stage dynamic treatment regimes from a classification perspective with censored survival data}, volume={74}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12894}, abstractNote={Summary Clinicians often make multiple treatment decisions at key points over the course of a patient's disease. A dynamic treatment regime is a sequence of decision rules, each mapping a patient's observed history to the set of available, feasible treatment options at each decision point, and thus formalizes this process. An optimal regime is one leading to the most beneficial outcome on average if used to select treatment for the patient population. 
We propose a method for estimation of an optimal regime involving two decision points when the outcome of interest is a censored survival time, which is based on maximizing a locally efficient, doubly robust, augmented inverse probability weighted estimator for average outcome over a class of regimes. By casting this optimization as a classification problem, we exploit well-studied classification techniques such as support vector machines to characterize the class of regimes and facilitate implementation via a backward iterative algorithm. Simulation studies of performance and application of the method to data from a sequential, multiple assignment randomized clinical trial in acute leukemia are presented.}, number={4}, journal={BIOMETRICS}, author={Hager, Rebecca and Tsiatis, Anastasios A. and Davidian, Marie}, year={2018}, month={Dec}, pages={1180–1192} } @article{greenbaum_atkinson_baidal_battaglia_bingley_bosi_buckner_clements_colman_dimeglio_et al._2017, title={Effect of oral insulin on prevention of diabetes in relatives of patients with type 1 diabetes a randomized clinical trial}, volume={318}, number={19}, journal={Journal of the American Medical Association}, author={Greenbaum, C. and Atkinson, M. and Baidal, D. and Battaglia, M. and Bingley, P. and Bosi, E. and Buckner, J. and Clements, M. and Colman, P. and DiMeglio, L. and et al.}, year={2017}, pages={1891–1902} } @article{bai_tsiatis_lu_song_2017, title={Optimal treatment regimes for survival endpoints using locally-efficient doubly-robust estimator from a classification perspective}, volume={23}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-016-9376-x}, abstractNote={A treatment regime at a single decision point is a rule that assigns a treatment, among the available options, to a patient based on the patient’s baseline characteristics. 
The value of a treatment regime is the average outcome of a population of patients if they were all treated in accordance to the treatment regime, where large values are desirable. The optimal treatment regime is a regime which results in the greatest value. Typically, the optimal treatment regime is estimated by positing a regression relationship for the outcome of interest as a function of treatment and baseline characteristics. However, this can lead to suboptimal treatment regimes when the regression model is misspecified. We instead consider value search estimators for the optimal treatment regime where we directly estimate the value for any treatment regime and then maximize this estimator over a class of regimes. For many studies the primary outcome of interest is survival time which is often censored. We derive a locally efficient, doubly robust, augmented inverse probability weighted complete case estimator for the value function with censored survival data and study the large sample properties of this estimator. The optimization is realized from a weighted classification perspective that allows us to use available off the shelf software. In some studies one treatment may have greater toxicity or side effects, thus we also consider estimating a quality adjusted optimal treatment regime that allows a patient to trade some additional risk of death in order to avoid the more invasive treatment.}, number={4}, journal={Lifetime Data Analysis}, author={Bai, X. and Tsiatis, A. and Lu, W. and Song, R.}, year={2017}, pages={585–604} } @article{vock_durheim_tsuang_copeland_tsiatis_davidian_neely_lederer_palmer_2017, title={Survival benefit of lung transplantation in the modern era of lung allocation}, volume={14}, number={2}, journal={Annals of the American Thoracic Society}, author={Vock, D. M. and Durheim, M. T. and Tsuang, W. M. and Copeland, C. A. F. and Tsiatis, A. A. and Davidian, M. and Neely, M. L. and Lederer, D. J. and Palmer, S. 
M.}, year={2017}, pages={172–181} } @article{bai_tsiatis_2016, title={A log rank type test in observational survival studies with stratified sampling}, volume={22}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-015-9331-2}, abstractNote={In randomized clinical trials, the log rank test is often used to test the null hypothesis of the equality of treatment-specific survival distributions. In observational studies, however, the ordinary log rank test is no longer guaranteed to be valid. In such studies we must be cautious about potential confounders; that is, the covariates that affect both the treatment assignment and the survival distribution. In this paper, two cases were considered: the first is when it is believed that all the potential confounders are captured in the primary database, and the second case where a substudy is conducted to capture additional confounding covariates. We generalize the augmented inverse probability weighted complete case estimators for treatment-specific survival distribution proposed in Bai et al. (Biometrics 69:830–839, 2013) and develop the log rank type test in both cases. The consistency and double robustness of the proposed test statistics are shown in simulation studies. These statistics are then applied to the data from the observational study that motivated this research.}, number={2}, journal={LIFETIME DATA ANALYSIS}, author={Bai, Xiaofei and Tsiatis, Anastasios A.}, year={2016}, month={Apr}, pages={280–298} } @article{zhang_tsiatis_davidian_zhang_laber_2016, title={Estimating optimal treatment regimes from a classification perspective (vol 1, pg 103, 2012)}, volume={5}, ISSN={["2049-1573"]}, DOI={10.1002/sta4.124}, abstractNote={StatVolume 5, Issue 1 p. 
278-278 Erratum Estimating optimal treatment regimes from a classification perspective Baqun Zhang, Corresponding Author Baqun Zhang baqun.zhang@northwestern.edu Department of Preventive Medicine, Northwestern University, Chicago, IL, 60611 USAE-mail: baqun.zhang@northwestern.eduSearch for more papers by this authorAnastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMin Zhang, Min Zhang Department of Biotatistics, University of Michigan, Ann Arbor, MI, 48109-2029 USASearch for more papers by this authorEric Laber, Eric Laber Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this author Baqun Zhang, Corresponding Author Baqun Zhang baqun.zhang@northwestern.edu Department of Preventive Medicine, Northwestern University, Chicago, IL, 60611 USAE-mail: baqun.zhang@northwestern.eduSearch for more papers by this authorAnastasios A. Tsiatis, Anastasios A. 
Tsiatis Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this authorMin Zhang, Min Zhang Department of Biotatistics, University of Michigan, Ann Arbor, MI, 48109-2029 USASearch for more papers by this authorEric Laber, Eric Laber Department of Statistics, North Carolina State University, Raleigh, NC, 27695-8203 USASearch for more papers by this author First published: 04 November 2016 https://doi.org/10.1002/sta4.124Read the full textAboutPDF ToolsRequest permissionExport citationAdd to favoritesTrack citation ShareShare Give accessShare full text accessShare full-text accessPlease review our Terms and Conditions of Use and check box below to share full-text version of article.I have read and accept the Wiley Online Library Terms and Conditions of UseShareable LinkUse the link below to share a full-text version of this article with your friends and colleagues. Learn more.Copy URL Share a linkShare onFacebookTwitterLinkedInRedditWechat No abstract is available for this article. Volume5, Issue12016Pages 278-278 RelatedInformation}, number={1}, journal={STAT}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Laber, Eric}, year={2016}, pages={278–278} } @article{milanzi_molenberghs_alonso_kenward_tsiatis_davidian_verbeke_2015, title={Estimation After a Group Sequential Trial}, volume={7}, ISSN={["1867-1772"]}, DOI={10.1007/s12561-014-9112-6}, abstractNote={Group sequential trials are one important instance of studies for which the sample size is not fixed a priori but rather takes one of a finite set of pre-specified values, dependent on the observed data. Much work has been devoted to the inferential consequences of this design feature. Molenberghs et al. 
(Statistical Methods in Medical Research, 2012) and Milanzi et al. (Properties of estimators in exponential family settings with observation-based stopping rules, 2012) reviewed and extended the existing literature, focusing on a collection of seemingly disparate, but related, settings, namely completely random sample sizes, group sequential studies with deterministic and random stopping rules, incomplete data, and random cluster sizes. They showed that the ordinary sample average is a viable option for estimation following a group sequential trial, for a wide class of stopping rules and for random outcomes with a distribution in the exponential family. Their results are somewhat surprising in the sense that the sample average is not optimal, and further, there does not exist an optimal, or even, unbiased linear estimator. However, the sample average is asymptotically unbiased, both conditionally upon the observed sample size as well as marginalized over it. By exploiting ignorability they showed that the sample average is the conventional maximum likelihood estimator. They also showed that a conditional maximum likelihood estimator is finite sample unbiased, but is less efficient than the sample average and has the larger mean squared error. Asymptotically, the sample average and the conditional maximum likelihood estimator are equivalent. This previous work is restricted, however, to the situation in which the the random sample size can take only two values, $$N=n$$ or $$N=2n$$ . In this paper, we consider the more practically useful setting of sample sizes in a the finite set $$\{n_1,n_2,\dots ,n_L\}$$ . It is shown that the sample average is then a justifiable estimator , in the sense that it follows from joint likelihood estimation, and it is consistent and asymptotically unbiased. We also show why simulations can give the false impression of bias in the sample average when considered conditional upon the sample size. 
The consequence is that no corrections need to be made to estimators following sequential trials. When small-sample bias is of concern, the conditional likelihood estimator (CLE) provides a relatively straightforward modification to the sample average. Finally, it is shown that classical likelihood-based standard errors and confidence intervals can be applied, obviating the need for technical corrections.}, number={2}, journal={STATISTICS IN BIOSCIENCES}, author={Milanzi, Elasma and Molenberghs, Geert and Alonso, Ariel and Kenward, Michael G. and Tsiatis, Anastasios A. and Davidian, Marie and Verbeke, Geert}, year={2015}, month={Oct}, pages={187–205} } @article{zhang_laber_tsiatis_davidian_2015, title={Using Decision Lists to Construct Interpretable and Parsimonious Treatment Regimes}, volume={71}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12354}, abstractNote={Summary A treatment regime formalizes personalized medicine as a function from individual patient characteristics to a recommended treatment. A high-quality treatment regime can improve patient outcomes while reducing cost, resource consumption, and treatment burden. Thus, there is tremendous interest in estimating treatment regimes from observational and randomized studies. However, the development of treatment regimes for application in clinical practice requires the long-term, joint effort of statisticians and clinical scientists. In this collaborative process, the statistician must integrate clinical science into the statistical models underlying a treatment regime and the clinician must scrutinize the estimated treatment regime for scientific validity. To facilitate meaningful information exchange, it is important that estimated treatment regimes be interpretable in a subject-matter context. We propose a simple, yet flexible class of treatment regimes whose members are representable as a short list of if–then statements. 
Regimes in this class are immediately interpretable and are therefore an appealing choice for broad application in practice. We derive a robust estimator of the optimal regime within this class and demonstrate its finite sample performance using simulation experiments. The proposed method is illustrated with data from two clinical trials.}, number={4}, journal={BIOMETRICS}, author={Zhang, Yichi and Laber, Eric B. and Tsiatis, Anastasios and Davidian, Marie}, year={2015}, month={Dec}, pages={895–904} } @article{laber_zhao_regh_davidian_tsiatis_stanford_zeng_song_kosorok_2015, title={Using pilot data to size a two-arm randomized trial to find a nearly optimal personalized treatment strategy}, volume={35}, ISSN={0277-6715}, url={http://dx.doi.org/10.1002/SIM.6783}, DOI={10.1002/sim.6783}, abstractNote={A personalized treatment strategy formalizes evidence-based treatment selection by mapping patient information to a recommended treatment. Personalized treatment strategies can produce better patient outcomes while reducing cost and treatment burden. Thus, among clinical and intervention scientists, there is a growing interest in conducting randomized clinical trials when one of the primary aims is estimation of a personalized treatment strategy. However, at present, there are no appropriate sample size formulae to assist in the design of such a trial. Furthermore, because the sampling distribution of the estimated outcome under an estimated optimal treatment strategy can be highly sensitive to small perturbations in the underlying generative model, sample size calculations based on standard (uncorrected) asymptotic approximations or computer simulations may not be reliable. We offer a simple and robust method for powering a single stage, two-armed randomized clinical trial when the primary aim is estimating the optimal single stage personalized treatment strategy. 
The proposed method is based on inverting a plugin projection confidence interval and is thereby regular and robust to small perturbations of the underlying generative model. The proposed method requires elicitation of two clinically meaningful parameters from clinical scientists and uses data from a small pilot study to estimate nuisance parameters, which are not easily elicited. The method performs well in simulated experiments and is illustrated using data from a pilot study of time to conception and fertility awareness.}, number={8}, journal={Statistics in Medicine}, publisher={Wiley}, author={Laber, Eric B. and Zhao, Ying-Qi and Regh, Todd and Davidian, Marie and Tsiatis, Anastasios and Stanford, Joseph B. and Zeng, Donglin and Song, Rui and Kosorok, Michael R.}, year={2015}, month={Oct}, pages={1245–1256} } @article{laber_tsiatis_davidian_holloway_2014, title={Combining Biomarkers to Optimize Patient Treatment Recommendations Discussions}, volume={70}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12187}, abstractNote={BiometricsVolume 70, Issue 3 p. 707-710 BIOMETRIC PRACTICE Discussion of “Combining biomarkers to optimize patient treatment recommendation” Eric B. Laber, Corresponding Author Eric B. Laber Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.email: [email protected]Search for more papers by this authorAnastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this authorShannon T. Holloway, Shannon T. Holloway Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this author Eric B. Laber, Corresponding Author Eric B. 
Laber Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.email: [email protected]Search for more papers by this authorAnastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this authorShannon T. Holloway, Shannon T. Holloway Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695–8203, U.S.A.Search for more papers by this author First published: 02 June 2014 https://doi.org/10.1111/biom.12187Citations: 4Read the full textAboutPDF ToolsRequest permissionExport citationAdd to favoritesTrack citation ShareShare Give accessShare full text accessShare full-text accessPlease review our Terms and Conditions of Use and check box below to share full-text version of article.I have read and accept the Wiley Online Library Terms and Conditions of UseShareable LinkUse the link below to share a full-text version of this article with your friends and colleagues. Learn more.Copy URL Share a linkShare onEmailFacebookTwitterLinkedInRedditWechat References Barto, A. and Dieterich, T. (2004). Reinforcement learning and its relation to supervised learning. In Handbook of Learning and Approximate Dynamic Programming, J. Si, A. G. Barto, W. B. Powell, and D. Wunsch (eds), 45–63. New York: Wiley. Breiman, L. and Shang, N. (1996). Born again trees. Available at ftp://ftp.stat.berkeley. edu/pub/users/breiman/BAtrees.ps. Chakraborty, B. and Moodie, E. E. M. (2013). Statistical Methods for Dynamic Treatment Regimes: Reinforcement Learning, Causal Inference, and Personalized Medicine. New York: Springer. Fan, W., Stolfo, S. J., Zhang, J., and Chan, P. K. (1999). Adacost: misclassification cost-sensitive boosting. 
In Proceedings of the Sixteenth International Conference on Machine Learning (ICML'99), 97–105, Bled, Slovenia, June 1999. Laber, E., Linn, K., and Stefanski, L. (in press). Interactive model-building for Q-learning. Biometrika. Moodie, E. E. M., Dean, N., and Sun, Y. R. (2013). Q-learning: Flexible learning about useful utilities. Statistics in Biosciences, 1–21. Orellana, L., Rotnitzky, A., and Robins, J. M. (2010). Dynamic regime marginal structural mean models for estimation of optimal treatment regimes, part I: Main content. International Journal of Biostatistics 6, Article 8. Robins, J. M. (2004). Optimal structured nested models for optimal sequential decisions. In Proceedings of the Second Seattle Symposium on Biostatistics, D. Y. Lin and P. J. Heagerty (eds), 189–326. New York: Springer. Scharfstein, D. O., Rotnitzky, A., and Robins, J. M. (1999). Adjusting for nonignorable drop-out using semiparametric nonresponse models. Journal of the American Statistical Association 94, 1096–1120. Schulte, P., Tsiatis, A., Laber, E., and Davidian, M. (in press). Q-and A-learning methods for estimating optimal dynamic treatment regimes. Statistical Science. Zhang, B., Tsiatis, A. A., Davidian, M., Zhang, M., and Laber, E. (2012a). Estimating optimal treatment regimes from a classification perspective. Stat 1, 103–114. Zhang, B., Tsiatis, A. A., Laber, E. B., and Davidian, M. (2012b). A robust method for estimating optimal treatment regimes. Biometrics 68, 1010–1018. Zhang, B., Tsiatis, A. A., Laber, E. B., and Davidian, M. (2013). Robust estimation of optimal dynamic treatment regimes for sequential treatment decisions. Biometrika 100, 681–694. Zhao, Y., Zeng, D., Rush, A. J., and Kosorok, M. R. (2012). Estimating individualized treatment rules using outcome weighted learning. Journal of the American Statistical Association 107, 1106–1118. Zhao, Y., Zeng, D., Socinski, M. A., and Kosorok, M. R. (2011). 
Reinforcement learning strategies for clinical trials in nonsmall cell lung cancer. Biometrics 67, 1422–1433. Citing Literature Volume70, Issue3September 2014Pages 707-710 ReferencesRelatedInformation}, number={3}, journal={BIOMETRICS}, author={Laber, Eric B. and Tsiatis, Anastasios A. and Davidian, Marie and Holloway, Shannon T.}, year={2014}, month={Sep}, pages={707–710} } @article{molenberghs_kenward_aerts_verbeke_tsiatis_davidian_rizopoulos_2014, title={On random sample size, ignorability, ancillarity, completeness, separability, and degeneracy: Sequential trials, random sample sizes, and missing data}, volume={23}, ISSN={["1477-0334"]}, DOI={10.1177/0962280212445801}, abstractNote={The vast majority of settings for which frequentist statistical properties are derived assume a fixed, a priori known sample size. Familiar properties then follow, such as, for example, the consistency, asymptotic normality, and efficiency of the sample average for the mean parameter, under a wide range of conditions. We are concerned here with the alternative situation in which the sample size is itself a random variable which may depend on the data being collected. Further, the rule governing this may be deterministic or probabilistic. There are many important practical examples of such settings, including missing data, sequential trials, and informative cluster size. It is well known that special issues can arise when evaluating the properties of statistical procedures under such sampling schemes, and much has been written about specific areas (Grambsch P. Sequential sampling based on the observed Fisher information to guarantee the accuracy of the maximum likelihood estimator. Ann Stat 1983; 11: 68–77; Barndorff-Nielsen O and Cox DR. The effect of sampling rules on likelihood statistics. Int Stat Rev 1984; 52: 309–326). 
Our aim is to place these various related examples into a single framework derived from the joint modeling of the outcomes and sampling process and so derive generic results that in turn provide insight, and in some cases practical consequences, for different settings. It is shown that, even in the simplest case of estimating a mean, some of the results appear counterintuitive. In many examples, the sample average may exhibit small sample bias and, even when it is unbiased, may not be optimal. Indeed, there may be no minimum variance unbiased estimator for the mean. Such results follow directly from key attributes such as non-ancillarity of the sample size and incompleteness of the minimal sufficient statistic of the sample size and sample sum. Although our results have direct and obvious implications for estimation following group sequential trials, there are also ramifications for a range of other settings, such as random cluster sizes, censored time-to-event data, and the joint modeling of longitudinal and time-to-event data. Here, we use the simplest group sequential setting to develop and explicate the main results. Some implications for random sample sizes and missing data are also considered. Consequences for other related settings will be considered elsewhere.}, number={1}, journal={STATISTICAL METHODS IN MEDICAL RESEARCH}, author={Molenberghs, Geert and Kenward, Michael G. and Aerts, Marc and Verbeke, Geert and Tsiatis, Anastasios A. and Davidian, Marie and Rizopoulos, Dimitris}, year={2014}, month={Feb}, pages={11–41} } @article{schulte_tsiatis_laber_davidian_2014, title={Q- and A-Learning Methods for Estimating Optimal Dynamic Treatment Regimes}, volume={29}, ISSN={["0883-4237"]}, DOI={10.1214/13-sts450}, abstractNote={In clinical practice, physicians make a series of treatment decisions over the course of a patient's disease based on his/her baseline and evolving characteristics. 
A dynamic treatment regime is a set of sequential decision rules that operationalizes this process. Each rule corresponds to a decision point and dictates the next treatment action based on the accrued information. Using existing data, a key goal is estimating the optimal regime, that, if followed by the patient population, would yield the most favorable outcome on average. Q- and A-learning are two main approaches for this purpose. We provide a detailed account of these methods, study their performance, and illustrate them using data from a depression study.}, number={4}, journal={STATISTICAL SCIENCE}, author={Schulte, Phillip J. and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2014}, month={Nov}, pages={640–661} } @article{vock_tsiatis_davidian_laber_tsuang_copeland_palmer_2013, title={Assessing the Causal Effect of Organ Transplantation on the Distribution of Residual Lifetime}, volume={69}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12084}, abstractNote={Summary Because the number of patients waiting for organ transplants exceeds the number of organs available, a better understanding of how transplantation affects the distribution of residual lifetime is needed to improve organ allocation. However, there has been little work to assess the survival benefit of transplantation from a causal perspective. Previous methods developed to estimate the causal effects of treatment in the presence of time-varying confounders have assumed that treatment assignment was independent across patients, which is not true for organ transplantation. We develop a version of G-estimation that accounts for the fact that treatment assignment is not independent across individuals to estimate the parameters of a structural nested failure time model. We derive the asymptotic properties of our estimator and confirm through simulation studies that our method leads to valid inference of the effect of transplantation on the distribution of residual lifetime. 
We demonstrate our method on the survival benefit of lung transplantation using data from the United Network for Organ Sharing.}, number={4}, journal={BIOMETRICS}, author={Vock, David M. and Tsiatis, Anastasios A. and Davidian, Marie and Laber, Eric B. and Tsuang, Wayne M. and Copeland, C. Ashley Finlen and Palmer, Scott M.}, year={2013}, month={Dec}, pages={820–829} } @article{bai_tsiatis_obrien_2013, title={Doubly-Robust Estimators of Treatment-Specific Survival Distributions in Observational Studies with Stratified Sampling}, volume={69}, ISSN={["1541-0420"]}, DOI={10.1111/biom.12076}, abstractNote={Observational studies are frequently conducted to compare the effects of two treatments on survival. For such studies we must be concerned about confounding; that is, there are covariates that affect both the treatment assignment and the survival distribution. With confounding the usual treatment-specific Kaplan-Meier estimator might be a biased estimator of the underlying treatment-specific survival distribution. This article has two aims. In the first aim we use semiparametric theory to derive a doubly robust estimator of the treatment-specific survival distribution in cases where it is believed that all the potential confounders are captured. In cases where not all potential confounders have been captured one may conduct a substudy using a stratified sampling scheme to capture additional covariates that may account for confounding. The second aim is to derive a doubly-robust estimator for the treatment-specific survival distributions and its variance estimator with such a stratified sampling scheme. Simulation studies are conducted to show consistency and double robustness. These estimators are then applied to the data from the ASCERT study that motivated this research.}, number={4}, journal={BIOMETRICS}, author={Bai, Xiaofei and Tsiatis, Anastasios A. and O'Brien,
Sean M.}, year={2013}, month={Dec}, pages={830–839} } @article{daniel_tsiatis_2013, title={Efficient estimation of the distribution of time to composite endpoint when some endpoints are only partially observed}, volume={19}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-013-9261-9}, abstractNote={Two common features of clinical trials, and other longitudinal studies, are (1) a primary interest in composite endpoints, and (2) the problem of subjects withdrawing prematurely from the study. In some settings, withdrawal may only affect observation of some components of the composite endpoint, for example when another component is death, information on which may be available from a national registry. In this paper, we use the theory of augmented inverse probability weighted estimating equations to show how such partial information on the composite endpoint for subjects who withdraw from the study can be incorporated in a principled way into the estimation of the distribution of time to composite endpoint, typically leading to increased efficiency without relying on additional assumptions above those that would be made by standard approaches. We describe our proposed approach theoretically, and demonstrate its properties in a simulation study.}, number={4}, journal={LIFETIME DATA ANALYSIS}, author={Daniel, Rhian M. and Tsiatis, Anastasios A.}, year={2013}, month={Oct}, pages={513–546} } @article{zhang_tsiatis_laber_davidian_2013, title={Robust estimation of optimal dynamic treatment regimes for sequential treatment decisions}, volume={100}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/ast014}, abstractNote={Journal Article Robust estimation of optimal dynamic treatment regimes for sequential treatment decisions Get access Baqun Zhang, Baqun Zhang School of Statistics, Renmin University of China, Beijing 100872, China, zhangbaqun@ruc.edu.cn Search for other works by this author on: Oxford Academic Google Scholar Anastasios A. Tsiatis, Anastasios A.
Tsiatis Department of Statistics, North Carolina State University, Raleigh, North Carolina, 27695-8203, U.S.A., tsiatis@ncsu.edu Search for other works by this author on: Oxford Academic Google Scholar Eric B. Laber, Eric B. Laber Department of Statistics, North Carolina State University, Raleigh, North Carolina, 27695-8203, U.S.A., eblaber@ncsu.edu Search for other works by this author on: Oxford Academic Google Scholar Marie Davidian Marie Davidian Department of Statistics, North Carolina State University, Raleigh, North Carolina, 27695-8203, U.S.A., davidian@ncsu.edu Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 100, Issue 3, September 2013, Pages 681–694, https://doi.org/10.1093/biomet/ast014 Published: 30 May 2013 Article history Received: 01 July 2012 Revision received: 01 March 2013 Published: 30 May 2013}, number={3}, journal={BIOMETRIKA}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2013}, month={Sep}, pages={681–694} } @article{zhang_tsiatis_laber_davidian_2012, title={A Robust Method for Estimating Optimal Treatment Regimes}, volume={68}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2012.01763.x}, abstractNote={Summary A treatment regime is a rule that assigns a treatment, among a set of possible treatments, to a patient as a function of his/her observed characteristics, hence “personalizing” treatment to the patient. The goal is to identify the optimal treatment regime that, if followed by the entire population of patients, would lead to the best outcome on average. Given data from a clinical trial or observational study, for a single treatment decision, the optimal regime can be found by assuming a regression model for the expected outcome conditional on treatment and covariates, where, for a given set of covariates, the optimal treatment is the one that yields the most favorable expected outcome. 
However, treatment assignment via such a regime is suspect if the regression model is incorrectly specified. Recognizing that, even if misspecified, such a regression model defines a class of regimes, we instead consider finding the optimal regime within such a class by finding the regime that optimizes an estimator of overall population mean outcome. To take into account possible confounding in an observational study and to increase precision, we use a doubly robust augmented inverse probability weighted estimator for this purpose. Simulations and application to data from a breast cancer clinical trial demonstrate the performance of the method.}, number={4}, journal={BIOMETRICS}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Laber, Eric B. and Davidian, Marie}, year={2012}, month={Dec}, pages={1010–1018} } @article{zhang_tsiatis_davidian_zhang_laber_2012, title={Estimating optimal treatment regimes from a classification perspective}, volume={1}, ISSN={2049-1573}, url={http://dx.doi.org/10.1002/sta.411}, DOI={10.1002/sta.411}, abstractNote={A treatment regime maps observed patient characteristics to a recommended treatment. Recent technological advances have increased the quality, accessibility, and volume of patient-level data; consequently, there is a growing need for powerful and flexible estimators of an optimal treatment regime that can be used with either observational or randomized clinical trial data. We propose a novel and general framework that transforms the problem of estimating an optimal treatment regime into a classification problem wherein the optimal classifier corresponds to the optimal treatment regime. We show that commonly employed parametric and semi-parametric regression estimators, as well as recently proposed robust estimators of an optimal treatment regime can be represented as special cases within our framework. 
Furthermore, our approach allows any classification procedure that can accommodate case weights to be used without modification to estimate an optimal treatment regime. This introduces a wealth of new and powerful learning algorithms for use in estimating treatment regimes. We illustrate our approach using data from a breast cancer clinical trial.}, number={1}, journal={Stat}, publisher={Wiley}, author={Zhang, Baqun and Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Laber, Eric}, year={2012}, month={Oct}, pages={103–114} } @article{vock_davidian_tsiatis_muir_2012, title={Mixed model analysis of censored longitudinal data with flexible random-effects density}, volume={13}, ISSN={["1468-4357"]}, DOI={10.1093/biostatistics/kxr026}, abstractNote={Mixed models are commonly used to represent longitudinal or repeated measures data. An additional complication arises when the response is censored, for example, due to limits of quantification of the assay used. While Gaussian random effects are routinely assumed, little work has characterized the consequences of misspecifying the random-effects distribution nor has a more flexible distribution been studied for censored longitudinal data. We show that, in general, maximum likelihood estimators will not be consistent when the random-effects density is misspecified, and the effect of misspecification is likely to be greatest when the true random-effects density deviates substantially from normality and the number of noncensored observations on each subject is small. We develop a mixed model framework for censored longitudinal data in which the random effects are represented by the flexible seminonparametric density and show how to obtain estimates in SAS procedure NLMIXED. Simulations show that this approach can lead to reduction in bias and increase in efficiency relative to assuming Gaussian random effects. 
The methods are demonstrated on data from a study of hepatitis C virus.}, number={1}, journal={BIOSTATISTICS}, author={Vock, David M. and Davidian, Marie and Tsiatis, Anastasios A. and Muir, Andrew J.}, year={2012}, month={Jan}, pages={61–73} } @article{tsiatis_davidian_2011, title={Connections between survey calibration estimators and semiparametric models for incomplete data discussion}, volume={79}, number={2}, journal={International Statistical Review}, author={Tsiatis, Anastasios A. and Davidian, Marie}, year={2011}, pages={221–223} } @article{tsiatis_davidian_cao_2011, title={Improved Doubly Robust Estimation When Data Are Monotonely Coarsened, with Application to Longitudinal Studies with Dropout}, volume={67}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2010.01476.x}, abstractNote={Summary A routine challenge is that of making inference on parameters in a statistical model of interest from longitudinal data subject to dropout, which are a special case of the more general setting of monotonely coarsened data. Considerable recent attention has focused on doubly robust (DR) estimators, which in this context involve positing models for both the missingness (more generally, coarsening) mechanism and aspects of the distribution of the full data, that have the appealing property of yielding consistent inferences if only one of these models is correctly specified. DR estimators have been criticized for potentially disastrous performance when both of these models are even only mildly misspecified. We propose a DR estimator applicable in general monotone coarsening problems that achieves comparable or improved performance relative to existing DR methods, which we demonstrate via simulation studies and by application to data from an AIDS clinical trial.}, number={2}, journal={BIOMETRICS}, author={Tsiatis, Anastasios A.
and Davidian, Marie and Cao, Weihua}, year={2011}, month={Jun}, pages={536–545} } @article{zhang_tsiatis_davidian_pieper_mahaffey_2011, title={Inference on treatment effects from a randomized clinical trial in the presence of premature treatment discontinuation: the SYNERGY trial}, volume={12}, ISSN={["1465-4644"]}, DOI={10.1093/biostatistics/kxq054}, abstractNote={The Superior Yield of the New Strategy of Enoxaparin, Revascularization, and GlYcoprotein IIb/IIIa inhibitors (SYNERGY) was a randomized, open-label, multicenter clinical trial comparing 2 anticoagulant drugs on the basis of time-to-event endpoints. In contrast to other studies of these agents, the primary, intent-to-treat analysis did not find evidence of a difference, leading to speculation that premature discontinuation of the study agents by some subjects may have attenuated the apparent treatment effect and thus to interest in inference on the difference in survival distributions were all subjects in the population to follow the assigned regimens, with no discontinuation. Such inference is often attempted via ad hoc analyses that are not based on a formal definition of this treatment effect. We use SYNERGY as a context in which to describe how this effect may be conceptualized and to present a statistical framework in which it may be precisely identified, which leads naturally to inferential methods based on inverse probability weighting.}, number={2}, journal={BIOSTATISTICS}, author={Zhang, Min and Tsiatis, Anastasios A. and Davidian, Marie and Pieper, Karen S. 
and Mahaffey, Kenneth W.}, year={2011}, month={Apr}, pages={258–269} } @article{lu_tsiatis_2011, title={Semiparametric estimation of treatment effect with time-lagged response in the presence of informative censoring}, volume={17}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-011-9199-8}, abstractNote={In many randomized clinical trials, the primary response variable, for example, the survival time, is not observed directly after the patients enroll in the study but rather observed after some period of time (lag time). It is often the case that such a response variable is missing for some patients due to censoring that occurs when the study ends before the patient’s response is observed or when the patients drop out of the study. It is often assumed that censoring occurs at random which is referred to as noninformative censoring; however, in many cases such an assumption may not be reasonable. If the missing data are not analyzed properly, the estimator or test for the treatment effect may be biased. In this paper, we use semiparametric theory to derive a class of consistent and asymptotically normal estimators for the treatment effect parameter which are applicable when the response variable is right censored. The baseline auxiliary covariates and post-treatment auxiliary covariates, which may be time-dependent, are also considered in our semiparametric model. 
These auxiliary covariates are used to derive estimators that both account for informative censoring and are more efficient than the estimators which do not consider the auxiliary covariates.}, number={4}, journal={LIFETIME DATA ANALYSIS}, author={Lu, Xiaomin and Tsiatis, Anastasios A.}, year={2011}, month={Oct}, pages={566–593} } @article{brinkley_tsiatis_anstrom_2010, title={A Generalized Estimator of the Attributable Benefit of an Optimal Treatment Regime}, volume={66}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2009.01282.x}, abstractNote={Summary For many diseases where there are several treatment options often there is no consensus on the best treatment to give individual patients. In such cases, it may be necessary to define a strategy for treatment assignment; that is, an algorithm that dictates the treatment an individual should receive based on their measured characteristics. Such a strategy or algorithm is also referred to as a treatment regime. The optimal treatment regime is the strategy that would provide the most public health benefit by minimizing as many poor outcomes as possible. Using a measure that is a generalization of attributable risk (AR) and notions of potential outcomes, we derive an estimator for the proportion of events that could have been prevented had the optimal treatment regime been implemented. Traditional AR studies look at the added risk that can be attributed to exposure of some contaminant; here we will instead study the benefit that can be attributed to using the optimal treatment strategy. We will show how regression models can be used to estimate the optimal treatment strategy and the attributable benefit of that strategy. We also derive the large sample properties of this estimator.
As a motivating example, we will apply our methods to an observational study of 3856 patients treated at the Duke University Medical Center with prior coronary artery bypass graft surgery and further heart‐related problems requiring a catheterization. The patients may be treated with either medical therapy alone or a combination of medical therapy and percutaneous coronary intervention without a general consensus on which is the best treatment for individual patients.}, number={2}, journal={BIOMETRICS}, author={Brinkley, Jason and Tsiatis, Anastasios and Anstrom, Kevin J.}, year={2010}, month={Jun}, pages={512–522} } @article{lu_jiang_tsiatis_2010, title={Multiple Imputation Approaches for the Analysis of Dichotomized Responses in Longitudinal Studies with Missing Data}, volume={66}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2010.01405.x}, abstractNote={Often a binary variable is generated by dichotomizing an underlying continuous variable measured at a specific time point according to a prespecified threshold value. In the event that the underlying continuous measurements are from a longitudinal study, one can use the repeated-measures model to impute missing data on responder status as a result of subject dropout and apply the logistic regression model on the observed or otherwise imputed responder status. Standard Bayesian multiple imputation techniques (Rubin, 1987, in Multiple Imputation for Nonresponse in Surveys) that draw the parameters for the imputation model from the posterior distribution and construct the variance of parameter estimates for the analysis model as a combination of within- and between-imputation variances are found to be conservative. The frequentist multiple imputation approach that fixes the parameters for the imputation model at the maximum likelihood estimates and construct the variance of parameter estimates for the analysis model using the results of Robins and Wang (2000, Biometrika 87, 113-124) is shown to be more efficient. 
We propose to apply (Kenward and Roger, 1997, Biometrics 53, 983-997) degrees of freedom to account for the uncertainty associated with variance-covariance parameter estimates for the repeated measures model.}, number={4}, journal={BIOMETRICS}, author={Lu, Kaifeng and Jiang, Liqiu and Tsiatis, Anastasios A.}, year={2010}, month={Dec}, pages={1202–1208} } @article{cao_tsiatis_davidian_2009, title={Improving efficiency and robustness of the doubly robust estimator for a population mean with incomplete data}, volume={96}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/asp033}, abstractNote={Considerable recent interest has focused on doubly robust estimators for a population mean response in the presence of incomplete data, which involve models for both the propensity score and the regression of outcome on covariates. The usual doubly robust estimator may yield severely biased inferences if neither of these models is correctly specified and can exhibit nonnegligible bias if the estimated propensity score is close to zero for some observations. We propose alternative doubly robust estimators that achieve comparable or improved performance relative to existing methods, even with some estimated propensity scores close to zero.}, number={3}, journal={BIOMETRIKA}, author={Cao, Weihua and Tsiatis, Anastasios A. and Davidian, Marie}, year={2009}, month={Sep}, pages={723–734} } @article{tsiatis_davidian_zhang_lu_2008, title={Covariate adjustment for two-sample treatment comparisons in randomized clinical trials: A principled yet flexible approach}, volume={27}, ISSN={["1097-0258"]}, DOI={10.1002/sim.3113}, abstractNote={Abstract There is considerable debate regarding whether and how covariate‐adjusted analyses should be used in the comparison of treatments in randomized clinical trials. 
Substantial baseline covariate information is routinely collected in such trials, and one goal of adjustment is to exploit covariates associated with outcome to increase precision of estimation of the treatment effect. However, concerns are routinely raised over the potential for bias when the covariates used are selected post hoc and the potential for adjustment based on a model of the relationship between outcome, covariates, and treatment to invite a ‘fishing expedition’ for that leading to the most dramatic effect estimate. By appealing to the theory of semiparametrics, we are led naturally to a characterization of all treatment effect estimators and to principled, practically feasible methods for covariate adjustment that yield the desired gains in efficiency and that allow covariate relationships to be identified and exploited while circumventing the usual concerns. The methods and strategies for their implementation in practice are presented. Simulation studies and an application to data from an HIV clinical trial demonstrate the performance of the techniques relative to the existing methods. Copyright © 2007 John Wiley & Sons, Ltd.}, number={23}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A. and Davidian, Marie and Zhang, Min and Lu, Xiaomin}, year={2008}, month={Oct}, pages={4658–4677} } @article{nelson_sun_tsiatis_mark_2008, title={Empirical estimation of life expectancy from large clinical trials: Use of left-truncated, right-censored survival analysis methodology}, volume={27}, ISSN={["1097-0258"]}, DOI={10.1002/sim.3355}, abstractNote={In the current era of ever-increasing health care costs, economic analyses are an essential component in the comprehensive evaluation of new medical interventions. Cost-effectiveness analysis (CEA)--the most common form of economic analysis used in medicine--aids policy-makers in determining how to allocate finite health care dollars among possible alternative therapies. 
CEA relates the incremental benefits of a new technology to its incremental costs in a cost-effectiveness (CE) ratio. Although the generally agreed-upon standard of presentation for the CE ratio is the lifetime perspective (incremental lifetime cost to add one life year), this perspective presents an obvious challenge to the statistical analyst. Most large clinical trials collect limited follow-up data, and yet their findings form the basis of therapeutic recommendations that often extend far beyond the limits of the empirical data. Although clinical practice guidelines do not yet require explicit modeling to examine the long-term implications of their recommendations, health policy analyses routinely rely upon such extrapolations. This paper describes methods for using empirical patient-level data to extrapolate survival in large clinical trials and cohorts beyond a limited follow-up period in which most patients remain alive in order to estimate the entire survival distribution for a cohort of patients. We accomplish this task through a novel combination of models that estimate the hazard rate not only as a function of time but also as a function of patient age. Extrapolation of survival beyond a limited time frame is made possible by capitalizing on the extensive latitude of survival information available across the range of ages represented in the data. Variations in approach are presented, and issues arising in these analyses are discussed. The proposed methodology is developed, applied, and evaluated in both a large clinical trial cohort with 5-year follow-up on over 23,000 patients and a large observational database with long-term follow-up on over 4000 patients.}, number={26}, journal={STATISTICS IN MEDICINE}, author={Nelson, Charlotte L. and Sun, Jie L. and Tsiatis, Anastasios A. 
and Mark, Daniel B.}, year={2008}, month={Nov}, pages={5525–5555} } @article{zhang_tsiatis_davidian_2008, title={Improving efficiency of inferences in randomized clinical trials using auxiliary covariates}, volume={64}, ISSN={["1541-0420"]}, DOI={10.1111/j.1541-0420.2007.00976.x}, abstractNote={Summary The primary goal of a randomized clinical trial is to make comparisons among two or more treatments. For example, in a two‐arm trial with continuous response, the focus may be on the difference in treatment means; with more than two treatments, the comparison may be based on pairwise differences. With binary outcomes, pairwise odds ratios or log odds ratios may be used. In general, comparisons may be based on meaningful parameters in a relevant statistical model. Standard analyses for estimation and testing in this context typically are based on the data collected on response and treatment assignment only. In many trials, auxiliary baseline covariate information may also be available, and it is of interest to exploit these data to improve the efficiency of inferences. Taking a semiparametric theory perspective, we propose a broadly applicable approach to adjustment for auxiliary covariates to achieve more efficient estimators and tests for treatment parameters in the analysis of randomized clinical trials. Simulations and applications demonstrate the performance of the methods.}, number={3}, journal={BIOMETRICS}, author={Zhang, Min and Tsiatis, Anastasios A. 
and Davidian, Marie}, year={2008}, month={Sep}, pages={707–715} } @article{lu_tsiatis_2008, title={Improving the efficiency of the log-rank test using auxiliary covariates}, volume={95}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/asn003}, abstractNote={Journal Article Improving the efficiency of the log-rank test using auxiliary covariates Get access Xiaomin Lu, Xiaomin Lu Department of Epidemiology and Biostatistics, College of Public Health and Health Professions, University of Florida Gainesville, Florida 32611, U.S.A., xlu2@phhp.ufl.edu Search for other works by this author on: Oxford Academic Google Scholar Anastasios A. Tsiatis Anastasios A. Tsiatis Department of Statistics, North Carolina State University, Raleigh, North Carolina 27695-8203, U.S.A., tsiatis@stat.ncsu.edu Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 95, Issue 3, September 2008, Pages 679–694, https://doi.org/10.1093/biomet/asn003 Published: 04 June 2008 Article history Received: 01 June 2006 Revision received: 01 February 2007 Published: 04 June 2008}, number={3}, journal={BIOMETRIKA}, author={Lu, Xiaomin and Tsiatis, Anastasios A.}, year={2008}, month={Sep}, pages={679–694} } @article{lokhnygina_tsiatis_2008, title={Optimal two-stage group-sequential designs}, volume={138}, ISSN={["0378-3758"]}, DOI={10.1016/j.jspi.2007.06.011}, abstractNote={We derive optimal two-stage adaptive group-sequential designs for normally distributed data which achieve the minimum of a mixture of expected sample sizes at the range of plausible values of a normal mean. Unlike standard group-sequential tests, our method is adaptive in that it allows the group size at the second look to be a function of the observed test statistic at the first look. Using optimality criteria, we construct two-stage designs which we show have advantage over other popular adaptive methods. 
The employed computational method is a modification of the backward induction algorithm applied to a Bayesian decision problem.}, number={2}, journal={JOURNAL OF STATISTICAL PLANNING AND INFERENCE}, author={Lokhnygina, Yuliya and Tsiatis, Anastasios A.}, year={2008}, month={Feb}, pages={489–499} } @article{mark_anstrom_sun_clapp-channing_tsiatis_davidson-ray_lee_bardy_2008, title={Quality of life with defibrillator therapy or amiodarone in heart failure}, volume={359}, ISSN={["0028-4793"]}, DOI={10.1056/NEJMoa0706719}, abstractNote={Implantable cardioverter-defibrillator (ICD) therapy significantly prolongs life in patients at increased risk for sudden death from depressed left ventricular function. However, whether this increased longevity is accompanied by deterioration in the quality of life is unclear.In a randomized trial, we compared ICD therapy or amiodarone with state-of-the-art medical therapy alone in 2521 patients who had stable heart failure with depressed left ventricular function. We prospectively measured quality of life at baseline and at months 3, 12, and 30; data collection was 93 to 98% complete. The Duke Activity Status Index (which measures cardiac physical functioning) and the Medical Outcomes Study 36-Item Short-Form Mental Health Inventory 5 (which measures psychological well-being) were prespecified primary outcomes. Multiple additional quality-of-life outcomes were also examined.Psychological well-being in the ICD group, as compared with medical therapy alone, was significantly improved at 3 months (P=0.01) and at 12 months (P=0.003) but not at 30 months. No clinically or statistically significant differences in physical functioning among the study groups were observed. Additional quality-of-life measures were improved in the ICD group at 3 months, 12 months, or both, but there was no significant difference at 30 months. 
ICD shocks in the month preceding a scheduled assessment were associated with a decreased quality of life in multiple domains. The use of amiodarone had no significant effects on the primary quality-of-life outcomes.In a large primary-prevention population with moderately symptomatic heart failure, single-lead ICD therapy was not associated with any detectable adverse quality-of-life effects during 30 months of follow-up.}, number={10}, journal={NEW ENGLAND JOURNAL OF MEDICINE}, author={Mark, Daniel B. and Anstrom, Kevin J. and Sun, Jie L. and Clapp-Channing, Nancy E. and Tsiatis, Anastasios A. and Davidson-Ray, Linda and Lee, Kerry L. and Bardy, Gust H.}, year={2008}, month={Sep}, pages={999–1008} } @article{banerjee_tsiatis_2006, title={Adaptive two-stage designs in phase II clinical trials}, volume={25}, ISSN={["1097-0258"]}, DOI={10.1002/sim.2501}, abstractNote={Two-stage designs have been widely used in phase II clinical trials. Such designs are desirable because they allow a decision to be made on whether a treatment is effective or not after the accumulation of the data at the end of each stage. Optimal fixed two-stage designs, where the sample size at each stage is fixed in advance, were proposed by Simon when the primary outcome is a binary response. This paper proposes an adaptive two-stage design which allows the sample size at the second stage to depend on the results at the first stage. Using a Bayesian decision-theoretic construct, we derive optimal adaptive two-stage designs; the optimality criterion being minimum expected sample size under the null hypothesis. Comparisons are made between Simon's two-stage fixed design and the new design with respect to this optimality criterion. 
Copyright © 2006 John Wiley & Sons, Ltd.}, number={19}, journal={STATISTICS IN MEDICINE}, author={Banerjee, Anindita and Tsiatis, Anastasios A.}, year={2006}, month={Oct}, pages={3382–3395} } @article{mark_nelson_anstrom_al-khatib_tsiatis_cowper_clapp-channing_davidson-ray_poole_johnson_et al._2006, title={Cost-effectiveness of defibrillator therapy or amiodarone in chronic stable heart failure - Results from the Sudden Cardiac Death in Heart Failure Trial (SCD-HeFT)}, volume={114}, ISSN={["0009-7322"]}, DOI={10.1161/circulationaha.105.581884}, abstractNote={In the Sudden Cardiac Death in Heart Failure Trial (SCD-HeFT), implantable cardioverter-defibrillator (ICD) therapy significantly reduced all-cause mortality rates compared with medical therapy alone in patients with stable, moderately symptomatic heart failure, whereas amiodarone had no benefit on mortality rates. We examined long-term economic implications of these results.Medical costs were estimated by using hospital billing data and the Medicare Fee Schedule. Our base case cost-effectiveness analysis used empirical clinical and cost data to estimate the lifetime incremental cost of saving an extra life-year with ICD therapy relative to medical therapy alone. At 5 years, the amiodarone arm had a survival rate equivalent to that of the placebo arm and higher costs than the placebo arm. For ICD relative to medical therapy alone, the base case lifetime cost-effectiveness and cost-utility ratios (discounted at 3%) were dollar 38,389 per life-year saved (LYS) and dollar 41,530 per quality-adjusted LYS, respectively. A cost-effectiveness ratio < dollar 100,000 was obtained in 99% of 1000 bootstrap repetitions. The cost-effectiveness ratio was sensitive to the amount of extrapolation beyond the empirical 5-year trial data: dollar 127,503 per LYS at 5 years, dollar 88,657 per LYS at 8 years, and dollar 58,510 per LYS at 12 years. 
Because of a significant interaction between ICD treatment and New York Heart Association class, the cost-effectiveness ratio was dollar 29,872 per LYS for class II, whereas there was incremental cost but no incremental benefit in class III.Prophylactic use of single-lead, shock-only ICD therapy is economically attractive in patients with stable, moderately symptomatic heart failure with an ejection fraction < or = 35%, particularly those in NYHA class II, as long as the benefits of ICD therapy observed in the SCD-HeFT persist for at least 8 years.}, number={2}, journal={CIRCULATION}, author={Mark, Daniel B. and Nelson, Charlotte L. and Anstrom, Kevin J. and Al-Khatib, Sana M. and Tsiatis, Anastasios A. and Cowper, Patricia A. and Clapp-Channing, Nancy E. and Davidson-Ray, Linda and Poole, Jeanne E. and Johnson, George and et al.}, year={2006}, month={Jul}, pages={135–142} } @article{tsiatis_2006, title={Information-based monitoring of clinical trials}, volume={25}, ISSN={["0277-6715"]}, DOI={10.1002/sim.2625}, abstractNote={When designing a clinical trial to compare the effect of different treatments on response, a key issue facing the statistician is to determine how large a study is necessary to detect a clinically important difference with sufficient power. This is the case whether the study will be analysed only once (single-analysis) or whether it will be monitored periodically with the possibility of early stopping (group-sequential). Standard sample size calculations are based on both the magnitude of difference that is considered clinically important as well as values for the nuisance parameters in the statistical model. For planning purposes, best guesses are made for the value of the nuisance parameters and these are used to determine the sample size. However, if these guesses are incorrect this will affect the subsequent power to detect the clinically important difference. 
It is argued in this paper that statistical precision is directly related to Statistical Information and that the study should continue until the requisite statistical information is obtained. This is referred to as information-based design and analysis of clinical trials. We also argue that this type of methodology is best suited with group-sequential trials which monitor the data periodically and allow for estimation of the statistical information as the study progresses. Copyright © 2006 John Wiley & Sons, Ltd.}, number={19}, journal={STATISTICS IN MEDICINE}, author={Tsiatis, Anastasios A.}, year={2006}, month={Oct}, pages={3236–3244} } @article{ma_tsiatis_2006, title={On closed form semiparametric estimators for measurement error models}, volume={16}, number={1}, journal={Statistica Sinica}, author={Ma, Y. Y. and Tsiatis, A. A.}, year={2006}, pages={183–193} } @article{wahed_tsiatis_2006, title={Semiparametric efficient estimation of survival distributions in two-stage randomisation designs in clinical trials with censored data}, volume={93}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/93.1.163}, abstractNote={Two-stage randomisation designs are useful in the evaluation of combination therapies where patients are initially randomised to an induction therapy and then, depending upon their response and consent, are randomised to a maintenance therapy. In this paper we derive the best regular asymptotically linear estimator for the survival distribution and related quantities of treatment regimes. We propose an estimator which is easily computable and is more efficient than existing estimators. 
Large-sample properties of the proposed estimator are derived and comparisons with other estimators are made using simulation.}, number={1}, journal={BIOMETRIKA}, author={Wahed, AS and Tsiatis, AA}, year={2006}, month={Mar}, pages={163–177} } @book{tsiatis_2006_book, title={Semiparametric theory and missing data}, ISBN={0387324488}, publisher={Springer}, address={New York}, author={Tsiatis, A. A.}, year={2006} } @article{lu_tsiatis_2006, title={Semiparametric transformation models for the case-cohort study}, volume={93}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/93.1.207}, abstractNote={A general class of semiparametric transformation models is studied for analysing survival data from the case-cohort design, which was introduced by Prentice (1986). Weighted estimating equations are proposed for simultaneous estimation of the regression parameters and the transformation function. It is shown that the resulting regression estimators are asymptotically normal, with variance-covariance matrix that has a closed form and can be consistently estimated by the usual plug-in method. Simulation studies show that the proposed approach is appropriate for practical use. An application to a case-cohort dataset from the Atherosclerosis Risk in Communities study is also given to illustrate the methodology.}, number={1}, journal={BIOMETRIKA}, author={Lu, WB and Tsiatis, AA}, year={2006}, month={Mar}, pages={207–214} } @article{lu_tsiatis_2005, title={Comparison between two partial likelihood approaches for the competing risks model with missing cause of failure}, volume={11}, ISSN={["1572-9249"]}, DOI={10.1007/s10985-004-5638-0}, abstractNote={In many clinical studies where time to failure is of primary interest, patients may fail or die from one of many causes where failure time can be right censored. In some circumstances, it might also be the case that patients are known to die but the cause of death information is not available for some patients. 
Under the assumption that cause of death is missing at random, we compare the Goetghebeur and Ryan (1995, Biometrika, 82, 821-833) partial likelihood approach with the Dewanji (1992, Biometrika, 79, 855-857) partial likelihood approach. We show that the estimator for the regression coefficients based on the Dewanji partial likelihood is not only consistent and asymptotically normal, but also semiparametric efficient. While the Goetghebeur and Ryan estimator is more robust than the Dewanji partial likelihood estimator against misspecification of proportional baseline hazards, the Dewanji partial likelihood estimator allows the probability of missing cause of failure to depend on covariate information without the need to model the missingness mechanism. Tests for proportional baseline hazards are also suggested and a robust variance estimator is derived.}, number={1}, journal={LIFETIME DATA ANALYSIS}, author={Lu, KF and Tsiatis, AA}, year={2005}, month={Mar}, pages={29–40} } @article{reed_anstrom_bakhai_briggs_califf_cohen_drummond_glick_gnanasakthy_hlatky_et_al._2005, title={Conducting economic evaluations alongside multinational clinical trials: Toward a research consensus}, volume={149}, ISSN={["1097-5330"]}, DOI={10.1016/j.ahj.2004.11.001}, abstractNote={Demand for economic evaluations in multinational clinical trials is increasing, but there is little consensus about how such studies should be conducted and reported. At a workshop in Durham, North Carolina, we sought to identify areas of agreement about how the primary findings of economic evaluations in multinational clinical trials should be generated and presented. In this paper, we propose a framework for classifying multinational economic evaluations according to (a) the sources of an analyst's estimates of resource use and clinical effectiveness and (b) the analyst's method of estimating costs. We review existing studies in the cardiology literature in the context of the proposed framework. 
We then describe important methodological and practical considerations in conducting multinational economic evaluations and summarize the advantages and disadvantages of each approach. Finally, we describe opportunities for future research. Delineation of the various approaches to multinational economic evaluation may assist researchers, peer reviewers, journal editors, and decision makers in evaluating the strengths and limitations of particular studies.}, number={3}, journal={AMERICAN HEART JOURNAL}, author={Reed, SD and Anstrom, KJ and Bakhai, A and Briggs, AH and Califf, RM and Cohen, DJ and Drummond, MF and Glick, HA and Gnanasakthy, A and Hlatky, MA and et al.}, year={2005}, month={Mar}, pages={434–443} } @article{ma_genton_tsiatis_2005, title={Locally efficient semiparametric estimators for generalized skew-elliptical distributions}, volume={100}, ISSN={["0162-1459"]}, DOI={10.1198/016214505000000079}, abstractNote={We consider a class of generalized skew-normal distributions that is useful for selection modeling and robustness analysis and derive a class of semiparametric estimators for the location and scale parameters of the central part of the model. We show that these estimators are consistent and asymptotically normal. We present the semiparametric efficiency bound and derive the locally efficient estimator that achieves this bound if the model for the skewing function is correctly specified. The estimators that we propose are consistent and asymptotically normal even if the model for the skewing function is misspecified, and we compute the loss of efficiency in such cases. We conduct a simulation study and provide an illustrative example. 
Our method is applicable to generalized skew-elliptical distributions.}, number={471}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Ma, YY and Genton, MG and Tsiatis, AA}, year={2005}, month={Sep}, pages={980–989} } @article{allen_satten_tsiatis_2005, title={Locally-efficient robust estimation of haplotype-disease association in family-based studies}, volume={92}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/92.3.559}, abstractNote={Journal Article Locally-efficient robust estimation of haplotype-disease association in family-based studies Get access Andrew S. Allen, Andrew S. Allen Search for other works by this author on: Oxford Academic Google Scholar Glen A. Satten, Glen A. Satten Search for other works by this author on: Oxford Academic Google Scholar Anastasios A. Tsiatis Anastasios A. Tsiatis Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 92, Issue 3, September 2005, Pages 559–571, https://doi.org/10.1093/biomet/92.3.559 Published: 01 September 2005}, number={3}, journal={BIOMETRIKA}, author={Allen, AS and Satten, GA and Tsiatis, AA}, year={2005}, month={Sep}, pages={559–571} } @article{davidian_tsiatis_leon_2005, title={Semiparametric estimation of treatment effect in a pretest-posttest study with missing data}, volume={20}, number={3}, journal={Statistical Science}, author={Davidian, M. and Tsiatis, A. A. 
and Leon, S.}, year={2005}, pages={261–282} } @article{gao_tsiatis_2005, title={Semiparametric estimators for the regression coefficients in the linear transformation competing risks model with missing cause of failure}, volume={92}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/92.4.875}, abstractNote={SUMMARY We consider the problem of estimating the regression coefficients in a competing risks model, where the relationship between the cause-specific hazard for the cause of interest and covariates is described using linear transformation models and when cause of failure is missing at random for a subset of individuals. Using the theory of Robins et al. (1994) for missing data problems and the approach of Chen et al. (2002) for estimating regression coefficients for linear transformation models, we derive augmented inverse probability weighted complete-case estimators for the regression coefficients that are doubly robust. Simulations demonstrate the relevance of the theory in finite samples.}, number={4}, journal={BIOMETRIKA}, author={Gao, GZ and Tsiatis, AA}, year={2005}, month={Dec}, pages={875–891} } @article{johnson_tsiatis_2005, title={Semiparametric inference in observational duration-response studies, with duration possibly right-censored}, volume={92}, ISSN={["1464-3510"]}, DOI={10.1093/biomet/92.3.605}, abstractNote={Journal Article Semiparametric inference in observational duration-response studies, with duration possibly right-censored Get access Brent A. Johnson, Brent A. Johnson Search for other works by this author on: Oxford Academic Google Scholar Anastasios A. Tsiatis Anastasios A. 
Tsiatis Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 92, Issue 3, September 2005, Pages 605–618, https://doi.org/10.1093/biomet/92.3.605 Published: 01 September 2005}, number={3}, journal={BIOMETRIKA}, author={Johnson, BA and Tsiatis, AA}, year={2005}, month={Sep}, pages={605–618} } @article{tsiatis_davidian_2005, title={Statistical issues arising in the Women's Health Initiative - Discussion}, volume={61}, ISSN={["1541-0420"]}, DOI={10.1111/j.0006-341X.2005.454_9.x}, abstractNote={BiometricsVolume 61, Issue 4 p. 933-935 Discussion on "Statistical Issues Arising in the Women's Health Initiative" Anastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics Box 8203, North Carolina State University Raleigh, North Carolina 27695-8203, U.S.A. Search for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics Box 8203, North Carolina State University Raleigh, North Carolina 27695-8203, U.S.A. email:[email protected]Search for more papers by this author Anastasios A. Tsiatis, Anastasios A. Tsiatis Department of Statistics Box 8203, North Carolina State University Raleigh, North Carolina 27695-8203, U.S.A. Search for more papers by this authorMarie Davidian, Marie Davidian Department of Statistics Box 8203, North Carolina State University Raleigh, North Carolina 27695-8203, U.S.A. email:[email protected]Search for more papers by this author First published: 02 December 2005 https://doi.org/10.1111/j.0006-341X.2005.454_9.xRead the full textAboutPDF ToolsRequest permissionExport citationAdd to favoritesTrack citation ShareShare Give accessShare full text accessShare full-text accessPlease review our Terms and Conditions of Use and check box below to share full-text version of article.I have read and accept the Wiley Online Library Terms and Conditions of UseShareable LinkUse the link below to share a full-text version of this article with your friends and colleagues. 
Learn more.Copy URL Share a linkShare onFacebookTwitterLinkedInRedditWechat Volume61, Issue4December 2005Pages 933-935 RelatedInformation}, number={4}, journal={BIOMETRICS}, author={Tsiatis, AA and Davidian, M}, year={2005}, month={Dec}, pages={933–935} } @article{pieper_tsiatis_davidian_hasselblad_kleiman_boersma_chang_griffin_armstrong_califf_et al._2004, title={Differential Treatment Benefit of Platelet Glycoprotein IIb/IIIa Inhibition With Percutaneous Coronary Intervention Versus Medical Therapy for Acute Coronary Syndromes}, volume={109}, ISSN={0009-7322 1524-4539}, url={http://dx.doi.org/10.1161/01.cir.0000112570.97220.89}, DOI={10.1161/01.cir.0000112570.97220.89}, abstractNote={Although many believe that platelet glycoprotein IIb/IIIa inhibitors should be used only in acute coronary syndrome patients undergoing percutaneous coronary intervention, supporting data from randomized clinical trials are tenuous. The assumption that these agents are useful only in conjunction with percutaneous coronary intervention is based primarily on inappropriate subgroup analyses performed across the glycoprotein IIb/IIIa inhibitor trials.We describe the problems with these analytical techniques and demonstrate that different approaches to the question can result in opposing answers.Clinical-practice decisions and practice guidelines should be based on overall trial results and not analyses of post-randomization subgroups.}, number={5}, journal={Circulation}, publisher={Ovid Technologies (Wolters Kluwer Health)}, author={Pieper, Karen S. and Tsiatis, Anastasios A. and Davidian, Marie and Hasselblad, Vic and Kleiman, Neal S. and Boersma, Eric and Chang, Wei-Ching and Griffin, Jeffrey and Armstrong, Paul W. and Califf, Robert M. 
and et al.}, year={2004}, month={Feb}, pages={641–646} } @article{johnson_tsiatis_2004, title={Estimating mean response as a function of treatment duration in an observational study, where duration may be informatively censored}, volume={60}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2004.00175.x}, abstractNote={Summary . After a treatment is found to be effective in a clinical study, attention often focuses on the effect of treatment duration on outcome. Such an analysis facilitates recommendations on the most beneficial treatment duration. In many studies, the treatment duration, within certain limits, is left to the discretion of the investigators. It is often the case that treatment must be terminated prematurely due to an adverse event, in which case a recommended treatment duration is part of a policy that treats patients for a specified length of time or until a treatment‐censoring event occurs, whichever comes first. Evaluating mean response for a particular treatment‐duration policy from observational data is difficult due to censoring and the fact that it may not be reasonable to assume patients are prognostically similar across all treatment strategies. We propose an estimator for mean response as a function of treatment‐duration policy under these conditions. The method uses potential outcomes and embodies assumptions that allow consistent estimation of the mean response. The estimator is evaluated through simulation studies and demonstrated by application to the ESPRIT infusion trial coordinated at Duke University Medical Center.}, number={2}, journal={BIOMETRICS}, author={Johnson, BA and Tsiatis, AA}, year={2004}, month={Jun}, pages={315–323} } @article{tsiatis_davidian_2004, title={Joint modeling of longitudinal and time-to-event data: An overview}, volume={14}, number={3}, journal={Statistica Sinica}, author={Tsiatis, A. A. 
and Davidian, M.}, year={2004}, pages={809–834} } @article{tsiatis_ma_2004, title={Locally efficient semiparametric estimators for functional measurement error models}, volume={91}, number={4}, journal={Biometrika}, author={Tsiatis, A. A. and Ma, Y. Y.}, year={2004}, pages={835–848} } @article{bodnar_davidian_siega-riz_tsiatis_2004, title={Marginal structural models for analyzing causal effects of time-dependent treatments: An application in perinatal epidemiology}, volume={159}, ISSN={["1476-6256"]}, DOI={10.1093/aje/kwh131}, abstractNote={Marginal structural models (MSMs) are causal models designed to adjust for time-dependent confounding in observational studies of time-varying treatments. MSMs are powerful tools for assessing causality with complicated, longitudinal data sets but have not been widely used by practitioners. The objective of this paper is to illustrate the fitting of an MSM for the causal effect of iron supplement use during pregnancy (time-varying treatment) on odds of anemia at delivery in the presence of time-dependent confounding. Data from pregnant women enrolled in the Iron Supplementation Study (Raleigh, North Carolina, 1997-1999) were used. The authors highlight complexities of MSMs and key issues epidemiologists should recognize before and while undertaking an analysis with these methods and show how such methods can be readily interpreted in existing software packages, including SAS and Stata. 
The authors emphasize that if a data set with rich information on confounders is available, MSMs can be used straightforwardly to make robust inferences about causal effects of time-dependent treatments/exposures in epidemiologic research.}, number={10}, journal={AMERICAN JOURNAL OF EPIDEMIOLOGY}, author={Bodnar, LM and Davidian, M and Siega-Riz, AM and Tsiatis, AA}, year={2004}, month={May}, pages={926–934} } @article{rebeiz_dery_tsiatis_jc o'shea_johnson_hellkamp_pieper_gilchrist_slater_muhlestein_et al._2004, title={Optimal duration of eptifibatide infusion in percutaneous coronary intervention (an ESPRIT Substudy)}, volume={94}, ISSN={["1879-1913"]}, DOI={10.1016/j.amjcard.2004.06.030}, abstractNote={Although randomized trials have clearly demonstrated the clinical efficacy with regimens of platelet glycoprotein IIb/IIIa antagonists that result in >80% inhibition of baseline platelet aggregation in percutaneous coronary intervention (PCI), there are no data available concerning the optimal duration of infusion of these agents. In an era when the length of hospitalization has a major impact on health care costs, the determination of the optimal duration of the infusion of these drugs after PCI is of great relevance. 
The investigators therefore sought to determine the optimal length of the infusion of eptifibatide after PCI by analyzing the outcomes of patients enrolled in the Enhanced Suppression of the Platelet IIb/IIIa Receptor With Integrilin Therapy trial who were randomized to treatment with eptifibatide.}, number={7}, journal={AMERICAN JOURNAL OF CARDIOLOGY}, author={Rebeiz, AG and Dery, JP and Tsiatis, AA and JC O'Shea and Johnson, BA and Hellkamp, AS and Pieper, KS and Gilchrist, IC and Slater, J and Muhlestein, JB and et al.}, year={2004}, month={Oct}, pages={926–929} } @article{wahed_tsiatis_2004, title={Optimal estimator for the survival distribution and related quantities for treatment policies in two-stage randomization designs in clinical trials}, volume={60}, ISSN={["1541-0420"]}, DOI={10.1111/j.0006-341X.2004.00160.x}, abstractNote={Two-stage designs, where patients are initially randomized to an induction therapy and then depending upon their response and consent, are randomized to a maintenance therapy, are common in cancer and other clinical trials. The goal is to compare different combinations of primary and maintenance therapies to find the combination that is most beneficial. In practice, the analysis is usually conducted in two separate stages which does not directly address the major objective of finding the best combination. Recently Lunceford, Davidian, and Tsiatis (2002, Biometrics58, 48-57) introduced ad hoc estimators for the survival distribution and mean restricted survival time under different treatment policies. These estimators are consistent but not efficient, and do not include information from auxiliary covariates. In this article we derive estimators that are easy to compute and are more efficient than previous estimators. We also show how to improve efficiency further by taking into account additional information from auxiliary variables. 
Large sample properties of these estimators are derived and comparisons with other estimators are made using simulation. We apply our estimators to a leukemia clinical trial data set that motivated this study.}, number={1}, journal={BIOMETRICS}, author={Wahed, AS and Tsiatis, AA}, year={2004}, month={Mar}, pages={124–133} } @article{sachdev_sun_tsiatis_2004, title={The prognostic importance of comorbidity for mortality in patients with stable coronary artery disease}, volume={13}, ISSN={1062-1458}, url={http://dx.doi.org/10.1016/j.accreview.2004.03.077}, DOI={10.1016/j.accreview.2004.03.077}, number={4}, journal={ACC Current Journal Review}, publisher={Elsevier BV}, author={Sachdev, M. and Sun, J.L. and Tsiatis, A.A.}, year={2004}, month={Apr}, pages={11–12} } @article{sachdev_sun_tsiatis_nelson_mark_jollis_2004, title={The prognostic importance of comorbidity for mortality in patients with stable coronary artery disease}, volume={43}, ISSN={0735-1097}, url={http://dx.doi.org/10.1016/j.jacc.2003.10.031}, DOI={10.1016/j.jacc.2003.10.031}, abstractNote={To identify the prevalent and prognostically important coexisting illnesses among single coronary artery disease (CAD) patients.As the population ages, physicians are increasingly required to make decisions concerning patients with multiple co-existing illnesses (comorbidity). Many trials of CAD therapy have excluded patients with significant comorbidity, such that there are limited data to guide the management of those patients.To consider the long-term prognostic importance of comorbid illness, we examined a cohort of 1471 patients with CAD who underwent cardiac catheterization between 1985 and 1989 and were followed up through 2000 in the Duke Databank for Cardiovascular Diseases. Weights were assigned to individual diseases according to their prognostic significance in Cox proportional hazards models, thus creating a new CAD-specific index. 
The new index was compared with the widely used Charlson index, according to prevalence of conditions, individual and overall associations with survival, and agreement.The Charlson index and the CAD-specific index were highly associated with long-term survival and almost equivalent to left ventricular ejection fraction. When considering the components of the Charlson index, diabetes, renal insufficiency, chronic obstructive pulmonary disease, and peripheral vascular disease had greater prognostic significance among CAD patients, whereas peptic ulcer disease, connective tissue disease, and lymphoma were less significant. Hemiplegia, leukemia, lymphoma, severe liver disease, and acquired immunodeficiency syndrome were rarely identified among patients undergoing coronary angiography.Comorbid disease is strongly associated with long-term survival in patients with CAD. These data suggest co-existing illnesses should be measured and considered in clinical trials, disease registries, quality comparisons, and counseling of individual patients.}, number={4}, journal={Journal of the American College of Cardiology}, publisher={Elsevier BV}, author={Sachdev, Molly and Sun, Jie Lena and Tsiatis, Anastasios A. and Nelson, Charlotte L. and Mark, Daniel B. and Jollis, James G.}, year={2004}, month={Feb}, pages={576–582} } @article{tsiatis_mehta_2003, title={On the inefficiency of the adaptive design for monitoring clinical trials}, volume={90}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/90.2.367}, abstractNote={Journal Article On the inefficiency of the adaptive design for monitoring clinical trials Get access Anastasios A. Tsiatis, Anastasios A. 
Tsiatis Search for other works by this author on: Oxford Academic Google Scholar Cyrus Mehta Cyrus Mehta Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 90, Issue 2, June 2003, Pages 367–378, https://doi.org/10.1093/biomet/90.2.367 Published: 01 June 2003}, number={2}, journal={BIOMETRIKA}, author={Tsiatis, AA and Mehta, C}, year={2003}, month={Jun}, pages={367–378} } @article{leon_tsiatis_davidian_2003, title={Semiparametric estimation of treatment effect in a pretest-posttest study}, volume={59}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2003.00120.x}, abstractNote={Inference on treatment effects in a pretest-posttest study is a routine objective in medicine, public health, and other fields. A number of approaches have been advocated. We take a semiparametric perspective, making no assumptions about the distributions of baseline and posttest responses. By representing the situation in terms of counterfactual random variables, we exploit recent developments in the literature on missing data and causal inference, to derive the class of all consistent treatment effect estimators, identify the most efficient such estimator, and outline strategies for implementation of estimators that may improve on popular methods. We demonstrate the methods and their properties via simulation and by application to a data set from an HIV clinical trial.}, number={4}, journal={BIOMETRICS}, author={Leon, S and Tsiatis, AA and Davidian, M}, year={2003}, month={Dec}, pages={1046–1055} } @article{song_davidian_tsiatis_2002, title={A semiparametric likelihood approach to joint modeling of longitudinal and time-to-event data}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00742.x}, abstractNote={Summary. Joint models for a time-to-event (e.g., survival) and a longitudinal response have generated considerable recent interest. 
The longitudinal data are assumed to follow a mixed effects model, and a proportional hazards model depending on the longitudinal random effects and other covariates is assumed for the survival endpoint. Interest may focus on inference on the longitudinal data process, which is informatively censored, or on the hazard relationship. Several methods for fitting such models have been proposed, most requiring a parametric distributional assumption (normality) on the random effects. A natural concern is sensitivity to violation of this assumption; moreover, a restrictive distributional assumption may obscure key features in the data. We investigate these issues through our proposal of a likelihood-based approach that requires only the assumption that the random effects have a smooth density. Implementation via the EM algorithm is described, and performance and the benefits for uncovering noteworthy features are illustrated by application to data from an HIV clinical trial and by simulation.}, number={4}, journal={BIOMETRICS}, author={Song, X and Davidian, M and Tsiatis, AA}, year={2002}, month={Dec}, pages={742–753} } @article{song_davidian_tsiatis_2002, title={An estimator for the proportional hazards model with multiple longitudinal covariates measured with error}, volume={3}, number={4}, journal={Biostatistics (Oxford, England)}, author={Song, X. A. and Davidian, M. and Tsiatis, A. A.}, year={2002}, pages={511–528} } @article{skyler_brown_chase_collier_cowie_eisenbarth_fradkin_grave_greenbaum_jackson_et al._2002, title={Effects of insulin in relatives of patients with type 1 diabetes mellitus}, volume={346}, number={22}, journal={New England Journal of Medicine}, author={Skyler, J. S. and Brown, D. and Chase, H. P. and Collier, E. and Cowie, C. and Eisenbarth, G. S. and Fradkin, J. and Grave, G. and Greenbaum, C. and Jackson, R. A. 
and et al.}, year={2002}, pages={1685–1691} } @article{lunceford_davidian_tsiatis_2002, title={Estimation of survival distributions of treatment policies in two-stage randomization designs in clinical trials}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00048.x}, abstractNote={Summary. Some clinical trials follow a design where patients are randomized to a primary therapy at entry followed by another randomization to maintenance therapy contingent upon disease remission. Ideally, analysis would allow different treatment policies, i.e., combinations of primary and maintenance therapy if specified up‐front, to be compared. Standard practice is to conduct separate analyses for the primary and follow‐up treatments, which does not address this issue directly. We propose consistent estimators for the survival distribution and mean restricted survival time for each treatment policy in such two‐stage studies and derive large‐sample properties. The methods are demonstrated on a leukemia clinical trial data set and through simulation.}, number={1}, journal={BIOMETRICS}, author={Lunceford, JK and Davidian, M and Tsiatis, AA}, year={2002}, month={Mar}, pages={48–57} } @article{bang_tsiatis_2002, title={Median regression with censored cost data}, volume={58}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2002.00643.x}, abstractNote={Summary. Because of the skewness of the distribution of medical costs, we consider modeling the median as well as other quantiles when establishing regression relationships to covariates. In many applications, the medical cost data are also right censored. In this article, we propose semiparametric procedures for estimating the parameters in median regression models based on weighted estimating equations when censoring is present. Numerical studies are conducted to show that our estimators perform well with small samples and the resulting inference is reliable in circumstances of practical importance. 
The methods are applied to a dataset for medical costs of patients with colorectal cancer.}, number={3}, journal={BIOMETRICS}, author={Bang, H and Tsiatis, AA}, year={2002}, month={Sep}, pages={643–649} } @article{tsiatis_davidian_mcneney_2002, title={Multiple imputation methods for testing treatment differences in survival distributions with missing cause of failure}, volume={89}, number={1}, journal={Biometrika}, author={Tsiatis, A. A. and Davidian, M. and Mcneney, B.}, year={2002}, pages={238–244} } @article{tsiatis_davidian_2001, title={A semiparametric estimator for the proportional hazards model with longitudinal covariates measured with error}, volume={88}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/88.2.447}, abstractNote={Journal Article A semiparametric estimator for the proportional hazards model with longitudinal covariates measured with error Get access Anastasios A. Tsiatis, Anastasios A. Tsiatis Search for other works by this author on: Oxford Academic Google Scholar Marie Davidian Marie Davidian Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 88, Issue 2, 1 June 2001, Pages 447–458, https://doi.org/10.1093/biomet/88.2.447 Published: 01 June 2001}, number={2}, journal={BIOMETRIKA}, author={Tsiatis, AA and Davidian, M}, year={2001}, month={Jun}, pages={447–458} } @article{chen_tsiatis_2001, title={Causal inference on the difference of the restricted mean lifetime between two groups}, volume={57}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2001.01030.x}, abstractNote={Summary. When comparing survival times between two treatment groups, it may be more appropriate to compare the restricted mean lifetime, i.e., the expectation of lifetime restricted to a time L, rather than mean lifetime in order to accommodate censoring. When the treatments are not assigned to patients randomly, as in observational studies, we also need to account for treatment imbalances in confounding factors. 
In this article, we propose estimators for the difference of the restricted mean lifetime between two groups that account for treatment imbalances in prognostic factors assuming a proportional hazards relationship. Large-sample properties of our estimators based on martingale theory for counting processes are also derived. Simulation studies were conducted to compare these estimators and to assess the adequacy of the large-sample approximations. Our methods are also applied to an observational database of acute coronary syndrome patients from Duke University Medical Center to estimate the treatment effect on the restricted mean lifetime over 5 years.}, number={4}, journal={BIOMETRICS}, author={Chen, PY and Tsiatis, AA}, year={2001}, month={Dec}, pages={1030–1038} } @article{betensky_rabinowitz_tsiatis_2001, title={Computationally simple accelerated failure time regression for interval censored data}, volume={88}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/88.3.703}, abstractNote={Journal Article Computationally simple accelerated failure time regression for interval censored data Get access Rebecca A. Betensky, Rebecca A. Betensky Search for other works by this author on: Oxford Academic Google Scholar Daniel Rabinowitz, Daniel Rabinowitz Search for other works by this author on: Oxford Academic Google Scholar Anastasios A. Tsiatis Anastasios A. 
Tsiatis Search for other works by this author on: Oxford Academic Google Scholar Biometrika, Volume 88, Issue 3, 1 October 2001, Pages 703–711, https://doi.org/10.1093/biomet/88.3.703 Published: 01 October 2001}, number={3}, journal={BIOMETRIKA}, author={Betensky, RA and Rabinowitz, D and Tsiatis, AA}, year={2001}, month={Sep}, pages={703–711} } @article{yang_tsiatis_2001, title={Efficiency study of estimators for a treatment effect in a pretest-posttest trial}, volume={55}, ISSN={["0003-1305"]}, DOI={10.1198/000313001753272466}, abstractNote={Several possible methods used to evaluate treatment effects in a randomized pretest–posttest trial with two treatment groups are the two-sample t test, the paired t test, analysis of covariance I (ANCOVA I), the analysis of covariance II (ANCOVA II), and generalized estimating equations (GEE). The ANCOVA I includes treatment and baseline response as covariates in a linear model and ANCOVA II additionally includes an interaction term between the baseline response and treatment indicator as a covariate. The parameters in the ANCOVAI and ANCOVAII models are generally estimated using ordinary least squares. In this article, a semiparametric model, which makes no assumptions about the response distributions, is used. The asymptotic properties of the estimators derived from these five methods and their relative efficiencies are discussed under this semiparametric model. We show that all these methods yield consistent estimators for the treatment effect which have asymptotically normal distributions under the semiparametric model. The GEE and the ANCOVA II estimators are asymptotically equivalent and the most efficient. 
The estimators from other three methods are less efficient except under some special conditions which are outlined in the article.}, number={4}, journal={AMERICAN STATISTICIAN}, author={Yang, L and Tsiatis, AA}, year={2001}, month={Nov}, pages={314–321} } @article{mehta_tsiatis_2001, title={Flexible sample size considerations using information-based interim monitoring}, volume={35}, ISSN={["0092-8615"]}, DOI={10.1177/009286150103500407}, abstractNote={At the design phase of a clinical trial the total number of participants needed to detect a clinically important treatment difference with sufficient precision frequently depends on nuisance parameters such as variance, baseline response rate, or regression coefficients other than the main effect. In practical applications, nuisance parameter values are often unreliable guesses founded on little or no available past history. Sample size calculations based on these initial guesses may, therefore, lead to under- or over-powered studies. In this paper, we argue that the precision with which a treatment effect is estimated is directly related to the statistical information in the data. In general, statistical information is a complicated function of sample size and nuisance parameters. However, the amount of information necessary to answer the scientific question concerning treatment difference is easily calculated a priori and applies to almost any statistical model for a large variety of endpoints. It is thus possible to be flexible on sample size but rather continue collecting data until we have achieved the desired information. Such a strategy is well suited to being adopted in conjunction with a group sequential clinical trial where the data are monitored routinely anyway. 
We present several scenarios and examples of how group sequential information-based design and monitoring can be carried out and demonstrate through simulations that this type of strategy will indeed give us the desired operating characteristics.}, number={4}, journal={DRUG INFORMATION JOURNAL}, author={Mehta, CR and Tsiatis, AA}, year={2001}, pages={1095–1112} } @article{pampallona_tsiatis_kim_2001, title={Interim monitoring of group sequential trials using spending functions for the type I and type II error probabilities}, volume={35}, ISSN={["0092-8615"]}, DOI={10.1177/009286150103500408}, abstractNote={Lan and DeMets (1) introduced a flexible procedure for the analysis of sequential trials based on the discretization of the Brownian motion. In this paper, we consider an extension of this strategy that preserves both the desired significance level and the power of any group sequential trial. We propose a procedure that allows for any number and timing of interim analyses. This entails the derivation of boundaries at the monitoring stage by means of two spending functions, one for the type I and one for the type II error probabilities, as well as the adjustment of the target maximum information as the trial progresses. The general solution to the problem is provided together with a discussion of implementation strategies. The procedure is intended for group sequential designs that allow early stopping in favor of both the null and the alternative hypotheses, and an example is presented for this case. 
However, its application is also easily extended for designs where there is no early stopping in favor of the null.}, number={4}, journal={DRUG INFORMATION JOURNAL}, author={Pampallona, S and Tsiatis, A and Kim, K}, year={2001}, pages={1113–1121} } @article{lu_tsiatis_2001, title={Multiple imputation methods for estimating regression coefficients in the competing risks model with missing cause of failure}, volume={57}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2001.01191.x}, abstractNote={Summary. We propose a method to estimate the regression coefficients in a competing risks model where the cause-specific hazard for the cause of interest is related to covariates through a proportional hazards relationship and when cause of failure is missing for some individuals. We use multiple imputation procedures to impute missing cause of failure, where the probability that a missing cause is the cause of interest may depend on auxiliary covariates, and combine the maximum partial likelihood estimators computed from several imputed data sets into an estimator that is consistent and asymptotically normal. A consistent estimator for the asymptotic variance is also derived. Simulation results suggest the relevance of the theory in finite samples. Results are also illustrated with data from a breast cancer study.}, number={4}, journal={BIOMETRICS}, author={Lu, KF and Tsiatis, AA}, year={2001}, month={Dec}, pages={1191–1197} } @article{zhao_tsiatis_2001, title={Testing equality of survival functions of quality-adjusted lifetime}, volume={57}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2001.00861.x}, abstractNote={Summary. We present a method for comparing the survival functions of quality-adjusted lifetime from two treatments. This test statistic becomes the ordinary log-rank test when quality-adjusted lifetime is the same as the survival time. 
Simulation experiments are conducted to examine the behavior of our proposed test statistic under both null and alternative hypotheses. In addition, we apply our method to a breast cancer trial for comparing the distribution of quality-adjusted lifetime between two treatment regimes.}, number={3}, journal={BIOMETRICS}, author={Zhao, HW and Tsiatis, AA}, year={2001}, month={Sep}, pages={861–867} } @article{murray_tsiatis_2001, title={Using auxiliary time-dependent covariates to recover information in nonparametric testing with censored data}, volume={7}, ISSN={["1380-7870"]}, DOI={10.1023/A:1011392622173}, abstractNote={Murrayand Tsiatis (1996) described a weighted survival estimate thatincorporates prognostic time-dependent covariate informationto increase the efficiency of estimation. We propose a test statisticbased on the statistic of Pepe and Fleming (1989, 1991) thatincorporates these weighted survival estimates. As in Pepe andFleming, the test is an integrated weighted difference of twoestimated survival curves. This test has been shown to be effectiveat detecting survival differences in crossing hazards settingswhere the logrank test performs poorly. This method uses stratifiedlongitudinal covariate information to get more precise estimatesof the underlying survival curves when there is censored informationand this leads to more powerful tests. Another important featureof the test is that it remains valid when informative censoringis captured by the incorporated covariate. In this case, thePepe-Fleming statistic is known to be biased and should not beused. 
These methods could be useful in clinical trials with heavycensoring that include collection over time of covariates, suchas laboratory measurements, that are prognostic of subsequentsurvival or capture information related to censoring.}, number={2}, journal={LIFETIME DATA ANALYSIS}, author={Murray, S and Tsiatis, AA}, year={2001}, pages={125–141} } @article{anstrom_tsiatis_2001, title={Utilizing propensity scores to estimate causal treatment effects with censored time-lagged data}, volume={57}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2001.01207.x}, abstractNote={Summary. Observational studies frequently are conducted to compare long-term effects of treatments. Without randomization, patients receiving one treatment are not guaranteed to be prognostically comparable to those receiving another treatment. Furthermore, the response of interest may be right-censored because of incomplete follow-up. Statistical methods that do not account for censoring and confounding may lead to biased estimates. This article presents a method for estimating treatment effects in nonrandomized studies with right-censored responses. We review the assumptions required to estimate average causal effects and derive an estimator for comparing two treatments by applying inverse weights to the complete cases. The weights are determined according to the estimated probability of receiving treatment conditional on covariates and the estimated treatment-specific censoring distribution. By utilizing martingale representations, the estimator is shown to be asymptotically normal and an estimator for the asymptotic variance is derived. Simulation results are presented to evaluate the properties of the estimator. 
These methods are applied to an observational data set of acute coronary syndrome patients from Duke University Medical Center to estimate the effect of a treatment strategy on the mean 5-year medical cost.}, number={4}, journal={BIOMETRICS}, author={Anstrom, KJ and Tsiatis, AA}, year={2001}, month={Dec}, pages={1207–1218} } @article{mark_harrington_lincoff_califf_nelson_tsiatis_buell_mahaffey_davidson-ray_topol_2000, title={Cost-effectiveness of platelet glycoprotein IIb/IIIa inhibition with eptifibatide in patients with non-ST-elevation acute coronary syndromes}, volume={101}, ISSN={["0009-7322"]}, DOI={10.1161/01.cir.101.4.366}, abstractNote={Background —In the PURSUIT trial, eptifibatide significantly reduced the 30-day incidence of death and myocardial infarction relative to placebo in 9461 patients with an acute coronary syndrome (unstable angina or non–Q-wave myocardial infarction). Methods and Results —We conducted a 2-part prospective economic substudy of the 3522 US patients enrolled in PURSUIT: (1) an empirical intention-to-treat comparison of medical costs (hospital plus physician) up to 6 months after hospitalization and (2) a lifetime cost-effectiveness analysis. The base-case cost-effectiveness ratio was expressed as the 1996 US dollars required to add 1 life-year with eptifibatide therapy. The 2 treatment arms had equivalent resource consumption and medical costs (exclusive of the cost of the eptifibatide regimen) during the index (enrollment) hospitalization ( P =0.78) and up to 6 months afterward ( P =0.60). The average wholesale price of the eptifibatide regimen was $1217, but a typical hospital discounted price was $1014. The estimated life expectancy from randomization in the US patients was 15.96 years for eptifibatide and 15.85 years for placebo, an incremental difference of 0.111. The incremental cost-effectiveness ratio for eptifibatide therapy in US PURSUIT patients was $16 491 per year of life saved. 
This result was robust through a wide range of sensitivity analyses. The cost-utility ratio for eptifibatide (using time trade-off defined utilities) was $19 693 per added quality-adjusted life-year. Conclusions —Based on the results observed in the US PURSUIT patients, the routine addition of eptifibatide to standard care for non–ST-elevation acute coronary syndrome patients is economically attractive by conventional standards.}, number={4}, journal={CIRCULATION}, author={Mark, DB and Harrington, RA and Lincoff, AM and Califf, RM and Nelson, CL and Tsiatis, AA and Buell, H and Mahaffey, KW and Davidson-Ray, L and Topol, EJ}, year={2000}, month={Feb}, pages={366–371} } @article{bang_tsiatis_2000, title={Estimating medical costs with censored data}, volume={87}, ISSN={["0006-3444"]}, DOI={10.1093/biomet/87.2.329}, abstractNote={Incompleteness of follow-up data is a common problem in estimating medical costs. Native analysis using summary statistics on the collected data can result in severely misleading statistical inference. This paper focuses on the problem of estimating the mean medical cost from a sample of individuals whose medical costs may be right censored. A class of weighted estimators which account appropriately for censoring are introduced. Our estimators are shown to be consistent and asymptotically normal with easily estimated variances. The efficiency of these estimators is studied with the goal of finding as efficient an estimator for the mean medical cost as is feasible. Extensive simulation studies are used to show that our estimators perform well in finite samples, even with heavily censored data, for a variety of circumstances. The methods are applied to a set of cost data from a cardiology trial conducted by the Duke University Medical Center. 
Extensions to other censored data problems are also discussed.}, number={2}, journal={BIOMETRIKA}, author={Bang, H and Tsiatis, AA}, year={2000}, month=jun, pages={329--343} } @article{tsiatis_2000, title={Estimating the distribution of quality-adjusted life with censored data}, volume={139}, ISSN={0002-8703}, DOI={10.1016/S0002-8703(00)90068-1}, number={4}, journal={AMERICAN HEART JOURNAL}, author={Tsiatis, AA}, year={2000}, month=apr, pages={S177--S181} } @article{babiker_bartlett_breckenridge_collins_coombs_cooper_creagh_cross_daniels_darbyshire_etal_2000, title={Human immunodeficiency virus type 1 RNA level and CD4 count as prognostic markers and surrogate end points: A meta-analysis}, volume={16}, number={12}, journal={AIDS Research and Human Retroviruses}, author={Babiker, A. and Bartlett, J. and Breckenridge, A. and Collins, G. and Coombs, R. and Cooper, D. and Creagh, T. and Cross, A. and Daniels, M. and Darbyshire, J. and others}, year={2000}, pages={1123--1133} } @article{fine_tsiatis_2000, title={Testing for differences in survival with delayed ascertainment}, volume={56}, ISSN={0006-341X}, DOI={10.1111/j.0006-341X.2000.00145.x}, abstractNote={Summary. During the interim stages of most large‐scale clinical trials, knowledge that a patient is alive or dead is usually not up‐to‐date. This is due to the pattern of patient visits to hospitals as well as the administrative set‐up used by the study to obtain information on vital status. On a two‐armed study, if the process of ascertaining vital status is not the same in both treatment groups, then the standard method of testing based on the logrank statistic may not be applicable. Instead, an ad hoc modification to the logrank test, which artificially truncates follow‐up prior to the time of analysis, is often used. These approaches have not been formally addressed in the literature. In the early stages of a clinical trial, severe bias or loss of power may result.
For this situation, we propose a class of test statistics that extends the usual class of U statistics. Asymptotic normality is derived by reformulating the statistics in terms of counting processes and employing the theory of U statistics along with martingale techniques. For early interim analyses, a numerical study indicates that the new tests can be more powerful than the current practice when differential ascertainment is present. To illustrate the potential loss of information when lagging follow‐up to control for ascertainment delays, we reanalyze an AIDS clinical trial with the truncated logrank and the new statistics.}, number={1}, journal={BIOMETRICS}, author={Fine, JP and Tsiatis, AA}, year={2000}, month={Mar}, pages={145–153} } @article{rabinowitz_betensky_tsiatis_2000, title={Using conditional logistic regression to fit proportional odds models to interval censored data}, volume={56}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.2000.00511.x}, abstractNote={Summary. An easily implemented approach to fitting the proportional odds regression model to interval‐censored data is presented. The approach is based on using conditional logistic regression routines in standard statistical packages. Using conditional logistic regression allows the practitioner to sidestep complications that attend estimation of the baseline odds ratio function. The approach is applicable both for interval‐censored data in settings in which examinations continue regardless of whether the event of interest has occurred and for current status data. The methodology is illustrated through an application to data from an AIDS study of the effect of treatment with ZDV + ddC versus ZDV alone on 50% drop in CD4 cell count from baseline level. 
Simulations are presented to assess the accuracy of the procedure.}, number={2}, journal={BIOMETRICS}, author={Rabinowitz, D and Betensky, RA and Tsiatis, AA}, year={2000}, month={Jun}, pages={511–518} } @article{zhao_tsiatis_1999, title={Efficient estimation of the distribution of quality-adjusted survival time}, volume={55}, ISSN={["0006-341X"]}, DOI={10.1111/j.0006-341X.1999.01101.x}, abstractNote={Summary. Quality of life is an important aspect in evaluation of clinical trials of chronic diseases, such as cancer and AIDS. Quality‐adjusted survival analysis is a method that combines both the quantity and quality of a patient's life into one single measure. In this paper, we discuss the efficiency of weighted estimators for the distribution of quality‐adjusted survival time. Using the general representation theorem for missing data processes, we are able to derive an estimator that is more efficient than the one proposed in Zhao and Tsiatis (1997, Biometrika 84 , 339–348). Simulation experiments are conducted to assess the small sample properties of this estimator and to compare it with the semiparametric efficiency bound. The value of this estimator is demonstrated from an application of the method to a data set obtained from a breast cancer clinical trial.}, number={4}, journal={BIOMETRICS}, author={Zhao, HW and Tsiatis, AA}, year={1999}, month={Dec}, pages={1101–1107} } @article{hu_tsiatis_davidian_1998, title={Estimating the parameters in the Cox model when covariate variables are measured with error}, volume={54}, ISSN={["0006-341X"]}, DOI={10.2307/2533667}, abstractNote={The Cox proportional hazards model is commonly used to model survival data as a function of covariates. Because of the measuring mechanism or the nature of the environment, covariates are often measured with error and are not directly observable. 
A naive approach is to use the observed values of the covariates in the Cox model, which usually produces biased estimates of the true association of interest. An alternative strategy is to take into account the error in measurement, which may be carried out for the Cox model in a number of ways. We examine several such approaches and compare and contrast them through several simulation studies. We introduce a likelihood-based approach, which we refer to as the semiparametric method, and show that this method is an appealing alternative. The methods are applied to analyze the relationship between survival and CD4 count in patients with AIDS.}, number={4}, journal={BIOMETRICS}, author={Hu, P and Tsiatis, AA and Davidian, M}, year={1998}, month={Dec}, pages={1407–1419} } @article{scharfstein_tsiatis_gilbert_1998, title={Semiparametric efficient estimation in the generalized odds-rate class of regression models for right-censored time-to-event data}, volume={4}, DOI={10.1023/A:1009634103154}, abstractNote={The generalized odds-rate class of regression models for time to event data is indexed by a non-negative constant rho and assumes that [formula: see text] where g: rho(s) = log(rho-1(s-rho - 1)) for rho > 0, g0(s) = log(-logs), S(t[symbol: see text]Z) is the survival function of the time to event for an individual with q x 1 covariate vector Z, beta is a q x 1 vector of unknown regression parameters, and alpha(t) is some arbitrary increasing function of t. When rho = 0, this model is equivalent to the proportional hazards model and when rho = 1, this model reduces to the proportional odds model. In the presence of right censoring, we construct estimators for beta and exp(alpha(t)) and show that they are consistent and asymptotically normal. In addition, we show that the estimator for beta is semiparametric efficient in the sense that it attains the semiparametric variance bound.}, number={4}, journal={Lifetime Data Analysis}, author={Scharfstein, D. O. 
and Tsiatis, A. A. and Gilbert, P. B.}, year={1998}, pages={355--391} } @article{scharfstein_tsiatis_1998, title={Use of simulation and bootstrap in information-based group sequential studies}, volume={17}, DOI={10.1002/(SICI)1097-0258(19980115)17:1<75::AID-SIM731>3.0.CO;2-N}, abstractNote={In this paper, we present an information-based design and monitoring procedure which applies to any type of model for any type of group sequential study provided there is a unique parameter of interest one can estimate efficiently. Simulation techniques are described to handle the design phase of this procedure. Since designs depend on potentially unreliable guesses of nuisance parameters, we propose a bootstrap method that uses the information available at the interim analysis times to generate projections and prediction intervals for the time at which the study will be fully powered. A monitoring board can use this information to decide whether a redesign of the trial is warranted. We also show how to use simulation to redesign studies in progress. We illustrate all of these techniques with data from AIDS Clinical Trial Group Protocol 021. © 1998 John Wiley \& Sons, Ltd.}, number={1}, journal={STATISTICS IN MEDICINE}, author={Scharfstein, D. O. and Tsiatis, A. A.}, year={1998}, pages={75--87} } @article{zhao_tsiatis_1997, title={A consistent estimator for the distribution of quality adjusted survival time}, volume={84}, ISSN={0006-3444}, DOI={10.1093/biomet/84.2.339}, abstractNote={Quality adjusted survival analysis is a new approach to therapy evaluation in clinical trials. It has received much attention recently because of its ability to take patients' quality of life into consideration. In this paper, we present a method that enables us to calculate the survival distribution of quality adjusted lifetime.
Using martingale theory for counting processes, we can show that our estimator is asymptotically consistent, normally distributed, and its asymptotic variance estimate can be obtained analytically. Simulation experiments are conducted to compare our estimator with the true underlying distribution for two cases that are of practical importance.}, number={2}, journal={BIOMETRIKA}, author={Zhao, HW and Tsiatis, AA}, year={1997}, month={Jun}, pages={339–348} } @article{wulfsohn_tsiatis_1997, title={A joint model for survival and longitudinal data measured with error}, volume={53}, ISSN={["0006-341X"]}, DOI={10.2307/2533118}, abstractNote={The relationship between a longitudinal covariate and a failure time process can be assessed using the Cox proportional hazards regression model. We consider the problem of estimating the parameters in the Cox model when the longitudinal covariate is measured infrequently and with measurement error. We assume a repeated measures random effects model for the covariate process. Estimates of the parameters are obtained by maximizing the joint likelihood for the covariate process and the failure time process. This approach uses the available information optimally because we use both the covariate and survival data simultaneously. Parameters are estimated using the expectation-maximization algorithm. We argue that such a method is superior to naive methods where one maximizes the partial likelihood of the Cox model using the observed covariate values. 
It also improves on two-stage methods where, in the first stage, empirical Bayes estimates of the covariate process are computed and then used as time-dependent covariates in a second stage to find the parameters in the Cox model that maximize the partial likelihood.}, number={1}, journal={BIOMETRICS}, author={Wulfsohn, MS and Tsiatis, AA}, year={1997}, month=mar, pages={330--339} } @article{scharfstein_tsiatis_robins_1997, title={Semiparametric efficiency and its implication on the design and analysis of group-sequential studies}, volume={92}, ISSN={0162-1459}, DOI={10.2307/2965404}, abstractNote={Abstract Authors have shown that the time-sequential joint distributions of many statistics used to analyze data arising from group-sequential time-to-event and longitudinal studies are multivariate normal with an independent increments covariance structure. In Theorem 1 of this article, we demonstrate that this limiting distribution arises naturally when one uses an efficient test statistic to test a single parameter in a semiparametric or parametric model. Because we are able to think of many of the statistics in the literature in this fashion, the limiting distribution under investigation is just a special case of Theorem 1. Using this general structure, we then develop an information-based design and monitoring procedure that can be applied to any type of model for any type of group-sequential study provided that there is a unique parameter of interest that can be efficiently tested.}, number={440}, journal={JOURNAL OF THE AMERICAN STATISTICAL ASSOCIATION}, author={Scharfstein, DO and Tsiatis, AA and Robins, JM}, year={1997}, month=dec, pages={1342--1350} }