@article{caner_eliaz_2024,
  title    = {Should Humans Lie to Machines? {The} Incentive Compatibility of {Lasso} and {GLM} Structured Sparsity Estimators},
  issn     = {1537-2707},
  doi      = {10.1080/07350015.2024.2316102},
  abstract = {We consider situations where a user feeds her attributes to a machine learning method that tries to predict her best option based on a random sample of other users. The predictor is incentive-compatible if the user has no incentive to misreport her covariates. Focusing on the popular Lasso estimation technique, we borrow tools from high-dimensional statistics to characterize sufficient conditions that ensure that Lasso is incentive compatible in the asymptotic case. We extend our results to a new nonlinear machine learning technique, Generalized Linear Model Structured Sparsity estimators. Our results show that incentive compatibility is achieved if the tuning parameter is kept above some threshold in the case of asymptotics.},
  journal  = {Journal of Business \& Economic Statistics},
  author   = {Caner, Mehmet and Eliaz, Kfir},
  year     = {2024},
  month    = mar,
}
@article{caner_2023,
  title    = {Generalized Linear Models with Structured Sparsity Estimators},
  volume   = {236},
  issn     = {1872-6895},
  doi      = {10.1016/j.jeconom.2023.105478},
  abstract = {In this paper, we introduce structured sparsity estimators for use in Generalized Linear Models. Structured sparsity estimators in the least squares loss are introduced by Stucky and van de Geer (2018). Their proofs exclusively depend on their use of fixed design and normal errors. We extend their results to debiased structured sparsity estimators with Generalized Linear Model based loss through incorporating random design and non-sub Gaussian data. Structured sparsity estimation means that penalized loss functions with a possible sparsity structure in a norm. These norms include norms generated from convex cones. Our contributions are threefold: (1) We generalize the existing oracle inequality results in penalized Generalized Linear Models; (2) We provide a feasible weighted nodewise regression proof which generalizes the results in the literature; (3) We realize that norms used in feasible nodewise regression proofs should be weaker or equal to the norms in penalized Generalized Linear Model loss.},
  number   = {2},
  journal  = {Journal of Econometrics},
  author   = {Caner, Mehmet},
  year     = {2023},
  month    = oct,
}
@article{caner_medeiros_vasconcelos_2023, title={Sharpe Ratio analysis in high dimensions: Residual-based nodewise regression in factor models}, volume={235}, ISSN={["1872-6895"]}, DOI={10.1016/j.jeconom.2022.03.009}, abstractNote={We provide a new theory for nodewise regression when the residuals from a fitted factor model are used. We apply our results to the analysis of the consistency of Sharpe Ratio estimators when there are many assets in a portfolio. We allow for an increasing number of assets as well as time observations of the portfolio. Since the nodewise regression is not feasible due to the unknown nature of idiosyncratic errors, we provide a feasible-residual-based nodewise regression to estimate the precision matrix of errors which is consistent even when number of assets, p, exceeds the time span of the portfolio, n. In another new development, we also show that the precision matrix of returns can be estimated consistently, even with an increasing number of factors and p>n. We show that: (1) with p>n, the Sharpe Ratio estimators are consistent in global minimum-variance and mean–variance portfolios; and (2) with p>n, the maximum Sharpe Ratio estimator is consistent when the portfolio weights sum to one; and (3) with p<