% Journal-name macros: define each venue once so spelling stays consistent file-wide.
@string{SIMAX   = {{SIAM} Journal on Matrix Analysis and Applications}}
@string{SISC    = {{SIAM} Journal on Scientific Computing}}
@string{NUMMATH = {Numerische Mathematik}}
@string{LAA     = {Linear Algebra and its Applications}}

@article{chen_hallman_2023,
  title        = {{Krylov}-Aware Stochastic Trace Estimation},
  author       = {Chen, Tyler and Hallman, Eric},
  journal      = SIMAX,
  volume       = {44},
  number       = {3},
  pages        = {1218--1244},
  year         = {2023},
  issn         = {1095-7162},
  doi          = {10.1137/22M1494257},
  abstractNote = {We introduce an algorithm for estimating the trace of a matrix function $f(\mathbf{A})$ using implicit products with a symmetric matrix $\mathbf{A}$. Existing methods for implicit trace estimation of a matrix function tend to treat matrix-vector products with $f(\mathbf{A})$ as a black-box to be computed by a Krylov subspace method. Like other recent algorithms for implicit trace estimation, our approach is based on a combination of deflation and stochastic trace estimation. However, we take a closer look at how products with $f(\mathbf{A})$ are integrated into these approaches which enables several efficiencies not present in previously studied methods. In particular, we describe a Krylov subspace method for computing a low-rank approximation of a matrix function by a computationally efficient projection onto Krylov subspace.},
}

@article{hallman_ipsen_saibaba_2023,
  title        = {{Monte Carlo} Methods for Estimating the Diagonal of a Real Symmetric Matrix},
  author       = {Hallman, Eric and Ipsen, Ilse C. F. and Saibaba, Arvind K.},
  journal      = SIMAX,
  volume       = {44},
  number       = {1},
  pages        = {240--269},
  year         = {2023},
  issn         = {1095-7162},
  doi          = {10.1137/22M1476277},
  url          = {https://doi.org/10.1137/22M1476277},
  abstractNote = {For real symmetric matrices that are accessible only through matrix vector products, we present Monte Carlo estimators for computing the diagonal elements. Our probabilistic bounds for normwise absolute and relative errors apply to Monte Carlo estimators based on random Rademacher, sparse Rademacher, and normalized and unnormalized Gaussian vectors and to vectors with bounded fourth moments. The novel use of matrix concentration inequalities in our proofs represents a systematic model for future analyses. Our bounds mostly do not depend explicitly on the matrix dimension, target different error measures than existing work, and imply that the accuracy of the estimators increases with the diagonal dominance of the matrix. Applications to derivative-based global sensitivity metrics and node centrality measures in network science corroborate this, as do numerical experiments on synthetic test matrices. We recommend against the use in practice of sparse Rademacher vectors, which are the basis for many randomized sketching and sampling algorithms, because they tend to deliver barely a digit of accuracy even under large sampling amounts.},
}

@comment{Removed a duplicate record under the repeated key hallman_ipsen_2023 (an
early-access export with truncated DOI 10.1007/s00211-023-01370 and volume 8);
it is superseded by the final published version kept below.}

@article{hallman_ipsen_2023,
  title        = {Precision-Aware Deterministic and Probabilistic Error Bounds for Floating Point Summation},
  author       = {Hallman, Eric and Ipsen, Ilse C. F.},
  journal      = NUMMATH,
  volume       = {155},
  number       = {1-2},
  pages        = {83--119},
  year         = {2023},
  month        = oct,
  issn         = {0945-3245},
  doi          = {10.1007/s00211-023-01370-y},
}

@article{al_daas_ballard_cazeaux_hallman_miedlar_pasha_reid_saibaba_2023,
  title        = {Randomized Algorithms for Rounding in the {Tensor-Train} Format},
  author       = {Al Daas, Hussam and Ballard, Grey and Cazeaux, Paul and Hallman, Eric and Miedlar, Agnieszka and Pasha, Mirjeta and Reid, Tim W. and Saibaba, Arvind K.},
  journal      = SISC,
  volume       = {45},
  number       = {1},
  pages        = {A74--A95},
  year         = {2023},
  issn         = {1095-7197},
  doi          = {10.1137/21M1451191},
  abstractNote = {The Tensor-Train (TT) format is a highly compact low-rank representation for high-dimensional tensors. TT is particularly useful when representing approximations to the solutions of certain types of parametrized partial differential equations. For many of these problems, computing the solution explicitly would require an infeasible amount of memory and computational time. While the TT format makes these problems tractable, iterative techniques for solving the PDEs must be adapted to perform arithmetic while maintaining the implicit structure. The fundamental operation used to maintain feasible memory and computational time is called rounding, which truncates the internal ranks of a tensor already in TT format. We propose several randomized algorithms for this task that are generalizations of randomized low-rank matrix approximation algorithms and provide significant reduction in computation compared to deterministic TT-rounding algorithms. Randomization is particularly effective in the case of rounding a sum of TT-tensors (where we observe 20x speedup), which is the bottleneck computation in the adaptation of GMRES to vectors in TT format. We present the randomized algorithms and compare their empirical accuracy and computational time with deterministic alternatives.},
}

@article{hallman_2022,
  title        = {A Block Bidiagonalization Method for Fixed-Accuracy Low-Rank Matrix Approximation},
  author       = {Hallman, Eric},
  journal      = SIMAX,
  volume       = {43},
  number       = {2},
  pages        = {661--680},
  year         = {2022},
  issn         = {1095-7162},
  doi          = {10.1137/21M1397866},
  abstractNote = {We present randUBV, a randomized algorithm for matrix sketching based on the block Lanzcos bidiagonalization process. Given a matrix $\mathbb{A}$, it produces a low-rank approximation of the form $\mathbb{UBV}^T$, where $\mathbb{U}$ and $\mathbb{V}$ have orthonormal columns in exact arithmetic and $\mathbb{B}$ is block bidiagonal. In finite precision, the columns of both $\mathbb{U}$ and $\mathbb{V}$ will be close to orthonormal. Our algorithm is closely related to the randQB algorithms of Yu, Gu, and Li [SIAM J. Matrix Anal. Appl., 39 (2018), pp. 1339--1359]. in that the entries of $\mathbb{B}$ are incrementally generated and the Frobenius norm approximation error may be efficiently estimated. It is therefore suitable for the fixed-accuracy problem and so is designed to terminate as soon as a user input error tolerance is reached. Numerical experiments suggest that the block Lanczos method is generally competitive with or superior to algorithms that use power iteration, even when $\mathbb{A}$ has significant clusters of singular values.},
}

@article{hallman_troester_2022,
  title        = {A Multilevel Approach to Stochastic Trace Estimation},
  author       = {Hallman, Eric and Troester, Devon},
  journal      = LAA,
  volume       = {638},
  pages        = {125--149},
  year         = {2022},
  month        = apr,
  issn         = {1873-1856},
  doi          = {10.1016/j.laa.2021.12.010},
  abstractNote = {This article presents a randomized matrix-free method for approximating the trace of f(A), where A is a large symmetric matrix and f is a function analytic in a closed interval containing the eigenvalues of A. Our method uses a combination of stochastic trace estimation (i.e., Hutchinson's method), Chebyshev approximation, and multilevel Monte Carlo techniques. We establish general bounds on the approximation error of this method by extending an existing error bound for Hutchinson's method to multilevel trace estimators. Numerical experiments are conducted for common applications such as estimating the log-determinant, nuclear norm, and Estrada index. We find that using multilevel techniques can substantially reduce the variance of existing single-level estimators.},
}

@article{hallman_2020,
  title        = {Sharp 2-Norm Error Bounds for {LSQR} and the Conjugate Gradient Method},
  author       = {Hallman, Eric},
  journal      = SIMAX,
  volume       = {41},
  number       = {3},
  pages        = {1183--1207},
  year         = {2020},
  issn         = {1095-7162},
  doi          = {10.1137/19M1272822},
  abstractNote = {We consider the iterative method LSQR for solving $\min_x |Ax-b|_2$. LSQR is based on the Golub--Kahan bidiagonalization process and at every step produces an iterate that minimizes the norm of the...},
}