@article{hallman_ipsen_saibaba_2023, title={MONTE CARLO METHODS FOR ESTIMATING THE DIAGONAL OF A REAL SYMMETRIC MATRIX}, volume={44}, ISSN={["1095-7162"]}, url={https://doi.org/10.1137/22M1476277}, DOI={10.1137/22M1476277}, abstractNote={For real symmetric matrices that are accessible only through matrix vector products, we present Monte Carlo estimators for computing the diagonal elements. Our probabilistic bounds for normwise absolute and relative errors apply to Monte Carlo estimators based on random Rademacher, sparse Rademacher, and normalized and unnormalized Gaussian vectors and to vectors with bounded fourth moments. The novel use of matrix concentration inequalities in our proofs represents a systematic model for future analyses. Our bounds mostly do not depend explicitly on the matrix dimension, target different error measures than existing work, and imply that the accuracy of the estimators increases with the diagonal dominance of the matrix. Applications to derivative-based global sensitivity metrics and node centrality measures in network science corroborate this, as do numerical experiments on synthetic test matrices. We recommend against the use in practice of sparse Rademacher vectors, which are the basis for many randomized sketching and sampling algorithms, because they tend to deliver barely a digit of accuracy even under large sampling amounts.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Hallman, Eric and Ipsen, Ilse C. F. and Saibaba, Arvind K.}, year={2023}, pages={240–269} } @article{hallman_ipsen_2023a, title={Precision-aware deterministic and probabilistic error bounds for floating point summation}, volume={8}, ISSN={["0945-3245"]}, DOI={10.1007/s00211-023-01370-y}, journal={NUMERISCHE MATHEMATIK}, author={Hallman, Eric and Ipsen, Ilse C. 
F.}, year={2023}, month={Aug} } @article{hallman_ipsen_2023b, title={Precision-aware deterministic and probabilistic error bounds for floating point summation}, volume={155}, ISSN={["0945-3245"]}, DOI={10.1007/s00211-023-01370-y}, number={1-2}, journal={NUMERISCHE MATHEMATIK}, author={Hallman, Eric and Ipsen, Ilse C. F.}, year={2023}, month={Oct}, pages={83–119} } @article{reid_ipsen_cockayne_oates_2023, title={Statistical properties of BayesCG under the Krylov prior}, volume={10}, ISSN={["0945-3245"]}, DOI={10.1007/s00211-023-01375-7}, journal={NUMERISCHE MATHEMATIK}, author={Reid, Tim W. and Ipsen, Ilse C. F. and Cockayne, Jon and Oates, Chris J.}, year={2023}, month={Oct} } @article{chi_ipsen_2021a, title={A projector-based approach to quantifying total and excess uncertainties for sketched linear regression}, volume={8}, ISSN={["2049-8772"]}, url={http://dx.doi.org/10.1093/imaiai/iaab016}, DOI={10.1093/imaiai/iaab016}, abstractNote={ Linear regression is a classic method of data analysis. In recent years, sketching—a method of dimension reduction using random sampling, random projections or both—has gained popularity as an effective computational approximation when the number of observations greatly exceeds the number of variables. In this paper, we address the following question: how does sketching affect the statistical properties of the solution and key quantities derived from it? To answer this question, we present a projector-based approach to sketched linear regression that is exact and that requires minimal assumptions on the sketching matrix. Therefore, downstream analyses hold exactly and generally for all sketching schemes. Additionally, a projector-based approach enables derivation of key quantities from classic linear regression that account for the combined model- and algorithm-induced uncertainties. 
We demonstrate the usefulness of a projector-based approach in quantifying and enabling insight on excess uncertainties and bias-variance decompositions for sketched linear regression. Finally, we demonstrate how the insights from our projector-based analyses can be used to produce practical sketching diagnostics to aid the design of judicious sketching schemes.}, journal={INFORMATION AND INFERENCE-A JOURNAL OF THE IMA}, publisher={Oxford University Press (OUP)}, author={Chi, Jocelyn T. and Ipsen, Ilse C. F.}, year={2021}, month={Aug} } @article{chi_ipsen_2021b, title={Multiplicative perturbation bounds for multivariate multiple linear regression in Schatten p-norms}, volume={624}, ISSN={["1873-1856"]}, url={https://doi.org/10.1016/j.laa.2021.03.039}, DOI={10.1016/j.laa.2021.03.039}, abstractNote={Multivariate multiple linear regression (MMLR), which occurs in a number of practical applications, generalizes traditional least squares (multivariate linear regression) to multiple right-hand sides. We extend recent MLR analyses to sketched MMLR in general Schatten p-norms by interpreting the sketched problem as a multiplicative perturbation. Our work represents an extension of Maher's results on Schatten p-norms. We derive expressions for the exact and perturbed solutions in terms of projectors for easy geometric interpretation. We also present a geometric interpretation of the action of the sketching matrix in terms of relevant subspaces. We show that a key term in assessing the accuracy of the sketched MMLR solution can be viewed as a tangent of a largest principal angle between subspaces under some assumptions. Our results enable additional interpretation of the difference between an orthogonal and oblique projector with the same range.}, journal={LINEAR ALGEBRA AND ITS APPLICATIONS}, publisher={Elsevier BV}, author={Chi, Jocelyn T. and Ipsen, Ilse C. 
F.}, year={2021}, month={Sep}, pages={87–102} } @article{probabilistic_iterative_methods_2021, title={Probabilistic Iterative Methods for Linear Systems}, url={http://jmlr.org/papers/v22/21-0031.html}, journal={Journal of Machine Learning Research}, year={2021} } @article{chi_ipsen_hsiao_lin_wang_lee_lu_tzeng_2021, title={SEAGLE: A Scalable Exact Algorithm for Large-Scale Set-Based Gene-Environment Interaction Tests in Biobank Data}, volume={12}, ISSN={["1664-8021"]}, DOI={10.3389/fgene.2021.710055}, abstractNote={The explosion of biobank data offers unprecedented opportunities for gene-environment interaction (GxE) studies of complex diseases because of the large sample sizes and the rich collection in genetic and non-genetic information. However, the extremely large sample size also introduces new computational challenges in G×E assessment, especially for set-based G×E variance component (VC) tests, which are a widely used strategy to boost overall G×E signals and to evaluate the joint G×E effect of multiple variants from a biologically meaningful unit (e.g., gene). In this work, we focus on continuous traits and present SEAGLE, a Scalable Exact AlGorithm for Large-scale set-based G×E tests, to permit G×E VC tests for biobank-scale data. SEAGLE employs modern matrix computations to calculate the test statistic and p-value of the GxE VC test in a computationally efficient fashion, without imposing additional assumptions or relying on approximations. SEAGLE can easily accommodate sample sizes in the order of 105, is implementable on standard laptops, and does not require specialized computing equipment. We demonstrate the performance of SEAGLE using extensive simulations. We illustrate its utility by conducting genome-wide gene-based G×E analysis on the Taiwan Biobank data to explore the interaction of gene and physical activity status on body mass index.}, journal={FRONTIERS IN GENETICS}, author={Chi, Jocelyn T. and Ipsen, Ilse C. F. 
and Hsiao, Tzu-Hung and Lin, Ching-Heng and Wang, Li-San and Lee, Wan-Ping and Lu, Tzu-Pin and Tzeng, Jung-Ying}, year={2021}, month={Nov} } @article{seagle_2021, title={SEAGLE: A Scalable Exact Algorithm for Large-Scale Set-Based GxE Tests in Biobank Data}, DOI={10.3389/fgene.2021.710055}, abstractNote={The explosion of biobank data offers unprecedented opportunities for gene-environment interaction (GxE) studies of complex diseases because of the large sample sizes and the rich collection in genetic and non-genetic information. However, the extremely large sample size also introduces new computational challenges in G×E assessment, especially for set-based G×E variance component (VC) tests, which are a widely used strategy to boost overall G×E signals and to evaluate the joint G×E effect of multiple variants from a biologically meaningful unit (e.g., gene). In this work, we focus on continuous traits and present SEAGLE, a Scalable Exact AlGorithm for Large-scale set-based G×E tests, to permit G×E VC tests for biobank-scale data. SEAGLE employs modern matrix computations to calculate the test statistic and p-value of the GxE VC test in a computationally efficient fashion, without imposing additional assumptions or relying on approximations. SEAGLE can easily accommodate sample sizes in the order of 105, is implementable on standard laptops, and does not require specialized computing equipment. We demonstrate the performance of SEAGLE using extensive simulations. 
We illustrate its utility by conducting genome-wide gene-based G×E analysis on the Taiwan Biobank data to explore the interaction of gene and physical activity status on body mass index.}, year={2021}, month={May} } @article{ipsen_zhou_2020, title={Probabilistic Error Analysis for Inner Products}, volume={41}, url={https://doi.org/10.1137/19M1270434}, DOI={10.1137/19M1270434}, abstractNote={Probabilistic models are proposed for bounding the forward error in the numerically computed inner product (dot product, scalar product) between two real n-vectors. We derive probabilistic perturbation bounds as well as probabilistic roundoff error bounds for the sequential accumulation of the inner product. These bounds are nonasymptotic, explicit, with minimal assumptions, and with a clear relationship between failure probability and relative error. The roundoffs are represented as bounded, zero-mean random variables that are independent or have conditionally independent means. Our probabilistic bounds are based on Azuma's inequality and its associated martingale, which mirrors the sequential order of computations. The derivation of forward error bounds "from first principles" has the advantage of producing condition numbers that are customized for the probabilistic bounds. Numerical experiments confirm that our bounds are more informative, often by several orders of magnitude, than traditional deterministic bounds-even for small vector dimensions n and very stringent success probabilities. In particular the probabilistic roundoff error bounds are functions of $\sqrt{n}$ rather than n, thus giving a quantitative confirmation of Wilkinson's intuition. The paper concludes with a critical assessment of the probabilistic approach.}, number={4}, journal={SIAM Journal on Matrix Analysis and Applications}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse C. F. 
and Zhou, Hua}, year={2020}, month={Jan}, pages={1726–1741} } @article{cockayne_oates_ipsen_girolami_2019, title={A Bayesian conjugate gradient method}, volume={14}, url={https://doi.org/10.1214/19-BA1145}, DOI={10.1214/19-BA1145}, abstractNote={A fundamental task in numerical computation is the solution of large linear systems.The conjugate gradient method is an iterative method which offers rapid convergence to the solution, particularly when an effective preconditioner is employed.However, for more challenging systems a substantial error can be present even after many iterations have been performed.The estimates obtained in this case are of little value unless further information can be provided about, for example, the magnitude of the error.In this paper we propose a novel statistical model for this error, set in a Bayesian framework.Our approach is a strict generalisation of the conjugate gradient method, which is recovered as the posterior mean for a particular choice of prior.The estimates obtained are analysed with Krylov subspace methods and a contraction result for the posterior is presented.The method is then analysed in a simulation study as well as being applied to a challenging problem in medical imaging.}, number={3}, journal={Bayesian Anal.}, publisher={International Society for Bayesian Analysis}, author={Cockayne, Jon and Oates, Chris J. and Ipsen, Ilse C.F. and Girolami, Mark}, year={2019}, pages={937–1012} } @article{girolami_ipsen_oates_owen_sullivan_2019, title={Editorial: special edition on probabilistic numerics}, volume={29}, ISSN={["1573-1375"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85072031679&partnerID=MN8TOARS}, DOI={10.1007/s11222-019-09892-y}, number={6}, journal={STATISTICS AND COMPUTING}, author={Girolami, M. and Ipsen, I. C. F. and Oates, C. J. and Owen, A. B. and Sullivan, T. 
J.}, year={2019}, month={Nov}, pages={1181–1183} } @article{drineas_ipsen_2019, title={LOW-RANK MATRIX APPROXIMATIONS DO NOT NEED A SINGULAR VALUE GAP}, volume={40}, ISSN={["1095-7162"]}, url={https://doi.org/10.1137/18M1163658}, DOI={10.1137/18M1163658}, abstractNote={This is a systematic investigation into the sensitivity of low-rank approximations of real matrices. We show that the low-rank approximation errors, in the two-norm, Frobenius norm and more generally, any Schatten p-norm, are insensitive to additive rank-preserving perturbations in the projector basis; and to matrix perturbations that are additive or change the number of columns (including multiplicative perturbations). Thus, low-rank matrix approximations are always well-posed and do not require a singular value gap. In the presence of a singular value gap, connections are established between low-rank approximations and subspace angles.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Drineas, Petros and Ipsen, Ilse C. F.}, year={2019}, pages={299–319} } @article{bartels_cockayne_ipsen_hennig_2019, title={Probabilistic linear solvers: a unifying view}, volume={29}, ISSN={["1573-1375"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85073975117&partnerID=MN8TOARS}, DOI={10.1007/s11222-019-09897-7}, abstractNote={Abstract Several recent works have developed a new, probabilistic interpretation for numerical algorithms solving linear systems in which the solution is inferred in a Bayesian framework, either directly or by inferring the unknown action of the matrix inverse. These approaches have typically focused on replicating the behaviour of the conjugate gradient method as a prototypical iterative method. In this work, surprisingly general conditions for equivalence of these disparate methods are presented. 
We also describe connections between probabilistic linear solvers and projection methods for linear systems, providing a probabilistic interpretation of a far more general class of iterative methods. In particular, this provides such an interpretation of the generalised minimum residual method. A probabilistic view of preconditioning is also introduced. These developments unify the literature on probabilistic linear solvers and provide foundational connections to the literature on iterative solvers for linear systems.}, number={6}, journal={STATISTICS AND COMPUTING}, author={Bartels, Simon and Cockayne, Jon and Ipsen, Ilse C. F. and Hennig, Philipp}, year={2019}, month={Nov}, pages={1249–1263} } @article{holodnak_ipsen_smith_2018, title={A PROBABILISTIC SUBSPACE BOUND WITH APPLICATION TO ACTIVE SUBSPACES}, volume={39}, ISSN={["1095-7162"]}, url={https://doi.org/10.1137/17M1141503}, DOI={10.1137/17M1141503}, abstractNote={Given a real symmetric positive semi-definite matrix E, and an approximation S that is a sum of n independent matrix-valued random variables, we present bounds on the relative error in S due to randomization. The bounds do not depend on the matrix dimensions but only on the numerical rank (intrinsic dimension) of E. Our approach resembles the low-rank approximation of kernel matrices from random features, but our accuracy measures are more stringent. In the context of parameter selection based on active subspaces, where S is computed via Monte Carlo sampling, we present a bound on the number of samples so that with high probability the angle between the dominant subspaces of E and S is less than a user-specified tolerance. This is a substantial improvement over existing work, as it is a non-asymptotic and fully explicit bound on the sampling amount n, and it allows the user to tune the success probability. 
It also suggests that Monte Carlo sampling can be efficient in the presence of many parameters, as long as the underlying function f is sufficiently smooth.}, number={3}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Holodnak, John T. and Ipsen, Ilse C. F. and Smith, Ralph C.}, year={2018}, pages={1208–1220} } @article{frame_he_ipsen_lee_lee_rrapaj_2018, title={Eigenvector Continuation with Subspace Learning}, volume={121}, ISSN={0031-9007 1079-7114}, url={http://dx.doi.org/10.1103/physrevlett.121.032501}, DOI={10.1103/physrevlett.121.032501}, abstractNote={A common challenge faced in quantum physics is finding the extremal eigenvalues and eigenvectors of a Hamiltonian matrix in a vector space so large that linear algebra operations on general vectors are not possible. There are numerous efficient methods developed for this task, but they generally fail when some control parameter in the Hamiltonian matrix exceeds some threshold value. In this Letter we present a new technique called eigenvector continuation that can extend the reach of these methods. The key insight is that while an eigenvector resides in a linear space with enormous dimensions, the eigenvector trajectory generated by smooth changes of the Hamiltonian matrix is well approximated by a very low-dimensional manifold. We prove this statement using analytic function theory and propose an algorithm to solve for the extremal eigenvectors. 
We benchmark the method using several examples from quantum many-body theory.}, number={3}, journal={Physical Review Letters}, publisher={American Physical Society (APS)}, author={Frame, Dillon and He, Rongzheng and Ipsen, Ilse and Lee, Daniel and Lee, Dean and Rrapaj, Ermal}, year={2018}, month={Jul} } @article{drineas_ipsen_kontopoulou_magdon-ismail_2018, title={STRUCTURAL CONVERGENCE RESULTS FOR APPROXIMATION OF DOMINANT SUBSPACES FROM BLOCK KRYLOV SPACES}, volume={39}, ISSN={["1095-7162"]}, url={https://doi.org/10.1137/16M1091745}, DOI={10.1137/16m1091745}, abstractNote={This paper is concerned with approximating the dominant left singular vector space of a real matrix $A$ of arbitrary dimension, from block Krylov spaces generated by the matrix $AA^T$ and the block vector $AX$. Two classes of results are presented. First are bounds on the distance, in the two and Frobenius norms, between the Krylov space and the target space. The distance is expressed in terms of principal angles. Second are quality of approximation bounds, relative to the best approximation in the Frobenius norm. For starting guesses $X$ of full column-rank, the bounds depend on the tangent of the principal angles between $X$ and the dominant right singular vector space of $A$. The results presented here form the structural foundation for the analysis of randomized Krylov space methods. The innovative feature is a combination of traditional Lanczos convergence analysis with optimal approximations via least squares problems.}, number={2}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Drineas, Petros and Ipsen, Ilse C. F. 
and Kontopoulou, Eugenia-Maria and Magdon-Ismail, Malik}, year={2018}, pages={567–586} } @article{saibaba_alexanderian_ipsen_2017, title={Randomized matrix-free trace and log-determinant estimators}, volume={137}, ISSN={["0945-3245"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85017115710&partnerID=MN8TOARS}, DOI={10.1007/s00211-017-0880-z}, abstractNote={We present randomized algorithms for estimating the trace and determinant of Hermitian positive semi-definite matrices. The algorithms are based on subspace iteration, and access the matrix only through matrix vector products. We analyse the error due to randomization, for starting guesses whose elements are Gaussian or Rademacher random variables. The analysis is cleanly separated into a structural (deterministic) part followed by a probabilistic part. Our absolute bounds for the expectation and concentration of the estimators are non-asymptotic and informative even for matrices of low dimension. For the trace estimators, we also present asymptotic bounds on the number of samples (columns of the starting guess) required to achieve a user-specified relative error. Numerical experiments illustrate the performance of the estimators and the tightness of the bounds on low-dimensional matrices, and on a challenging application in uncertainty quantification arising from Bayesian optimal experimental design.}, number={2}, journal={NUMERISCHE MATHEMATIK}, author={Saibaba, Arvind K. and Alexanderian, Alen and Ipsen, Ilse C. F.}, year={2017}, month={Oct}, pages={353–395} } @article{gallopoulos_drineas_ipsen_mahoney_2016, title={RandNLA, Pythons, and the CUR for your data problems: Reporting from G2S3 in Delphi}, volume={49}, number={1}, journal={SIAM News}, publisher={Society of Industrial and Applied Mathematics}, author={Gallopoulos, S. and Drineas, P. and Ipsen, I.C.F. 
and Mahoney, M.W.}, year={2016}, month={Jan}, pages={7–8} } @article{holodnak_ipsen_wentworth_2015, title={CONDITIONING OF LEVERAGE SCORES AND COMPUTATION BY QR DECOMPOSITION}, volume={36}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84944583307&partnerID=MN8TOARS}, DOI={10.1137/140988541}, abstractNote={The leverage scores of a full-column rank matrix A are the squared row norms of any orthonormal basis for range(A). We show that corresponding leverage scores of two matrices A and A + \Delta A are close in the relative sense, if they have large magnitude and if all principal angles between the column spaces of A and A + \Delta A are small. We also show three classes of bounds that are based on perturbation results of QR decompositions. They demonstrate that relative differences between individual leverage scores strongly depend on the particular type of perturbation \Delta A. The bounds imply that the relative accuracy of an individual leverage score depends on: its magnitude and the two-norm condition of A, if \Delta A is a general perturbation; the two-norm condition number of A, if \Delta A is a perturbation with the same norm-wise row-scaling as A; (to first order) neither condition number nor leverage score magnitude, if \Delta A is a component-wise row-scaled perturbation. Numerical experiments confirm the qualitative and quantitative accuracy of our bounds.}, number={3}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Holodnak, John T. and Ipsen, Ilse C. F. and Wentworth, Thomas}, year={2015}, pages={1143–1163} } @article{barlow_drmac_ipsen_moro_2015, title={Preface}, volume={464}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84908326625&partnerID=MN8TOARS}, DOI={10.1016/j.laa.2014.10.006}, journal={Linear Algebra and Its Applications}, author={Barlow, J.L. and Drma{\v{c}}, Z. and Ipsen, I. 
and Moro, J.}, year={2015}, pages={1–2} } @article{holodnak_ipsen_2015, title={RANDOMIZED APPROXIMATION OF THE GRAM MATRIX: EXACT COMPUTATION AND PROBABILISTIC BOUNDS}, volume={36}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84925298413&partnerID=MN8TOARS}, DOI={10.1137/130940116}, abstractNote={Given a real matrix A with n columns, the problem is to approximate the Gram product AA^T by c = rank(A) columns depend on the right singular vector matrix of A. For a Monte-Carlo matrix multiplication algorithm by Drineas et al. that samples outer products, we present probabilistic bounds for the 2-norm relative error due to randomization. The bounds depend on the stable rank or the rank of A, but not on the matrix dimensions. Numerical experiments illustrate that the bounds are informative, even for stringent success probabilities and matrices of small dimension. We also derive bounds for the smallest singular value and the condition number of matrices obtained by sampling rows from orthonormal matrices.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Holodnak, John T. and Ipsen, Ilse C. F.}, year={2015}, pages={110–137} } @article{ipsen_wentworth_2014, title={THE EFFECT OF COHERENCE ON SAMPLING FROM MATRICES WITH ORTHONORMAL COLUMNS, AND PRECONDITIONED LEAST SQUARES PROBLEMS}, volume={35}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84919934241&partnerID=MN8TOARS}, DOI={10.1137/120870748}, abstractNote={Motivated by the least squares solver Blendenpik, we investigate three strategies for uniform sampling of rows from $m\times n$ matrices $Q$ with orthonormal columns. The goal is to determine, with high probability, how many rows are required so that the sampled matrices have full rank and are well-conditioned with respect to inversion. 
Extensive numerical experiments illustrate that the three sampling strategies (without replacement, with replacement, and Bernoulli sampling) behave almost identically, for small to moderate amounts of sampling. In particular, sampled matrices of full rank tend to have two-norm condition numbers of at most 10. We derive a bound on the condition number of the sampled matrices in terms of the coherence $\mu$ of $Q$. This bound applies to all three different sampling strategies; it implies a, not necessarily tight, lower bound of $\mathcal{O}(m\mu\ln{n})$ for the number of sampled rows; and it is realistic and informative even for matrices of small dimension and the stringent...}, number={4}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Ipsen, Ilse C. F. and Wentworth, Thomas}, year={2014}, pages={1490–1520} } @article{ipsen_2012, title={Research spotlights}, volume={54}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84861631122&partnerID=MN8TOARS}, DOI={10.1137/SIREAD000054000001000119000001}, abstractNote={A new section with a new direction. Research Spotlights replaced Expository Research Papers on January 31, 2012. Papers in Research Spotlights should cover topics in applied and computational mathematics of particularly wide interest and importance. Contributions must also be accessible to the broad and diverse SIAM Review readership. The principal theme of Research Spotlights is author flexibility with the intent that an expanded format promotes creativity and spurs innovative articles. Authors now have latitude in terms of considering a standard research paper or else a nontraditional article such as a mini-survey, a timely communication, a software description, or a new mathematical perspective within an application area. 
Prospective authors are encouraged to first consult with the section editor Ray Tuminaro (rstumin@sandia.gov) about potential contributions—especially those that lie outside the scope of traditional SIAM articles. While they can have a more relaxed format, ultimately articles must be of broad interest, must be accessible to the community, and must meet the technical review standards of SIAM journals. Focus groups provide feedback on potential new products: Which juice is more appealing to you, the slimy green or the yucky yellow one? Systems of sensors process signals to decide: Is there an intruder or not? These are examples of “group decision making,” a process where individuals work together to make a collective decision. The problem of figuring out how the collective arrives at a decision occurs in areas as varied as cognitive psychology, economics, political science, and signal processing. Margot Kimura and Jeff Moehlis in their paper “Group Decision-Making Models for Sequential Tasks” consider the “two-alternative forced-choice test,” where one must choose between two hypotheses: slimy green or yucky yellow; intruder or no intruder. Decisions must be made quickly and can only tolerate certain error rates. This means that there are limits on how often the sensors are allowed to miss an intruder, or signal a false alarm. The model in this paper assumes $N$ independent decision makers, each of whom receives observations, sequentially, one at a time. Each decision maker continues to process the observations until s/he is able to make a decision. The incoming observations are represented by independent random variables, with known prior probabilities for each decision. The processing consists of applying the “sequential probability ratio test” to each new observation. Based on the prespecified error rates for the number of misses and false alarms, this test either reports a decision or continues to process the next observation. 
The authors also consider a continuous version of this test, which becomes a drift-diffusion model as the time between observations goes to zero. Once a decision maker has come up with a decision, s/he reports it to the “fusion center,” which is responsible for arriving at a collective decision. The fusion center can operate in one of three modes: race (report only the very first decision that arrives), majority-total (wait until all $N$ decisions have arrived and then report the majority), and majority-first (wait until $N/2$ identical decisions have arrived, and then report this smallest possible majority). For each such mode, the authors derive probability distribution functions for the collective error rates and decision times, from the error rates and decision times of the individual decision makers. Simulations are presented to compare the different modes. Which mode turns out to be the most efficient is not at all obvious and depends on the scenario at hand, whether decision makers can have different error rates or can malfunction. Finally, the authors extend their analysis to more general modes, where the fusion center makes a collective decision based on the first $\eta$ decisions that arrive. The approach presented here has many advantages. It is general and applies to many situations that require collective decision making based on sequential observations, including even “cybernetic groups” with human observers and nonhuman detectors. Furthermore it is systematic and elegant, because it provides a clear path for deriving the efficiency of collective decisions from those of individual decision makers. 
Especially appreciated is a list of acronyms thoughtfully included by the authors at the beginning of the paper, which makes it easy to decipher the many acronyms in this area.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2012}, pages={119–120} } @article{rehman_ipsen_2011, title={COMPUTING CHARACTERISTIC POLYNOMIALS FROM EIGENVALUES}, volume={32}, ISSN={["0895-4798"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-79952367119&partnerID=MN8TOARS}, DOI={10.1137/100788392}, abstractNote={This paper concerns the computation of the coefficients $c_k$ of the characteristic polynomial of a real or complex matrix $A$. We analyze the forward error in the coefficients $c_k$ when they are computed from the eigenvalues of $A$, as is done by MATLAB's poly function. In particular, we derive absolute and relative perturbation bounds for elementary symmetric functions, which we use in turn to derive perturbation bounds for the coefficients $c_k$ with regard to absolute and relative changes in the eigenvalues $\lambda_j$ of $A$. We present the so-called Summation Algorithm for computing the coefficients $c_k$ from the eigenvalues $\lambda_j$, which is essentially the algorithm used by poly. We derive roundoff error bounds and running error bounds for the Summation Algorithm. The roundoff error bounds imply that the Summation Algorithm is forward stable. The running error bounds can be used to estimate the accuracy of the computed coefficients “on the fly,” and they tend to be less pessimistic than the roundoff error bounds. Numerical experiments illustrate that our bounds give useful estimates for the accuracy of the coefficients $c_k$. 
In particular, the bounds confirm that poly computes the coefficients $c_k$ to high relative accuracy if the eigenvalues are positive and given to high relative accuracy.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Rehman, Rizwana and Ipsen, Ilse C. F.}, year={2011}, pages={90–114} } @article{ipsen_selee_2011, title={ERGODICITY COEFFICIENTS DEFINED BY VECTOR NORMS}, volume={32}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84875120748&partnerID=MN8TOARS}, DOI={10.1137/090752948}, abstractNote={Ergodicity coefficients for stochastic matrices determine inclusion regions for subdominant eigenvalues; estimate the sensitivity of the stationary distribution to changes in the matrix; and bound the convergence rate of methods for computing the stationary distribution. We survey results for ergodicity coefficients that are defined by $p$-norms, for stochastic matrices as well as for general real or complex matrices. We express ergodicity coefficients in the one-, two-, and infinity-norms as norms of projected matrices, and we bound coefficients in any $p$-norm by norms of deflated matrices. We show that two-norm ergodicity coefficients of a matrix $A$ are closely related to the singular values of $A$. In particular, the singular values determine the extreme values of the coefficients. We show that ergodicity coefficients can determine inclusion regions for subdominant eigenvalues of complex matrices, and that the tightness of these regions depends on the departure of the matrix from normality. In the special case of normal matrices, two-norm ergodicity coefficients turn out to be Lehmann bounds.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Ipsen, Ilse C. F. 
and Selee, Teresa M.}, year={2011}, pages={153–200} } @article{ipsen_2011, title={Expository Research Papers}, volume={53}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84856703956&partnerID=MN8TOARS}, DOI={10.1137/siread000053000001000069000001}, abstractNote={The two papers in this issue are of the analytic kind. The first one deals with symbol calculus, and the second one with compressed sensing. In their paper “Discrete Symbol Calculus,” Laurent Demanet and Lexing Ying propose efficient representations for functions of elliptic operators. The idea is to replace the symbol of an operator matrix A by a low-rank approximation. Then one can derive an efficient representation for $f(A)$, where f is a function like inverse, square root, or exponential. The low-rank approximations are constructed from rational Chebyshev interpolants, and also from hierarchical spline interpolants. The authors analyze how many terms are required for a low-rank approximation of specified accuracy, and they present numerical experiments. This appears to be an original and promising approach for reflection seismology and other areas of wave propagation. Compressed sensing is one of mathematics' hot topics. And it has already made contributions to signal processing: Compressed sensing can reduce the time for an MRI scan by a factor of 7. This is remarkable when you imagine that a small child may be able to hold still for 1 minute, but rarely for an eternity of 7. So what is compressed sensing? Suppose we want to determine (“measure”) all N elements of an unknown vector x. The straightforward way would be to perform N inner products with canonical vectors, with each canonical vector being responsible for one element of x. If x is sparse with only k nonzero elements, and we know the positions of these nonzero elements, then k inner products suffice to measure x. But what if we don't know the positions of the k nonzero elements? Can we still measure x with about k inner products? 
Look to compressed sensing for answers, and you'll be advised as follows: Perform the inner products in parallel, by means of a matrix vector multiplication $Ax$. However, for the rows of A don't choose canonical vectors, but instead choose, say, random vectors. The resulting algorithm for measuring x consists of two steps: The first (“encoding”) step determines a vector y from the matrix vector product $y=Ax$. Thereupon A and y are fed as constraints to the second (“decoding”) step, which recovers x as the solution of the $\ell_1$ minimization problem $\min_z{\|z\|_1}$ subject to the constraint $Az=y$. The performance of A in recovering x can be quantified from a RIP constant, where “RIP” stands for “restricted isometry property.” The RIP constant of A indicates how much A can deviate from behaving like an isometry when applied to vectors with k nonzero elements. More precisely, the RIP constant indicates by how much A can distort the two norm of a vector with k nonzero elements. RIP constants are the topic of the second paper, “Compressed Sensing: How Sharp Is the Restricted Isometry Property?” by Jeffrey Blanchard, Coralia Cartis, and Jared Tanner. They present tight bounds on RIP constants, and they introduce the framework of proportional growth arithmetic in order to compare RIP bounds to two alternative performance metrics: polytope analysis and geometric functional analysis techniques. 
This is an exciting paper and an exciting area, combining tools from matrix theory, probability, and functional analysis!}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2011}, month={Jan}, pages={69–69} } @article{ipsen_2011, title={Expository Research Papers}, volume={53}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-79957451081&partnerID=MN8TOARS}, DOI={10.1137/siread000053000002000289000001}, abstractNote={The two papers in this issue are concerned with analysis of differential equations: the first paper discusses the solution of partial differential equations that describe heat transfer, while the second one analyzes dynamical systems whose behavior alternates between continuous and discrete modes. The paper “Application of Standard and Refined Heat Balance Integral Methods to One-Dimensional Stefan Problems,” by Sarah Mitchell and Tim Myers, deals with a heat transfer problem, on a semi-infinite region. Consider an experiment on a long metal bar occupying the positive real-axis. The experiment starts by heating the origin, thereby raising the bar above the melting temperature. The bar melts near the origin, and as the heat diffuses, the solid-liquid interface propagates slowly but surely towards infinity. The temperature is modeled by heat equations across the two regions, and by a Stefan condition at their interface. The authors use heat balance integral methods to solve for the temperature. These methods reduce a partial differential equation to an ordinary differential equation. The authors investigate different boundary conditions, and different approximating functions for the temperature. Readers interested in heat balance integral methods will find this to be a valuable survey with new results that preserve the simplicity of the method. The second paper is concerned with so-called hybrid dynamical systems. A bouncing ball, for instance, is a hybrid dynamical system. 
The movement of the ball above ground can be described by Newton's law. However, at the very moment the ball hits the ground and bounces, an instantaneous reversal of velocity occurs along with some dissipation of energy. After the bounce, the ball moves again according to Newton's law until the next bounce, and so on. Mathematically one can show that the time points at which the ball bounces represent a convergent sequence. The convergence of this sequence implies that infinitely many bounces occur in a finite amount of time. This is called “Zeno behavior”: infinitely many switches of mode in a finite amount of time. If Zeno behavior occurs in a control system, a numerical simulation of the system is extremely difficult, if not impossible. In the terminology of dynamical systems, the movement of the ball above ground is a “flow” and the bounce is a “jump.” Hybrid dynamical systems alternate between continuous (flow) and discrete (jump) modes. Rafal Goebel and Andrew Teel in their paper “Preasymptotic Stability and Homogeneous Approximations of Hybrid Dynamical Systems” model hybrid dynamical systems and approximate them by simpler systems obtained from linearization and tangent cones. The authors analyze preasymptotic stability, homogeneity, and convergence. A variety of well-chosen simple examples helps us to understand the general concepts and results.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2011}, month={Jan}, pages={289–289} } @article{ipsen_2011, title={Expository Research Papers}, volume={53}, DOI={10.1137/siread000053000004000721000001}, abstractNote={You may be concerned that this issue features just a single paper in the Expository Research section. But not to worry—you will get your money's worth. Based on foundations laid by the eminent Austrian-born algebraist Emil Artin (1898–1962) you will be acquainted with improved designs for bread mixers and taffy pullers. 
Ingredients include topology, dynamical systems, linear algebra, the golden ratio, and Lego toys. The applications considered by Matthew Finn and Jean-Luc Thiffeault in their paper “Topological Optimization of Rod-Stirring Devices” go far beyond food preparation and extend to industrial dough production, and to glass manufacturing where inhomogeneities in molten glass are removed by stirring it with rods. The subject of this paper is the design of efficient devices that stir a fluid thoroughly. When the fluid motion is mainly two-dimensional, as in the case of glass, for example, one can model the fluid as a two-dimensional surface. Instead of using the Stokes equations to describe the fluid motion, the authors adopt a topological approach. They view the fluid as a punctured disk (the punctures being the stirring rods), and the rod motions as mappings of this disk. The mappings associated with efficient practical stirring protocols belong to the so-called pseudo-Anosov category and produce a fluid motion that is related to chaotic dynamical systems. Implementing a pseudo-Anosov mapping requires at least three stirring rods, which is why many taffy pullers have three stirring rods. The authors choose the topological notion of braids to describe the motion protocol of the stirring rods. Mathematical relations derived by Emil Artin identify those groups of braids that can actually be physically realized. In order to estimate how thoroughly a stirring protocol mixes up the fluid, one represents the braids as products of $2\times 2$ matrices, and then determines the spectral radius of the product. For three rods the best stirring protocol has a spectral radius equal to the golden ratio. One must be careful, though, to balance mathematical tractability with practical engineering considerations. Stirring protocols that are optimal from a mathematical point of view are not necessarily so in practice. 
Part of the reason is that the mathematical approach corresponds to a sequential operation of the stirring rods, while in practice one would want them to work in parallel. The authors devise a parallel motion protocol for stirring rods and present evidence for the optimality of this parallel protocol. This is a marvelous paper, easy to read, accessible, and most enjoyable. To top everything off, the authors built optimal stirring devices with 2 and with 4 stirring rods from Lego toy pieces. How cool is that?}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2011}, month={Jan}, pages={721–721} } @article{ipsen_2011, title={Expository Research Papers}, volume={53}, DOI={10.1137/siread000053000003000503000001}, abstractNote={The two papers in this issue deal with bifurcations and social network analysis. Bifurcation, in its original sense, means division into two branches. In the context of dynamical systems, a bifurcation occurs when a small change in a parameter causes a sudden qualitative change in the solution. Mike Jeffrey and S. J. Hogan study dynamical systems where a state vector x is defined by a system of ordinary differential equations $$\frac{dx}{dt}= f(x,t;\mu)$$ and $\mu$ represents the parameter. Here it is the function f that is responsible for the bifurcations, because f is only piecewise smooth. Piecewise smoothness occurs in applications ranging from engineering and medicine to biology and ecology, often in situations where impact or friction is modeled. Furthermore the authors assume that the discontinuity in f is confined to a smooth manifold of codimension one, the so-called switching manifold, where the solution x remains continuous but may fail to be unique. As a consequence, trajectories of x either cross through the switching manifold or slide along it. If the latter give rise to a bifurcation, then it is, aptly named, a sliding bifurcation. 
This is a succinct and well-written paper and it makes a number of contributions: first, identifying those points on the switching manifold where switching bifurcations can occur, namely at certain singularities (folds, cusps, saddles, and bowls); second, deriving a complete classification of all one-parameter sliding bifurcations; and third, discovering new sliding bifurcations, which all turn out to be catastrophic. In the second paper, “Comparing Community Structure to Characteristics in Online Collegiate Social Networks,” Amanda Traud, Eric Kelsic, Peter Mucha, and Mason Porter examine how relationships on a social networking site are correlated with demographic traits. In particular, they inspect the complete Facebook pages of five U.S. universities from a single snapshot in September 2005 and ask questions like: If people are friends on Facebook, are they likely to live in the same dormitory, major in the same subject, or have gone to the same high school? Computationally, Facebook users are represented as nodes of a graph, friendships as edges, and social communities as clusters of nodes. Identifying clusters, i.e., community detection, is done with an “unsupervised” algorithm that amounts to optimizing a modularity quality function, which depends only on the edge structure of the graph. This algorithmically computed community must now be compared to communities associated with demographic traits. The authors investigate different similarity measures, based on pair counting, for performing the comparisons. Along the way they face a number of issues: How to display and compare the communities visually? How to compute the correlations accurately and efficiently? What to do about missing data? This is an exciting paper, drawing on tools from graph partitioning, optimization, data clustering, contingency tables, and statistical psychology. 
It might also be the only paper in a SIAM journal that explains why the social structure at Caltech is different from that of the University of North Carolina.}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2011}, month={Jan}, pages={503–503} } @article{ipsen_2011, title={Expository research papers}, volume={53}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84856706336&partnerID=MN8TOARS}, number={4}, journal={SIAM Review}, author={Ipsen, I.}, year={2011} } @article{eriksson-bique_solbrig_stefanelli_warkentin_abbey_ipsen_2011, title={IMPORTANCE SAMPLING FOR A MONTE CARLO MATRIX MULTIPLICATION ALGORITHM, WITH APPLICATION TO INFORMATION RETRIEVAL}, volume={33}, ISSN={["1095-7197"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-80052734652&partnerID=MN8TOARS}, DOI={10.1137/10080659x}, abstractNote={We perform importance sampling for a randomized matrix multiplication algorithm by Drineas, Kannan, and Mahoney and derive probabilities that minimize the expected value (with regard to the distributions of the matrix elements) of the variance. We compare these optimized probabilities with uniform probabilities and derive conditions under which the actual variance of the optimized probabilities is lower. Numerical experiments with query matching in information retrieval applications illustrate that the optimized probabilities produce more accurate matchings than the uniform probabilities and that they can also be computed efficiently.}, number={4}, journal={SIAM JOURNAL ON SCIENTIFIC COMPUTING}, author={Eriksson-Bique, Sylvester and Solbrig, Mary and Stefanelli, Michael and Warkentin, Sarah and Abbey, Ralph and Ipsen, Ilse C. 
F.}, year={2011}, pages={1689–1706} } @article{ipsen_kelley_pope_2011, title={RANK-DEFICIENT NONLINEAR LEAST SQUARES PROBLEMS AND SUBSET SELECTION}, volume={49}, ISSN={["0036-1429"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-79960422161&partnerID=MN8TOARS}, DOI={10.1137/090780882}, abstractNote={We examine the local convergence of the Levenberg-Marquardt method for the solution of nonlinear least squares problems that are rank-deficient and have nonzero residual. We show that replacing the Jacobian by a truncated singular value decomposition can be numerically unstable. We recommend instead the use of subset selection. We corroborate our recommendations by perturbation analyses and numerical experiments.}, number={3}, journal={SIAM JOURNAL ON NUMERICAL ANALYSIS}, author={Ipsen, I. C. F. and Kelley, C. T. and Pope, S. R.}, year={2011}, pages={1244–1266} } @article{ipsen_2010, title={Expository Research Papers}, volume={52}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-77954340593&partnerID=MN8TOARS}, DOI={10.1137/siread000052000003000453000001}, abstractNote={The two papers in this issue have to do with matrices and sparsity—but from different points of view. Sparsity, in the first paper, means many zero elements in the matrix, while in the second paper it refers to many zero singular values, i.e., low rank. The context of the first paper, “On the Block Triangular Form of Symmetric Matrices” by Iain Duff and Bora Uçar, is the solution of linear systems of equations $Ax = b$ whose coefficient matrix A is sparse, i.e., has many zero elements. A direct method, such as Gaussian elimination, becomes more efficient if one can first permute the rows and columns of A into block triangular form. The classical method for permuting a matrix to block triangular form is due to Dulmage and Mendelsohn and dates back to 1963. 
The idea is to represent the matrix A as a bipartite graph whose nodes are columns and rows of A, and whose edges correspond to nonzero elements of A, and then to determine a matching of maximum cardinality in this graph. That means determining a maximal number of nonzero elements no two of which belong to the same row or column. Duff and Uçar analyze the block triangular form for a particular class of square matrices A: These matrices are structurally symmetric, i.e., their zero/nonzero structure is symmetric; and they can be structurally rank deficient, i.e., any rank deficiency is evident from the zero/nonzero structure of A. This paper illustrates nicely how graph theory can contribute to improving the efficiency of sparse matrix methods. The paper should appeal to those who need to solve large sparse linear systems, as well as those interested in graph theory. The second paper, “Guaranteed Minimum-Rank Solutions of Linear Matrix Equations via Nuclear Norm Minimization” by Benjamin Recht, Maryam Fazel, and Pablo Parrilo, has applications to model reduction and system identification, machine learning, and image compression, just to name a few. Given a set of affine constraints for a matrix, the problem is to find a matrix of minimum rank that satisfies these constraints. The authors attack this hard nonconvex optimization problem by replacing it with a convex approximation: Instead of minimizing the rank, they minimize the sum of the singular values, the so-called nuclear norm. Nuclear norm minimization thus extends the compressed sensing framework from finding sparse vectors via $\ell_1$ minimization to finding low-rank matrices via $\ell_1$ minimization of the (vector of) singular values. The purpose of this paper is to justify mathematically why nuclear norm minimization does so well in practice. In addition to extending the restricted isometry property to matrices, the authors also discuss algorithms, computational performance, and numerical issues. 
This is a fascinating and well-written paper that combines results from compressed sensing, matrix theory, optimization, and probability.}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2010}, month={Jan}, pages={453–453} } @article{ipsen_2010, title={Expository Research Papers}, volume={52}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-77249161188&partnerID=MN8TOARS}, DOI={10.1137/siread000052000002000267000001}, abstractNote={The two papers in this issue deal with differential equations, one with the numerical solution of partial differential equations, and the other one with analytic solutions for ordinary differential equations. In his paper "From Functional Analysis to Iterative Methods", Robert Kirby is concerned with linear systems arising from discretizations of partial differential equations (PDEs). Specifically, the PDEs are elliptic and describe boundary value problems; the discretizations are done via finite elements, and at issue is the convergence rate of iterative methods for solving the linear systems. The author's approach is to go back to the underlying variational problem in a Hilbert space, and to make ample use of the Riesz representation theorem. This point of view results in short and elegant proofs, as well as the construction of efficient preconditioners. The general theory is illustrated with two concrete model problems of PDEs for convection diffusion and planar elasticity. This paper will appeal to anybody who has an interest in the numerical solution of PDEs. In 1963 the mathematician/meteorologist Edward Lorenz formulated a system of three coupled nonlinear ordinary differential equations, whose long-term behavior is described by an attractor with fractal structure. You can see a beautiful rendition of the thus named Lorenz attractor on the cover of this issue. 
Although it is "easy" to plot solutions of the Lorenz system, it is much harder to determine them mathematically. This is what motivated the paper "Complex Singularities and the Lorenz Attractor" by Divakar Viswanath and Sönmez Şahutoğlu. Their idea is to allow the time variable to be complex, rather than real; to focus on singular solutions; and to express these singular solutions in terms of so-called psi series. After all is said and done, the authors end up with a two-parameter family of complex solutions to the Lorenz system. This is a highly readable and very enjoyable paper, with concrete steps for future research, and connections to thunderstorms and analytic function theory.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2010}, month={Jan}, pages={267–267} } @article{ipsen_2010, title={Expository Research Papers}, volume={52}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-79952956707&partnerID=MN8TOARS}, DOI={10.1137/siread000052000004000675000001}, abstractNote={The first paper in this issue is concerned with the numerical solution of an inverse problem, and the second one with detecting properties of networks. Alexander Graham Bell discovered the photoacoustic effect in the nineteenth century. He noticed that thin discs emit a sound when they are exposed to a light beam that is rapidly turned on and off. What happens is that the disks absorb the energy from the light, and convert it to heat. The heat then produces a momentary expansion, which in turn sets off a pressure wave or sound. The photoacoustic effect is exploited in biomedical applications, to help with the detection of lesions and cancer. The tissue is exposed to energy pulses and then expands. The expansion produces ultrasonic waves that propagate to the boundary and can be detected there. 
The objective of the paper “Mathematical Modeling in Photoacoustic Imaging of Small Absorbers” by Habib Ammari, Emmanuel Bossy, Vincent Jugnon, and Hyeonbae Kang is to identify energy absorbing regions from measurements taken at the boundary. This is an inverse problem, and the boundary conditions require special attention. The authors derive a reconstruction method, and present numerical experiments to illustrate its effectiveness. This is a noteworthy paper, due to its potential contribution to medical imaging. Mathematicians like to gauge their position in the community via the so-called Erdös number, which represents the “collaborative distance” to Paul Erdös. Erdös (1913–1996) was a prolific Hungarian mathematician who worked in many areas, including combinatorics, graph theory, and number theory, and published by some accounts as many as 1400 papers. Erdös himself has an Erdös number of 0, while his coauthors have an Erdös number of 1. To compute your own Erdös number, take the lowest Erdös number among all of your coauthors, and then add one. If none of your coauthors has a finite Erdös number, neither do you, and your Erdös number is $\infty$. How can we express the influence of Paul Erdös in mathematical terms? There is his centrality, measured by the number of his coauthors and said to exceed 500. There is his communicability, which is the sum of all finite Erdös numbers, but with larger Erdös numbers given less weight (i.e., Erdös number k is divided by k!). And there is his betweenness, a measure of what would happen to the communicability of the other mathematicians had Erdös not existed. Concepts such as centrality, communicability, and betweenness quantify the connectivity and topology of general networks. 
Ernesto Estrada and Desmond Higham, in their paper “Network Properties Revealed through Matrix Functions,” express these concepts in terms of the exponential and resolvent of the adjacency matrix, and compare them on real test data from social science, ecology, and proteomics. This well-written paper gives us a glimpse into network science, a new area that studies patterns of interactions, and exposes the opportunities for contributions from linear algebra.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2010}, month={Jan}, pages={675–675} } @article{ipsen_2010, title={Expository Research Papers}, volume={52}, DOI={10.1137/siread000052000001000055000001}, abstractNote={The two papers in this issue are concerned with analysis of differential equations: the first paper discusses the solution of partial differential equations that describe heat transfer, while the second one analyzes dynamical systems whose behavior alternates between continuous and discrete modes. The paper “Application of Standard and Refined Heat Balance Integral Methods to One-Dimensional Stefan Problems,” by Sarah Mitchell and Tim Myers, deals with a heat transfer problem, on a semi-infinite region. Consider an experiment on a long metal bar occupying the positive real-axis. The experiment starts by heating the origin, thereby raising the bar above the melting temperature. The bar melts near the origin, and as the heat diffuses, the solid-liquid interface propagates slowly but surely towards infinity. The temperature is modeled by heat equations across the two regions, and by a Stefan condition at their interface. The authors use heat balance integral methods to solve for the temperature. These methods reduce a partial differential equation to an ordinary differential equation. The authors investigate different boundary conditions, and different approximating functions for the temperature. 
Readers interested in heat balance integral methods will find this to be a valuable survey with new results that preserve the simplicity of the method. The second paper is concerned with so-called hybrid dynamical systems. A bouncing ball, for instance, is a hybrid dynamical system. The movement of the ball above ground can be described by Newton's law. However, at the very moment the ball hits the ground and bounces, an instantaneous reversal of velocity occurs along with some dissipation of energy. After the bounce, the ball moves again according to Newton's law until the next bounce, and so on. Mathematically one can show that the time points at which the ball bounces represent a convergent sequence. The convergence of this sequence implies that infinitely many bounces occur in a finite amount of time. This is called “Zeno behavior”: infinitely many switches of mode in a finite amount of time. If Zeno behavior occurs in a control system, a numerical simulation of the system is extremely difficult, if not impossible. In the terminology of dynamical systems, the movement of the ball above ground is a “flow” and the bounce is a “jump.” Hybrid dynamical systems alternate between continuous (flow) and discrete (jump) modes. Rafal Goebel and Andrew Teel in their paper “Preasymptotic Stability and Homogeneous Approximations of Hybrid Dynamical Systems” model hybrid dynamical systems and approximate them by simpler systems obtained from linearization and tangent cones. The authors analyze preasymptotic stability, homogeneity, and convergence. 
A variety of well-chosen simple examples helps us to understand the general concepts and results.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2010}, month={Jan}, pages={55–55} } @article{ipsen_2010, title={Expository research papers}, volume={52}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-79952956707&partnerID=MN8TOARS}, number={4}, journal={SIAM Review}, author={Ipsen, I.}, year={2010} } @inproceedings{kelley_ipsen_pope_2010, title={Rank-deficient and ill-conditioned nonlinear least squares problems}, booktitle={Proceedings of the 2010 East Asian SIAM Conference}, author={Kelley, C.T. and Ipsen, I.C.F. and Pope, S.R.}, year={2010} } @inbook{ipsen_2010, title={The Eigenproblem and Invariant Subspaces: Perturbation Theory}, ISBN={9780817649678 9780817649685}, url={http://dx.doi.org/10.1007/978-0-8176-4968-5_7}, DOI={10.1007/978-0-8176-4968-5_7}, abstractNote={In this collection of papers, Pete Stewart established the foundations for the perturbation theory of invariant subspaces. He introduced two crucial concepts that allow a systematic approach toward such a perturbation theory: subspace rotation and operator separation. These two concepts form the guiding principle in most of these papers.}, booktitle={G.W. Stewart}, publisher={Birkhäuser Boston}, author={Ipsen, Ilse C. F.}, year={2010}, pages={71–93} } @article{preface to the 14th ilas conference proceedings shanghai 2007_2009, volume={430}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-58349086314&partnerID=MN8TOARS}, DOI={10.1016/j.laa.2008.10.002}, number={5-6}, journal={Linear Algebra and Its Applications}, year={2009} } @article{ipsen_2009, title={Problems and Techniques}, volume={51}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-68649123647&partnerID=MN8TOARS}, DOI={10.1137/siread000051000002000337000001}, abstractNote={The two papers in this issue share a common concern: smoothing. 
In the first paper, it is time series that are being smoothed, and in the second paper it is multigrid methods that do the smoothing for PDE-constrained optimization problems. In biology, medicine, finance, economics, geophysics, and social sciences one frequently needs to discern “trends” in data sets. Mathematically, this problem is known as filtering, smoothing, or time series analysis, and many different methods have been devised, including moving average filters, bandpass filters, and smoothing splines. For a given set of scalars $y_t$, one wants to find another set of scalars $x_t$ that vary smoothly, but that are close to the original set. The new points $x_t$ are considered to represent the underlying trend. The question now is, how do we define what it means to “vary smoothly”? Seung-Jean Kim, Kwangmoo Koh, Stephen Boyd, and Dimitry Gorinevsky answer this question in the one-norm. In their paper “$\ell_1$ Trend Filtering,” they minimize an expression containing a one-norm, so that the resulting $x_t$ are points of a piecewise linear function. The minimization is formulated as a convex quadratic program and solved by an interior point method. This paper should be of interest to many readers, because of its connections to $\ell_1$ regularization in geophysics, signal processing, statistics, and sparse approximation. In their paper “Multigrid Methods for PDE Optimization,” Alfio Borzì and Volker Schulz review multigrid methods for solving infinite-dimensional optimization problems whose constraints are expressed in terms of partial differential equations (PDEs). Such optimization problems arise in optimal control, shape design, and shape optimization. Multigrid methods, very informally, solve PDEs by discretizing them iteratively on a hierarchy of grids, so as to capture all frequencies. So-called smoothers are responsible for high frequencies, while lower frequencies are resolved on coarser grids. This paper is essentially self-contained. 
It starts by introducing terminology for PDE-constrained optimization problems and reviewing multigrid methods. Subsequent discussions focus on multigrid SQP, Schur-complement-based multigrid smoothers, and collective smoothing multigrid, as well as applications to optimal control problems governed by hyperbolic, elliptic, and parabolic PDEs.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2009}, month={May}, pages={337–337} } @article{ipsen_2009, title={Problems and Techniques}, volume={51}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-59749092679&partnerID=MN8TOARS}, DOI={10.1137/siread000051000003000543000001}, abstractNote={Halftoning and manifold learning are the two subjects of this issue. Halftoning will help you to understand better what goes on inside your printer, in particular how your printer converts continuous images to pixels. Manifold learning has the potential to become an important approach for machine learning, specifically for the automated analysis of large high-dimensional data sets. 1. Manifold learning is a form of dimension reduction, where one assumes that data lie on a low-dimensional manifold within a high-dimensional space. Applications of manifold learning include object recognition, computer vision, speech analysis, and molecular dynamics simulations. Hongyuan Zha and Zhenyue Zhang, the authors of the paper “Spectral Properties of the Alignment Matrices in Manifold Learning,” focus on a particular class of local manifold learning methods known as local tangent space alignment (LTSA). LTSA computes representations for local neighborhoods of a manifold, and then combines, or aligns, all the local representations into one global representation for the whole manifold. This involves the computation of eigenspaces and null spaces of alignment matrices. 
The accuracy of LTSA is affected by the amount of overlap among local neighborhoods, as well as by perturbations from noise and computational errors. Armed with tools from matrix and graph theory the authors determine the impact of overlap under ideal conditions, when computations are free of errors, and they determine the accuracy of the null spaces when the alignment matrices are subjected to perturbations. 2. Black ink printers convert grayscale images into images made up of binary pixels in such a way that the human eye perceives almost no difference between the binary and the grayscale image. This conversion process is called “halftoning.” It works because the human eye has the ability to perform spatial smoothing. In contrast, an artificial vision system with a sufficiently fine resolution perceives a binary halftone image as merely a chaotic mess of pixels. In his paper “Least-Squares Halftoning via Human Vision System and Markov Gradient Descent (LS-MGD): Algorithm and Analysis,” Jianhong (Jackie) Shen models the human vision system as a point spread function that is weakly lowpass and quantifies its spatial smoothing ability. He presents a halftoning algorithm based on a randomized gradient descent approach that minimizes the difference between the perceived binary and continuous images in the least squares sense. Numerical experiments with test images give a good feel for the performance of the algorithm.}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2009}, month={Aug}, pages={543–543} } @article{ipsen_2009, title={Problems and Techniques}, volume={51}, DOI={10.1137/siread000051000004000705000001}, abstractNote={This issue features two papers involving optimization problems, one targeted at minimizing temperature differences across a plate, and the other at multidimensional integration. 1. 
Want to turn the graphics card in your laptop or desktop into a personal supercomputer? Eddie Wadbro and Martin Berggren show you how, in their paper “Megapixel Topology Optimization on a Graphics Processing Unit.” The advantage of graphics cards is that they are cheap, relatively speaking, and that they allocate more resources to data processing than a CPU would. Graphics cards are also becoming easier to program (especially for those of us who grew up with Connection Machines and FPS-164s). Graphics processing units are natural hardware platforms for problems that are highly data parallel. This includes the topology optimization problem considered here, where a limited amount of high-conductivity material is to be distributed across a heated plate so that its temperature field is as even as possible. The authors express this as an area-to-point flow optimization problem. A subsequent finite element discretization gives a symmetric positive-definite linear system that is solved by a diagonally preconditioned conjugate gradient method. As it turns out, the optimal distribution of the high-conductivity material emanates like a root from the heat sink, with increasing girth and finer branches as the discretization is refined. 2. In their paper “Approximate Volume and Integration for Basic Semialgebraic Sets,” Didier Henrion, Jean Bernard Lasserre, and Carlo Savorgnan are concerned with deterministic techniques for difficult multidimensional integration, of the type where only brute force Monte Carlo methods have a chance at producing acceptable approximations. The bodies can be disconnected or nonconvex, and are described by sets of polynomial inequalities (i.e., semialgebraic sets). The foundation for this work was laid more than a hundred years ago, when Chebyshev, Markov, and Stieltjes showed how to approximate one-dimensional integrals by sequences of moments. 
In this paper, the authors formulate the multidimensional integration as an infinite-dimensional linear programming problem, and approximate the required moments by a hierarchy of semidefinite programming problems. Numerical examples illustrate that the approach produces accurate approximations in two and three dimensions.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2009}, month={Nov}, pages={705–705} } @article{ipsen_2009, title={Problems and Techniques}, volume={51}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-68649123647&partnerID=MN8TOARS}, DOI={10.1137/siread000051000001000127000001}, abstractNote={Evaluating and improving the performance of scientific software—this is the topic of the six-author paper “Optimization and Performance Modeling of Stencil Computations on Modern Multiprocessors.” Performance can degrade substantially when data are too large to fit in cache, so that data that do not fit in cache may have to be retrieved from main memory. Since memory traffic is slow compared to computation speed, processors become idle while waiting for data to arrive. The software in question is a code for the numerical solution by finite difference methods of the heat equation in three dimensions. The finite difference method is referred to as a “stencil computation” because each point in this three-dimensional grid requires information from a certain set of neighboring points (the stencil) to perform its computations. The code is evaluated on three different microprocessors: Itanium2, AMD Opteron, and IBM Power5; and also on the STI Cell processor. The authors evaluate strategies for managing memory hierarchies; and they examine issues such as cache blocking, cache-aware and cache-oblivious algorithms, local-store management, and hardware and software prefetching. 
The paper illustrates well the difficulty of this undertaking, and the interaction among the numerous hardware features and software strategies that affect the performance even of computations with simple and predictable memory access patterns.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2009}, month={Feb}, pages={127–127} } @article{ipsen_2009, title={Problems and techniques}, volume={51}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-59749092679&partnerID=MN8TOARS}, number={1}, journal={SIAM Review}, author={Ipsen, I.}, year={2009} } @article{ipsen_nadler_2009, title={REFINED PERTURBATION BOUNDS FOR EIGENVALUES OF HERMITIAN AND NON-HERMITIAN MATRICES}, volume={31}, ISSN={["0895-4798"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-73649111433&partnerID=MN8TOARS}, DOI={10.1137/070682745}, abstractNote={We present eigenvalue bounds for perturbations of Hermitian matrices and express the change in eigenvalues in terms of a projection of the perturbation onto a particular eigenspace, rather than in terms of the full perturbation. The perturbations we consider are Hermitian of rank one, and Hermitian or non-Hermitian with norm smaller than the spectral gap of a specific eigenvalue. Applications include principal component analysis under a spiked covariance model, and pseudo-arclength continuation methods for the solution of nonlinear systems.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Ipsen, I. C. F. 
and Nadler, B.}, year={2009}, pages={40–53} } @article{ipsen_2009, title={SPECIAL ISSUE ON ACCURATE SOLUTION OF EIGENVALUE PROBLEMS}, volume={31}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-73649094469&partnerID=MN8TOARS}, DOI={10.1137/sjmael000031000001000vii000001}, abstractNote={The occasion for this special issue is the Sixth International Workshop on Accurate Solution of Eigenvalue Problems, which took place at The Pennsylvania State University from May 22–25, 2006. This special issue provides an outlet for papers from the workshop and recognizes advances in the numerical solution of eigenvalue and related problems. This is the second such special issue published in the SIAM Journal on Matrix Analysis and Applications; the first was published in number 4 of volume 28 in connection with the Fifth International Workshop on Accurate Solution of Eigenvalue Problems, which took place in Hagen, Germany, from June 29 to July 1, 2004. The twelve papers in the current issue are concerned with a variety of aspects that arise in the computation of eigenvalues and invariant subspaces: perturbation bounds and sensitivity, accuracy and convergence behavior of algorithms, exploitation of structure in matrices, and particular engineering applications. Thanks go to SIMAX Editor-in-Chief, Henk van der Vorst; guest editors Jesse Barlow, Froilán Dopico, and Zlatko Drmač, who put great effort into the careful and timely review of papers; and Mitch Chernoff, Cherie Trebisky, and other members of the SIAM staff who worked hard to publish this special issue.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse C. 
F.}, year={2009}, pages={VII-VII} } @article{ipsen_2009, title={Special issue on accurate solution of eigenvalue problems}, volume={31}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-73649094469&partnerID=MN8TOARS}, number={1}, journal={SIAM Journal on Matrix Analysis and Applications}, author={Ipsen, I.C.F.}, year={2009} } @article{wills_ipsen_2008, title={ORDINAL RANKING FOR GOOGLE'S PAGERANK}, volume={30}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-70449368930&partnerID=MN8TOARS}, DOI={10.1137/070698129}, abstractNote={We present computationally efficient criteria that can guarantee correct ordinal ranking of Google's PageRank scores when they are computed with the power method (ordinal ranking of a list consists of assigning an ordinal number to each item in the list). We discuss the tightness of the ranking criteria, and illustrate their effectiveness for top k and bucket ranking. We present a careful implementation of the power method, combined with a roundoff error analysis that is valid for matrix dimensions $n<10^{14}$. To first order, the roundoff error depends neither on $n$ nor on the iteration count, but only on the maximal number of inlinks and the dangling nodes. The applicability of our ranking criterion is limited by the roundoff error from a single matrix vector multiply. Numerical experiments suggest that our criteria can effectively rank the top PageRank scores. We also discuss how to implement ranking for extremely large practical problems, by curbing roundoff error, reducing the matrix dimension, and using faster converging methods.}, number={4}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Wills, Rebecca S. and Ipsen, Ilse C. 
F.}, year={2008}, pages={1677–1696} } @article{ipsen_rehman_2008, title={PERTURBATION BOUNDS FOR DETERMINANTS AND CHARACTERISTIC POLYNOMIALS}, volume={30}, ISSN={["0895-4798"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-67649206986&partnerID=MN8TOARS}, DOI={10.1137/070704770}, abstractNote={We derive absolute perturbation bounds for the coefficients of the characteristic polynomial of a $n\times n$ complex matrix. The bounds consist of elementary symmetric functions of singular values, and suggest that coefficients of normal matrices are better conditioned with regard to absolute perturbations than those of general matrices. When the matrix is Hermitian positive-definite, the bounds can be expressed in terms of the coefficients themselves. We also improve absolute and relative perturbation bounds for determinants. The basis for all bounds is an expansion of the determinant of a perturbed diagonal matrix.}, number={2}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Ipsen, Ilse C. F. and Rehman, Rizwana}, year={2008}, pages={762–776} } @article{ipsen_2008, title={Problems and Techniques}, volume={50}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-44949224981&partnerID=MN8TOARS}, DOI={10.1137/siread000050000002000273000001}, abstractNote={The three papers in this issue are concerned with asymptotic expansions of integrals, calculus of variations, and singular perturbation techniques. 1. Among integral transforms, the Fourier transform is arguably the best known, but there are many others, including Laplace, Stieltjes, Mellin, Hankel, and Poisson transforms. They can all be represented as $I(x)\equiv\int_0^{\infty}{f(t)h(xt)dt}$. If x is an asymptotic parameter, which means that x is close to zero or very large, then one may be able to approximate $I(x)$ by an asymptotic expansion. 
In his paper “Asymptotic Expansions of Mellin Convolution Integrals,” José López presents a general and simple method to generate asymptotic expansions for $I(x)$ that encompasses many existing methods as special cases. 2. Analysis of “slope stability” is an important aspect of geology and soil mechanics: How likely is a sloped terrain (an embankment, a dam) to succumb to erosion and turn into a landslide? And how should the slope profile be modified to prevent further sliding? Analysis of slope stability is one of the applications envisioned by Enrique Castillo, Antonio Conejo, and Ernesto Aranda in their paper “Sensitivity Analysis in Calculus of Variations. Some Applications.” They propose to perform slope stability by means of a sensitivity analysis in the calculus of variations. In the context of slope stability, for instance, one might want to determine how sensitive the slope safety factor is to changes in the slope profile or to changes in soil strength. The mathematical problem comes down to this: How sensitive to changes in parameters are the following quantities: objective function values, primal solutions, and dual solutions? The authors show how to express the sensitivities in terms of partial derivatives, and how to compute them numerically by solving a boundary value problem. 3. In their paper “High-Frequency Oscillations of a Sphere in a Viscous Fluid near a Rigid Plane,” Richard Chadwick and Zhijie Liao model the operation of atomic force microscopes. An atomic force microscope (AFM) is a high resolution scanning device for imaging at nanoscales. Unlike conventional microscopes, which use light, an AFM scans a specimen by “feeling” the surface with a mechanical probe, which in this case consists of a sphere attached to a cantilever. 
When the specimens are delicate biological samples, such as tissues of the inner ear, they are scanned in a fluid, and the AFM is employed in “tapping mode.” In tapping mode, the sphere is forced to oscillate up and down. When it approaches the specimen, its oscillations are changed by hydrodynamic forces. This provides information about properties of the specimen. To understand the hydrodynamic interactions taking place between sphere and wall, one can model the situation as a fluid that contains a sphere oscillating close to a rigid wall. The problem becomes singular when the sphere approaches the wall. The authors revive singular perturbation techniques developed in the 1960s to derive asymptotic expansions for the forces acting on the sphere.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2008}, month={Jan}, pages={273–273} } @article{ipsen_2008, title={Problems and Techniques}, volume={50}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-56349149490&partnerID=MN8TOARS}, DOI={10.1137/siread000050000001000035000001}, abstractNote={The two papers in this issue deal with differential equations, one with the numerical solution of partial differential equations, and the other one with analytic solutions for ordinary differential equations. In his paper "From Functional Analysis to Iterative Methods", Robert Kirby is concerned with linear systems arising from discretizations of partial differential equations (PDEs). Specifically, the PDEs are elliptic and describe boundary value problems; the discretizations are done via finite elements, and at issue is the convergence rate of iterative methods for solving the linear systems. The author's approach is to go back to the underlying variational problem in a Hilbert space, and to make ample use of the Riesz representation theorem. 
This point of view results in short and elegant proofs, as well as the construction of efficient preconditioners. The general theory is illustrated with two concrete model problems of PDEs for convection diffusion and planar elasticity. This paper will appeal to anybody who has an interest in the numerical solution of PDEs. In 1963 the mathematician/meteorologist Edward Lorenz formulated a system of three coupled nonlinear ordinary differential equations, whose long-term behavior is described by an attractor with fractal structure. You can see a beautiful rendition of the thus named Lorenz attractor on the cover of this issue. Although it is "easy" to plot solutions of the Lorenz system, it is much harder to determine them mathematically. This is what motivated the paper "Complex Singularities and the Lorenz Attractor" by Divakar Viswanath and Sonmez Sahutoglu. Their idea is to allow the time variable to be complex, rather than real; to focus on singular solutions; and to express these singular solutions in terms of so-called psi series. After all is said and done, the authors end up with a two-parameter family of complex solutions to the Lorenz system. This a highly readable and very enjoyable paper, with concrete steps for future research, and connections to thunderstorms and analytic function theory.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2008}, month={Jan}, pages={35–35} } @article{ipsen_2008, title={Problems and Techniques}, volume={50}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-50949105062&partnerID=MN8TOARS}, DOI={10.1137/siread000050000004000721000001}, abstractNote={The paper featured in this issue is “Logically Rectangular Grids and Finite Volume Methods for PDEs in Circular and Spherical Domains” by Donna Calhoun, Christiane Helzel, and Randy LeVeque. 
The topic is the numerical solution of partial differential equations (PDEs) whose domains are circles, balls, spheres, and related geometries. The main idea is to solve the PDE on a “logically rectangular” grid. This means, for instance, if the physical space is a circle, then the computational space is a square. The authors focus on hyperbolic PDEs, including Euler, acoustics, and shallow water equations, and also give an example for solving reaction-diffusion equations. This paper is a pleasure to read. One finds lucid explanations of what can go wrong with different types of grids for circular and spherical domains, such as grid cells of widely differing sizes and extreme shapes. In contrast, the algorithms in this paper map a single rectangular block into a spherical shape; and the resulting cell sizes differ by a factor of at most two. The different mappings from computational space to physical space are presented as short, intuitive MATLAB algorithms. Many crisp pictures illustrate the uniform appearance and effectiveness of the grids. Even those who are not PDE experts will appreciate the simplicity, elegance, and generality of the logically rectangular grids when they are combined with a discretization of the PDE by finite volume methods.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2008}, month={Jan}, pages={721–721} } @article{ipsen_2008, title={Problems and Techniques}, volume={50}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-56349149490&partnerID=MN8TOARS}, DOI={10.1137/siread000050000003000483000001}, abstractNote={The two papers in this issue are about short recurrences for computing bases of Krylov subspaces, and numerical methods for computing the Laplace transform and its inverse. 
Systems of linear equations $Ax=b$, where the matrix A is large and sparse (i.e., only a few elements of A are nonzero), are often solved by so-called Krylov subspace methods. Such methods restrict operations involving A to matrix vector products. In the simplest case the iterates $x^{(k)}$ are computed from vectors in the Krylov space $\mathcal{K}_k=span\{b, Ab,\ldots, A^{k-1}b\}$. For instance, when A is Hermitian positive-definite (or real symmetric positive-definite) the conjugate gradient method computes $x^{(k)}$ as a linear combination of $x^{(k-1)}$ and a “direction vector” $p_k$. The direction vectors form an A-orthogonal basis for the Krylov space $\mathcal{K}_k$, that is, $p_i^*Ap_j=0$ for $i\neq j$ (the superscript $*$ denotes the conjugate transpose). As a consequence, a direction vector $p_k$ can be computed from $Ap_{k-1}$, $p_{k-1}$ and $p_{k-2}$. This is called a 3-term recurrence. However, if A is a general matrix, then it is well known that the direction vectors cannot be computed with 3-term recurrences—even if one relaxes the orthogonality to B-orthogonality, where B is any Hermitian positive-definite matrix. The question is, if 3-term recurrences are not possible, then how short can the recurrences possibly be? In their paper “On Optimal Short Recurrences for Generating Orthogonal Krylov Subspace Bases,” J. Liesen and Z. Strakoš derive necessary and sufficient conditions for a nonsingular matrix A to admit $(s+2)$-term recurrences for $s\geq 1$. They also give a comprehensive overview of work on short recurrences for Krylov subspace methods. This is a clear and carefully written paper, and the authors go to great lengths to illuminate the subtle issues involved. In the second paper, “The Bad Truth about Laplace's Transform,” Charles Epstein and John Schotland are concerned with the difficulties of inverting the Laplace transform. 
This may be necessary, for instance, when solving inverse scattering problems from optical tomography and image reconstruction. The Laplace transform of a real function $f(x)$ is defined as the integral $\mathcal{L}\>f(x)=\int_0^{\infty}{e^{-xy}f(y)dy}.$ Inverting $\mathcal{L}$ to recover $f(x)$ is an ill-posed problem. Ill-posed problems are extremely hard to solve numerically because they may not have a solution, the solution may not be unique, or the solution may not depend continuously on the data. The authors use harmonic analysis to derive fast algorithms for approximating the Laplace transform and its inverse (when the function values are sampled on geometrically uniformly spaced data), and to derive regularized inverses.}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2008}, month={Jan}, pages={483–483} } @article{ipsen_2008, title={Problems and techniques}, volume={50}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-50949105062&partnerID=MN8TOARS}, number={3}, journal={SIAM Review}, author={Ipsen, I.C.F.}, year={2008} } @article{dickson_kelley_ipsen_kevrekidis_2007, title={Condition estimates for pseudo-arclength continuation}, volume={45}, ISSN={["1095-7170"]}, DOI={10.1137/060654384}, abstractNote={We bound the condition number of the Jacobian in pseudo-arclength continuation problems, and we quantify the effect of this condition number on the linear system solution in a Newton-GMRES solve. Pseudo-arclength continuation solves parameter dependent nonlinear equations $G(u,\lambda) = 0$ by introducing a new parameter $s$, which approximates arclength, and viewing the vector $x = (u,\lambda)$ as a function of $s$. In this way simple fold singularities can be computed directly by solving a larger system $F(x,s) = 0$ by simple continuation in the new parameter $s$. 
It is known that the Jacobian $F_x$ of $F$ with respect to $x=(u,\lambda)$ is nonsingular if the path contains only regular points and simple fold singularities. We introduce a new characterization of simple folds in terms of the singular value decomposition, and we use it to derive a new bound for the norm of $F_x^{-1}$. We also show that the convergence rate of GMRES in a Newton step for $F(x,s)=0$ is essentially the same as that of the original problem $G(u,\lambda)=0$. In particular, we prove that the bounds on the degrees of the minimal polynomials of the Jacobians $F_x$ and $G_u$ differ by at most 2. We illustrate the effectiveness of our bounds with an example from radiative transfer theory.}, number={1}, journal={SIAM JOURNAL ON NUMERICAL ANALYSIS}, author={Dickson, K. I. and Kelley, C. T. and Ipsen, I. C. F. and Kevrekidis, I. G.}, year={2007}, pages={263–276} } @article{ipsen_2007, title={First SIAG linear algebra school slated for July 2008}, volume={40}, number={9}, journal={SIAM News}, author={Ipsen, I.C.F.}, year={2007}, month={Nov} } @article{ipsen_selee_2007, title={Pagerank computation, with special attention to dangling nodes}, volume={29}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-38049017083&partnerID=MN8TOARS}, DOI={10.1137/060664331}, abstractNote={We present a simple algorithm for computing the PageRank (stationary distribution) of the stochastic Google matrix $G$. The algorithm lumps all dangling nodes into a single node. We express lumping as a similarity transformation of $G$ and show that the PageRank of the nondangling nodes can be computed separately from that of the dangling nodes. The algorithm applies the power method only to the smaller lumped matrix, but the convergence rate is the same as that of the power method applied to the full matrix $G$. The efficiency of the algorithm increases as the number of dangling nodes increases. 
We also extend the expression for PageRank and the algorithm to more general Google matrices that have several different dangling node vectors, when it is required to distinguish among different classes of dangling nodes. We also analyze the effect of the dangling node vector on the PageRank and show that the PageRank of the dangling nodes depends strongly on that of the nondangling nodes but not vice versa. Last we present a Jordan decomposition of the Google matrix for the (theoretical) extreme case when all Web pages are dangling nodes.}, number={4}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, author={Ipsen, Ilse C. F. and Selee, Teresa M.}, year={2007}, pages={1281–1296} } @article{ipsen_2007, title={Problems and Techniques}, volume={49}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-34548494833&partnerID=MN8TOARS}, DOI={10.1137/siread000049000004000593000001}, abstractNote={In this issue, the Problems and Techniques section features papers concerned with parallel matrix vector multiplication, polynomial interpolation, and propagation of electromagnetic waves. The first paper, “Revisiting Hypergraph Models for Sparse Matrix Partitioning” by Bora Uçar and Cevdet Aykanat, is concerned with parallelizing matrix vector multiplication $y=Ax$, where the matrix A is sparse and rectangular. The authors propose a hypergraph model to describe dependences among the data and model communication among processors. In the hypergraph model, the elements of x and y and the rows of A are represented by vertices of a graph. If element $a_{i,j}$ of A is nonzero, then the multiplication $a_{ij}x_j$ contributes a nonzero summand and requires $x_j$ to be present in the processor holding row i. This dependence is accounted for with an undirected edge connecting $x_j$ with row i of A. A so-called net collects all dependences for a vertex: For instance, the net $n_x(j)$ contains vertex $x_j$ as well as all rows of A that have an edge with $x_j$. 
The problem of minimizing communication among processors can now be formulated as allocating the vertices to processors, so that each net has its vertices spread over as few processors as possible. The authors illustrate the effectiveness of hypergraph models when constraints are put on the allocation of the data to processors. When the German mathematician, physicist, and spectroscopist Carl David Tolmé Runge (1856–1927) interpolated the function $f(x)=1/(1+25x^2)$ at $n+1$ equally spaced points in the interval $[-1,1]$ by a polynomial of degree n, he made a startling observation: With increasing degree n, the polynomials approximate f less accurately—instead of, as one would have hoped, more accurately. Although the polynomials agree with f at the interpolation points, they oscillate between the interpolation points, and the oscillations worsen as the polynomial degree grows. Thence polynomial interpolation at equally spaced points fell into disrepute. It turns out, however, that interpolation at equally spaced points cannot be avoided altogether. It is necessary, for instance, in computational fluid dynamics, when one has to solve hyperbolic partial differential equations. Moreover, the interpolant may also have to retain the positivity, monotonicity, and boundedness of the underlying function. How to meet these demands in the face of Runge's phenomenon? This is the subject of Martin Berzins's paper “Adaptive Polynomial Interpolation on Evenly Spaced Meshes.” The third paper, “Uniform Asymptotics Applied to Ultrawideband Pulse Propagation” by Natalie Cartwright and Kurt Oughstun, deals with a problem that was first studied by the German theoretical physicist Arnold Sommerfeld (1868–1951) and his student, the French physicist Léon Brillouin (1889–1969): the propagation of electromagnetic waves in dispersive materials. The problem at hand is complicated by the fact that the pulse is an ultrawide band consisting of a wide range of frequencies. 
The propagation of the pulse is represented as a complex integral, and the authors derive a continuous asymptotic expansion of this integral by applying a combination of saddle point methods.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2007}, month={Jan}, pages={593–593} } @article{ipsen_2007, title={Problems and Techniques}, volume={49}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-34548494833&partnerID=MN8TOARS}, DOI={10.1137/siread000049000002000209000001}, abstractNote={The Super Bowl is the annual championship game of professional American football, the National Football League (NFL). A coin toss determines which team kicks off and which one receives. This coin toss is big business in Las Vegas, spawning bets worth millions of dollars. In the past 12 years, 9 tosses came up tails and only 3 came up heads. Why so many tails and so few heads? Persi Diaconis, Susan Holmes, and Richard Montgomery may have found an explanation in their paper Dynamical Bias in the Coin Toss, based on a fascinating mixture of mechanics, probability distributions, and high speed cameras. If you want to be a millionaire, SIAM Review is here to help! In the second paper, A Hybrid Approach for Efficient Robust Design of Dynamic Systems, Martin Mönnigmann and his five coauthors are concerned with optimizing models for chemical processes such as fermentation (think of yeast producing alcohol in the brewing of beer). If such a process operates on a continuous basis, one needs to find optimal equilibrium solutions for the underlying model. Say, for example, that one would like to determine the yeast concentration that maximizes the amount of alcohol in the beer. If bacteria can affect the yeast concentration, one also needs to be assured that the fermentation process remains stable, even when yeast concentrations fluctuate slightly. 
With the help of bifurcation theory, interval arithmetic, and nonlinear solvers, the authors derive a strategy for computing such solutions. Prost! (That’s German for “cheers.”) A Potpourri of Conjectures and Open Questions in Nonlinear Analysis and Optimization by Jean‐Baptiste Hiriart‐Urruty is one of the more unusual papers in the Problems and Techniques section. The fourteen very different problems include a question posed by Newton in 1686 (what is the shape of a body that offers minimal resistance in a fluid?); issues in matrix theory (which real symmetric matrices can be simultaneously diagonalized with a single congruence transformation?); and solutions to eikonal equations (which smooth functions f over which domains satisfy $\|\nabla f(x)\| = 1$?). Each problem comes with a precise description, a short history, and a digestible set of references.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2007}, month={Jan}, pages={209–209} } @article{ipsen_2007, title={Problems and Techniques}, volume={49}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33847672254&partnerID=MN8TOARS}, DOI={10.1137/siread000049000003000419000001}, abstractNote={This issues features papers about mathematical genetics, data compression, and data assimilation in oceanography. 1. Computer algebra can help to predict the genetic makeup of a population. This is what we can conclude from the first paper, “Buchberger's Algorithm and the Two-Locus, Two-Allele Model.” With reproduction rates and probability of chromosome recombination as parameters, and one variable for each chromosome type, one can set up a system of polynomial equations. The solution value for a variable indicates how often this chromosome type occurs in the population. The question is now, over all parameter choices, how many different equilibrium solutions can there be? 
In other words, in the long term, can a chromosome type occur with any possible frequency? Arthur Copeland's answer is no: the number of solutions is almost always finite (and conjectured to be 15) in the special case when a population contains at most four different chromosome types. He arrives at his answer by means of affine varieties, ideals, and Gröbner bases. He also shows that the equilibrium solutions vary smoothly with the parameters. 2. The second paper, “A Direct Formulation for Sparse PCA Using Semidefinite Programming,” by Alexandre d'Aspremont, Laurent El Ghaoui, Michael I. Jordan, and Gert R. G. Lanckriet, is concerned with principal component analysis (PCA), a technique for compressing data so that as little information as possible is lost. PCA accomplishes this by finding directions of maximum variance in the data. In data analysis of gene expressions, for instance, different elements of a direction vector correspond to different genes. However, interpreting the meaning of a direction is much easier if only a few genes contribute to the direction. Such a direction vector has many zero elements and is called sparse. The authors propose a semidefinite programming method that maximizes variance as well as sparsity. The method is computationally efficient and robust. 3. The topic of the third paper, “A Reduced-Order Kalman Filter for Data Assimilation in Physical Oceanography,” by D. Rozier, F. Birol, E. Cosme, P. Brasseur, J. M. Brankart, and J. Verron, is a technique used by oceanographers to increase the accuracy of numerical models for predicting ocean circulation. Data assimilation combines the output of a simulation step with observed data and error statistics and then returns this “corrected” output to the simulation as the basis for the next step. Data assimilation based on Kalman filters assumes that measurement and model errors are unbiased and Gaussian. 
This is expensive, however, because an error covariance matrix must be constructed at every step. A reduced-order Kalman filter (called SEEK) is a cheaper version that reduces (or compresses) the size of the covariance matrix—the same key idea as in the paper described above. The authors give a very readable account of the issues associated with so-called ocean global circulation models: choice of vertical coordinate system (depending on the ocean region: shallow coastal shelf, steeply sloping ocean floors, stratified regions); limitations of models for ocean circulation simulations; sequential data assimilation techniques; adaptation of Kalman filters to geophysical applications; and modes of data acquisition (via satellites, ships, floats, mooring networks). The effectiveness of the SEEK filter is illustrated with numerical simulations of the Gulf Stream and the Bay of Biscay.}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2007}, month={Jan}, pages={419–420} } @article{ipsen_2007, title={Problems and Techniques}, volume={49}, DOI={10.1137/siread000049000001000033000001}, abstractNote={The three papers in this issue deal with how to describe spikes and plateaus; how to bound the probability that a random variable belongs to a particular set; and how to analyze thermodynamic properties of DNA and RNA. 1. If you had only a (mathematical) sound bite to explain the difference between the broad plateau of the Black Mesa in Arizona and the steep spike of the Matterhorn in Switzerland, what could you do? Thomas Hillen, in his paper “A Classification of Spikes and Plateaus,” proposes that you inspect fourth derivatives. In Flatland (a one‐dimensional domain, that is), the Black Mesa has a negative fourth derivative at its highest point, while the Matterhorn has a positive fourth derivative. 
For n‐dimensional domains it’s a little bit more complicated because one has to check whether the Hessian of the Laplacian is positive definite or negative definite. The author illustrates how the classification of plateaus and spikes benefits the qualitative analysis of several partial differential equations from mathematical physics and biology. 2. Given a random vector $X\in\real^n$ whose first two moments (the expected mean vector $\mathbf{E}X$ and the covariance matrix $\mathbf{E}XX^T$) are known, the problem considered in the second paper is to bound the probability that X lies in a particular subset C of $\real^n$. Here the set C is defined by m strict quadratic inequalities, $$C=\left\{x\in\real^n:\ x^TA_ix+2b_i^Tx+c_i<0, \quad 1\leq i\leq m\right\},$$ which involve symmetric matrices $A_i$, vectors $b_i$, and scalars $c_i$. The best possible (“sharp”) lower bound on the probability that X lies in C is an example of a generalized Chebyshev inequality. In general, generalized Chebyshev inequalities are sharp upper or lower bounds on the probability that a random vector with given moments lies in a particular set. The first such inequality was formulated in the nineteenth century by Chebyshev and proved by his student Markov. Almost a hundred years later duality theory and optimization emerged as powerful tools for deriving Chebyshev inequalities. Since then Chebyshev inequalities have appeared in decision analysis, statistics, finance, and machine learning. In their well‐written paper “Generalized Chebyshev Bounds via Semidefinite Programming,” Lieven Vandenberghe, Stephen Boyd, and Katherine Comanor construct two equivalent (dual) semidefinite programs for solving the above Chebyshev inequality. That is, the optimal value of these programs equals the best possible lower bound on the probability that a random vector X with given mean and covariance lies in the set C. 
If you have time, savor the well‐organized and constructive proofs, and check out the two simple examples to watch the semidefinite programs in action. 3. Nucleic acid technology is based on the design of molecular systems that self‐assemble out of strands of DNA or RNA, so as to implement functions relevant to robotics, biosensing, medicine, and many other applications. Fundamental to the success of this technology is a rigorous analysis of the thermodynamic properties of nucleic acid strands. In their paper “Thermodynamic Analysis of Interacting Nucleic Acid Strands,” Robert Dirks, Justin Bois, Joseph Schaeffer, Erik Winfree, and Niles Pierce make a significant advance by devising models and algorithms, not just for a single strand, but for an entire test tube of interacting strands of nucleic acids. They do this by combining an unusual variety of different mathematical techniques. A nucleic acid strand can be represented as a sequence of bases from a four‐letter alphabet (for DNA the bases are A, C, G, and T). Complementary bases can interact to form base pairs (for DNA these base pairs are C‐G and A‐T). The set of base pairs in a molecular conformation of interacting nucleic acid strands is called the secondary structure. In the simplest case, two fully‐complementary strands can base‐pair to each other completely, bringing to mind a ladder in which the rungs represent base pairs and the two sides of the ladder represent the backbones of the two strands. In practice, however, things are usually more complicated. Not every base is paired, and the strands might interact to form branched structures with various types of bulges and loops between the base pairs. For reasons of practicality, the authors disallow certain complicated secondary structures (pseudoknots). An important thermodynamic property, useful for the design of nucleic acids, is the equilibrium probability of a secondary structure. 
It can be calculated in a straightforward way from the partition function. Calculating the partition function for a single strand requires summing the partition functions of all smaller subsequences. This can be done by a dynamic program that computes the sums recursively. By contrast, calculating the partition function for a complex of several interacting strands is much more difficult, due to the physical and mathematical issues that arise when bases from different strands pair up. The authors present models and algorithms for computing the partition function for a complex of an arbitrary number of interacting strands. In particular, they use graph theory to map each allowed secondary structure to a unique ordering of strands (representation theorem). And they use group theory to eliminate symmetries and redundancies that would lead to overestimates of the partition function (distinguishability correction). Together, these results ensure that a dynamic program can again be used to recursively calculate the partition function. Now the authors are ready to extend their analysis to realistic experimental conditions where an arbitrary number of strand species interact (in a dilute solution) to form a variety of complexes. The authors use combinatorial arguments to determine how many times the dynamic program must be invoked to compute the partition function for such a system. 
They express the computation of the equilibrium concentration of each species of complex as a convex programming problem and use the concave dual problem as the basis for an efficient numerical implementation.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2007}, month={Jan}, pages={33–34} } @article{ipsen_2007, title={Problems and techniques}, volume={49}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-34249999151&partnerID=MN8TOARS}, number={2}, journal={SIAM Review}, author={Ipsen, I.}, year={2007} } @article{ipsen_2007, title={Problems and techniques: Introduction}, volume={49}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-37249005235&partnerID=MN8TOARS}, number={4}, journal={SIAM Review}, author={Ipsen, I.}, year={2007} } @article{finkel_kuster_lasater_levy_reese_ipsen_2006, title={Communicating Applied Mathematics: Four Examples}, volume={48}, ISSN={0036-1445 1095-7200}, url={http://dx.doi.org/10.1137/s0036144504443523}, DOI={10.1137/S0036144504443523}, abstractNote={Communicating Applied Mathematics is a writing- and speaking-intensive graduate course at North Carolina State University. The purpose of this article is to provide a brief description of the course objectives and the assignments. Parts A--D of this article represent the class projects and illustrate the outcome of the course: * The Evolution of an Optimization Test Problem: From Motivation to Implementation, by Daniel E. Finkel and Jill P. 
Reese * Finding the Volume of a Powder from a Single Surface Height Measurement, by Christopher Kuster * Finding Oscillations in Resonant Tunneling Diodes, by Matthew Lasater * A Shocking Discovery: Nonclassical Waves in Thin Liquid Films, by Rachel Levy We introduce a water-supply problem considered by the optimization and hydrology communities for benchmarking purposes. The objective is to drill five wells so that the cost of pumping water out of the ground is minimized. Using the implicit filtering optimization algorithm to locate the wells, we save approximately $2,500 over the cost of a given initial well configuration. The volume of powder poured into a bin with obstructions is found by calculating the height of the surface at every point. This is done using the fast marching algorithm. We look at two different bin geometries and determine the volumes as a function of the powder height under the spout. The surface of the powder satisfies a two-dimensional eikonal equation. This equation is solved using the fast marching method. Resonant tunneling diodes (RTDs) are ultrasmall semiconductor devices that have potential as very high-frequency oscillators. To describe the electron transport within these devices, physicists use the Wigner--Poisson equations which incorporate quantum mechanics to describe the distribution of electrons within the RTD. Continuation methods are employed to determine the steady-state electron distributions as a function of the voltage difference across the device. These simulations predict the operating state of the RTD under different applied voltages and will be a tool to help physicists understand how changing the voltage applied to the device leads to the development of current oscillations. When a thin film flows down an inclined plane, a bulge of fluid, known as a capillary ridge, forms on the leading edge and is subject to a fingering instability in which the fluid is channeled into rivulets. 
This process is familiar to us in everyday experiments such as painting a wall or pouring syrup over a stack of pancakes. It is also observed that changes in surface tension due to a temperature gradient can draw fluid up an inclined plane. Amazingly, in this situation the capillary ridge broadens and no fingering instability is observed. Numerical and analytical studies of a mathematical model of this process led to the discovery that these observations are associated with a nonclassical shock wave previously unknown to exist in thin liquid films.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Finkel, Daniel E. and Kuster, Christopher and Lasater, Matthew and Levy, Rachel and Reese, Jill P. and Ipsen, Ilse C. F.}, year={2006}, month={Jan}, pages={359–389} } @article{ipsen_kirkland_2006, title={Convergence analysis of a PageRank updating algorithm by Langville and Meyer}, volume={27}, ISSN={["1095-7162"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33646455491&partnerID=MN8TOARS}, DOI={10.1137/S0895479804439808}, abstractNote={The PageRank updating algorithm proposed by Langville and Meyer is a special case of an iterative aggregation/disaggregation (SIAD) method for computing stationary distributions of very large Markov chains. It is designed, in particular, to speed up the determination of PageRank, which is used by the search engine Google in the ranking of web pages. In this paper the convergence, in exact arithmetic, of the SIAD method is analyzed. The SIAD method is expressed as the power method preconditioned by a partial LU factorization. This leads to a simple derivation of the asymptotic convergence rate of the SIAD method. It is known that the power method applied to the Google matrix always converges, and we show that the asymptotic convergence rate of the SIAD method is at least as good as that of the power method. 
Furthermore, by exploiting the hyperlink structure of the web it can be shown that the asymptotic convergence rate of the SIAD method applied to the Google matrix can be made strictly faster than that of the power method.}, number={4}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, ICF and Kirkland, S}, year={2006}, pages={952–967} } @article{ipsen_wills_2006, title={Mathematical properties and analysis of Google’s PageRank}, volume={34}, journal={Boletin de la Sociedad Espanola de Matematica Aplicada}, author={Ipsen, I.C.F. and Wills, R.S.}, year={2006}, pages={191–196} } @article{ipsen_2006, title={New ideas for SIAM conferences from Europe}, volume={39}, number={10}, journal={SIAM News}, author={Ipsen, I.C.F.}, year={2006}, month={Dec}, pages={5} } @article{ipsen_2006, title={Problems and Techniques}, volume={48}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33644586567&partnerID=MN8TOARS}, DOI={10.1137/siread000048000001000041000001}, abstractNote={The Problems and Techniques section in this issue contains three papers on very different topics: achieving reliability through optimization, improving the solution of systems of linear equations by preconditioning, and determining asymptotic expansion of integrals in Laplace form. Reliability is an important aspect in engineering design. One can make a system (such as a car) more reliable by introducing redundancy (e.g., carrying around a spare tire) or by making individual system components more reliable (e.g., using better material to manufacture car parts). In the first paper, James Falk, Nozer Singpurwalla, and Yefim Vladimirsky pursue the latter approach, opting to increase the reliability of individual components. This is called reliability allocation. Why not just adopt the simplest strategy and make all components of a system more reliable? This could be way too expensive. 
Consider, for instance, my Volkswagen GTI, a system with probably at least 3000 different components. Let's measure the reliability of the GTI by whether it drives or not. Replacing the plastic cup holder by one made from aluminum would cost a lot but would do nothing to increase the car's reliability. That's because the cup holder does not contribute to the drivability of the car (provided we disregard the driver who needs the cupholder for coffee to stay awake). However, replacing the plastic radiator by one made from aluminum costs only slightly more, but increases the reliability of the car significantly because the added cooling capacity prevents the engine from overheating when one wants to drive really fast. The question therefore is, which components of a system should we make more reliable, while keeping an eye on the cost? The authors propose to maximize a utility function that is aware of the interaction among components of the system, and trades off the cost of increasing reliability with greater probability of system functioning. In the second paper, Luis González presents a well-written and elegant analysis to justify the effectiveness of a particular approach for solving systems of linear equations. Systems of linear equations Ax = b whose coefficient matrix A has large dimension and is sparse (i.e., contains many zero elements) occur, for instance, whenever partial differential equations are discretized. Direct methods for solving Ax = b, such as Gaussian elimination, can be too expensive when they introduce too many nonzero elements during the course of the computation. In this case one may resort to iterative methods, which in successive iterations produce better and better approximations—or so one hopes. Unless one is lucky with one's matrix A, though, iterative methods can be slow and unreliable. 
Efficiency and reliability can be improved by preconditioning the system, i.e., transforming Ax = b into a preconditioned system (AN) y = b, where the matrix N is chosen such that the system (AN) y = b can be solved quickly, and the original solution x = Ny can be recovered easily. In this paper, the matrix N is chosen as an approximate inverse of A, so that the preconditioned matrix AN is close to the identity matrix I, and iterative methods applied to (AN) y = b converge fast. To ensure that recovery of the solution x = Ny is still easy, restrictions must be placed on N. The paper sets a very general context by assuming that N comes from some subspace S of matrices. Formally, the criterion is to choose N such that the residual $\|$AN – I$\|_{\fontsize{6pt}{6pt}\selectfont\textit{F}}$ is minimized, in the Frobenius norm, over all matrices from the subspace S. The idea for justifying the effectiveness of the preconditioner N is to cast the minimization problem in the form min$_{\fontsize{6pt}{6pt}\selectfont\textit{Q}}\|$Q – I$\|_{\fontsize{6pt}{6pt}\selectfont\textit{F}}$, where Q is a matrix from the subspace AS. The problem now amounts to analyzing orthogonal projections Q of the identity matrix I (hence the title of the paper). It turns out that if the smallest singular value of the preconditioned matrix AN is close to one and if also the smallest eigenvalue in magnitude is close to one, then everything works out: the residual $\|$AN – I$\|_{\fontsize{6pt}{6pt}\selectfont\textit{F}}$ is small, the matrix AN is well-conditioned, and its departure from normality is small. These are exactly the conditions that guarantee (in exact arithmetic) the effectiveness and reliability of the preconditioner N. To understand what's going on in the third paper, let's start with an example. 
The Gamma function can be represented as $$\hspace*{21pt}\hskip3pc \mbox{$\Gamma$($\lambda$ + 1) = $\displaystyle\int_{\mbox{\fontsize{6pt}{6pt}\selectfont 0}}^{\infty}$ u$^{\lambda}e^{\fontsize{6pt}{6pt}\selectfont\mbox{\fontsize{6pt}{6pt}\selectfont –}\textit{u}}$\textit{du},} $$ where $\lambda> $ 0. If we are interested in how $\Gamma$ depends on the positive parameter $\lambda$, we can determine the asymptotic expansion $$\hskip3pc\hspace*{21pt}\mbox{$\Gamma$($\lambda$ + 1) ~ \textit{e}$^{\mbox{\fontsize{6pt}{6pt}\selectfont–}\lambda} \lambda^{\mbox{\fontsize{6pt}{6pt}\selectfont$\lambda$ +\,1}}$ $\left(\frac{\displaystyle\mbox{2} \pi}{\mbox{\fontsize{9}{9pt}\selectfont$\lambda$}}\right)^{\mbox{\fontsize{6pt}{6pt}\selectfont 1/2}} \left[\mbox{1 + }{\mbox{1} \over \mbox{12}\mbox{\fontsize{9}{9pt}\selectfont$\lambda$}}\mbox{\,+\,}{\mbox{1}\over \mbox{288}\,\mbox{\fontsize{9}{9pt}\selectfont$\lambda$}^{\mbox{\fontsize{6pt}{6pt}\selectfont 2}}}\mbox{\,+\,}\cdots\right].$} $$ One way of obtaining such an asymptotic expansion is to change variables, u = $\lambda$(1 + x),set h(x) $\equiv$ x – log(1 + x), and write $$\hspace*{0pt}\hskip3pc\hspace*{26pt}\mbox{$\Gamma$($\lambda$ + 1) = \textit{e}$^{\mbox{\fontsize{6pt}{6pt}\selectfont–}\lambda} \lambda^{\mbox{\fontsize{6pt}{6pt}\selectfont$\lambda$ +\,1}} \displaystyle\int_{-1}^{\infty}$ \textit{e}$^{\mbox{\fontsize{6pt}{6pt}\selectfont–$\lambda$\textit{h}(\textit{x})}}$\textit{dx}.} $$ The integral is in Laplace form, i.e., it is of the form $$\hskip3pc\hspace*{23pt}\mbox{\textit{I}($\lambda$) = $\displaystyle\int_{\fontsize{6pt}{6pt}\selectfont\textit{a}}^{\fontsize{6pt}{6pt}\selectfont\textit{b}} \phi$(\textit{x})\textit{e}$^{\mbox{\fontsize{6pt}{6pt}\selectfont–$\lambda$\textit{h}(\textit{x})}}\textit{dx}$,} $$ where, in this special case, $\phi$(x) = 1, a = –1, and b = $\infty$. 
Here is the reason why Laplace's form is important, and why it was named after Laplace: Laplace observed that the major contributions to the integral I($\lambda$) come from those points where e$^{\fontsize{6pt}{6pt}\selectfont{-\textit{h}(\textit{x})}}$ attains its greatest values. He showed that if, among other things, h(x) has a minimum only at the left endpoint x = a, then the integral has the asymptotic expansion $$\hskip3pc \mbox{\textit{I}($\lambda$) ~ \textit{e}$^{\mbox{\fontsize{6pt}{6pt}\selectfont–$\lambda$\textit{h}(\textit{a})}}\> \displaystyle\sum_{\fontsize{6pt}{6pt}\selectfont\textit{s = 0}}^{\infty} \frac{\mbox{\textit{d}$_{\fontsize{6pt}{6pt}\selectfont\textit{s}}$}} {\lambda^{\mbox{\fontsize{6pt}{6pt}\selectfont($\alpha$ + \textit{s})$/\mu$}}}\qquad \mbox{as}~\lambda\rightarrow\infty$, } $$ where the numbers $\alpha$ and $\mu$ come from expansions of h(x) and $\phi$(x), respectively. However, symbolic computation of the coefficients d$_{\fontsize{6pt}{6pt}\selectfont\textit{s}}$ in asymptotic expansions of integrals in Laplace form I($\lambda$) has often been intractable. In his paper, John Wojdylo presents explicit expressions for the coefficients d$_{\fontsize{6pt}{6pt}\selectfont\textit{s}}$ that are more efficient and simpler than existing ones, and which should facilitate symbolic implementations. Laplace's method is one of several popular methods for determining asymptotic expansions of integrals (integration by parts is another). 
Asymptotic expansions can be more advantageous than numerical methods because they can reveal dependence on parameters that are physically significant, they can be differentiated or integrated exactly, and they can be more accurate.}, number={1}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2006}, month={Jan}, pages={41–42} } @article{ipsen_2006, title={Problems and Techniques}, volume={48}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33751424677&partnerID=MN8TOARS}, DOI={10.1137/siread000048000003000485000001}, abstractNote={Related DatabasesWeb of Science You must be logged in with an active subscription to view this.Article DataHistoryPublished online: 17 February 2012Publication DataISSN (print): 0036-1445ISSN (online): 1095-7200Publisher: Society for Industrial and Applied MathematicsCODEN: siread}, number={3}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2006}, month={Jan}, pages={485–485} } @article{ipsen_2006, title={Problems and Techniques}, volume={48}, DOI={10.1137/siread000048000004000679000001}, abstractNote={Readers may be surprised to learn that there is something on which everybody in my department agrees: the heating system is absolutely ineffective. Some offices get too hot too fast (usually inhabitated by some who like it cold), while other offices (like mine, unfortunately) take forever to become even slightly warm. The building is heated by a network of pipes through which hot steam is circulated. Question: How to design a heating system that delivers the same amount of steam to each office as fast as possible, thereby ensuring that all offices have the same temperature as fast as possible? Answer: Construct a specific matrix, called a weighted Laplacian, and choose the weights so as to maximize its second largest eigenvalue. The weights tell us how wide each pipe needs to be. 
Of course, this is a constrained optimization problem, because we are on a budget and can afford only a limited amount of material for the pipes. A more general problem is discussed by Jun Sun, Stephen Boyd, Lin Xiao, and Persi Diaconis in their paper “The Fastest Mixing Markov Process on a Graph and a Connection to a Maximum Variance Unfolding Problem.” They express the problem of maximizing the second largest eigenvalue of the Laplacian as a semidefinite program. The dual of this program has a simple geometric interpretation: It’s the problem of positioning n points in n‐space, so that they are as far apart as possible, but do not exceed prescribed distances between any two points. Coming back now to the heating issues in my department, we have been promised a new building. Ground breaking is to start anytime now (or so, at least, we are told). I have been thinking about giving this paper to the architects; it might inspire them to install a more effective heating system. In the well‐written paper “Globalization Techniques for Newton–Krylov Methods and Applications to the Fully Coupled Solution of the Navier–Stokes Equations,” Roger Pawlowski, John Shadid, Joseph Simonis, and Homer Walker discuss methods for the solution of systems of nonlinear equations $F(u)=0$. Such systems arise, for instance, when one discretizes partial differential equations to solve fluid flow problems. Arguably the most popular method for solving $F(u)=0$ is Newton’s method. It starts from an initial approximation $u_0$ and produces successively better (we hope) iterates $u_{k+1}$ as updates of the previous iterate, $u_{k+1}=u_k+s_k$. The step $s_k$ is computed as the solution to the linear system $F^{\prime}(u_k)s_k=-F(u_k)$, where the Jacobian $F^{\prime}(u)$ is the matrix of derivatives. When the linear system is solved by a Krylov space method, for instance, one talks about a Newton–Krylov method. 
Convincing Newton’s method to converge to the solution is not always easy, especially when the initial approximation $u_0$ is far away. A variety of strategies is available that can enhance the performance of Newton’s method. The authors discuss two. To increase robustness, one can solve the linear systems more or less accurately; this is done by terminating the linear system solution as soon as the residual norm $\|F^{\prime}(u_k)+F(u_k)s_k\|$ falls below a specified forcing term. To improve the chances for convergence, one can globalize Newton’s method by changing the length of the step $s_k$ (as opposed to its direction), or by choosing a step $s_k$ that minimizes the residual norm over a particular region. The authors prove convergence results, and perform numerical experiments on standard benchmark problems to compare different forcing terms and globalization strategies. Are you one of those people who firmly believes that there is one and only one way to win a tennis match? And that’s by subjecting your opponent to that impossible‐to‐return 700‐horse‐power serve? Yes? Then we might have just the paper for you. In “Monte Carlo Tennis,” Paul Newton and Kamran Aslam analyze the probability of winning in tennis, and express it in terms of the probability that a player wins a point when serving. In previous work, Paul Newton and coauthor Joe Keller had assumed that this probability is constant—throughout the whole match, and even a tournament. This amounts to assuming that points in tennis are random variables, independently and identically distributed (i.i.d.). However, this assumption fails to account for the “hot‐hand,” when everything goes just swimmingly; the “back‐to‐the‐wall” effect, when miraculous feats become possible in the face of looming loss; or simply the adjustment to new tennis balls. Do these things really make a difference? Is the i.i.d. assumption unrealistic? 
Paul Newton and Kamran Aslam perform Monte Carlo simulations in MATLAB to answer this question. Read the paper if you want to know what they come up with.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2006}, month={Jan}, pages={679–680} } @article{ipsen_2006, title={Problems and Techniques}, volume={48}, DOI={10.1137/siread000048000002000305000001}, abstractNote={The Problems and Techniques section features two papers: one on differential equations, and one on sums containing binomial coefficients and their logarithms. The deflection u(x) of a beam at point x can be described by an ordinary differential equation (ODE) of fourth order, such as, for instance, $$ \hspace*{58pt}\frac{ \mbox{ \textit{d}$^{\mbox{\fontsize{6pt}{6pt}\selectfont 4}}$\textit{u}(\textit{x})}} {\mbox{ \textit{dx}$^{\mbox{\fontsize{6pt}{6pt}\selectfont 4}}$}} \mbox{ = – 100,\qquad 0 $\leq$ \textit{x} $\leq$ 1.} $$ In this specific example the vertical load on the beam is uniform, as is the bending stiffness, and the beam has length 1. A unique solution u(x) exists if appropriate boundary conditions are prescribed, e.g., u(0) = u(1) = u$^{\prime}$(0) = u$^{\prime}$(1) = 0, which means that the beam is fixed at both ends. But what if the beam isn't fixed and we don't know the boundary conditions precisely? What if we have only bounds on the deflection and rotation at the endpoints, that is, inequalities of the form –1 $\leq$ u(x) $\leq$ 1 and –1 $\leq$ u$^{\prime}$(x) $\leq$ 1 for x = 0 and x = 1? Does a solution still exist? Enrique Castillo, Antonio Conejo, Carmen Castillo, and Roberto Mínguez, in “Solving Ordinary Differential Equations with Range Conditions,” show that it does indeed. The authors present a method to determine all solutions for linear ODEs whose boundary conditions are linear and are prescribed within intervals (or ranges) rather than at single points. 
Based on the inequalities in the boundary conditions, they formulate a system of linear inequalities. The solutions to this system represent coefficients in a linear combination that describes all solutions of the ODE. The authors also discuss tests for existence and uniqueness of solutions. The second paper, “Difference of Sums Containing Products of Binomial Coefficients and Their Logarithms,” is concerned with the expression $$ \hspace*{59pt}\displaystyle\frac{\mbox{1}} {\mbox{2$^{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{n}}}$}} \mbox{$\displaystyle\sum_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k} = 0}} ^{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{n}}}$} \left(\displaystyle\frac{\mbox{1}}{\mbox{2}} \mbox{$\alpha_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}}$} \mbox{ ln } \alpha_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}}-\beta_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}} \mbox{ ln } \beta_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}} \right)\mbox{,} \qquad \mbox{$\alpha_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}}$} \equiv {\mbox{\textit{n} + 1}\choose \mbox{\textit{k}}}\mbox{,}\quad \mbox{$\beta_{\mbox{\fontsize{6pt}{6pt}\selectfont \textit{k}}}$} \equiv {\mbox{\textit{n}}\choose \mbox{\textit{k}}}\mbox{,} $$ which occurs in an analysis of covert communication channels and is related to the capacity of the covert channel. 
Authors Allen Miller and Ira Moskowitz use binomial identities to simplify the expression, show that it increases monotonically with n, and prove that it converges to ln 2 as n $\rightarrow \infty$.}, number={2}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2006}, month={Jan}, pages={305–305} } @article{ipsen_2006, title={Problems and techniques}, volume={48}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33644586567&partnerID=MN8TOARS}, number={1}, journal={SIAM Review}, author={Ipsen, I.}, year={2006}, pages={41–42} } @article{ipsen_2006, title={Problems and techniques: Introduction}, volume={48}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33744920863&partnerID=MN8TOARS}, number={2}, journal={SIAM Review}, author={Ipsen, I.C.F.}, year={2006}, pages={485–486} } @article{ipsen_2006, title={Problems and techniques: Introduction}, volume={48}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-33744920863&partnerID=MN8TOARS}, number={2}, journal={SIAM Review}, author={Ipsen, I.C.F.}, year={2006}, pages={485–486} } @article{ipsen_2006, title={Special issue on accurate solution of eigenvalue problems}, volume={28}, ISSN={["0895-4798"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-35348933724&partnerID=MN8TOARS}, DOI={10.1137/sjmael0000280000040000ix000001}, abstractNote={The occasion for this special issue is the Sixth International Workshop on Accurate Solution of Eigenvalue Problems, which took place at The Pennsylvania State University from May 22-25, 2006. This special issue provides an outlet for papers from the workshop and recognizes advances in the numerical solution of eigenvalue and related problems. 
This is the second such special issue published in the SIAM Journal on Matrix Analysis and Applications; the first was published in number 4 of volume 28 in connection with the Fifth International Workshop on Accurate Solution of Eigenvalue Problems, which took place in Hagen, Germany, from June 29 to July 1, 2004. The twelve papers in the current issue are concerned with a variety of aspects that arise in the computation of eigenvalues and invariant subspaces: perturbation bounds and sensitivity, accuracy and convergence behavior of algorithms, exploitation of structure in matrices, and particular engineering applications. Thanks go to SIMAX Editor-in-Chief, Henk van der Vorst; guest editors Jesse Barlow, Froilan Dopico, and Zlatko Drmac, who put great effort into the careful and timely review of papers; and Mitch Chernoff, Cherie Trebisky, and other members of the SIAM staff who worked hard to publish this special issue.}, number={4}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse C. F.}, year={2006}, pages={IX-IX} } @article{ipsen_2006, title={Special issue on accurate solution of eigenvalue problems}, volume={28}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-35348933724&partnerID=MN8TOARS}, number={4}, journal={SIAM Journal on Matrix Analysis and Applications}, author={Ipsen, I.C.F.}, year={2006} } @article{ipsen_2005, title={How to get SIAM cooperation for your meeting}, volume={38}, number={2}, journal={SIAM News}, author={Ipsen, I.C.F.}, year={2005}, month={Mar} } @article{ipsen_2005, title={Problems and Techniques}, volume={47}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-28244444106&partnerID=MN8TOARS}, DOI={10.1137/siread000047000004000707000001}, abstractNote={Suppose you are a bicycle messenger in the busy business district of a large city. 
Here is what your day looks like: You are to deliver incoming mail to building i, and you are to pick up outgoing mail destined only for building i + 1. All in all you are responsible for m buildings. Your bike, however, is not of the highest quality: It can accommodate only mail for a single delivery. Riding from building i to building j requires tij minutes. Once you arrive at a building, you need d minutes to get off your bike, run into the building, and get onto your bike again. Moreover, once incoming mail has arrived at building i, it takes pi minutes for the outgoing mail to be ready (if this is too long, you may want to make another delivery and pickup in the meantime). You are to visit each building k times. Question: In which order should you visit the buildings so that you finish your work as fast as possible? The above description is a simplified version of the problem discussed in the paper by Milind W. Dawande, H. Neil Geismar, and Suresh P. Sethi. There the messenger is a robot, the buildings are machines, and the mail represents parts to be processed by the machines. The authors show that there exists a cyclic schedule that maximizes long-term throughput. Cyclic schedules are preferred in industrial environments, because they are easy to implement and control. Since the literature on robotic cell scheduling is full of different models for different kinds of industrial applications, it is important to know that all optimal schedules can be reduced to cyclic schedules. The authors end their paper by describing several challenging open problems. The paper by Miguel Torres-Torriti and Hannah Michalska describes a software package (LTP), implemented in Maple, for the symbolic manipulation of expressions that occur in the context of Lie algebra theory. This theory has found applications in classical and quantum mechanics, analysis of dynamical systems, construction of nonlinear filters, and the design of feedback control laws for nonlinear systems. 
Since the symbolic computations are often complex and tedious, the development of software for applications of Lie algebra theory is crucial. The LTP software package is targeted at applications such as solution of differential equations evolving on Lie groups, and structure analysis of general dynamical systems.}, number={4}, journal={SIAM Review}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse}, year={2005}, month={Jan}, pages={707–707} } @article{ipsen_2005, title={Problems and techniques}, volume={47}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-28244444106&partnerID=MN8TOARS}, number={4}, journal={SIAM Review}, author={Ipsen, I.}, year={2005} } @article{ipsen_2005, title={Why aren’t SIAM conferences cheaper?}, volume={38}, number={1}, journal={SIAM News}, author={Ipsen, I.C.F.}, year={2005}, month={Jan}, pages={1} } @article{ipsen_2004, title={Accurate eigenvalues for fast trains}, volume={37}, number={9}, journal={SIAM News}, author={Ipsen, I.C.F.}, year={2004}, month={Nov}, pages={1–2} } @article{ipsen_2003, title={A note on unifying absolute and relative perturbation bounds}, volume={358}, ISSN={["1873-1856"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84867926408&partnerID=MN8TOARS}, DOI={10.1016/S0024-3795(01)00571-7}, abstractNote={Perturbation bounds for invariant subspaces and eigenvalues of complex matrices are presented that lead to absolute as well as a large class of relative bounds. In particular it is shown that absolute bounds (such as those by Davis and Kahan, Bauer and Fike, and Hoffman and Wielandt) and some relative bounds are special cases of `universal' bounds. As a consequence, we obtain a new relative bound for subspaces of normal matrices, which contains a deviation of the matrix from (positive-) definiteness. 
We also investigate how row scaling affects eigenvalues and their sensitivity to perturbations, and we illustrate how the departure from normality can affect the condition number (with respect to inversion) of the scaled eigenvectors.}, number={1-3}, journal={LINEAR ALGEBRA AND ITS APPLICATIONS}, publisher={Elsevier BV}, author={Ipsen, ICF}, year={2003}, month={Jan}, pages={239–253} } @article{beattie_ipsen_2003, title={Inclusion regions for matrix eigenvalues}, volume={358}, ISSN={["0024-3795"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84867969159&partnerID=MN8TOARS}, DOI={10.1016/S0024-3795(02)00279-3}, abstractNote={We review Lehmann’s inclusion bounds and provide extensions to general (non-normal) matrices. Each inclusion region has a diameter related to the singular values of a restriction of the matrix to a subspace and dependent on either an eigenvector condition number or the departure of the matrix from normality. The inclusion regions are optimal for normal matrices. Similar considerations lead to inclusion bounds based on relative distances expressed analogously in terms of appropriately defined generalized singular values.}, number={1-3}, journal={LINEAR ALGEBRA AND ITS APPLICATIONS}, publisher={Elsevier BV}, author={Beattie, C and Ipsen, ICF}, year={2003}, month={Jan}, pages={281–291} } @article{lee_ipsen_2003, title={Zone determinant expansions for nuclear lattice simulations}, volume={68}, ISSN={["1089-490X"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85035272077&partnerID=MN8TOARS}, DOI={10.1103/physrevc.68.064003}, abstractNote={We introduce a new approximation to nucleon matrix determinants that is physically motivated by chiral effective theory. 
The method involves breaking the lattice into spatial zones and expanding the determinant in powers of the boundary hopping parameter.}, number={6}, journal={PHYSICAL REVIEW C}, author={Lee, DJ and Ipsen, ICF}, year={2003}, month={Dec} } @article{ipsen_2001, title={A note on preconditioning nonsymmetric matrices}, volume={23}, ISSN={["1064-8275"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0036294613&partnerID=MN8TOARS}, DOI={10.1137/S1064827500377435}, abstractNote={The preconditioners for indefinite matrices of KKT form in [M. F. Murphy, G. H. Golub, and A. J. Wathen, SIAM J. Sci. Comput., 21 (2000), pp. 1969--1972] are extended to general nonsymmetric matrices.}, number={3}, journal={SIAM JOURNAL ON SCIENTIFIC COMPUTING}, author={Ipsen, ICF}, year={2001}, month={Oct}, pages={1050–1051} } @article{ipsen_mehrmann_2001, title={SIAG/LA and ILAS mark twenty years of progress at joint applied linear algebra meeting}, volume={34}, number={1}, journal={SIAM News}, author={Ipsen, I.C.F. and Mehrmann, V.}, year={2001}, month={Jan} } @article{genin_ipsen_ştefan_van dooren_2001, title={Stability Radius and Optimal Scaling of Discrete-Time Periodic Systems}, volume={34}, ISSN={1474-6670}, url={http://dx.doi.org/10.1016/s1474-6670(17)34081-8}, DOI={10.1016/s1474-6670(17)34081-8}, abstractNote={Robust stability properties of periodic discrete time systems are investigated. Analytic expressions are derived for the stability radius in the scalar case.}, number={12}, journal={IFAC Proceedings Volumes}, publisher={Elsevier BV}, author={Genin, Y. and Ipsen, I. and Ştefan, R. 
and Van Dooren, P.}, year={2001}, month={Aug}, pages={179–182} } @book{ipsen_2000, title={A note on a certain class of preconditioners for symmetric indefinite linear systems}, number={M&CT-TECH-00-005}, institution={Mathematics & Computing Technology, Phantom Works Division, The Boeing Company}, author={Ipsen, I.C.F.}, year={2000}, month={Jul} } @article{ipsen_2000, title={Absolute and relative perturbation bounds for invariant subspaces of matrices}, volume={309}, ISSN={["1873-1856"]}, DOI={10.1016/S0024-3795(99)00104-4}, abstractNote={Absolute and relative perturbation bounds are derived for angles between invariant subspaces of complex square matrices, in the two-norm and in the Frobenius norm. The absolute bounds can be considered extensions of Davis and Kahan's sinθ theorem to general matrices and invariant subspaces of any dimension. The relative bounds are the most general relative bounds for invariant subspaces because they place no restrictions on the matrix or the perturbation. When the perturbed subspace has dimension one, the relative bound is implied by the absolute bound.}, number={1-3}, journal={LINEAR ALGEBRA AND ITS APPLICATIONS}, publisher={Elsevier BV}, author={Ipsen, ICF}, year={2000}, month={Apr}, pages={45–56} } @article{ipsen_2000, title={An overview of relative sin Theta theorems for invariant subspaces of complex matrices}, volume={123}, ISSN={["1879-1778"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0034322243&partnerID=MN8TOARS}, DOI={10.1016/S0377-0427(00)00404-0}, abstractNote={Relative perturbation bounds for invariant subspaces of complex matrices are reviewed, with emphasis on bounding the sines of the largest principal angle between two subspaces, i.e. sinΘ theorems. The goal is to provide intuition, as well as an idea for why the bounds hold and why they look the way they do. Relative bounds have the advantage of being better at exploiting structure in a perturbation than absolute bounds. 
Therefore the reaction of subspaces to relative perturbations can be different than to absolute perturbations. In particular, there are certain classes of relative perturbations to which subspaces of indefinite Hermitian matrices can be more sensitive than subspaces of definite matrices.}, number={1-2}, journal={JOURNAL OF COMPUTATIONAL AND APPLIED MATHEMATICS}, author={Ipsen, ICF}, year={2000}, month={Nov}, pages={131–153} } @article{ipsen_2000, title={Expressions and bounds for the GMRES residual}, volume={40}, ISSN={["0006-3835"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0013133887&partnerID=MN8TOARS}, DOI={10.1023/A:1022371814205}, number={3}, journal={BIT}, author={Ipsen, ICF}, year={2000}, pages={524–535} } @book{ipsen_1998, place={Raleigh, NC}, title={A different approach to bounding the minimal residual norm in Krylov methods}, number={CRSC-TR98-19}, institution={Center for Research in Scientific Computation, Department of Mathematics, North Carolina State University}, author={Ipsen, I.C.F.}, year={1998} } @book{ipsen_1998, place={Raleigh, NC}, title={A note on the field of values of non-normal matrices}, number={CRSC-TR98-26}, institution={Center for Research in Scientific Computation, Department of Mathematics, North Carolina State University}, author={Ipsen, I.C.F.}, year={1998} } @book{cho_ipsen_1998, place={Raleigh, NC}, title={If a matrix has a single eigenvalue, how sensitive is this eigenvalue?}, number={CRSC-TR98-8}, institution={Center for Research in Scientific Computation, Department of Mathematics, North Carolina State University}, author={Cho, G.E. 
and Ipsen, I.C.F.}, year={1998} } @article{eisenstat_ipsen_1998, title={Relative perturbation results for eigenvalues and eigenvectors of diagonalisable matrices}, volume={38}, ISSN={["0006-3835"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0039285620&partnerID=MN8TOARS}, DOI={10.1007/BF02510256}, abstractNote={Let $$\hat \lambda $$ and $$\hat x$$ be a perturbed eigenpair of a diagonalisable matrixA. The problem is to bound the error in $$\hat \lambda $$ and $$\hat \lambda $$ . We present one absolute perturbation bound and two relative perturbation bounds. The absolute perturbation bound is an extension of Davis and Kahan's sin θ Theorem from Hermitian to diagonalisable matrices. The two relative perturbation bounds assume that $$\hat \lambda $$ and $$\hat x$$ are an exact eigenpair of a perturbed matrixD 1 AD 2 , whereD 1 andD 2 are non-singular, butD 1 AD 2 is not necessarily diagonalisable. We derive a bound on the relative error in $$\hat \lambda $$ and a sin θ theorem based on a relative eigenvalue separation. The perturbation bounds contain both the deviation ofD 1 andD 2 from similarity and the deviation ofD 2 from identity.}, number={3}, journal={BIT}, author={Eisenstat, SC and Ipsen, ICF}, year={1998}, month={Sep}, pages={502–509} } @article{ipsen_1998, title={Relative perturbation results for matrix eigenvalues and singular values}, volume={7}, ISSN={0962-4929 1474-0508}, url={http://dx.doi.org/10.1017/s0962492900002828}, DOI={10.1017/s0962492900002828}, abstractNote={It used to be good enough to bound absolute of matrix eigenvalues and singular values. Not any more. Now it is fashionable to bound relative errors. We present a collection of relative perturbation results which have emerged during the past ten years. No need to throw away all those absolute error bound, though. Deep down, the derivation of many relative bounds can be based on absolute bounds. This means that relative bounds are not always better. 
They may just be better sometimes – and exactly when depends on the perturbation.}, journal={Acta Numerica}, publisher={Cambridge University Press (CUP)}, author={Ipsen, Ilse C. F.}, year={1998}, month={Jan}, pages={151–201} } @article{ipsen_meyer_1998, title={The idea behind Krylov methods}, volume={105}, ISSN={["1930-0972"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0032280255&partnerID=MN8TOARS}, DOI={10.2307/2589281}, abstractNote={Click to increase image sizeClick to decrease image size Additional informationNotes on contributorsIlse C. F. IpsenILSE IPSEN received a Vordiplom in computer science/mathematics from the Universität Kaiserslautern in Germany and a Ph.D. in computer science from Penn State. Before joining the Mathematics Department at North Carolina State University she taught computer science at Yale. Her research interests include numerical linear algebra and scientific computing.Carl D. MeyerCARL MEYER is a professor of Mathematics at North Carolina State University. He received an undergraduate degree in mathematics from the University of Northern Colorado and a Masters and Ph.D. degree in mathematics from Colorado State University. His research interests include matrix and numerical analysis, and applied probability. He has served as Managing Editor for the SIAM Journal on Algebraic and Discrete Methods (now SIMAX), and he is the author of a new text, Matrix Analysis and Applied Linear Algebra.}, number={10}, journal={AMERICAN MATHEMATICAL MONTHLY}, author={Ipsen, ICF and Meyer, CD}, year={1998}, month={Dec}, pages={889–899} } @article{banoczi_chiu_cho_ipsen_1998, title={The lack of influence of the right-hand side on the accuracy of linear system solution}, volume={20}, ISSN={["1064-8275"]}, DOI={10.1137/S106482759630526X}, abstractNote={It is commonly believed that a fortunate right-hand side b can significantly reduce the sensitivity of a system of linear equations Ax=b. 
We show, both theoretically and experimentally, that this is not true when the system is solved (in floating point arithmetic) with Gaussian elimination or the QR factorization: the error bounds essentially do not depend on b, and the error itself seems to depend only weakly on b. Our error bounds are exact (rather than first-order); they are tight; and they are stronger than the bound of Chan and Foulser. We also present computable lower and upper bounds for the relative error. The lower bound gives rise to a stopping criterion for iterative methods that is better than the relative residual. This is because the relative residual can be much larger, and it may be impossible to reduce it to a desired tolerance.}, number={1}, journal={SIAM JOURNAL ON SCIENTIFIC COMPUTING}, author={Banoczi, JM and Chiu, NC and Cho, GE and Ipsen, ICF}, year={1998}, pages={203–227} } @article{banoczi_chiu_cho_ipsen_1998, title={The lack of influence of the right-hand side on the accuracy of linear system solution}, volume={20}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0032131636&partnerID=MN8TOARS}, number={1}, journal={SIAM Journal on Scientific Computing}, author={Banoczi, J.M. and Chiu, N.-C. and Cho, G.E. and Ipsen, I.C.F.}, year={1998}, pages={203–227} } @article{eisenstat_ipsen_1998, title={Three absolute perturbation bounds for matrix eigenvalues imply relative bounds}, volume={20}, ISSN={["0895-4798"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-0032217351&partnerID=MN8TOARS}, DOI={10.1137/S0895479897323282}, abstractNote={We show that three well-known perturbation bounds for matrix eigenvalues imply relative bounds: the Bauer--Fike and Hoffman--Wielandt theorems for diagonalizable matrices, and Weyl's theorem for Hermitian matrices. As a consequence, relative perturbation bounds are not necessarily stronger than absolute bounds, and the conditioning of an eigenvalue in the relative sense is the same as in the absolute sense. 
We also show that eigenvalues of normal matrices are no more sensitive to perturbations than eigenvalues of Hermitian positive-definite matrices. The relative error bounds are invariant under congruence transformations, such as grading and scaling.}, number={1}, journal={SIAM JOURNAL ON MATRIX ANALYSIS AND APPLICATIONS}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Eisenstat, SC and Ipsen, ICF}, year={1998}, month={Sep}, pages={149–158} } @article{ipsen_1997, title={Computing an eigenvector with inverse iteration}, volume={39}, ISSN={["1095-7200"]}, DOI={10.1137/S0036144596300773}, abstractNote={The purpose of this paper is two-fold: to analyze the behavior of inverse iteration for computing a single eigenvector of a complex square matrix and to review Jim Wilkinson's contributions to the development of the method. In the process we derive several new results regarding the convergence of inverse iteration in exact arithmetic. In the case of normal matrices we show that residual norms decrease strictly monotonically. For eighty percent of the starting vectors a single iteration is enough. In the case of non-normal matrices, we show that the iterates converge asymptotically to an invariant subspace. However, the residual norms may not converge. The growth in residual norms from one iteration to the next can exceed the departure of the matrix from normality. We present an example where the residual growth is exponential in the departure of the matrix from normality. We also explain the often significant regress of the residuals after the first iteration: it occurs when the non-normal part of the matrix is large compared to the eigenvalues of smallest magnitude. In this case computing an eigenvector with inverse iteration is exponentially ill conditioned (in exact arithmetic). 
We conclude that the behavior of the residuals in inverse iteration is governed by the departure of the matrix from normality rather than by the conditioning of a Jordan basis or the defectiveness of eigenvalues.}, number={2}, journal={SIAM REVIEW}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, ICF}, year={1997}, month={Jun}, pages={254–291} } @book{cho_ipsen_1997, place={Raleigh, NC}, title={If a matrix has a single eigenvalue, how sensitive is this eigenvalue?}, number={CRSC-TR97-20}, institution={Center for Research in Scientific Computation, Department of Mathematics, North Carolina State University}, author={Cho, G.E. and Ipsen, I.C.F.}, year={1997} } @inbook{ipsen_1996, place={Berlin}, title={A history of inverse iteration}, ISBN={9783110124538}, booktitle={Mathematische Werke. Vol. 2, Linear algebra and analysis : Mathematical works}, publisher={Walter de Gruyter}, author={Ipsen, I.C.F.}, editor={Wielandt, H. and Huppert, B. and Schneider, H.Editors}, year={1996}, pages={464–472} } @article{campbell_ipsen_kelley_meyer_xue_1996, title={Convergence Estimates for Solution of Integral Equations with GMRES}, volume={8}, ISSN={0897-3962}, url={http://dx.doi.org/10.1216/jiea/1181075914}, DOI={10.1216/jiea/1181075914}, abstractNote={In this paper we derive convergence estimates for the iterative solution of nonsymmetric linear systems by GMRES. We work in the context of strongly convergent-collectively compact sequences of approximations to linear compact xed point problems. Our estimates are intended to explain the observations that the performance of GMRES is independent of the discretization if the resolution of the discretization is su ciently good. Our bounds are independent of the right hand side of the equation, re ect the r-superlinear convergence of GMRES in the in nite dimensional setting, and also allow for more than one implementation of the discrete scalar product. 
Our results are motivated by quadrature rule approximation to second-kind Fredholm integral equations.}, number={1}, journal={Journal of Integral Equations and Applications}, publisher={Rocky Mountain Mathematics Consortium}, author={Campbell, S.L. and Ipsen, I.C.F. and Kelley, C.T. and Meyer, C.D. and Xue, Z.Q.}, year={1996}, month={Mar}, pages={19–34} } @article{campbell_ipsen_kelley_meyer_1996, title={GMRES and the minimal polynomial}, volume={36}, ISSN={0006-3835 1572-9125}, url={http://dx.doi.org/10.1007/bf01733786}, DOI={10.1007/bf01733786}, abstractNote={We present a qualitative model for the convergence behaviour of the Generalised Minimal Residual (GMRES) method for solving nonsingular systems of linear equationsAx =b in finite and infinite dimensional spaces. One application of our methods is the solution of discretised infinite dimensional problems, such as integral equations, where the constants in the asymptotic bounds are independent of the mesh size. Our model provides simple, general bounds that explain the convergence of GMRES as follows: If the eigenvalues ofA consist of a single cluster plus outliers then the convergence factor is bounded by the cluster radius, while the asymptotic error constant reflects the non-normality ofA and the distance of the outliers from the cluster. If the eigenvalues ofA consist of several close clusters, then GMRES treats the clusters as a single big cluster, and the convergence factor is the radius of this big cluster. We exhibit matrices for which these bounds are tight. Our bounds also lead to a simpler proof of existing r-superlinear convergence results in Hilbert space.}, number={4}, journal={BIT Numerical Mathematics}, publisher={Springer Science and Business Media LLC}, author={Campbell, S. L. and Ipsen, I. C. F. and Kelley, C. T. and Meyer, C. 
D.}, year={1996}, month={Dec}, pages={664–675} } @inbook{ipsen_1996, place={Berlin}, title={Helmut Wielandt’s contributions to the numerical solution of complex eigenvalue problems}, booktitle={Helmut Wielandt, Mathematische Werke, Mathematical Works, volume 2: Linear Algebra and Analysis}, publisher={Walter de Gruyter}, author={Ipsen, I.C.F.}, editor={Huppert, B. and Schneider, H.Editors}, year={1996}, pages={453–463} } @article{chandrasekaran_ipsen_1995, title={Analysis of a QR Algorithm for Computing Singular Values}, volume={16}, ISSN={0895-4798 1095-7162}, url={http://dx.doi.org/10.1137/s0895479892236532}, DOI={10.1137/s0895479892236532}, abstractNote={We extend the Golub--Kahan algorithm for computing the singular value decomposition of bidiagonal matrices to triangular matrices $R$. Our algorithm avoids the explicit formation of $R^TR$ or $RR^T$. We derive a relation between left and right singular vectors of triangular matrices and use it to prove monotonic convergence of singular values and singular vectors. The convergence rate for singular values equals the square of the convergence rate for singular vectors. The convergence behaviour explains the occurrence of deflation in the interior of the matrix. We analyse the relationship between our algorithm and rank-revealing QR and URV decompositions. As a consequence, we obtain an algorithm for computing the URV decomposition, as well as a divide-and-conquer algorithm that computes singular values of dense matrices and may be beneficial on a parallel architecture. Our perturbation result for the smallest singular values of a triangular matrix is stronger than the traditional results because it guarantees high relative accuracy in the smallest singular values after an off-diagonal block of the matrix has been set to zero.}, number={2}, journal={SIAM Journal on Matrix Analysis and Applications}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Chandrasekaran, S. and Ipsen, I. C. 
F.}, year={1995}, month={Apr}, pages={520–535} } @article{chandrasekaran_ipsen_1995, title={On the Sensitivity of Solution Components in Linear Systems of Equations}, volume={16}, ISSN={0895-4798 1095-7162}, url={http://dx.doi.org/10.1137/s0895479892231255}, DOI={10.1137/s0895479892231255}, abstractNote={Expressions are presented for the errors in individual components of the solution to systems of linear equations and linear least squares problems. No assumptions about the structure or distribution of the perturbations are made. The resulting "componentwise condition numbers" measure the sensitivity of each solution component to perturbations. It is shown that any linear system has at least one solution component whose sensitivity to perturbations is proportional to the condition number of the matrix; but there may exist many components that are much better conditioned. Unless the perturbations are restricted, no norm-based relative error bound can predict the presence of well-conditioned components, so these componentwise condition numbers are essential. For the class of componentwise perturbations, necessary and sufficient conditions are given under which Skeel's condition numbers are informative, and it is shown that these conditions are similar to conditions where componentwise condition numbers are useful. Numerical experiments not only confirm that these circumstances do occur frequently, they also illustrate that for many classes of matrices the ill conditioning of the matrix is due to a few rows of the inverse only. This means that many of the solution components are computed more accurately than current analyses predict.}, number={1}, journal={SIAM Journal on Matrix Analysis and Applications}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Chandrasekaran, S. and Ipsen, I. C. 
F.}, year={1995}, month={Jan}, pages={93–112} } @article{eisenstat_ipsen_1995, title={Relative Perturbation Techniques for Singular Value Problems}, volume={32}, ISSN={0036-1429 1095-7170}, url={http://dx.doi.org/10.1137/0732088}, DOI={10.1137/0732088}, abstractNote={A technique is presented for deriving bounds on the relative change in the singular values of a real matrix (or the eigenvalues of a real symmetric matrix) due to a perturbation, as well as bounds on the angles between the unperturbed and perturbed singular vectors (or eigenvectors). The class of perturbations considered consists of all $\delta B$ for which $B + \delta B = D_L BD_R $ for some nonsingular matrices $D_L $ and $D_R $. This class includes componentwise relative perturbations of a bidiagonal or biacyclic matrix and perturbations that annihilate the off-diagonal block in a block triangular matrix. Many existing relative perturbation and deflation bounds are derived from results for this general class of perturbations. Also some new relative perturbation and deflation results for the singular values and vectors of biacyclic, triangular, and shifted triangular matrices are presented.}, number={6}, journal={SIAM Journal on Numerical Analysis}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Eisenstat, Stanley C. and Ipsen, Ilse C. F.}, year={1995}, month={Dec}, pages={1972–1988} } @article{ipsen_meyer_1995, title={The Angle Between Complementary Subspaces}, volume={102}, ISSN={0002-9890 1930-0972}, url={http://dx.doi.org/10.1080/00029890.1995.12004683}, DOI={10.1080/00029890.1995.12004683}, number={10}, journal={The American Mathematical Monthly}, publisher={Informa UK Limited}, author={Ipsen, Ilse C. F. 
and Meyer, Carl D.}, year={1995}, month={Dec}, pages={904–911} } @article{chandrasekaran_ipsen_1994, title={A divide and conquer algorithm for computing singular values}, volume={74}, number={6}, journal={Zeitschrift für Angewandte Mathematik und Mechanik}, author={Chandrasekaran, S. and Ipsen, I.C.F.}, year={1994}, pages={532–534} } @article{chandrasekaran_ipsen_1994, title={Backward errors for eigenvalue and singular value decompositions}, volume={68}, ISSN={0029-599X 0945-3245}, url={http://dx.doi.org/10.1007/s002110050057}, DOI={10.1007/s002110050057}, number={2}, journal={Numerische Mathematik}, publisher={Springer Science and Business Media LLC}, author={Chandrasekaran, S. and Ipsen, I.C.F.}, year={1994}, month={Jul}, pages={215–223} } @article{chandrasekaran_ipsen_1994, title={On Rank-Revealing Factorisations}, volume={15}, ISSN={0895-4798 1095-7162}, url={http://dx.doi.org/10.1137/s0895479891223781}, DOI={10.1137/s0895479891223781}, abstractNote={The problem of finding a rank-revealing QR (RRQR) factorisation of a matrix $A$ consists of permuting the columns of $A$ such that the resulting QR factorisation contains an upper triangular matrix whose linearly dependent columns are separated from the linearly independent ones. In this paper a systematic treatment of algorithms for determining RRQR factorisations is presented. In particular, the authors start by presenting precise mathematical formulations for the problem of determining a RRQR factorisation, all of them optimisation problems. Then a hierarchy of "greedy" algorithms is derived to solve these optimisation problems, and it is shown that the existing RRQR algorithms correspond to particular greedy algorithms in this hierarchy. Matrices on which the greedy algorithms, and therefore the existing RRQR algorithms, can fail arbitrarily badly are presented. 
Finally, motivated by an insight from the behaviour of the greedy algorithms, the authors present "hybrid" algorithms that solve the optimisation problems almost exactly (up to a factor proportional to the size of the matrix). Applying the hybrid algorithms as a follow-up to the conventional greedy algorithms may prove to be useful in practice.}, number={2}, journal={SIAM Journal on Matrix Analysis and Applications}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Chandrasekaran, Shivkumar and Ipsen, Ilse C. F.}, year={1994}, month={Apr}, pages={592–622} } @inbook{chandrasekaran_ipsen_1994, place={Shanghai}, title={On the singular value decomposition of triangular matrices}, booktitle={Numerical algebra : proceedings of '92 Shanghai International Numerical Algebra and its Applications Conference}, publisher={China Science and Technology Press}, author={Chandrasekaran, S. and Ipsen, I.C.F.}, editor={Er-xiong, JiangEditor}, year={1994}, pages={85–89} } @inproceedings{eisenstat_ipsen_1994, place={Philadelphia}, title={Relative perturbation bounds for eigenspaces and singular vector subspaces}, ISBN={9780898713367}, booktitle={Proceedings of the Fifth SIAM Conference on Applied Linear Algebra}, publisher={SIAM}, author={Eisenstat, S.C. and Ipsen, I.C.F.}, editor={Lewis, J.G.Editor}, year={1994}, pages={62–65} } @article{ipsen_meyer_1994, title={Uniform Stability of Markov Chains}, volume={15}, ISSN={0895-4798 1095-7162}, url={http://dx.doi.org/10.1137/s0895479892237562}, DOI={10.1137/s0895479892237562}, abstractNote={By deriving a new set of tight perturbation bounds, it is shown that all stationary probabilities of a finite irreducible Markov chain react essentially in the same way to perturbations in the transition probabilities. In particular, if at least one stationary probability is insensitive in a relative sense, then all stationary probabilities must be insensitive in an absolute sense. 
New measures of sensitivity are related to more traditional ones, and it is shown that all relevant condition numbers for the Markov chain problem are small multiples of each other. Finally, the implications of these findings to the computation of stationary probabilities by direct methods are discussed, and the results are applied to stability issues in nearly transient chains.}, number={4}, journal={SIAM Journal on Matrix Analysis and Applications}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse C. F. and Meyer, Carl D.}, year={1994}, month={Oct}, pages={1061–1074} } @article{jessup_ipsen_1992, title={Improving the Accuracy of Inverse Iteration}, volume={13}, ISSN={0196-5204 2168-3417}, url={http://dx.doi.org/10.1137/0913031}, DOI={10.1137/0913031}, abstractNote={The EISPACK routine TINVIT is an implementation of inverse iteration for computing eigenvectors of real symmetric tridiagonal matrices. Experiments have shown that the eigenvectors computed with TINVIT are numerically less accurate than those from implementations of Cuppen’s divide-and-conquer method (TREEQL) and of the QL method (TQL2). The loss of accuracy can be attributed to TINVIT’s choice of starting vectors and to its iteration stopping criterion.This paper introduces a new implementation of TINVIT that computes each eigenvector from a different random starting vector and performs an additional iteration after the stopping criterion is satisfied. A statistical analysis and the results of numerical experiments with matrices of order up to 525 are presented to show that the numerical accuracy of this new implementation is competitive with that of the implementations of the divide-and-conquer and QL methods. 
The extension of this implementation to larger order matrices is discussed, albeit in less detail.}, number={2}, journal={SIAM Journal on Scientific and Statistical Computing}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Jessup, Elizabeth R. and Ipsen, Ilse C. F.}, year={1992}, month={Mar}, pages={550–572} } @book{chandrasekaran_ipsen_1991, title={Perturbation Theory for the Solution of Systems of Linear Equations}, url={http://dx.doi.org/10.21236/ada254994}, DOI={10.21236/ada254994}, abstractNote={Abstract : We present expressions for absolute and relative errors in individual components of the solution to systems of linear equations. We consider three kinds of linear systems: non-singular, underdetermined of full row rank, and least squares of full column rank. No assumptions regarding the structure or distribution of the perturbations are required. Our expressions for component- wise relative errors allow the following conclusions: For any linear system there is at least one solution component whose sensitivity to perturbations is proportional to the condition number of the matrix; but - depending on the relation between right-hand side and matrix - there may exist components that are much better conditioned. For a least squares problem, the sensitivity of the components also depends on the right-hand side and may be as high as the square of the condition number. Least squares problems are therefore always more receptive to ill-conditioning than linear systems. In addition, we show that the component-wise relative errors for linear systems are reduced by column scaling only if column scaling manages to reduce the perturbations. Regarding underdetermined linear systems of full column rank, the problem of finding the minimal-norm solution can be formulated so that the same analysis as for least squares problems is applicable here as well. 
Finally, we define component-wise condition numbers that measure the sensitivity of the solution components to perturbations. They have simple geometric interpretations and can be command estimated as efficiently as the conventional condition numbers.}, institution={Defense Technical Information Center}, author={Chandrasekaran, Shivkumar and Ipsen, Ilse}, year={1991}, month={Oct} } @inbook{ipsen_1991, title={Some Remarks on the Generalised Bareiss and Levinson Algorithms}, ISBN={9783642755385 9783642755361}, url={http://dx.doi.org/10.1007/978-3-642-75536-1_10}, DOI={10.1007/978-3-642-75536-1_10}, booktitle={Numerical Linear Algebra, Digital Signal Processing and Parallel Algorithms}, publisher={Springer Berlin Heidelberg}, author={Ipsen, Ilse}, year={1991}, pages={189–214} } @inbook{delosme_ipsen_1990, title={From Bareiss' algorithm to the stable computation of partial correlations}, volume={1}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-85012592772&partnerID=MN8TOARS}, DOI={10.1016/B978-0-444-88621-7.50009-8}, abstractNote={This paper presents a new algorithm for the stable computation of sample partial correlation coefficients. We start by generalizing Bareiss' algorithm for the solution of linear systems of equations with (non-symmetric) Toeplitz coefficient matrix to matrices that are not Toeplitz, and show that it computes their LU and UL factorizations. For symmetric positive-definite matrices B, the normalized version of Bareiss' algorithm is the Hyperbolic Cholesky algorithm, which computes the upper and lower triangular Cholesky factors U and L of B by means of 2×2 hyperbolic rotations. If B is a covariance matrix, certain partial correlation coefficients may be obtained as the geometric means of the multiplier pairs in Bareiss' algorithm or as the hyperbolic tangents of the hyperbolic rotations. 
The partial correlation coefficients for a given data matrix A may suffer from serious loss of numerical accuracy when computed from the covariance matrix B = ATA. While the data flow graph of Bareiss' algorithm suggests a method, due to Cybenko, for computing partial correlation coefficients directly from the columns of A, the graph of Hyperbolic Cholesky suggests an alternative that requires fewer operations and exhibits a higher degree of parallelism. In that method one computes the QR decomposition A = QU of the data matrix and applies orthogonal rotations to transform U to L; the sines of the rotation angles constitute a set of partial correlation coefficients. We extend this approach to determine arbitrary partial correlation coefficients at little extra computational cost.}, number={C}, booktitle={Advances in Parallel Computing}, author={Delosme, J.-M. and Ipsen, I.C.F.}, year={1990}, pages={53–91} } @article{ipsen_jessup_1990, title={Solving the Symmetric Tridiagonal Eigenvalue Problem on the Hypercube}, volume={11}, ISSN={0196-5204 2168-3417}, url={http://dx.doi.org/10.1137/0911013}, DOI={10.1137/0911013}, abstractNote={This paper describes implementations of Cuppen's method, bisection, and multisection for the computation of all eigenvalues and eigenvectors of a real symmetric tridiagonal matrix on a distributed-memory hypercube multiprocessor. Numerical results and timings for Intel's iPSC-1 are presented. Cuppen's method is found to be the numerically most accurate of three methods, while bisection with inverse iteration is observed experimentally to be the fastest method.}, number={2}, journal={SIAM Journal on Scientific and Statistical Computing}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Ipsen, Ilse C. F. 
and Jessup, Elizabeth R.}, year={1990}, month={Mar}, pages={203–229} } @inbook{gropp_ipsen_1989, place={Philadelphia}, title={A Gray code scheme for local uniform mesh refinement on hypercubes}, booktitle={Parallel Processing for Scientific Computing}, publisher={Society for Industrial and Applied Mathematics}, author={Gropp, W.D. and Ipsen, I.C.F.}, year={1989}, pages={202–206} } @article{delosme_ipsen_1989, title={From Bareiss' algorithm to the stable computation of partial correlations}, volume={27}, ISSN={0377-0427}, url={http://dx.doi.org/10.1016/0377-0427(89)90361-0}, DOI={10.1016/0377-0427(89)90361-0}, abstractNote={This paper presents a new algorithm for the stable computation of sample partial correlation coefficients. We start by generalizing Bareiss' algorithm for the solution of linear systems of equations with (non-symmetric) Toeplitz coefficient matrix to matrices that are not Toeplitz, and show that it computes their LU and UL factorizations. For symmetric positive-definite matrices B, the normalized version of Bareiss' algorithm is the Hyperbolic Cholesky algorithm, which computes the upper and lower triangular Cholesky factors U and L of B by means of 2×2 hyperbolic rotations. If B is a convariance matrix, certain partial correlation coefficients may be obtained as the geometric means of the multiplier pairs in Bareiss' algorithm or as the hyperbolic tangents of the hyperbolic rotations. The partial correlation coefficients for a given data matrix A may suffer from serious loss of numerical accuracy when computed from the covariance matrix B = ATA. While the data flow graph of Bareiss' algorithm suggests a method, due to Cybenko, for computing partial correlation coefficients directly from the columns of A, the graph of Hyperbolic Cholesky suggests an alternative that requires fewer operations nd exhibits a higher degree of parallelism. 
In that method one computes the QR decomposition A = QU of the data matrix and applies orthogonal rotations to transform U to L; the sines of the rotation angles constitute a set of partial correlation coefficients. We extend this approach to determine arbitrary partial correlation coefficients at little extra computational cost.}, number={1-2}, journal={Journal of Computational and Applied Mathematics}, publisher={Elsevier BV}, author={Delosme, Jean-Marc and Ipsen, Ilse C.F.}, year={1989}, month={Sep}, pages={53–91} } @inproceedings{delosme_ipsen_1989, place={Philadelphia}, title={Parallel computation of algorithms with uniform dependences}, ISBN={9780898712629}, booktitle={Proceedings of the Fourth SIAM Conference on Parallel Processing for Scientific Computing}, publisher={Society for Industrial and Applied Mathematics}, author={Delosme, J.-M. and Ipsen, I.C.F.}, editor={Dongarra, J. and Messina, P. and Sorensen, D.C. and Voigt, R.G.Editors}, year={1989}, pages={319–326} } @article{gropp_ipsen_1989, title={Recursive mesh refinement on hypercubes}, volume={29}, ISSN={0006-3835 1572-9125}, url={http://dx.doi.org/10.1007/bf01952675}, DOI={10.1007/bf01952675}, number={2}, journal={BIT}, publisher={Springer Science and Business Media LLC}, author={Gropp, W. D. and Ipsen, I. C. F.}, year={1989}, month={Jun}, pages={186–211} } @inproceedings{delosme_ipsen_1988, title={SAGA and CONDENSE: a two-phase approach for the implementation of recurrence equations on multiprocessor architectures}, ISBN={0818608412}, url={http://dx.doi.org/10.1109/hicss.1988.11756}, DOI={10.1109/hicss.1988.11756}, abstractNote={The automation of parallel implementations of algorithms, specifically those arising in scientific and engineering applications, is investigated. Since many of these computations can be formulated as coupled system of recurrence equations, they exhibit a high degree of repetitiveness and regularity. 
These properties are utilized to generate efficient, and in certain cases optimal, schedules and processor assignments on particular multiprocessor configurations. The process of determining a parallel implementation consists of two successive stages, named SAGA and CONDENSE. SAGA generates a minimal-time systolic implementation with a number of processors that is likely to be problem-dependent. If the problem size exceeds the size of the target architecture, CONDENSE groups the processors in the array given by SAGA so that each group is assigned, or condensed, to one processor of the target architecture. The condensation is done so as to minimize the total run time and is guided by the systolic array from SAGA.<>}, booktitle={[1988] Proceedings of the Twenty-First Annual Hawaii International Conference on System Sciences. Volume I: Architecture Track}, publisher={IEEE}, author={Delosme, J.-M. and Ipsen, I.}, year={1988}, pages={126–130} } @inbook{ipsen_1988, title={Systolic Algorithms for the Parallel Solution of Dense Symmetric Positive-Definite Toeplitz Systems}, ISBN={9781468463590 9781468463576}, ISSN={0940-6573}, url={http://dx.doi.org/10.1007/978-1-4684-6357-6_7}, DOI={10.1007/978-1-4684-6357-6_7}, abstractNote={The most popular method for the solution of linear systems of equations with Toeplitz coefficient matrix on a single processor is Levinson’s algorithm, whose intermediate vectors form the Cholesky factor of the inverse of the Toeplitz matrix. However, Levinson’s method is not amenable to efficient parallel implementation. In contrast, use of the Schur algorithm, whose intermediate vectors form the Cholesky factor of the Toeplitz matrix proper, makes it possible to perform the entire solution procedure on one processor array in time linear in the order of the matrix. 
By means of the Levinson recursions we will show that all three phases of the Toeplitz system solution process: factorisation, forward elimination and backsubstitution, can be based on Schur recursions. This increased exploitation of the Toeplitz structure then leads to more efficient parallel implementations on systolic arrays.}, booktitle={The IMA Volumes in Mathematics and Its Applications}, publisher={Springer US}, author={Ipsen, Ilse C. F.}, year={1988}, pages={85–108} } @book{delosme_ipsen_paige_1988, place={New Haven, Connecticut}, title={The Cholesky factorization, Schur complements, correlation coefficients, angles between vectors, and the QR factorization}, number={607}, institution={Department of Computer Science, Yale University}, author={Delosme, J.-M. and Ipsen, I.C.F. and Paige, C.C.}, year={1988} } @inbook{ipsen_jessup_1987, title={A comparison of Cuppen’s method and multisection for the solution of tridiagonal eigenvalue problems on the hypercube}, booktitle={Advances in Computer Methods for Partial Differential Equations VI}, publisher={The Institute for Mathematics and Computer Science}, author={Ipsen, I.C.F. and Jessup, E.R.}, year={1987}, pages={425–430} } @book{delosme_ipsen_1987, place={New Haven, Connecticut}, title={Computing partial correlations from the data matrix}, number={541}, institution={Department of Computer Science, Yale University}, author={Delosme, J.-M. and Ipsen, I.C.F.}, year={1987} } @inbook{delosme_ipsen_1987, title={Efficient systolic arrays for the solution of Toeplitz systems: An illustration of a methodology for the construction of systolic architectures in VLSI}, booktitle={Systolic Arrays}, publisher={Adam Hilger}, author={Delosme, J.-M. 
and Ipsen, I.C.F.}, year={1987}, pages={37–46} } @book{hudak_delosme_ipsen_1987, place={New Haven, Connecticut}, title={ParLance: A para-functional programming environment for parallel and distributed computing}, number={524}, institution={Department of Computer Science, Yale University}, author={Hudak, P. and Delosme, J.-M. and Ipsen, I.C.F.}, year={1987} } @article{barlow_ipsen_1987, title={Scaled Givens Rotations for the Solution of Linear Least Squares Problems on Systolic Arrays}, volume={8}, ISSN={0196-5204 2168-3417}, url={http://dx.doi.org/10.1137/0908062}, DOI={10.1137/0908062}, abstractNote={A class of Scaled Givens rotations, to be applied to the solution of weighted multiple linear least squares problems on systolic arrays, is discussed.In comparison to Fast Givens transformations, properly scaled rotations for weighted problems exhibit the same stability, require fewer divisions, and avoid square roots as well as pivoting. Consequently, with a suitable elimination strategy, the algorithm is amenable to parallel linear-time implementation on systolic arrays in VLSI. Round off error and stability analyses are presented, indicating slightly less accumulation of round off error than known sequential methods.}, number={5}, journal={SIAM Journal on Scientific and Statistical Computing}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Barlow, Jesse L. and Ipsen, Ilse F. C.}, year={1987}, month={Sep}, pages={716–733} } @inbook{ipsen_jessup_1987, title={Two methods for solving the symmetric tridiagonal eigenvalue problem on the hypercube}, booktitle={Hypercube Multiprocessors}, publisher={Society for Industrial and Applied Mathematics}, author={Ipsen, I.C.F. 
and Jessup, E.R.}, year={1987}, pages={627–638} } @article{ipsen_saad_schultz_1986, title={Complexity of dense-linear-system solution on a multiprocessor ring}, volume={77}, ISSN={0024-3795}, url={http://dx.doi.org/10.1016/0024-3795(86)90169-2}, DOI={10.1016/0024-3795(86)90169-2}, abstractNote={Different algorithms, based on Gaussian elimination, for the solution of dense linear systems of equations are discussed for a multiprocessor ring. The number of processors is assumed not to exceed the problem size. A fairly general model for data transfer is proposed, and the algorithms are analyzed with respect to their requirements of arithmetic as well as communication times.}, number={C}, journal={Linear Algebra and its Applications}, publisher={Elsevier BV}, author={Ipsen, Ilse C.F. and Saad, Youcef and Schultz, Martin H.}, year={1986}, month={May}, pages={205–239} } @inproceedings{delosme_ipsen_1986, title={Design Methodology For Systolic Arrays}, volume={696}, url={http://dx.doi.org/10.1117/12.936899}, DOI={10.1117/12.936899}, abstractNote={Many important algorithms in signal and image processing, speech and pattern recognition or matrix computations consist of coupled systems of recurrence equations. Systolic arrays are regular networks of tightly coupled simple processors with limited storage that provide cost-effective high-throughput implementations of many such algorithms. While there are some mathematical techniques for finding efficient systolic implementations for uniform recurrence equations, there is no general theory for more general coupled systems of affine recurrence equations. The first elements of such a theory are presented in this paper.}, booktitle={Advanced Algorithms and Architectures for Signal Processing I}, publisher={SPIE}, author={Delosme, Jean-Marc and Ipsen, Ilse C. 
F.}, editor={Speiser, Jeffrey M.Editor}, year={1986}, month={Apr}, pages={245–259} } @article{delosme_ipsen_1986, title={Parallel solution of symmetric positive definite systems with hyperbolic rotations}, volume={77}, ISSN={0024-3795}, url={http://dx.doi.org/10.1016/0024-3795(86)90163-1}, DOI={10.1016/0024-3795(86)90163-1}, abstractNote={An algorithm based on hyperbolic rotations is presented for the solution of linear systems of equations Ax = b, with symmetric positive definite coefficient matrix A. Forward elimination and backsubstitution are replaced by matrix-vector multiplications, rendering the method amenable to implementation on a variety of parallel and vector machines. This method can be simplified and formulated without square roots if A is also Toeplitz; a systolic (VLSI) architecture implementing the resulting recurrence equations is more efficient than previously proposed pipelined Toeplitz system solvers. The hardware count becomes independent of the matrix size if its inverse is banded.}, number={C}, journal={Linear Algebra and its Applications}, publisher={Elsevier BV}, author={Delosme, Jean-Marc and Ipsen, Ilse C.F.}, year={1986}, month={May}, pages={75–111} } @inbook{delosme_ipsen_1986, place={Amsterdam}, title={Systolic array synthesis: Computability and time cones}, booktitle={Parallel Algorithms and Architectures}, publisher={North-Holland Publishing}, author={Delosme, J.-M. and Ipsen, I.C.F.}, year={1986}, pages={295–312} } @book{delosme_ipsen_masse_1986, place={New Haven, Connecticut}, title={Systolic implementation of a Toeplitz system solver}, number={8607}, institution={Department of Electrical Engineering, Yale University}, author={Delosme, J.-M. and Ipsen, I.C.F. 
and Masse, J.-R.}, year={1986} } @inbook{ipsen_saad_1986, title={The Impact of Parallel Architectures on The Solution of Eigenvalue Problems}, volume={127}, ISBN={9780444700742}, ISSN={0304-0208}, url={http://dx.doi.org/10.1016/s0304-0208(08)72638-0}, DOI={10.1016/s0304-0208(08)72638-0}, abstractNote={This paper presents a short survey of recent work on parallel implementations of Numerical Linear Algebra algorithms with emphasis on those relating to the solution of the symmetric eigenvalue problem on loosely coupled multiprocessor architectures. The vital operations in the formulation of most eigenvalue algorithms are matrix vector multiplication, matrix transposition, and linear system solution. Their implementations on several representative multiprocessor systems will be described, as well as parallel implementations of the following classes of eigenvalue methods : QR, bisection, divide-and-conquer, and Lanczos algorithm.}, number={C}, booktitle={Large Scale Eigenvalue Problems, Proceedings of the IBM Europe Institute Workshop on Large Scale Eigenvalue Problems}, publisher={Elsevier}, author={Ipsen, Ilse C.F. and Saad, Youcef}, year={1986}, pages={37–49} } @inproceedings{delosme_ipsen_1985, title={An illustration of a methodology for the construction of efficient systolic architectures in VLSI}, booktitle={Proceedings of the Second International Symposium on VLSI Technology, Systems and Applications}, author={Delosme, J.-M. and Ipsen, I.C.F.}, year={1985}, pages={268–273} } @book{bhatt_ipsen_1985, place={New Haven, Connecticut}, title={How to embed trees in hypercubes}, number={443}, institution={Department of Computer Science, Yale University}, author={Bhatt, S.N. 
and Ipsen, I.C.F.}, year={1985} } @book{ipsen_1984, place={New Haven, Connecticut}, title={A parallel QR method using fast Givens’ rotations}, number={299}, institution={Department of Computer Science, Yale University}, author={Ipsen, I.C.F.}, year={1984} } @inproceedings{ipsen_1984a, title={Singular Value Decomposition With Systolic Arrays}, url={http://dx.doi.org/10.1117/12.944004}, DOI={10.1117/12.944004}, abstractNote={Systolic arrays for determining the Singular Value Decomposition of a mxn, m > n, matrix A of bandwidth w are presented. After A has been reduced to bidiagonal form B by means of Givens plane rotations, the singular values of B are computed by the Golub-Reinsch iteration. The products of plane rotations form the matrices of left and right singular vectors. Assuming each processor can compute or apply a plane rotation, O(wn) processors accomplish the reduction to bidiagonal form in O(np) steps, where p is the number of superdiagonals. A constant number of processors can then determine each singular value in about 6n steps. The singular vectors are computed by rerouting the rotations through the arrays used for the reduction to bidiagonal form, or else "along the way" by employing another rectangular array of O(wm) processors.}, booktitle={Real-Time Signal Processing VII}, publisher={SPIE}, author={Ipsen, Ilse}, editor={Bromley, Keith}, year={1984}, month={Nov} } @article{heller_ipsen_1983, title={Systolic Networks for Orthogonal Decompositions}, volume={4}, ISSN={0196-5204 2168-3417}, url={http://dx.doi.org/10.1137/0904020}, DOI={10.1137/0904020}, abstractNote={An orthogonally connected systolic array, consisting of a few types of simple processors, is constructed to perform the $QR$ decomposition of a matrix. Application is made to solution of linear systems and linear least squares problems as well as $QL$ and $LQ$ factorizations.
For matrices A of bandwidth w the decomposition network requires less than $w^2 $ processors, independent of the order n of A. In terms of the operation time of the slowest processor, computation time varies between $2n$ and $4n$ subject to the number of codiagonals.}, number={2}, journal={SIAM Journal on Scientific and Statistical Computing}, publisher={Society for Industrial & Applied Mathematics (SIAM)}, author={Heller, Don E. and Ipsen, Ilse C. F.}, year={1983}, month={Jun}, pages={261–269} } @inproceedings{heller_ipsen_1982, place={Norwood, MA}, title={Systolic networks for orthogonal equivalence transformations and their applications}, booktitle={Proceedings of the Conference on Advanced Research in VLSI}, publisher={Artech House, Inc}, author={Heller, D.E. and Ipsen, I.C.F.}, editor={Penfield, P.Editor}, year={1982}, pages={113–122} }