@article{cao_goenka_wong_rajwade_baron_2023, title={Group Testing With Side Information via Generalized Approximate Message Passing}, volume={71}, ISSN={["1941-0476"]}, url={https://doi.org/10.1109/TSP.2023.3287671}, DOI={10.1109/TSP.2023.3287671}, abstractNote={Group testing can help maintain a widespread testing program using fewer resources amid a pandemic. In a group testing setup, we are given $n$ samples, one per individual. Each individual is either infected or uninfected. These samples are arranged into $m < n$ pooled samples, where each pool is obtained by mixing a subset of the $n$ individual samples. Infected individuals are then identified using a group testing algorithm. In this article, we incorporate side information (SI) collected from contact tracing (CT) into nonadaptive/single-stage group testing algorithms. We generate different types of CT SI data by incorporating different possible characteristics of the spread of disease. These data are fed into a group testing framework based on generalized approximate message passing (GAMP). Numerical results show that our GAMP-based algorithms provide improved accuracy.}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Cao, Shu-Jie and Goenka, Ritesh and Wong, Chau-Wai and Rajwade, Ajit and Baron, Dror}, year={2023}, pages={2366–2375} } @article{liu_rush_baron_2023, title={Rigorous State Evolution Analysis for Approximate Message Passing With Side Information}, volume={69}, ISSN={["1557-9654"]}, url={https://doi.org/10.1109/TIT.2022.3220046}, DOI={10.1109/TIT.2022.3220046}, abstractNote={A common goal in many research areas is to reconstruct an unknown signal $\mathbf {x}$ from noisy linear measurements. Approximate message passing (AMP) is a class of low-complexity algorithms that can be used for efficiently solving such high-dimensional regression tasks. Often, it is the case that side information (SI) is available during reconstruction, for example in online learning applications. For this reason, a novel algorithmic framework that incorporates SI into AMP, referred to as approximate message passing with side information (AMP-SI), has been recently introduced. In this work, we provide rigorous performance guarantees for AMP-SI when there are statistical dependencies between the signal and SI pairs and the entries of the measurement matrix are independent and identically distributed (i.i.d.) Gaussian. We also allow for statistical dependencies within the elements of the signal itself, by considering a flexible AMP-SI framework incorporating both separable and non-separable denoisers. The AMP-SI performance is shown to be provably tracked by a scalar iteration referred to as state evolution (SE). Moreover, we provide numerical examples that demonstrate empirically that the SE can predict the AMP-SI mean square error accurately.}, number={6}, journal={IEEE TRANSACTIONS ON INFORMATION THEORY}, author={Liu, Hangjin and Rush, Cynthia and Baron, Dror}, year={2023}, month={Jun}, pages={3989–4013} } @article{kashyap_ravichandiran_wang_baron_wong_wu_franzon_2023, title={Thermal Estimation for 3D-ICs through Generative Networks}, ISSN={["2164-0157"]}, DOI={10.1109/3DIC57175.2023.10154977}, abstractNote={Thermal limitations play a significant role in modern integrated chips (ICs) design and performance. 3D integrated chip (3DIC) makes the thermal problem even worse due to a high density of transistors and heat dissipation bottlenecks within the stack-up. 
These issues exacerbate the need for quick thermal solutions throughout the design flow. This paper presents a generative approach for modeling the power to heat dissipation for a 3DIC. This approach focuses on a single layer in a stack and shows that, given the power map, the model can generate the resultant heat for the bulk. It shows two approaches, one straightforward approach where the model only uses the power map and the other where it learns the additional parameters through random vectors. The first approach recovers the temperature maps to within 1.2 °C or a root-mean-squared error (RMSE) of 0.31 over the images with pixel values ranging from -1 to 1. The second approach performs better, with the RMSE decreasing to 0.082 in a 0 to 1 range. For any result, the model inference takes less than 100 milliseconds for any given power map. These results show that the generative approach has speed advantages over traditional solvers while enabling results with reasonable accuracy for 3DIC, opening the door for thermally aware floorplanning.}, journal={2023 IEEE INTERNATIONAL 3D SYSTEMS INTEGRATION CONFERENCE, 3DIC}, author={Kashyap, Priyank and Ravichandiran, Prasanth P. and Wang, Lee and Baron, Dror and Wong, Chau-Wai and Wu, Tianfu and Franzon, Paul D.}, year={2023} } @article{abdelkhalek_baron_wong_2022, title={Mismatched Estimation in the Distance Geometry Problem}, ISSN={["1058-6393"]}, DOI={10.1109/IEEECONF56349.2022.10051876}, abstractNote={We investigate mismatched estimation in the context of the distance geometry problem (DGP). In the DGP, for a set of points, we are given noisy measurements of pairwise distances between the points, and our objective is to determine the geometric locations of the points. A common approach to deal with noisy measurements of pairwise distances is to compute least-squares estimates of the locations of the points. However, these least-squares estimates are likely to be suboptimal, because they do not necessarily maximize the correct likelihood function. In this paper, we argue that more accurate estimates can be obtained when an estimation procedure that uses the correct likelihood function of noisy measurements is performed. Our numerical results demonstrate that least-squares estimates can be suboptimal by several dB.}, journal={2022 56TH ASILOMAR CONFERENCE ON SIGNALS, SYSTEMS, AND COMPUTERS}, author={Abdelkhalek, Mahmoud and Baron, Dror and Wong, Chau-Wai}, year={2022}, pages={1031–1035} } @article{kashyap_choi_dey_baron_wong_wu_cheng_franzon_2022, title={Modeling of Adaptive Receiver Performance Using Generative Adversarial Networks}, ISSN={["2377-5726"]}, url={http://dx.doi.org/10.1109/ectc51906.2022.00307}, DOI={10.1109/ECTC51906.2022.00307}, abstractNote={As the development of IBIS Algorithmic Modeling Interface (IBIS-AMI) models gets complex and requires time-consuming simulations, a data-driven and domain-independent approach can have tremendous value. This paper presents a data-driven approach to modeling a high-speed serializer/deserializer (SerDes) receiver through generative adversarial networks (GANs). In this work, the modeling considers multiple channels, random bitstreams, and varying decision feedback equalizer (DFE) tap values to predict an accurate bit error rate (BER) contour plot. We employ a discriminator structure that improves the training to generate a contour plot that makes it difficult to distinguish the ground truth.
The generated plots’ bathtub curves strongly correlate to the ground truth bathtub curves and have a root-mean-squared error (RMSE) of 0.014, indicating a good fit.}, journal={IEEE 72ND ELECTRONIC COMPONENTS AND TECHNOLOGY CONFERENCE (ECTC 2022)}, publisher={IEEE}, author={Kashyap, Priyank and Choi, Yongjin and Dey, Sumon and Baron, Dror and Wong, Chau-Wai and Wu, Tianfu and Cheng, Chris and Franzon, Paul D.}, year={2022}, pages={1958–1963} } @article{kashyap_gajjar_choi_wong_baron_wu_cheng_franzon_2022, title={RxGAN: Modeling High-Speed Receiver through Generative Adversarial Networks}, url={http://dx.doi.org/10.1145/3551901.3556480}, DOI={10.1145/3551901.3556480}, abstractNote={Creating models for modern high-speed receivers using circuit-level simulations is costly, as it requires computationally expensive simulations and upwards of months to finalize a model. Added to this is that many models do not necessarily agree with the final hardware they are supposed to emulate. Further, these models are complex due to the presence of various filters, such as a decision feedback equalizer (DFE) and continuous-time linear equalizer (CTLE), which enable the correct operation of the receiver. Other data-driven approaches tackle receiver modeling through multiple models to account for as many configurations as possible. This work proposes a data-driven approach using generative adversarial training to model a real-world receiver with varying DFE and CTLE configurations while handling different channel conditions and bitstreams. The approach is highly accurate as the eye height and width are within 1.59% and 1.12% of the ground truth. The horizontal and vertical bathtub curves match the ground truth and correlate to the ground truth bathtub curves.}, journal={MLCAD '22: PROCEEDINGS OF THE 2022 ACM/IEEE 4TH WORKSHOP ON MACHINE LEARNING FOR CAD (MLCAD)}, publisher={ACM}, author={Kashyap, Priyank and Gajjar, Archit and Choi, Yongjin and Wong, Chau-Wai and Baron, Dror and Wu, Tianfu and Cheng, Chris and Franzon, Paul}, year={2022}, pages={167–172} } @article{goenka_cao_wong_rajwade_baron_2021, title={CONTACT TRACING ENHANCES THE EFFICIENCY OF COVID-19 GROUP TESTING}, DOI={10.1109/ICASSP39728.2021.9414034}, abstractNote={Group testing can save testing resources in the context of the ongoing COVID-19 pandemic. In group testing, we are given n samples, one per individual, and arrange them into m < n pooled samples, where each pool is obtained by mixing a subset of the n individual samples. Infected individuals are then identified using a group testing algorithm. In this paper, we use side information (SI) collected from contact tracing (CT) within nonadaptive/single-stage group testing algorithms. We generate data by incorporating CT SI and characteristics of disease spread between individuals. These data are fed into two signal and measurement models for group testing, where numerical results show that our algorithms provide improved sensitivity and specificity. While Nikolopoulos et al. 
utilized family structure to improve nonadaptive group testing, ours is the first work to explore and demonstrate how CT SI can further improve group testing performance.}, journal={2021 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING (ICASSP 2021)}, author={Goenka, Ritesh and Cao, Shu-Jie and Wong, Chau-Wai and Rajwade, Ajit and Baron, Dror}, year={2021}, pages={8168–8172} } @article{kashyap_pitts_baron_wong_wu_franzon_2021, title={High Speed Receiver Modeling Using Generative Adversarial Networks}, ISSN={["2165-4107"]}, DOI={10.1109/EPEPS51341.2021.9609124}, abstractNote={This paper presents a generative approach to modeling a high-speed receiver with a time series input. The model is not built with domain knowledge but learned from a wide range of channel conditions and input bitstreams to generate an eye diagram. The generated eye diagrams are similar to the simulated eye diagrams for the same scenario. We also developed a neural network model to evaluate the generated eye diagram's relevant characteristics, such as eye height and width. The generated eye diagrams are within 7% and 3% error to the ground-truth in eye height and eye width, respectively, based on our evaluation neural network.}, journal={IEEE 30TH CONFERENCE ON ELECTRICAL PERFORMANCE OF ELECTRONIC PACKAGING AND SYSTEMS (EPEPS 2021)}, author={Kashyap, Priyank and Pitts, W. Shepherd and Baron, Dror and Wong, Chau-Wai and Wu, Tianfu and Franzon, Paul D.}, year={2021} } @article{ma_kang_silversteint_baron_2021, title={Local Convergence of an AMP Variant to the LASSO Solution in Finite Dimensions}, DOI={10.1109/ISIT45174.2021.9518079}, abstractNote={A common sparse linear regression formulation is $\ell_{1}$ regularized least squares, which is also known as least absolute shrinkage and selection operator (LASSO). Approximate message passing (AMP) has been proved to asymptotically achieve the LASSO solution when the regression matrix has independent and identically distributed (i.i.d.) Gaussian entries in the sense that the averaged per-coordinate $\ell_{2}$ distance between the AMP iterates and LASSO solution vanishes as the signal dimension goes to infinity before the iteration number. However, in finite dimensional settings, characterization of AMP iterates in the limit of large iteration number has not been established. In this work, we propose an AMP variant by including a parameter that depends on the largest singular value of the regression matrix. The proposed algorithm can also be considered as a primal dual hybrid gradient algorithm with adaptive stepsizes. We show that whenever the AMP variant converges, it converges to the LASSO solution for arbitrary finite dimensional regression matrices. Moreover, we show that our AMP variant is locally stable around the LASSO solution under the condition that the LASSO solution is unique and that the regression matrix is drawn from a continuous distribution. Our local stability result implies that when the regression matrix is large and has i.i.d. random entries, the original AMP, which is a special case of the proposed AMP variant, is locally stable around the LASSO solution.}, journal={2021 IEEE INTERNATIONAL SYMPOSIUM ON INFORMATION THEORY (ISIT)}, author={Ma, Yanting and Kang, Min and Silverstein, Jack W.
and Baron, Dror}, year={2021}, pages={2256–2261} } @article{ma_zhou_rush_baron_needell_2019, title={An Approximate Message Passing Framework for Side Information}, volume={67}, ISSN={["1941-0476"]}, url={https://doi.org/10.1109/TSP.2019.2899286}, DOI={10.1109/TSP.2019.2899286}, abstractNote={Approximate message passing (AMP) methods have gained recent traction in sparse signal recovery. Additional information about the signal, or side information (SI), is commonly available and can aid in efficient signal recovery. This paper presents an AMP-based framework that exploits SI and can be readily implemented in various settings for which the SI results in separable distributions. To illustrate the simplicity and applicability of our approach, this framework is applied to a Bernoulli–Gaussian (BG) model and a time-varying birth-death-drift (BDD) signal model, motivated by applications in channel estimation. We develop a suite of algorithms, called AMP-SI, and derive denoisers for the BDD and BG models. Numerical evidence demonstrating the advantages of our approach are presented alongside empirical evidence of the accuracy of a proposed state evolution.}, number={7}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Ma, Anna and Zhou, You and Rush, Cynthia and Baron, Dror and Needell, Deanna}, year={2019}, month={Apr}, pages={1875–1888} } @article{ma_rush_baron_2019, title={Analysis of Approximate Message Passing With Non-Separable Denoisers and Markov Random Field Priors}, volume={65}, ISSN={["1557-9654"]}, url={https://doi.org/10.1109/TIT.2019.2934152}, DOI={10.1109/TIT.2019.2934152}, abstractNote={Approximate message passing (AMP) is a class of low-complexity, scalable algorithms for solving high-dimensional linear regression tasks where one wishes to recover an unknown signal from noisy, linear measurements. AMP is an iterative algorithm that performs estimation by updating an estimate of the unknown signal at each iteration and the performance of AMP (quantified, for example, by the mean squared error of its estimates) depends on the choice of a “denoiser” function that is used to produce these signal estimates at each iteration. An attractive feature of AMP is that its performance can be tracked by a scalar recursion referred to as state evolution. Previous theoretical analysis of the accuracy of the state evolution predictions has been limited to the use of only separable denoisers or block-separable denoisers, a class of denoisers that underperform when sophisticated dependencies exist between signal entries. Since signals with entrywise dependencies are common in image/video-processing applications, in this work we study the high-dimensional linear regression task when the dependence structure of the input signal is modeled by a Markov random field prior distribution. We provide a rigorous analysis of the performance of AMP, demonstrating the accuracy of the state evolution predictions, when a class of non-separable sliding-window denoisers is applied. 
Moreover, we provide numerical examples where AMP with sliding-window denoisers can successfully capture local dependencies in images.}, number={11}, journal={IEEE TRANSACTIONS ON INFORMATION THEORY}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Ma, Yanting and Rush, Cynthia and Baron, Dror}, year={2019}, month={Nov}, pages={7367–7389} } @article{zhu_baron_2018, title={Performance Limits With Additive Error Metrics in Noisy Multimeasurement Vector Problems}, volume={66}, ISSN={["1941-0476"]}, DOI={10.1109/TSP.2018.2866844}, abstractNote={Real-world applications such as magnetic resonance imaging with multiple coils, multiuser communication, and diffuse optical tomography often assume a linear model, where several sparse signals sharing common sparse supports are acquired by several measurement matrices and then contaminated by noise. Multimeasurement vector (MMV) problems consider the estimation or reconstruction of such signals. In different applications, the estimation error that we want to minimize could be the mean squared error or other metrics, such as the mean absolute error and the support set error. Seeing that minimizing different error metrics is useful in MMV problems, we study information-theoretic performance limits for MMV signal estimation with arbitrary additive error metrics. We also propose a message passing algorithmic framework that achieves the optimal performance, and rigorously prove the optimality of our algorithm for a special case. We further conjecture the optimality of our algorithm for some general cases and back it up through numerical examples. As an application of our MMV algorithm, we propose a novel setup for active user detection in multiuser communication and demonstrate the promise of our proposed setup.}, number={20}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Zhu, Junan and Baron, Dror}, year={2018}, month={Oct}, pages={5338–5348} } @inproceedings{zhu_pilgrim_baron_2017, title={An overview of multi-processor approximate message passing}, ISBN={9781509047802}, url={http://dx.doi.org/10.1109/ciss.2017.7926166}, DOI={10.1109/ciss.2017.7926166}, abstractNote={Approximate message passing (AMP) is an algorithmic framework for solving linear inverse problems from noisy measurements, with exciting applications such as reconstructing images, audio, hyperspectral images, and various other signals, including those acquired in compressive signal acquisition systems. The growing prevalence of big data systems has increased interest in large-scale problems, which may involve huge measurement matrices that are unsuitable for conventional computing systems. To address the challenge of large-scale processing, multi-processor (MP) versions of AMP have been developed. We provide an overview of two such MP-AMP variants. In row-MP-AMP, each computing node stores a subset of the rows of the matrix and processes corresponding measurements. In column-MP-AMP, each node stores a subset of columns, and is solely responsible for reconstructing a portion of the signal. We will discuss pros and cons of both approaches, summarize recent research results for each, and explain when each one may be a viable approach.
Aspects that are highlighted include some recent results on state evolution for both MP-AMP algorithms, and the use of data compression to reduce communication in the MP network.}, booktitle={2017 51st Annual Conference on Information Sciences and Systems (CISS)}, publisher={IEEE}, author={Zhu, Junan and Pilgrim, Ryan and Baron, Dror}, year={2017}, month={Mar} } @inproceedings{ma_rush_baron_2017, title={Analysis of approximate message passing with a class of non-separable denoisers}, DOI={10.1109/isit.2017.8006524}, abstractNote={Approximate message passing (AMP) is a class of efficient algorithms for solving high-dimensional linear regression tasks where one wishes to recover an unknown signal $\beta_0$ from noisy, linear measurements $y = A\beta_0 + w$. When applying a separable denoiser at each iteration, the performance of AMP (for example, the mean squared error of its estimates) can be accurately tracked by a simple, scalar iteration referred to as state evolution. Although separable denoisers are sufficient if the unknown signal has independent and identically distributed entries, in many real-world applications, like image or audio signal reconstruction, the unknown signal contains dependencies between entries. In these cases, a coordinate-wise independence structure is not a good approximation to the true prior of the unknown signal. In this paper we assume the unknown signal has dependent entries, and using a class of non-separable sliding-window denoisers, we prove that a new form of state evolution still accurately predicts AMP performance. This is an early step in understanding the role of non-separable denoisers within AMP, and will lead to a characterization of more general denoisers in problems including compressive image reconstruction.}, booktitle={2017 ieee international symposium on information theory (isit)}, author={Ma, Y. T. and Rush, C. and Baron, Dror}, year={2017}, pages={231–235} } @inproceedings{baron_ma_needell_rush_woolf_2017, title={Conditional approximate message passing with side information}, ISBN={9781538618233}, url={http://dx.doi.org/10.1109/acssc.2017.8335374}, DOI={10.1109/acssc.2017.8335374}, abstractNote={In information theory, side information (SI) is often used to increase the efficiency of communication systems. This work lays the framework for a class of Bayes-optimal signal recovery algorithms referred to as conditional approximate message passing (CAMP) that make use of available SI. CAMP involves a linear inverse problem, where noisy, linear measurements acquire an unknown input vector using a measurement matrix with independent and identically distributed entries, and the SI vector obeys a symbol-wise dependence with the input. Despite having a simple and straightforward derivation, our CAMP algorithm obtains lower mean squared error than other signal recovery algorithms that have been proposed to incorporate SI.
The good performance of CAMP is due to its Bayes-optimality properties, which are not present in previous approaches to SI-aided signal recovery.}, booktitle={2017 51st Asilomar Conference on Signals, Systems, and Computers}, publisher={IEEE}, author={Baron, Dror and Ma, Anna and Needell, Deanna and Rush, Cynthia and Woolf, Tina}, year={2017}, month={Oct} } @inproceedings{pilgrim_zhu_baron_bajwa_2017, title={Generalized geometric programming for rate allocation in consensus}, ISBN={9781538632666}, url={http://dx.doi.org/10.1109/allerton.2017.8262762}, DOI={10.1109/allerton.2017.8262762}, abstractNote={Distributed averaging, or distributed average consensus, is a common method for computing the sample mean of the data dispersed among the nodes of a network in a decentralized manner. By iteratively exchanging messages with neighbors, the nodes of the network can converge to an agreement on the sample mean of their initial states. In real-world scenarios, these messages are subject to bandwidth and power constraints, which motivates the design of a lossy compression strategy. Few prior works consider the rate allocation problem from the perspective of constrained optimization, which provides a principled method for the design of lossy compression schemes, allows for the relaxation of certain assumptions, and offers performance guarantees. We show for Gaussian-distributed initial states with entropy-coded scalar quantization and vector quantization that the coding rates for distributed averaging can be optimized through generalized geometric programming. In the absence of side information from past states, this approach finds a rate allocation over nodes and iterations that minimizes the aggregate coding rate required to achieve a target mean square error within a finite run time. Our rate allocation is compared to some of the prior art through numerical simulations. The results motivate the incorporation of side-information through differential or predictive coding to improve rate-distortion performance.}, booktitle={2017 55th Annual Allerton Conference on Communication, Control, and Computing (Allerton)}, publisher={IEEE}, author={Pilgrim, Ryan and Zhu, Junan and Baron, Dror and Bajwa, Waheed U.}, year={2017}, month={Oct} } @inproceedings{ma_lu_baron_2017, title={Multiprocessor approximate message passing with column-wise partitioning}, ISBN={9781509041176}, url={http://dx.doi.org/10.1109/icassp.2017.7953061}, DOI={10.1109/icassp.2017.7953061}, abstractNote={Solving a large-scale regularized linear inverse problem using multiple processors is important in various real-world applications due to the limitations of individual processors and constraints on data sharing policies. This paper focuses on the setting where the matrix is partitioned column-wise. We extend the algorithmic framework and the theoretical analysis of approximate message passing (AMP), an iterative algorithm for solving linear inverse problems, whose asymptotic dynamics are characterized by state evolution (SE). In particular, we show that column-wise multiprocessor AMP (C-MP-AMP) obeys an SE under the same assumptions when the SE for AMP holds. The SE results imply that (i) the SE of C-MP-AMP converges to a state that is no worse than that of AMP and (ii) the asymptotic dynamics of C-MP-AMP and AMP can be identical.
Moreover, for a setting that is not covered by SE, numerical results show that damping can improve the convergence performance of C-MP-AMP.}, booktitle={2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, publisher={IEEE}, author={Ma, Yanting and Lu, Yue M. and Baron, Dror}, year={2017}, month={Mar} } @article{zhu_baron_krzakala_2017, title={Performance Limits for Noisy Multimeasurement Vector Problems}, volume={65}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2016.2646663}, abstractNote={Compressed sensing (CS) demonstrates that sparse signals can be estimated from underdetermined linear systems. Distributed CS (DCS) further reduces the number of measurements by considering joint sparsity within signal ensembles. DCS with jointly sparse signals has applications in multisensor acoustic sensing, magnetic resonance imaging with multiple coils, remote sensing, and array signal processing. Multimeasurement vector (MMV) problems consider the estimation of jointly sparse signals under the DCS framework. Two related MMV settings are studied. In the first setting, each signal vector is measured by a different independent and identically distributed (i.i.d.) measurement matrix, while in the second setting, all signal vectors are measured by the same i.i.d. matrix. Replica analysis is performed for these two MMV settings, and the minimum mean squared error (MMSE), which turns out to be identical for both settings, is obtained as a function of the noise variance and number of measurements. To showcase the application of MMV models, the MMSE's of complex CS problems with both real and complex measurement matrices are also analyzed. Multiple performance regions for MMV are identified where the MMSE behaves differently as a function of the noise variance and the number of measurements. Belief propagation (BP) is a CS signal estimation framework that often achieves the MMSE asymptotically. A phase transition for BP is identified. This phase transition, verified by numerical results, separates the regions where BP achieves the MMSE and where it is suboptimal. Numerical results also illustrate that more signal vectors in the jointly sparse signal ensemble lead to a better phase transition.}, number={9}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Zhu, Junan and Baron, Dror and Krzakala, Florent}, year={2017}, month={May}, pages={2444–2454} } @article{ma_zhu_baron_2016, title={Approximate Message Passing Algorithm With Universal Denoising and Gaussian Mixture Learning}, volume={64}, ISSN={1053-587X 1941-0476}, url={http://dx.doi.org/10.1109/tsp.2016.2599484}, DOI={10.1109/tsp.2016.2599484}, abstractNote={We study compressed sensing (CS) signal reconstruction problems where an input signal is measured via matrix multiplication under additive white Gaussian noise. Our signals are assumed to be stationary and ergodic, but the input statistics are unknown; the goal is to provide reconstruction algorithms that are universal to the input statistics. We present a novel algorithmic framework that combines: 1) the approximate message passing CS reconstruction framework, which solves the matrix channel recovery problem by iterative scalar channel denoising; 2) a universal denoising scheme based on context quantization, which partitions the stationary ergodic signal denoising into independent and identically distributed (i.i.d.) subsequence denoising; and 3) a density estimation approach that approximates the probability distribution of an i.i.d. 
sequence by fitting a Gaussian mixture (GM) model. In addition to the algorithmic framework, we provide three contributions: 1) numerical results showing that state evolution holds for nonseparable Bayesian sliding-window denoisers; 2) an i.i.d. denoiser based on a modified GM learning algorithm; and 3) a universal denoiser that does not need information about the range where the input takes values from or require the input signal to be bounded. We provide two implementations of our universal CS recovery algorithm with one being faster and the other being more accurate. The two implementations compare favorably with existing universal reconstruction algorithms in terms of both reconstruction quality and runtime.}, number={21}, journal={IEEE Transactions on Signal Processing}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Ma, Yanting and Zhu, Junan and Baron, Dror}, year={2016}, month={Nov}, pages={5611–5622} } @article{tan_ma_rueda_baron_arce_2016, title={Compressive Hyperspectral Imaging via Approximate Message Passing}, volume={10}, ISSN={["1941-0484"]}, DOI={10.1109/jstsp.2015.2500190}, abstractNote={We consider a compressive hyperspectral imaging reconstruction problem, where three-dimensional spatio-spectral information about a scene is sensed by a coded aperture snapshot spectral imager (CASSI). The CASSI imaging process can be modeled as suppressing three-dimensional coded and shifted voxels and projecting these onto a two-dimensional plane, such that the number of acquired measurements is greatly reduced. On the other hand, because the measurements are highly compressive, the reconstruction process becomes challenging. We previously proposed a compressive imaging reconstruction algorithm that is applied to two-dimensional images based on the approximate message passing (AMP) framework. AMP is an iterative algorithm that can be used in signal and image reconstruction by performing denoising at each iteration. We employed an adaptive Wiener filter as the image denoiser, and called our algorithm “AMP-Wiener.” In this paper, we extend AMP-Wiener to three-dimensional hyperspectral image reconstruction, and call it “AMP-3D-Wiener.” Applying the AMP framework to the CASSI system is challenging, because the matrix that models the CASSI system is highly sparse, and such a matrix is not suitable to AMP and makes it difficult for AMP to converge. Therefore, we modify the adaptive Wiener filter and employ a technique called damping to solve for the divergence issue of AMP. Our approach is applied in nature, and the numerical experiments show that AMP-3D-Wiener outperforms existing widely-used algorithms such as gradient projection for sparse reconstruction (GPSR) and two-step iterative shrinkage/thresholding (TwIST) given a similar amount of runtime. Moreover, in contrast to GPSR and TwIST, AMP-3D-Wiener need not tune any parameters, which simplifies the reconstruction process.}, number={2}, journal={IEEE JOURNAL OF SELECTED TOPICS IN SIGNAL PROCESSING}, author={Tan, Jin and Ma, Yanting and Rueda, Hoover and Baron, Dror and Arce, Gonzalo R.}, year={2016}, month={Mar}, pages={389–401} } @inproceedings{han_zhu_niu_baron_2016, title={Multi-processor approximate message passing using lossy compression}, DOI={10.1109/icassp.2016.7472877}, abstractNote={In this paper, a communication-efficient multi-processor compressed sensing framework based on the approximate message passing algorithm is proposed. 
We perform lossy compression on the data being communicated between processors, resulting in a reduction in communication costs with a minor degradation in recovery quality. In the proposed framework, a new state evolution formulation takes the quantization error into account, and analytically determines the coding rate required in each iteration. Two approaches for allocating the coding rate, an online back-tracking heuristic and an optimal allocation scheme based on dynamic programming, provide significant reductions in communication costs.}, booktitle={International conference on acoustics speech and signal processing}, author={Han, P. X. and Zhu, J. N. and Niu, R. X. and Baron, Dror}, year={2016}, pages={6240–6244} } @inproceedings{zhu_beirami_baron_2016, title={Performance trade-offs in multi-processor approximate message passing}, DOI={10.1109/isit.2016.7541385}, abstractNote={We consider large-scale linear inverse problems in Bayesian settings. Our general approach follows a recent line of work that applies the approximate message passing (AMP) framework in multi-processor (MP) computational systems by storing and processing a subset of rows of the measurement matrix along with corresponding measurements at each MP node. In each MP-AMP iteration, nodes of the MP system and its fusion center exchange lossily compressed messages pertaining to their estimates of the input. There is a trade-off between the physical costs of the reconstruction process including computation time, communication loads, and the reconstruction quality, and it is impossible to simultaneously minimize all the costs. We pose this minimization as a multi-objective optimization problem (MOP), and study the properties of the best trade-offs (Pareto optimality) in this MOP. We prove that the achievable region of this MOP is convex, and conjecture how the combined cost of computation and communication scales with the desired mean squared error. These properties are verified numerically.}, booktitle={2016 ieee international symposium on information theory}, author={Zhu, J. and Beirami, A. and Baron, Dror}, year={2016}, pages={680–684} } @article{krishnan_baron_2015, title={A Universal Parallel Two-Pass MDL Context Tree Compression Algorithm}, volume={9}, ISSN={["1941-0484"]}, DOI={10.1109/jstsp.2015.2403800}, abstractNote={Computing problems that handle large amounts of data necessitate the use of lossless data compression for efficient storage and transmission. We present a novel lossless universal data compression algorithm that uses parallel computational units to increase the throughput. The length- N input sequence is partitioned into B blocks. Processing each block independently of the other blocks can accelerate the computation by a factor of B, but degrades the compression quality. Instead, our approach is to first estimate the minimum description length (MDL) context tree source underlying the entire input, and then encode each of the B blocks in parallel based on the MDL source. With this two-pass approach, the compression loss incurred by using more parallel units is insignificant. Our algorithm is work-efficient, i.e., its computational complexity is O(N/B). Its redundancy is approximately Blog(N/B) bits above Rissanen's lower bound on universal compression performance, with respect to any context tree source whose maximal depth is at most log(N/B). We improve the compression by using different quantizers for states of the context tree based on the number of symbols corresponding to those states. 
Numerical results from a prototype implementation suggest that our algorithm offers a better trade-off between compression and throughput than competing universal data compression algorithms.}, number={4}, journal={IEEE JOURNAL OF SELECTED TOPICS IN SIGNAL PROCESSING}, author={Krishnan, Nikhil and Baron, Dror}, year={2015}, month={Jun}, pages={741–748} } @inproceedings{tan_ma_rueda_baron_arce_2015, title={Approximate message passing in coded aperture snapshot spectral imaging}, ISBN={9781479975914}, url={http://dx.doi.org/10.1109/globalsip.2015.7418268}, DOI={10.1109/globalsip.2015.7418268}, abstractNote={We consider a compressive hyperspectral imaging reconstruction problem, where three-dimensional spatio-spectral information about a scene is sensed by a coded aperture snapshot spectral imager (CASSI). The approximate message passing (AMP) framework is utilized to reconstruct hyperspectral images from CASSI measurements, and an adaptive Wiener filter is employed as a three-dimensional image denoiser within AMP. We call our algorithm "AMP-3D-Wiener." The simulation results show that AMP-3D-Wiener outperforms existing widely-used algorithms such as gradient projection for sparse reconstruction (GPSR) and two-step iterative shrinkage/thresholding (TwIST) given the same amount of runtime. Moreover, in contrast to GPSR and TwIST, AMP-3D-Wiener need not tune any parameters, which simplifies the reconstruction process.}, booktitle={2015 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Tan, Jin and Ma, Yanting and Rueda, Hoover and Baron, Dror and Arce, Gonzalo R.}, year={2015}, month={Dec} } @article{tan_ma_baron_2015, title={Compressive Imaging via Approximate Message Passing With Image Denoising}, volume={63}, ISSN={1053-587X 1941-0476}, url={http://dx.doi.org/10.1109/tsp.2015.2408558}, DOI={10.1109/tsp.2015.2408558}, abstractNote={We consider compressive imaging problems, where images are reconstructed from a reduced number of linear measurements. Our objective is to improve over existing compressive imaging algorithms in terms of both reconstruction error and runtime. To pursue our objective, we propose compressive imaging algorithms that employ the approximate message passing (AMP) framework. AMP is an iterative signal reconstruction algorithm that performs scalar denoising at each iteration; in order for AMP to reconstruct the original input signal well, a good denoiser must be used. We apply two wavelet-based image denoisers within AMP. The first denoiser is the “amplitude-scale-invariant Bayes estimator” (ABE), and the second is an adaptive Wiener filter; we call our AMP-based algorithms for compressive imaging AMP-ABE and AMP-Wiener. Numerical results show that both AMP-ABE and AMP-Wiener significantly improve over the state of the art in terms of runtime. In terms of reconstruction quality, AMP-Wiener offers lower mean-square error (MSE) than existing compressive imaging algorithms. 
In contrast, AMP-ABE has higher MSE, because ABE does not denoise as well as the adaptive Wiener filter.}, number={8}, journal={IEEE Transactions on Signal Processing}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Tan, Jin and Ma, Yanting and Baron, Dror}, year={2015}, month={Apr}, pages={2085–2092} } @article{trussell_baron_2015, title={Creating Analytic Online Homework for Digital Signal Processing [sp Education]}, volume={32}, ISSN={1053-5888}, url={http://dx.doi.org/10.1109/msp.2015.2438992}, DOI={10.1109/msp.2015.2438992}, abstractNote={An article by W.L. Everitt in the 1962 50th anniversary issue of Proceedings of the IEEE, "Engineering Education"-Circa 2012 A.D.," was one of many predictive articles that appeared in that issue [1]. One of Everitt's observations was the distinction between training and education. He then predicted that, in the future, training will be done primarily with computers, remarking, "Relieved of the necessity of spending most of their time on the training function, devoted teachers will be able to concentrate their efforts on 'education'."}, number={5}, journal={IEEE Signal Processing Magazine}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Trussell, H. Joel and Baron, Dror}, year={2015}, month={Sep}, pages={112–118} } @inproceedings{ma_baron_beirami_2015, title={Mismatched estimation in large linear systems}, DOI={10.1109/isit.2015.7282557}, abstractNote={We study the excess mean square error (EMSE) above the minimum mean square error (MMSE) in large linear systems where the posterior mean estimator (PME) is evaluated with a postulated prior that differs from the true prior of the input signal. We focus on large linear systems where the measurements are acquired via an independent and identically distributed random matrix, and are corrupted by additive white Gaussian noise (AWGN). The relationship between the EMSE in large linear systems and EMSE in scalar channels is derived, and closed form approximations are provided. Our analysis is based on the decoupling principle, which links scalar channels to large linear system analyses. Numerical examples demonstrate that our closed form approximations are accurate.}, booktitle={2015 ieee international symposium on information theory (isit)}, author={Ma, Y. T. and Baron, Dror and Beirami, A.}, year={2015}, pages={760–764} } @article{zhu_baron_duarte_2015, title={Recovery From Linear Measurements With Complexity-Matching Universal Signal Estimation}, volume={63}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2015.2393845}, abstractNote={We study the compressed sensing (CS) signal estimation problem where an input signal is measured via a linear matrix multiplication under additive noise. While this setup usually assumes sparsity or compressibility in the input signal during recovery, the signal structure that can be leveraged is often not known a priori. In this paper, we consider universal CS recovery, where the statistics of a stationary ergodic signal source are estimated simultaneously with the signal itself. Inspired by Kolmogorov complexity and minimum description length, we focus on a maximum a posteriori (MAP) estimation framework that leverages universal priors to match the complexity of the source. Our framework can also be applied to general linear inverse problems where more measurements than in CS might be needed. 
We provide theoretical results that support the algorithmic feasibility of universal MAP estimation using a Markov chain Monte Carlo implementation, which is computationally challenging. We incorporate some techniques to accelerate the algorithm while providing comparable and in many cases better reconstruction quality than existing algorithms. Experimental results show the promise of universality in CS, particularly for low-complexity sources that do not exhibit standard sparsity or compressibility.}, number={6}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Zhu, Junan and Baron, Dror and Duarte, Marco F.}, year={2015}, month={Mar}, pages={1512–1527} } @inproceedings{krishnan_baron_mihcak_2014, title={A Parallel two-pass MDL context tree algorithm for universal source coding}, DOI={10.1109/isit.2014.6875156}, abstractNote={We present a novel lossless universal source coding algorithm that uses parallel computational units to increase the throughput. The length-N input sequence is partitioned into B blocks. Processing each block independently of the other blocks can accelerate the computation by a factor of B, but degrades the compression quality. Instead, our approach is to first estimate the minimum description length (MDL) source underlying the entire input, and then encode each of the B blocks in parallel based on the MDL source. With this two-pass approach, the compression loss incurred by using more parallel units is insignificant. Our algorithm is work-efficient, i.e., its computational complexity is O(N/B). Its redundancy is approximately B log(N/B) bits above Rissanen's lower bound on universal coding performance, with respect to any tree source whose maximal depth is at most log(N/B).}, booktitle={2014 ieee international symposium on information theory (isit)}, author={Krishnan, N. and Baron, Dror and Mihcak, M. K.}, year={2014}, pages={1862–1865} } @inproceedings{zhu_baron_duarte_2014, title={Complexity-adaptive universal signal estimation for compressed sensing}, DOI={10.1109/ssp.2014.6884657}, abstractNote={We study the compressed sensing (CS) signal estimation problem where a signal is measured via a linear matrix multiplication under additive noise. While this setup usually assumes sparsity or compressibility in the signal during estimation, additional signal structure that can be leveraged is often not known a priori. For signals with independent and identically distributed (i.i.d.) entries, existing CS algorithms achieve optimal or near optimal estimation error without knowing the statistics of the signal. This paper addresses estimating stationary ergodic non-i.i.d. signals with unknown statistics. We have previously proposed a universal CS approach to simultaneously estimate the statistics of a stationary ergodic signal as well as the signal itself. This paper significantly improves on our previous work, especially for continuous-valued signals, by offering a four-stage algorithm called Complexity-Adaptive Universal Signal Estimation (CAUSE), where the alphabet size of the estimate adaptively matches the coding complexity of the signal. Numerical results show that the new approach offers comparable and in some cases, especially for non-i.i.d. signals, lower mean square error than the prior art, despite not knowing the signal statistics.}, booktitle={2014 IEEE Workshop on Statistical Signal Processing (SSP)}, author={Zhu, J. N. and Baron, Dror and Duarte, M.
F.}, year={2014}, pages={388–391} } @inproceedings{ma_zhu_baron_2014, place={Monticello, IL}, title={Compressed Sensing via Universal Denoising and Approximate Message Passing}, booktitle={Proc. 52d Allerton Conference on Communication, Control, and Computing}, author={Ma, Y. and Zhu, J. and Baron, D.}, year={2014}, month={Oct} } @inproceedings{tan_ma_baron_2014, title={Compressive imaging via approximate message passing with wavelet-based image denoising}, ISBN={9781479970889}, url={http://dx.doi.org/10.1109/globalsip.2014.7032152}, DOI={10.1109/globalsip.2014.7032152}, abstractNote={We consider compressive imaging problems, where images are reconstructed from a reduced number of linear measurements. Our objective is to improve over current state of the art compressive imaging algorithms in terms of both reconstruction error and runtime. To pursue our objective, we propose a compressive imaging algorithm that employs the approximate message passing (AMP) framework. AMP is an iterative signal reconstruction algorithm that performs scalar denoising of noisy signals. In this work, we apply an adaptive Wiener filter, which is a wavelet-based image denoiser, within AMP. Numerical results show that the proposed algorithm improves over the state of the art in both reconstruction error and runtime.}, booktitle={2014 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Tan, Jin and Ma, Yanting and Baron, Dror}, year={2014}, month={Dec} } @article{ma_tan_krishnan_baron_2014, title={Empirical Bayes and Full Bayes for Signal Estimation}, url={http://arxiv.org/abs/1405.2113}, journal={arXiv:1405.2113 [cs, math]}, author={Ma, Yanting and Tan, Jin and Krishnan, Nikhil and Baron, Dror}, year={2014}, month={May} } @inproceedings{krishnan_baron_2014, title={Performance of parallel two-pass MDL context tree algorithm}, ISBN={9781479970889}, url={http://dx.doi.org/10.1109/globalsip.2014.7032133}, DOI={10.1109/globalsip.2014.7032133}, abstractNote={Computing problems that handle large amounts of data necessitate the use of lossless data compression for efficient storage and transmission. We present numerical results that showcase the advantages of a novel lossless universal data compression algorithm that uses parallel computational units to increase the throughput with minimal degradation in the compression quality. Our approach is to divide the data into blocks, estimate the minimum description length (MDL) context tree source underlying the entire input, and compress each block in parallel based on the MDL source. Numerical results from a prototype implementation suggest that our algorithm offers a better trade-off between compression and throughput than competing universal data compression algorithms.}, booktitle={2014 IEEE Global Conference on Signal and Information Processing (GlobalSIP)}, publisher={IEEE}, author={Krishnan, Nikhil and Baron, Dror}, year={2014}, month={Dec} } @article{tan_carmon_baron_2014, title={Signal Estimation With Additive Error Metrics in Compressed Sensing}, volume={60}, ISSN={["1557-9654"]}, DOI={10.1109/tit.2013.2285214}, abstractNote={Compressed sensing typically deals with the estimation of a system input from its noise-corrupted linear measurements, where the number of measurements is smaller than the number of input components. The performance of the estimation process is usually quantified by some standard error metric such as squared error or support set error. 
In this correspondence, we consider a noisy compressed sensing problem with any arbitrary error metric. We propose a simple, fast, and highly general algorithm that estimates the original signal by minimizing the error metric defined by the user. We verify that our algorithm is optimal owing to the decoupling principle, and we describe a general method to compute the fundamental information-theoretic performance limit for any error metric. We provide two example metrics --- minimum mean absolute error and minimum mean support error --- and give the theoretical performance limits for these two cases. Experimental results show that our algorithm outperforms methods such as relaxed belief propagation (relaxed BP) and compressive sampling matching pursuit (CoSaMP), and reaches the suggested theoretical limits for our two example metrics.}, number={1}, journal={IEEE TRANSACTIONS ON INFORMATION THEORY}, author={Tan, Jin and Carmon, Danielle and Baron, Dror}, year={2014}, month={Jan}, pages={150–158} } @inproceedings{tan_baron_dai_2014, title={Signal estimation with low infinity-norm error by minimizing the mean p-norm error}, DOI={10.1109/ciss.2014.6814074}, abstractNote={We consider the problem of estimating an input signal from noisy measurements in both parallel scalar Gaussian channels and linear mixing systems. The performance of the estimation process is quantified by the ℓ∞-norm error metric (worst case error). Our previous results have shown for independent and identically distributed (i.i.d.) Gaussian mixture input signals that, when the input signal dimension goes to infinity, the Wiener filter minimizes the ℓ∞-norm error. However, the input signal dimension is finite in practice. In this paper, we estimate the finite dimensional input signal by minimizing the mean ℓp-norm error. Numerical results show that the ℓp-norm minimizer outperforms the Wiener filter, provided that the value of p is properly chosen. Our results further suggest that the optimal value of p increases with the signal dimension, and that for i.i.d. Bernoulli-Gaussian input signals, the optimal p increases with the percentage of nonzeros.}, booktitle={2014 48th Annual Conference on Information Sciences and Systems (CISS)}, author={Tan, J. and Baron, Dror and Dai, L. Y.}, year={2014} } @article{ma_baron_needell_2014, title={Two-Part Reconstruction With Noisy-Sudocodes}, volume={62}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2014.2362892}, abstractNote={We develop a two-part reconstruction framework for signal recovery in compressed sensing (CS), where a fast algorithm is applied to provide partial recovery in Part 1, and a CS algorithm is applied to complete the residual problem in Part 2. Partitioning the reconstruction process into two complementary parts provides a natural trade-off between runtime and reconstruction quality. To exploit the advantages of the two-part framework, we propose a Noisy-Sudocodes algorithm that performs two-part reconstruction of sparse signals in the presence of measurement noise. Specifically, we design a fast algorithm for Part 1 of Noisy-Sudocodes that identifies the zero coefficients of the input signal from its noisy measurements. Many existing CS algorithms could be applied to Part 2, and we investigate approximate message passing (AMP) and binary iterative hard thresholding (BIHT). For Noisy-Sudocodes with AMP in Part 2, we provide a theoretical analysis that characterizes the trade-off between runtime and reconstruction quality. 
In a 1-bit CS setting where a new 1-bit quantizer is constructed for Part 1 and BIHT is applied to Part 2, numerical results show that the Noisy-Sudocodes algorithm improves over BIHT in both runtime and reconstruction quality.}, number={23}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Ma, Yanting and Baron, Dror and Needell, Deanna}, year={2014}, month={Dec}, pages={6323–6334} } @article{tan_baron_dai_2014, title={Wiener Filters in Gaussian Mixture Signal Estimation With ℓ∞-Norm Error}, volume={60}, ISSN={["1557-9654"]}, DOI={10.1109/tit.2014.2345260}, abstractNote={Consider the estimation of a signal x ∈ R^N from noisy observations r = x + z, where the input x is generated by an independent and identically distributed (i.i.d.) Gaussian mixture source, and z is additive white Gaussian noise in parallel Gaussian channels. Typically, the l2-norm error (squared error) is used to quantify the performance of the estimation process. In contrast, we consider the l∞-norm error (worst case error). For this error metric, we prove that, in an asymptotic setting where the signal dimension N → ∞, the l∞-norm error always comes from the Gaussian component that has the largest variance, and the Wiener filter asymptotically achieves the optimal expected l∞-norm error. The i.i.d. Gaussian mixture case can be extended to i.i.d. Bernoulli-Gaussian distributions, which are often used to model sparse signals. Finally, our results can be extended to linear mixing systems with i.i.d. Gaussian mixture inputs, in settings where a linear mixing system can be decoupled to parallel Gaussian channels.}, number={10}, journal={IEEE TRANSACTIONS ON INFORMATION THEORY}, author={Tan, Jin and Baron, Dror and Dai, Liyi}, year={2014}, month={Oct}, pages={6626–6635} } @article{duarte_wakin_baron_sarvotham_baraniuk_2013, title={Measurement Bounds for Sparse Signal Ensembles via Graphical Models}, volume={59}, ISSN={0018-9448 1557-9654}, url={http://dx.doi.org/10.1109/tit.2013.2252051}, DOI={10.1109/tit.2013.2252051}, abstractNote={In compressive sensing, a small collection of linear projections of a sparse signal contains enough information to permit signal recovery. Distributed compressive sensing (DCS) extends this framework, allowing a correlated ensemble of sparse signals to be jointly recovered from a collection of separately acquired compressive measurements. In this paper, we introduce an ensemble sparsity model for capturing the intra- and inter-signal correlations within a collection of sparse signals. For strictly sparse signals obeying an ensemble sparsity model, we characterize the fundamental number of noiseless measurements that each sensor must collect to ensure that the signals are jointly recoverable. Our analysis is based on a novel bipartite graph representation that links the sparse signal coefficients with the measurements obtained for each signal.}, number={7}, journal={IEEE Transactions on Information Theory}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Duarte, M. F. and Wakin, M. B. and Baron, D. and Sarvotham, S. and Baraniuk, R. G.}, year={2013}, month={Jul}, pages={4280–4289} } @inproceedings{zhu_baron_2013, title={Performance regions in compressed sensing from noisy measurements}, DOI={10.1109/ciss.2013.6552256}, abstractNote={In this paper, compressed sensing with noisy measurements is addressed.
The theoretically optimal reconstruction error is studied by evaluating Tanaka's equation. The main contribution is to show that in several regions, which have different measurement rates and noise levels, the reconstruction error behaves differently. This paper also evaluates the performance of the belief propagation (BP) signal reconstruction method in the regions discovered. When the measurement rate and the noise level lie in a certain region, BP is suboptimal with respect to Tanaka's equation, and it may be possible to develop reconstruction algorithms with lower error in that region.}, booktitle={2013 47th Annual Conference on Information Sciences and Systems (CISS)}, author={Zhu, J. A. and Baron, Dror}, year={2013} } @inproceedings{tan_baron_2013, title={Signal reconstruction in linear mixing systems with different error metrics}, DOI={10.1109/ita.2013.6502925}, abstractNote={We consider the problem of reconstructing a signal from noisy measurements in linear mixing systems. The reconstruction performance is usually quantified by standard error metrics such as squared error, whereas we consider any additive error metric. Under the assumption that relaxed belief propagation (BP) can compute the posterior in the large system limit, we propose a simple, fast, and highly general algorithm that reconstructs the signal by minimizing the user-defined error metric. For two example metrics, we provide performance analysis and convincing numerical results. Finally, our algorithm can be adjusted to minimize the ℓ∞ error, which is not additive. Interestingly, ℓ∞ minimization only requires to apply a Wiener filter to the output of relaxed BP.}, booktitle={2013 Information Theory and Applications Workshop (ITA)}, author={Tan, J. and Baron, Dror}, year={2013} } @article{baron_weissman_2012, title={An MCMC Approach to Universal Lossy Compression of Analog Sources}, volume={60}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2012.2206585}, abstractNote={Motivated by the Markov Chain Monte Carlo (MCMC) approach to the compression of discrete sources developed by Jalali and Weissman, we propose a lossy compression algorithm for analog sources that relies on a finite reproduction alphabet, which grows with the input length. The algorithm achieves, in an appropriate asymptotic sense, the optimum Shannon theoretic tradeoff between rate and distortion, universally for stationary ergodic continuous amplitude sources. We further propose an MCMC-based algorithm that resorts to a reduced reproduction alphabet when such reduction does not prevent achieving the Shannon limit. The latter algorithm is advantageous due to its reduced complexity and improved rates of convergence when employed on sources with a finite and small optimum reproduction alphabet.}, number={10}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Baron, Dror and Weissman, Tsachy}, year={2012}, month={Oct}, pages={5230–5240} } @inproceedings{tan_carmon_baron_2012, title={Optimal estimation with arbitrary error metrics in compressed sensing}, DOI={10.1109/ssp.2012.6319767}, abstractNote={Noisy compressed sensing deals with the estimation of a system input from its noise-corrupted linear measurements. The performance of the estimation is usually quantified by some standard error metric such as squared error or support error. In this paper, we consider a noisy compressed sensing problem with any arbitrary error metric. 
We propose a simple, fast, and general algorithm that estimates the original signal by minimizing an arbitrary error metric defined by the user. We verify that, owing to the decoupling principle, our algorithm is optimal, and we describe a general method to compute the fundamental information-theoretic performance limit for any well-defined error metric. We provide an example where the metric is absolute error and give the theoretical performance limit for it. The experimental results show that our algorithm outperforms methods such as relaxed belief propagation, and reaches the suggested theoretical limit for our example error metric.}, booktitle={2012 IEEE Statistical Signal Processing Workshop (ssp)}, author={Tan, J. and Carmon, D. and Baron, Dror}, year={2012}, pages={588–591} } @article{baron_jacob_2012, title={Variable Length Compression of Codeword Indices for Lossy Compression}, volume={19}, ISSN={["1070-9908"]}, DOI={10.1109/lsp.2012.2223462}, abstractNote={Many problems in information theory feature an index into a random codebook being encoded with a fixed length scheme. We propose to purposefully select the index in a manner that skews its distribution, thus making variable length entropy coding of the index more attractive. In an application to lossy compression of a Bernoulli source, we illustrate that variable length coding yields a reduction in the rate over fixed length coding, and allows to reach a requisite rate distortion performance level using a smaller codebook.}, number={12}, journal={IEEE SIGNAL PROCESSING LETTERS}, author={Baron, Dror and Jacob, Theju}, year={2012}, month={Dec}, pages={849–852} } @article{bickson_baron_ihler_avissar_dolev_2011, title={Fault Identification Via Nonparametric Belief Propagation}, volume={59}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2011.2116014}, abstractNote={We consider the problem of identifying a pattern of faults from a set of noisy linear measurements. Unfortunately, maximum a posteriori (MAP) probability estimation of the fault pattern is computationally intractable. To solve the fault identification problem, we propose a nonparametric belief propagation (NBP) approach. We show empirically that our belief propagation solver is more accurate than recent state-of-the-art algorithms including interior point methods and semidefinite programming. Our superior performance is explained by the fact that we take into account both the binary nature of the individual faults and the sparsity of the fault pattern arising from their rarity.}, number={6}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Bickson, Danny and Baron, Dror and Ihler, Alexander and Avissar, Harel and Dolev, Danny}, year={2011}, month={Jun}, pages={2602–2613} } @inproceedings{baron_2011, place={Helsinki, Finland}, title={Information complexity and estimation}, booktitle={Fourth Workshop Inf. Theoretic Methods Science Eng.}, author={Baron, D.}, year={2011}, month={Aug} } @inproceedings{baron_duarte_2011, title={Universal MAP estimation in compressed sensing}, ISBN={9781457718175 9781457718182}, url={http://dx.doi.org/10.1109/allerton.2011.6120245}, DOI={10.1109/allerton.2011.6120245}, abstractNote={We study the compressed sensing (CS) estimation problem where an input is measured via a linear matrix multiplication under additive noise. While this setup usually assumes sparsity or compressibility in the observed signal during recovery, the signal structure that can be leveraged is often not known a priori. 
In this paper, we consider universal CS recovery, where the statistics of a stationary ergodic signal source are estimated simultaneously with the signal itself. We provide initial theoretical, algorithmic, and experimental evidence based on maximum a posteriori (MAP) estimation that shows the promise of universality in CS, particularly for low-complexity sources that do not exhibit standard sparsity or compressibility.}, booktitle={2011 49th Annual Allerton Conference on Communication, Control, and Computing (Allerton)}, publisher={IEEE}, author={Baron, Dror and Duarte, Marco F.}, year={2011}, month={Sep} } @inproceedings{baron_weissman_2010, title={An MCMC Approach to Lossy Compression of Continuous Sources}, ISBN={9781424464258}, url={http://dx.doi.org/10.1109/dcc.2010.11}, DOI={10.1109/dcc.2010.11}, abstractNote={Motivated by the Markov chain Monte Carlo (MCMC) relaxation method of Jalali and Weissman, we propose a lossy compression algorithm for continuous amplitude sources that relies on a finite reproduction alphabet that grows with the input length. Our algorithm asymptotically achieves the optimum rate distortion (RD) function universally for stationary ergodic continuous amplitude sources. However, the large alphabet slows down the convergence to the RD function, and is thus an impediment in practice. We thus propose an MCMC-based algorithm that uses a (smaller) adaptive reproduction alphabet. In addition to computational advantages, the reduced alphabet accelerates convergence to the RD function, and is thus more suitable in practice.}, booktitle={2010 Data Compression Conference}, publisher={IEEE}, author={Baron, Dror and Weissman, Tsachy}, year={2010} } @article{baron_sarvotham_baraniuk_2010, title={Bayesian Compressive Sensing Via Belief Propagation}, volume={58}, ISSN={1053-587X 1941-0476}, url={http://dx.doi.org/10.1109/tsp.2009.2027773}, DOI={10.1109/tsp.2009.2027773}, abstractNote={Compressive sensing (CS) is an emerging field based on the revelation that a small collection of linear projections of a sparse signal contains enough information for stable, sub-Nyquist signal acquisition. When a statistical characterization of the signal is available, Bayesian inference can complement conventional CS methods based on linear programming or greedy algorithms. We perform asymptotically optimal Bayesian inference using belief propagation (BP) decoding, which represents the CS encoding matrix as a graphical model. Fast computation is obtained by reducing the size of the graphical model with sparse encoding matrices. To decode a length-N signal containing K large coefficients, our CS-BP decoding algorithm uses O(K log(N)) measurements and O(N log^2(N)) computation. Finally, although we focus on a two-state mixture Gaussian model, CS-BP is easily adapted to other signal models.}, number={1}, journal={IEEE Transactions on Signal Processing}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, Dror and Sarvotham, Shriram and Baraniuk, Richard G.}, year={2010}, month={Jan}, pages={269–280} } @inproceedings{guo_baron_shamai_2009, title={A single-letter characterization of optimal noisy compressed sensing}, ISBN={9781424458714 9781424458707 9781424458714}, url={http://dx.doi.org/10.1109/allerton.2009.5394838}, DOI={10.1109/allerton.2009.5394838}, abstractNote={Compressed sensing deals with the reconstruction of a high-dimensional signal from far fewer linear measurements, where the signal is known to admit a sparse representation in a certain linear space.
The asymptotic scaling of the number of measurements needed for reconstruction as the dimension of the signal increases has been studied extensively. This work takes a fundamental perspective on the problem of inferring about individual elements of the sparse signal given the measurements, where the dimensions of the system become increasingly large. Using the replica method, the outcome of inferring about any fixed collection of signal elements is shown to be asymptotically decoupled, i.e., those elements become independent conditioned on the measurements. Furthermore, the problem of inferring about each signal element admits a single-letter characterization in the sense that the posterior distribution of the element, which is a sufficient statistic, becomes asymptotically identical to the posterior of inferring about the same element in scalar Gaussian noise. The result leads to simple characterization of all other elemental metrics of the compressed sensing problem, such as the mean squared error and the error probability for reconstructing the support set of the sparse signal. Finally, the single-letter characterization is rigorously justified in the special case of sparse measurement matrices where belief propagation becomes asymptotically optimal.}, booktitle={2009 47th Annual Allerton Conference on Communication, Control, and Computing (Allerton)}, publisher={IEEE}, author={Guo, Dongning and Baron, D. and Shamai, S.}, year={2009}, month={Sep} } @book{baron_duarte_wakin_sarvotham_baraniuk_2009, title={Distributed Compressive Sensing}, url={http://dx.doi.org/10.21236/ada521228}, DOI={10.21236/ada521228}, abstractNote={Abstract : Compressive sensing is a signal acquisition framework based on the revelation that a small collection of linear projections of a sparse signal contains enough information for stable recovery. In this paper we introduce a new theory for distributed compressive sensing (DCS) that enables new distributed coding algorithms for multi-signal ensembles that exploit both intra- and inter-signal correlation structures. The DCS theory rests on a new concept that we term the joint sparsity of a signal ensemble. Our theoretical contribution is to characterize the fundamental performance limits of DCS recovery for jointly sparse signal ensembles in the noiseless measurement setting; our result connects single-signal, joint, and distributed (multi-encoder) compressive sensing. To demonstrate the efficacy of our framework and to show that additional challenges such as computational tractability can be addressed, we study in detail three example models for jointly sparse signals. For these models, we develop practical algorithms for joint recovery of multiple signals from incoherent projections. In two of our three models, the results are asymptotically best-possible, meaning that both the upper and lower bounds match the performance of our practical algorithms. Moreover, simulations indicate that the asymptotics take effect with just a moderate number of signals. DCS is immediately applicable to a range of problems in sensor arrays and networks.}, institution={Defense Technical Information Center}, author={Baron, Dror and Duarte, Marco F. and Wakin, Michael B. 
and Sarvotham, Shriram and Baraniuk, Richard G.}, year={2009}, month={Jan} } @article{chandrasekaran_wakin_baron_baraniuk_2009, title={Representation and Compression of Multidimensional Piecewise Functions Using Surflets}, volume={55}, ISSN={0018-9448}, url={http://dx.doi.org/10.1109/tit.2008.2008153}, DOI={10.1109/tit.2008.2008153}, abstractNote={We study the representation, approximation, and compression of functions in M dimensions that consist of constant or smooth regions separated by smooth (M-1)-dimensional discontinuities. Examples include images containing edges, video sequences of moving objects, and seismic data containing geological horizons. For both function classes, we derive the optimal asymptotic approximation and compression rates based on Kolmogorov metric entropy. For piecewise constant functions, we develop a multiresolution predictive coder that achieves the optimal rate-distortion performance; for piecewise smooth functions, our coder has near-optimal rate-distortion performance. Our coder for piecewise constant functions employs surflets, a new multiscale geometric tiling consisting of M-dimensional piecewise constant atoms containing polynomial discontinuities. Our coder for piecewise smooth functions uses surfprints, which wed surflets to wavelets for piecewise smooth approximation. Both of these schemes achieve the optimal asymptotic approximation performance. Key features of our algorithms are that they carefully control the potential growth in surflet parameters at higher smoothness and do not require explicit estimation of the discontinuity. We also extend our results to the corresponding discrete function spaces for sampled data. We provide asymptotic performance results for both discrete function spaces and relate this asymptotic performance to the sampling rate and smoothness orders of the underlying functions and discontinuities. For approximation of discrete data, we propose a new scale-adaptive dictionary that contains few elements at coarse and fine scales, but many elements at medium scales. Simulation results on synthetic signals provide a comparison between surflet-based coders and previously studied approximation schemes based on wedgelets and wavelets.}, number={1}, journal={IEEE Transactions on Information Theory}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Chandrasekaran, Venkat and Wakin, Michael B. and Baron, Dror and Baraniuk, Richard G.}, year={2009}, month={Jan}, pages={374–400} } @inproceedings{duarte_sarvotham_baron_wakin_baraniuk_2008, place={Sedona, AZ}, title={Performance Limits for Jointly Sparse Signals via Graphical Models}, booktitle={Proceedings of Sensor, Signal, and Information Processing Workshop}, author={Duarte, M.F. and Sarvotham, S. and Baron, D. and Wakin, M.B. and Baraniuk, R.G.}, year={2008}, month={May} } @inproceedings{rachlin_baron_2008, title={The secrecy of compressed sensing measurements}, ISBN={9781424429257}, url={http://dx.doi.org/10.1109/allerton.2008.4797641}, DOI={10.1109/allerton.2008.4797641}, abstractNote={Results in compressed sensing describe the feasibility of reconstructing sparse signals using a small number of linear measurements. In addition to compressing the signal, do these measurements provide secrecy? This paper considers secrecy in the context of an adversary that does not know the measurement matrix used to encrypt the signal. 
We demonstrate that compressed sensing-based encryption does not achieve Shannon's definition of perfect secrecy, but can provide a computational guarantee of secrecy.}, booktitle={2008 46th Annual Allerton Conference on Communication, Control, and Computing}, publisher={IEEE}, author={Rachlin, Yaron and Baron, Dror}, year={2008}, month={Sep} } @book{duarte_sarvotham_wakin_baron_baraniuk_2008, title={Theoretical Performance Limits for Jointly Sparse Signals via Graphical Models}, number={ECE-0802}, institution={Electrical and Computer Engineering Department, Rice University}, author={Duarte, M.F. and Sarvotham, S. and Wakin, M.B. and Baron, D. and Baraniuk, R.G.}, year={2008}, month={Jul} } @inproceedings{takhar_laska_wakin_duarte_baron_sarvotham_kelly_baraniuk_2006, title={A new compressive imaging camera architecture using optical-domain compression}, url={http://dx.doi.org/10.1117/12.659602}, DOI={10.1117/12.659602}, abstractNote={Compressive Sensing is an emerging field based on the revelation that a small number of linear projections of a compressible signal contain enough information for reconstruction and processing. It has many promising implications and enables the design of new kinds of Compressive Imaging systems and cameras. In this paper, we develop a new camera architecture that employs a digital micromirror array to perform optical calculations of linear projections of an image onto pseudorandom binary patterns. Its hallmarks include the ability to obtain an image with a single detection element while sampling the image fewer times than the number of pixels. Other attractive properties include its universality, robustness, scalability, progressivity, and computational asymmetry. The most intriguing feature of the system is that, since it relies on a single photon detector, it can be adapted to image at wavelengths that are currently impossible with conventional CCD and CMOS imagers.}, booktitle={Computational Imaging IV}, publisher={SPIE}, author={Takhar, Dharmpal and Laska, Jason N. and Wakin, Michael B. and Duarte, Marco F. and Baron, Dror and Sarvotham, Shriram and Kelly, Kevin F. and Baraniuk, Richard G.}, editor={Bouman, Charles A. and Miller, Eric L. and Pollak, Ilya}, year={2006}, month={Feb} } @inproceedings{wakin_laska_duarte_baron_sarvotham_takhar_kelly_baraniuk_2006, title={An Architecture for Compressive Imaging}, ISBN={1424404800}, url={http://dx.doi.org/10.1109/icip.2006.312577}, DOI={10.1109/icip.2006.312577}, abstractNote={Compressive sensing is an emerging field based on the revelation that a small group of non-adaptive linear projections of a compressible signal contains enough information for reconstruction and processing. In this paper, we propose algorithms and hardware to support a new theory of compressive imaging. Our approach is based on a new digital image/video camera that directly acquires random projections of the signal without first collecting the pixels/voxels. Our camera architecture employs a digital micromirror array to perform optical calculations of linear projections of an image onto pseudorandom binary patterns. Its hallmarks include the ability to obtain an image with a single detection element while measuring the image/video fewer times than the number of pixels; this can significantly reduce the computation required for video acquisition/encoding. Because our system relies on a single photon detector, it can also be adapted to image at wavelengths that are currently impossible with conventional CCD and CMOS imagers.
We are currently testing a prototype design for the camera and include experimental results.}, booktitle={2006 International Conference on Image Processing}, publisher={IEEE}, author={Wakin, Michael B. and Laska, Jason N. and Duarte, Marco F. and Baron, Dror and Sarvotham, Shriram and Takhar, Dharmpal and Kelly, Kevin F. and Baraniuk, Richard G.}, year={2006}, month={Oct} } @inproceedings{kirolos_laska_wakin_duarte_baron_ragheb_massoud_baraniuk_2006, title={Analog-to-Information Conversion via Random Demodulation}, ISBN={1424406692 1424406706}, url={http://dx.doi.org/10.1109/dcas.2006.321036}, DOI={10.1109/dcas.2006.321036}, abstractNote={Many problems in radar and communication signal processing involve radio frequency (RF) signals of very high bandwidth. This presents a serious challenge to systems that might attempt to use a high-rate analog-to-digital converter (ADC) to sample these signals, as prescribed by the Shannon/Nyquist sampling theorem. In these situations, however, the information level of the signal is often far lower than the actual bandwidth, which prompts the question of whether more efficient schemes can be developed for measuring such signals. In this paper we propose a system that uses modulation, filtering, and sampling to produce a low-rate set of digital measurements. Our "analog-to-information converter" (AIC) is inspired by the theory of compressive sensing (CS), which states that a discrete signal having a sparse representation in some dictionary can be recovered from a small number of linear projections of that signal. We generalize the CS theory to continuous-time sparse signals, explain our proposed AIC system in the CS context, and discuss practical issues regarding implementation}, booktitle={2006 IEEE Dallas/CAS Workshop on Design, Applications, Integration and Software}, publisher={IEEE}, author={Kirolos, Sami and Laska, Jason and Wakin, Michael and Duarte, Marco and Baron, Dror and Ragheb, Tamer and Massoud, Yehia and Baraniuk, Richard}, year={2006}, month={Oct} } @inproceedings{baron_sarvotham_baraniuk_2006, title={Coding vs. Packet Retransmission over Noisy Channels}, ISBN={1424403499}, url={http://dx.doi.org/10.1109/ciss.2006.286528}, DOI={10.1109/ciss.2006.286528}, abstractNote={In many packet-based communication systems such as TCP/IP-based systems, packets are communicated over a noisy physical layer (a channel), and if a packet cannot be decoded correctly, then the transport layer retransmits it. Of course, retransmissions consume significant resources and their use should be limited. However, decreasing the likelihood of retransmission requires to encode the packets with strong channel codes in the physical layer, which also requires additional channel resources. In this paper, we study the cross-layer tradeoff between coding and packet retransmissions, and optimize over the total channel resource consumption. We show that as the packet length k increases, the redundancy r beyond the k/C channel uses implied by Shannon's channel capacity C is Θ(√(k ln(k))) extra channel uses. Moreover, as k increases we must use stronger channel codes. We then apply these results to universal coding over a piecewise memoryless channel with transitions between unknown i.i.d. statistics.
Our constructive universal algorithm has redundancy r = O(k^(2/3)√(ln(k))) using packets of polynomially increasing lengths while accounting for possible packet drops caused by transitions in the statistics.}, booktitle={2006 40th Annual Conference on Information Sciences and Systems}, publisher={IEEE}, author={Baron, Dror and Sarvotham, Shriram and Baraniuk, Richard G.}, year={2006}, month={Mar} } @book{sarvotham_baron_baraniuk_2006, title={Compressed Sensing Reconstruction via Belief Propagation}, number={ECE-0601}, institution={Electrical and Computer Engineering Department, Rice University}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2006}, month={Jul} } @inproceedings{wakin_laska_duarte_baron_sarvotham_takhar_kelly_baraniuk_2006, place={Beijing, China}, title={Compressive Imaging for Video Representation and Coding}, booktitle={Proceedings of Picture Coding Symposium}, author={Wakin, M.B. and Laska, J.N. and Duarte, M.F. and Baron, D. and Sarvotham, S. and Takhar, D. and Kelly, K.F. and Baraniuk, R.G.}, year={2006}, month={May} } @inproceedings{duarte_sarvotham_baron_wakin_baraniuk_2006, title={Distributed Compressed Sensing of Jointly Sparse Signals}, ISBN={1424401313}, url={http://dx.doi.org/10.1109/acssc.2005.1600024}, DOI={10.1109/acssc.2005.1600024}, abstractNote={Compressed sensing is an emerging field based on the revelation that a small collection of linear projections of a sparse signal contains enough information for reconstruction. In this paper we expand our theory for distributed compressed sensing (DCS) that enables new distributed coding algorithms for multi-signal ensembles that exploit both intra- and inter-signal correlation structures. The DCS theory rests on a new concept that we term the joint sparsity of a signal ensemble. We present a second new model for jointly sparse signals that allows for joint recovery of multiple signals from incoherent projections through simultaneous greedy pursuit algorithms. We also characterize theoretically and empirically the number of measurements per sensor required for accurate reconstruction.}, booktitle={Conference Record of the Thirty-Ninth Asilomar Conference on Signals, Systems and Computers, 2005.}, publisher={IEEE}, author={Duarte, M.F. and Sarvotham, S. and Baron, D. and Wakin, M.B. and Baraniuk, R.G.}, year={2006}, month={Mar} } @article{baron_baraniuk_2006, title={Faster sequential universal coding via block partitioning}, volume={52}, ISSN={0018-9448}, url={http://dx.doi.org/10.1109/tit.2006.871041}, DOI={10.1109/tit.2006.871041}, abstractNote={Rissanen provided a sequential universal coding algorithm based on a block partitioning scheme, where the source model is estimated at the beginning of each block. This approach asymptotically approaches the entropy at the fastest possible rate of (1/2)log(n) bits per unknown parameter. We show that the complexity of this algorithm is Ω(n log(n)), which is comparable to existing sequential universal algorithms. We provide a sequential O(n log(log(n))) algorithm by modifying Rissanen's block partitioning scheme. The redundancy with our approach is greater than with Rissanen's block partitioning scheme by a multiplicative factor 1 + O(1/log(log(n))), hence it asymptotically approaches the entropy at the fastest possible rate.}, number={4}, journal={IEEE Transactions on Information Theory}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, D.
and Baraniuk, R.G.}, year={2006}, month={Apr}, pages={1708–1710} } @inproceedings{sarvotham_baron_baraniuk_2006, place={Monticello, IL}, title={Measurements vs. Bits: Compressed Sensing meets Information Theory}, booktitle={Proceedings of 44th Allerton Conference on Communication, Control, and Computing}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2006}, month={Sep} } @inproceedings{tropp_wakin_duarte_baron_baraniuk_2006, title={Random Filters for Compressive Sampling and Reconstruction}, ISBN={142440469X}, url={http://dx.doi.org/10.1109/icassp.2006.1660793}, DOI={10.1109/icassp.2006.1660793}, abstractNote={We propose and study a new technique for efficiently acquiring and reconstructing signals based on convolution with a fixed FIR filter having random taps. The method is designed for sparse and compressible signals, i.e., ones that are well approximated by a short linear combination of vectors from an orthonormal basis. Signal reconstruction involves a nonlinear orthogonal matching pursuit algorithm that we implement efficiently by exploiting the nonadaptive, time-invariant structure of the measurement process. While simpler and more efficient than other random acquisition techniques like compressed sensing, random filtering is sufficiently generic to summarize many types of compressible signals and generalizes to streaming and continuous-time signals. Extensive numerical experiments demonstrate its efficacy for acquiring and reconstructing signals sparse in the time, frequency, and wavelet domains, as well as piecewise smooth signals and Poisson processes}, booktitle={2006 IEEE International Conference on Acoustics, Speech and Signal Processing Proceedings}, publisher={IEEE}, author={Tropp, J.A. and Wakin, M.B. and Duarte, M.F. and Baron, D. and Baraniuk, R.G.}, year={2006}, month={Aug} } @inproceedings{sarvotham_baron_baraniuk_2006, place={Seattle, WA}, title={Sudocodes – Fast Measurement and Reconstruction of Sparse Signals}, ISBN={142440505X}, url={http://dx.doi.org/10.1109/isit.2006.261573}, DOI={10.1109/isit.2006.261573}, abstractNote={Sudocodes are a new scheme for lossless compressive sampling and reconstruction of sparse signals. Consider a sparse signal x ∈ ℝ^N containing only K ≪ N non-zero values. Sudo-encoding computes the codeword via the linear matrix-vector multiplication y = Φx, with K < M ≪ N. We propose a non-adaptive construction of a sparse Φ comprising only the values 0 and 1; hence the computation of y involves only sums of subsets of the elements of x. An accompanying sudodecoding strategy efficiently recovers x given y. Sudocodes require only M = O(K log(N)) measurements for exact reconstruction with worst-case computational complexity O(K log(K) log(N)). Sudocodes can be used as erasure codes for real-valued data and have potential applications in peer-to-peer networks and distributed data storage systems. They are also easily extended to signals that are sparse in arbitrary bases}, booktitle={2006 IEEE International Symposium on Information Theory}, publisher={IEEE}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2006}, month={Jul} } @inproceedings{duarte_wakin_baron_baraniuk_2006, title={Universal distributed sensing via random projections}, ISBN={1595933344}, url={http://dx.doi.org/10.1109/ipsn.2006.244161}, DOI={10.1109/ipsn.2006.244161}, abstractNote={This paper develops a new framework for distributed coding and compression in sensor networks based on distributed compressed sensing (DCS).
DCS exploits both intra-signal and inter-signal correlations through the concept of joint sparsity; just a few measurements of a jointly sparse signal ensemble contain enough information for reconstruction. DCS is well-suited for sensor network applications, thanks to its simplicity, universality, computational asymmetry, tolerance to quantization and noise, robustness to measurement loss, and scalability. It also requires absolutely no inter-sensor collaboration. We apply our framework to several real world datasets to validate the framework.}, booktitle={2006 5th International Conference on Information Processing in Sensor Networks}, publisher={IEEE}, author={Duarte, M.F. and Wakin, M.B. and Baron, D. and Baraniuk, R.G.}, year={2006} } @inproceedings{sarvotham_baron_baraniuk_2006, title={Variable-Rate Universal Slepian-Wolf Coding with Feedback}, ISBN={1424401313}, url={http://dx.doi.org/10.1109/acssc.2005.1599690}, DOI={10.1109/acssc.2005.1599690}, abstractNote={Traditional Slepian-Wolf coding assumes known statistics and relies on asymptotically long sequences. However, in practice the statistics are unknown, and the input sequences are of finite length. In this finite regime, we must allow a non-zero probability of codeword error ε and also pay a penalty by adding redundant bits in the encoding process. In this paper, we develop a universal scheme for Slepian-Wolf coding that allows encoding at variable rates close to the Slepian-Wolf limit. We illustrate our scheme in a setup where we encode a uniform Bernoulli source sequence and the second sequence, which is correlated to the first via a binary symmetric correlation channel, is available as side information at the decoder. This specific setup is easily extended to more general settings. For length n source sequences and a fixed ε, we show that the redundancy of our scheme is O(√n Φ^(-1)(ε)) bits over the Slepian-Wolf limit. The prior art for Slepian-Wolf coding with known statistics shows that the redundancy is Ω(√n Φ^(-1)(ε)). Therefore, we infer that for Slepian-Wolf coding, the penalty needed to accommodate universality is Θ(√n Φ^(-1)(ε)).}, booktitle={Conference Record of the Thirty-Ninth Asilomar Conference on Signals, Systems and Computers, 2005.}, publisher={IEEE}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2006}, month={Mar} } @inproceedings{baron_duarte_sarvotham_wakin_baraniuk_2005, place={Monticello, IL}, title={An Information-Theoretic Approach to Distributed Compressed Sensing}, booktitle={Proceedings of 43d Allerton Conference on Communication, Control, and Computing}, author={Baron, D. and Duarte, M.F. and Sarvotham, S. and Wakin, M.B. and Baraniuk, R.G.}, year={2005}, month={Sep} } @book{sarvotham_wakin_baron_duarte_baraniuk_2005, title={Analysis of the DCS One-Stage Greedy Algorithm for Common Sparse Supports}, number={ECE-05-03}, institution={Electrical and Computer Engineering Department, Rice University}, author={Sarvotham, S. and Wakin, M.B. and Baron, D. and Duarte, M.F. and Baraniuk, R.G.}, year={2005}, month={Oct} } @article{baron_bresler_2005, title={Antisequential Suffix Sorting for BWT-Based Data Compression}, volume={54}, ISSN={0018-9340}, url={http://dx.doi.org/10.1109/tc.2005.56}, DOI={10.1109/tc.2005.56}, abstractNote={Suffix sorting requires ordering all suffixes of all symbols in an input sequence and has applications in running queries on large texts and in universal lossless data compression based on the Burrows-Wheeler transform (BWT).
We propose a new suffix lists data structure that leads to three fast, antisequential, and memory-efficient algorithms for suffix sorting. For a length-N input over a size-|X| alphabet, the worst-case complexities of these algorithms are Θ(N^2), O(|X|N log(N/|X|)), and O(N√|X| log(N/|X|)), respectively. Furthermore, simulation results indicate performance that is competitive with other suffix sorting methods. In contrast, the suffix sorting methods that are fastest on standard test corpora have poor worst-case performance. Therefore, in comparison with other suffix sorting methods, suffix lists offer a useful trade-off between practical performance and worst-case behavior. Another distinguishing feature of suffix lists is that these algorithms are simple; some of them can be implemented in VLSI. This could accelerate suffix sorting by at least an order of magnitude and enable high-speed BWT-based compression systems.}, number={4}, journal={IEEE Transactions on Computers}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, D. and Bresler, Y.}, year={2005}, month={Apr}, pages={385–397} } @inproceedings{baron_khojastepour_baraniuk_2005, title={How quickly can we approach channel capacity?}, ISBN={0780386221}, url={http://dx.doi.org/10.1109/acssc.2004.1399310}, DOI={10.1109/acssc.2004.1399310}, abstractNote={Recent progress in code design has made it crucial to understand how quickly communication systems can approach their limits. To address this issue for the channel capacity C, we define the nonasymptotic capacity C_NA(n, ε) as the maximal rate of codebooks that achieve a probability ε of codeword error while using codewords of length n. We prove for the binary symmetric channel that C_NA(n, ε) = C - K(ε)/√n + o(1/√n), where K(ε) is available in closed form. We also describe similar results for the Gaussian channel. These results may lead to more efficient resource usage in practical communication systems.}, booktitle={Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers, 2004.}, publisher={IEEE}, author={Baron, D. and Khojastepour, M.A. and Baraniuk, R.G.}, year={2005}, month={Mar} } @inproceedings{duarte_sarvotham_wakin_baron_baraniuk_2005, place={Rennes, France}, title={Joint Sparsity Models for Distributed Compressed Sensing}, booktitle={Online Proceedings of the Workshop on Signal Processing with Adaptive Sparse Structured Representations}, author={Duarte, M.F. and Sarvotham, S. and Wakin, M.B. and Baron, D. and Baraniuk, R.G.}, year={2005}, month={Nov} } @inproceedings{sarvotham_baron_baraniuk_2005, place={Baltimore, MD}, title={Non-Asymptotic Performance of Symmetric Slepian-Wolf Coding}, booktitle={Proceedings of 39th Annual Conference on Information Sciences and Systems}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2005}, month={Mar} } @inproceedings{wakin_sarvotham_duarte_baron_baraniuk_2005, place={Vancouver, Canada}, title={Recovery of Jointly Sparse Signals from Few Random Projections}, booktitle={Proceedings of Workshop on Neural Information Processing Systems}, author={Wakin, M.B. and Sarvotham, S. and Duarte, M.F. and Baron, D.
and Baraniuk, R.G.}, year={2005}, month={Dec} } @inproceedings{sarvotham_baron_baraniuk_2005, place={Monticello, IL}, title={Variable-Rate Coding with Feedback for Universal Communication Systems}, booktitle={Proceedings of 43d Allerton Conference on Communication, Control, and Computing}, author={Sarvotham, S. and Baron, D. and Baraniuk, R.G.}, year={2005}, month={Sep} } @article{baron_bresler_2004, title={An O(N) Semipredictive Universal Encoder via the BWT}, volume={50}, ISSN={0018-9448}, url={http://dx.doi.org/10.1109/tit.2004.826664}, DOI={10.1109/tit.2004.826664}, abstractNote={We provide an O(N) algorithm for a nonsequential semipredictive encoder whose pointwise redundancy with respect to any (unbounded depth) tree source is O(1) bits per state above Rissanen's lower bound. This is achieved by using the Burrows-Wheeler transform (BWT), an invertible permutation transform that has been suggested for lossless data compression. First, we use the BWT only as an efficient computational tool for pruning context trees, and encode the input sequence rather than the BWT output. Second, we estimate the minimum description length (MDL) source by incorporating suffix tree methods to construct the unbounded depth context tree that corresponds to the input sequence in O(N) time. Third, we point out that a variety of previous source coding methods required superlinear complexity for determining which tree source state generated each of the symbols of the input. We show how backtracking from the BWT output to the input sequence enables to solve this problem in O(N) worst case complexity.}, number={5}, journal={IEEE Transactions on Information Theory}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, D. and Bresler, Y.}, year={2004}, month={May}, pages={928–937} } @book{chandrasekaran_wakin_baron_baraniuk_2004, title={Compressing Piecewise Smooth Multidimensional Functions Using Surflets: Rate-Distortion Analysis}, institution={Electrical and Computer Engineering Department, Rice University}, author={Chandrasekaran, V. and Wakin, M. and Baron, D. and Baraniuk, R.G.}, year={2004}, month={Mar} } @inproceedings{chandrasekaran_wakin_baron_baraniuk_2004, place={Princeton, NJ}, title={Compression of Higher Dimensional Functions Containing Smooth Discontinuities}, booktitle={Proceedings of 38th Annual Conference on Information Sciences and Systems}, author={Chandrasekaran, V. and Wakin, M. and Baron, D. and Baraniuk, R.G.}, year={2004}, month={Mar} } @inproceedings{baron_singer_baraniuk_2004, place={Princeton, NJ}, title={Probability Assignments with Worst-Case Coding Length Constraints}, booktitle={Proceedings of 38th Annual Conference on Information Sciences and Systems}, author={Baron, D. and Singer, A. and Baraniuk, R.G.}, year={2004} } @inproceedings{baron_khojastepour_baraniuk_2004, place={Monticello, IL}, title={Redundancy Rates of Slepian-Wolf Coding}, booktitle={Proceedings of 42d Allerton Conference on Communication, Control, and Computing}, author={Baron, D. and Khojastepour, M.A. 
and Baraniuk, R.G.}, year={2004}, month={Sep} } @inproceedings{chandrasekaran_wakin_baron_baraniuk_2004, title={Surflets: a sparse representation for multidimensional functions containing smooth discontinuities}, ISBN={0780382803}, url={http://dx.doi.org/10.1109/isit.2004.1365602}, DOI={10.1109/isit.2004.1365602}, abstractNote={Discontinuities in data often provide vital information, and representing these discontinuities sparsely is an important goal for approximation and compression algorithms. Little work has been done on efficient representations for higher dimensional functions containing arbitrarily smooth discontinuities. We consider the N-dimensional Horizon class: N-dimensional functions containing a C^K smooth (N-1)-dimensional singularity separating two constant regions. We derive the optimal rate-distortion function for this class and introduce the multiscale surflet representation for sparse piecewise approximation of these functions. We propose a compression algorithm using surflets that achieves the optimal asymptotic rate-distortion performance for Horizon functions. This algorithm can be implemented using knowledge of only the N-dimensional function, without explicitly estimating the (N-1)-dimensional discontinuity.}, booktitle={International Symposium on Information Theory, 2004. ISIT 2004. Proceedings.}, publisher={IEEE}, author={Chandrasekaran, V. and Wakin, M.B. and Baron, D. and Baraniuk, R.G.}, year={2004}, month={Dec} } @inproceedings{baron_bresler_mihcak_2003, place={Baltimore, MD}, title={Two-Part Codes with Low Worst-Case Redundancies for Distributed Compression of Bernoulli Sequences}, booktitle={Proceedings of 37th Annual Conference on Information Sciences and Systems}, author={Baron, D. and Bresler, Y. and Mihcak, M.K.}, year={2003}, month={Mar} } @article{baron_birk_2002, title={Coding schemes for multislot messages in multichannel ALOHA with deadlines}, volume={1}, ISSN={1536-1276}, url={http://dx.doi.org/10.1109/7693.994823}, DOI={10.1109/7693.994823}, abstractNote={Slotted multichannel ALOHA is the access scheme of choice for short messages and for reserving channels for longer ones in many satellite-based networks. This paper proposes schemes for increasing the capacity (maximum attainable throughput) of multichannel slotted ALOHA subject to meeting a user-specified deadline with a (high) required probability, thereby jointly capturing the users' requirements and the system owner's desires. The focus is on short yet multislot messages. A key idea is to achieve a low probability of missing the deadline by permitting a large maximum resource expenditure per message, while holding the mean expenditure low in order to minimize "pollution." For a K-slot message, redundant single-slot fragments are constructed using block erasure-correcting codes, such that any K fragments suffice for message reception. With multiround coding, an optimized number of fragments are transmitted in each round until K are received or the deadline is reached. Even with very strict constraints, capacities that approach the 1/e limit are attained. The coding-reservation scheme raises capacity above 1/e by allowing the hub, upon receipt of any message fragment(s), to grant contention-free slots for the remaining required fragments. Both schemes are also adapted for use with single-transmitter stations at a small performance penalty in most cases.
Finally, because capacity is maximized by minimizing the mean per-message transmission resources, the transmission scheme is also energy-efficient.}, number={2}, journal={IEEE Transactions on Wireless Communications}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, D. and Birk, Y.}, year={2002}, month={Apr}, pages={292–301} } @article{baron_birk_2002, title={Multiple Working Points in Multichannel ALOHA with Deadlines}, volume={8}, number={1}, journal={Wireless Networks}, author={Baron, D. and Birk, Y.}, year={2002}, month={Jan}, pages={5–11} } @book{baron_bresler_2001, title={Linear Complexity MDL Universal Coding with the BWT}, number={UILU-ENG-01-2213}, institution={Coordinated Science Laboratory, University of Illinois}, author={Baron, D. and Bresler, Y.}, year={2001}, month={Jun} } @inproceedings{baron_bresler_2001, place={Washington, DC}, title={Linear Complexity MDL Universal Coding with the BWT}, author={Baron, D. and Bresler, Y.}, year={2001}, month={Jun} } @book{baron_birk_2001, title={Multiround Coding and Coding-Reservation for Multislot Messages in Multichannel ALOHA with Deadlines}, number={EE Pub 1293}, institution={Electrical Engineering Department, Technion}, author={Baron, D. and Birk, Y.}, year={2001}, month={Oct} } @article{baron_singer_2001, title={On the cost of worst case coding length constraints}, volume={47}, ISSN={0018-9448}, url={http://dx.doi.org/10.1109/18.959291}, DOI={10.1109/18.959291}, abstractNote={We investigate the redundancy that arises from adding a worst case length constraint to uniquely decodable fixed-to-variable codes over achievable Huffman (1952) codes. This is in contrast to the traditional metric of the redundancy over the entropy. We show that the cost for adding constraints on the worst case coding length is small, and that the resulting bound is related to the Fibonacci numbers.}, number={7}, journal={IEEE Transactions on Information Theory}, publisher={Institute of Electrical and Electronics Engineers (IEEE)}, author={Baron, D. and Singer, A.C.}, year={2001}, pages={3088–3090} } @book{birk_baron_2000, title={Capacity Maximization in Multichannel Slotted ALOHA with Deadlines - an Overview}, number={EE Pub 1248, CCIT Report 314}, institution={Electrical Engineering Department, Technion}, author={Birk, Y. and Baron, D.}, year={2000}, month={Jun} } @book{baron_birk_2000, title={Coding Schemes for Multislot Messages in Multichannel ALOHA with Deadlines}, number={EE Pub 1241, CIT Report 307}, institution={Electrical Engineering Department, Technion}, author={Baron, D. and Birk, Y.}, year={2000}, month={Feb} } @book{baron_birk_2000, title={Multiple Working Points in Multichannel ALOHA with Deadlines}, number={EE Pub 1240, CCIT Report 306}, institution={Electrical Engineering Department, Technion}, author={Baron, D. and Birk, Y.}, year={2000}, month={Jan} } @book{baron_birk_2000, title={On the Merits of Impure Multi-Copy Schemes for MultiChannel Slotted ALOHA with Deadlines}, number={EE Pub 1249, CCIT Report 315}, institution={Electrical Engineering Department, Technion}, author={Baron, D. and Birk, Y.}, year={2000}, month={Jun} } @inproceedings{baron_bresler_2000, place={Princeton, NJ}, title={Tree Source Identification with the Burrows Wheeler Transform}, volume={2}, booktitle={Proceedings of 34th Annual Conference on Information Sciences and Systems}, author={Baron, D. 
and Bresler, Y.}, year={2000}, pages={FA1–10 - FS1–15} } @inproceedings{baron_birk_1999, place={Monticello, IL}, title={On the use of Multiple Working Points in Multichannel ALOHA with Deadlines}, booktitle={Proceedings of 37th Allerton Conference on Communication, Control, and Computing}, author={Baron, D. and Birk, Y.}, year={1999}, month={Sep}, pages={728–737} }