@article{tan_ma_rueda_baron_arce_2016,
  author       = {Tan, Jin and Ma, Yanting and Rueda, Hoover and Baron, Dror and Arce, Gonzalo R.},
  title        = {Compressive Hyperspectral Imaging via Approximate Message Passing},
  journal      = {IEEE Journal of Selected Topics in Signal Processing},
  volume       = {10},
  number       = {2},
  pages        = {389--401},
  year         = {2016},
  month        = mar,
  issn         = {1941-0484},
  doi          = {10.1109/jstsp.2015.2500190},
  abstractNote = {We consider a compressive hyperspectral imaging reconstruction problem, where three-dimensional spatio-spectral information about a scene is sensed by a coded aperture snapshot spectral imager (CASSI). The CASSI imaging process can be modeled as suppressing three-dimensional coded and shifted voxels and projecting these onto a two-dimensional plane, such that the number of acquired measurements is greatly reduced. On the other hand, because the measurements are highly compressive, the reconstruction process becomes challenging. We previously proposed a compressive imaging reconstruction algorithm that is applied to two-dimensional images based on the approximate message passing (AMP) framework. AMP is an iterative algorithm that can be used in signal and image reconstruction by performing denoising at each iteration. We employed an adaptive Wiener filter as the image denoiser, and called our algorithm "AMP-Wiener." In this paper, we extend AMP-Wiener to three-dimensional hyperspectral image reconstruction, and call it "AMP-3D-Wiener." Applying the AMP framework to the CASSI system is challenging, because the matrix that models the CASSI system is highly sparse, and such a matrix is not suitable to AMP and makes it difficult for AMP to converge. Therefore, we modify the adaptive Wiener filter and employ a technique called damping to solve for the divergence issue of AMP. Our approach is applied in nature, and the numerical experiments show that AMP-3D-Wiener outperforms existing widely-used algorithms such as gradient projection for sparse reconstruction (GPSR) and two-step iterative shrinkage/thresholding (TwIST) given a similar amount of runtime. Moreover, in contrast to GPSR and TwIST, AMP-3D-Wiener need not tune any parameters, which simplifies the reconstruction process.},
}
@article{tan_carmon_baron_2014,
  author       = {Tan, Jin and Carmon, Danielle and Baron, Dror},
  title        = {Signal Estimation With Additive Error Metrics in Compressed Sensing},
  journal      = {IEEE Transactions on Information Theory},
  volume       = {60},
  number       = {1},
  pages        = {150--158},
  year         = {2014},
  month        = jan,
  issn         = {1557-9654},
  doi          = {10.1109/tit.2013.2285214},
  abstractNote = {Compressed sensing typically deals with the estimation of a system input from its noise-corrupted linear measurements, where the number of measurements is smaller than the number of input components. The performance of the estimation process is usually quantified by some standard error metric such as squared error or support set error. In this correspondence, we consider a noisy compressed sensing problem with any arbitrary error metric. We propose a simple, fast, and highly general algorithm that estimates the original signal by minimizing the error metric defined by the user. We verify that our algorithm is optimal owing to the decoupling principle, and we describe a general method to compute the fundamental information-theoretic performance limit for any error metric. We provide two example metrics --- minimum mean absolute error and minimum mean support error --- and give the theoretical performance limits for these two cases. Experimental results show that our algorithm outperforms methods such as relaxed belief propagation (relaxed BP) and compressive sampling matching pursuit (CoSaMP), and reaches the suggested theoretical limits for our two example metrics.},
}
@inproceedings{tan_baron_dai_2014a,
  author       = {Tan, Jin and Baron, Dror and Dai, Liyi},
  title        = {Signal estimation with low infinity-norm error by minimizing the mean p-norm error},
  booktitle    = {2014 48th Annual Conference on Information Sciences and Systems ({CISS})},
  year         = {2014},
  doi          = {10.1109/ciss.2014.6814074},
  abstractNote = {We consider the problem of estimating an input signal from noisy measurements in both parallel scalar Gaussian channels and linear mixing systems. The performance of the estimation process is quantified by the ℓ ∞ -norm error metric (worst case error). Our previous results have shown for independent and identically distributed (i.i.d.) Gaussian mixture input signals that, when the input signal dimension goes to infinity, the Wiener filter minimizes the ℓ ∞ -norm error. However, the input signal dimension is finite in practice. In this paper, we estimate the finite dimensional input signal by minimizing the mean ℓ p -norm error. Numerical results show that the ℓ p -norm minimizer outperforms the Wiener filter, provided that the value of p is properly chosen. Our results further suggest that the optimal value of p increases with the signal dimension, and that for i.i.d. Bernoulli-Gaussian input signals, the optimal p increases with the percentage of nonzeros.},
}
@article{tan_baron_dai_2014,
  author       = {Tan, Jin and Baron, Dror and Dai, Liyi},
  title        = {{Wiener} Filters in {Gaussian} Mixture Signal Estimation With {$\ell_\infty$}-Norm Error},
  journal      = {IEEE Transactions on Information Theory},
  volume       = {60},
  number       = {10},
  pages        = {6626--6635},
  year         = {2014},
  month        = oct,
  issn         = {1557-9654},
  doi          = {10.1109/tit.2014.2345260},
  abstractNote = {Consider the estimation of a signal ${\mathbf {x}}\in \mathbb {R}^{N}$ from noisy observations ${{\mathbf {r}}={\mathbf {x}}+{\mathbf {z}}}$ , where the input ${{\mathbf x}}$ is generated by an independent and identically distributed (i.i.d.) Gaussian mixture source, and ${{\mathbf z}}$ is additive white Gaussian noise in parallel Gaussian channels. Typically, the $\ell _{2}$ -norm error (squared error) is used to quantify the performance of the estimation process. In contrast, we consider the $\ell _\infty $ -norm error (worst case error). For this error metric, we prove that, in an asymptotic setting where the signal dimension $N\to \infty $ , the $\ell _\infty $ -norm error always comes from the Gaussian component that has the largest variance, and the Wiener filter asymptotically achieves the optimal expected $\ell _\infty $ -norm error. The i.i.d. Gaussian mixture case can be extended to i.i.d. Bernoulli-Gaussian distributions, which are often used to model sparse signals. Finally, our results can be extended to linear mixing systems with i.i.d. Gaussian mixture inputs, in settings where a linear mixing system can be decoupled to parallel Gaussian channels.},
}
@inproceedings{tan_baron_2013,
  author       = {Tan, Jin and Baron, Dror},
  title        = {Signal reconstruction in linear mixing systems with different error metrics},
  booktitle    = {2013 Information Theory and Applications Workshop ({ITA})},
  year         = {2013},
  doi          = {10.1109/ita.2013.6502925},
  abstractNote = {We consider the problem of reconstructing a signal from noisy measurements in linear mixing systems. The reconstruction performance is usually quantified by standard error metrics such as squared error, whereas we consider any additive error metric. Under the assumption that relaxed belief propagation (BP) can compute the posterior in the large system limit, we propose a simple, fast, and highly general algorithm that reconstructs the signal by minimizing the user-defined error metric. For two example metrics, we provide performance analysis and convincing numerical results. Finally, our algorithm can be adjusted to minimize the l ∞ error, which is not additive. Interestingly, l ∞ minimization only requires to apply a Wiener filter to the output of relaxed BP.},
}
@inproceedings{tan_carmon_baron_2012,
  author       = {Tan, Jin and Carmon, Danielle and Baron, Dror},
  title        = {Optimal estimation with arbitrary error metrics in compressed sensing},
  booktitle    = {2012 IEEE Statistical Signal Processing Workshop ({SSP})},
  pages        = {588--591},
  year         = {2012},
  doi          = {10.1109/ssp.2012.6319767},
  abstractNote = {Noisy compressed sensing deals with the estimation of a system input from its noise-corrupted linear measurements. The performance of the estimation is usually quantified by some standard error metric such as squared error or support error. In this paper, we consider a noisy compressed sensing problem with any arbitrary error metric. We propose a simple, fast, and general algorithm that estimates the original signal by minimizing an arbitrary error metric defined by the user. We verify that, owing to the decoupling principle, our algorithm is optimal, and we describe a general method to compute the fundamental information-theoretic performance limit for any well-defined error metric. We provide an example where the metric is absolute error and give the theoretical performance limit for it. The experimental results show that our algorithm outperforms methods such as relaxed belief propagation, and reaches the suggested theoretical limit for our example error metric.},
}