@article{jiang_huang_panahi_yu_krim_smith_2021, title={Dynamic Graph Learning: A Structure-Driven Approach}, volume={9}, ISSN={["2227-7390"]}, DOI={10.3390/math9020168}, abstractNote={The purpose of this paper is to infer a dynamic graph as a global (collective) model of time-varying measurements at a set of network nodes. This model captures both pairwise as well as higher-order interactions (i.e., more than two nodes) among the nodes. The motivation of this work lies in the search for a connectome model which properly captures brain functionality across all regions of the brain, and possibly at individual neurons. We formulate it as an optimization problem combining a quadratic objective functional and tensor information of observed node signals over short time intervals. The proper regularization constraints reflect the graph smoothness and other dynamics involving the underlying graph’s Laplacian, as well as the smoothness of the underlying graph’s time evolution. The resulting joint optimization is solved by a continuous relaxation of the weight parameters and a novel gradient-projection scheme. While the work may be applicable to any time-evolving data set (e.g., fMRI), we apply our algorithm to a real-world dataset comprising recorded activities of individual brain cells. The resulting model is shown to be not only viable but also efficiently computable.}, number={2}, journal={MATHEMATICS}, author={Jiang, Bo and Huang, Yuming and Panahi, Ashkan and Yu, Yiyi and Krim, Hamid and Smith, Spencer L.}, year={2021}, month={Jan} } @article{ghanem_panahi_krim_kerekes_2020, title={Robust Group Subspace Recovery: A New Approach for Multi-Modality Data Fusion}, volume={20}, ISSN={["1558-1748"]}, DOI={10.1109/JSEN.2020.2999461}, abstractNote={The Robust Subspace Recovery (RoSuRe) algorithm was recently introduced as a principled and numerically efficient algorithm that unfolds the underlying Union of Subspaces (UoS) structure present in the data. The UoS model is capable of identifying more complex trends in data sets than simple linear models. We build on and extend RoSuRe to prospect the structure of different data modalities individually. We propose a novel multi-modal data fusion approach based on group sparsity, which we refer to as Robust Group Subspace Recovery (RoGSuRe). Relying on a bi-sparsity pursuit paradigm and non-smooth optimization techniques, the introduced framework learns a new joint representation of the time series from different data modalities, respecting an underlying UoS model. We subsequently integrate the obtained structures to form a unified subspace structure. The proposed approach exploits the structural dependencies between the data of the different modalities to cluster the associated target objects. The resulting fusion of unlabeled sensor data from experiments on audio and magnetic data shows that our method is competitive with other state-of-the-art subspace clustering methods. 
The resulting UoS structure is employed to classify newly observed data points, highlighting the abstraction capacity of the proposed method.}, number={20}, journal={IEEE SENSORS JOURNAL}, author={Ghanem, Sally and Panahi, Ashkan and Krim, Hamid and Kerekes, Ryan A.}, year={2020}, pages={12307–12316} } @article{tang_panahi_krim_dai_2019, title={Analysis Dictionary Learning Based Classification: Structure for Robustness}, volume={28}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2019.2919409}, abstractNote={A discriminative structured analysis dictionary is proposed for the classification task. A structure of the union of subspaces (UoS) is integrated into the conventional analysis dictionary learning to enhance the capability of discrimination. A simple classifier is also simultaneously included in the formulated objective function to ensure a more complete and consistent classification. The solution of the algorithm is efficiently obtained by the linearized alternating direction method of multipliers. Moreover, a distributed structured analysis dictionary learning scheme is also presented to address large-scale datasets. It can train the structured analysis dictionaries group- (class-) independently on different machines/cores/threads, thereby avoiding a high computational cost. A consensus structured analysis dictionary and a global classifier are jointly learned in the distributed approach to safeguard the discriminative power and the efficiency of classification. Experiments demonstrate that our method achieves performance comparable to or better than state-of-the-art algorithms in a variety of visual classification tasks. In addition, the training and testing computational complexities are also greatly reduced.}, number={12}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Tang, Wen and Panahi, Ashkan and Krim, Hamid and Dai, Liyi}, year={2019}, month={Dec}, pages={6035–6046} } @article{huang_panahi_krim_dai_2020, title={Community Detection and Improved Detectability in Multiplex Networks}, volume={7}, ISSN={["2327-4697"]}, DOI={10.1109/TNSE.2019.2949036}, abstractNote={We investigate the widely encountered problem of detecting communities in multiplex networks, such as social networks, with an unknown arbitrary heterogeneous structure. To improve detectability, we propose a generative model that leverages the multiplicity of a single community in multiple layers, with no prior assumption on the relation of communities among different layers. Our model relies on a novel idea of incorporating a large set of generic localized community label constraints across the layers, in conjunction with the celebrated Stochastic Block Model (SBM) in each layer. Accordingly, we build a probabilistic graphical model over the entire multiplex network by treating the constraints as Bayesian priors. We mathematically prove that these constraints/priors promote the existence of identical communities across layers without introducing further correlation between individual communities. The constraints are further tailored to render a sparse graphical model, and the numerically efficient Belief Propagation algorithm is subsequently employed. We further demonstrate by numerical experiments that, in the presence of consistent communities across different layers, these communities are matched and the detectability is improved over a single layer. We compare our model with a “correlated model” which exploits the prior knowledge of community correlation between layers. 
A similar detectability improvement is obtained under such a correlation, even though our model relies on much milder assumptions than the correlated model. Our model even shows better detection performance over a certain correlation and signal-to-noise ratio (SNR) range. In the absence of community correlation, the correlated model naturally fails, while ours maintains its performance.}, number={3}, journal={IEEE TRANSACTIONS ON NETWORK SCIENCE AND ENGINEERING}, author={Huang, Yuming and Panahi, Ashkan and Krim, Hamid and Dai, Liyi}, year={2020}, pages={1697–1709} } @article{mahdizadehaghdam_panahi_krim_dai_2019, title={Deep Dictionary Learning: A PARametric NETwork Approach}, volume={28}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2019.2914376}, abstractNote={Deep dictionary learning seeks multiple dictionaries at different image scales to capture complementary coherent characteristics. We propose a method for learning a hierarchy of synthesis dictionaries with an image classification goal. The dictionaries and classification parameters are trained by a classification objective, and the sparse features are extracted by reducing a reconstruction loss in each layer. The reconstruction objectives in some sense regularize the classification problem and inject source signal information into the extracted features. The performance of the proposed hierarchical method increases by adding more layers, which consequently makes this model easier to tune and adapt. The proposed algorithm furthermore shows a remarkably lower fooling rate in the presence of adversarial perturbation. The proposed approach is validated by its classification performance on four benchmark datasets and is compared to a Convolutional Neural Network (CNN) of similar size.}, number={10}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Mahdizadehaghdam, Shahin and Panahi, Ashkan and Krim, Hamid and Dai, Liyi}, year={2019}, month={Oct}, pages={4790–4802} } @article{mahdizadehaghdam_panahi_krim_2019, title={Sparse Generative Adversarial Network}, ISSN={["2473-9936"]}, DOI={10.1109/ICCVW.2019.00369}, abstractNote={We propose a new approach to Generative Adversarial Networks (GANs) to achieve improved performance with additional robustness to the well-recognized mode-collapse problem. We first proceed by mapping the desired data onto a frame-based space for a sparse representation to lift any limitation of small-support features prior to learning the structure. To that end, we start by dividing an image into multiple patches and modifying the role of the generative network from producing an entire image, at once, to creating a sparse representation vector for each image patch. We synthesize an entire image by multiplying the generated sparse representations by a pre-trained dictionary and assembling the resulting patches. This approach restricts the output of the generator to a particular structure, obtained by imposing a Union of Subspaces (UoS) model on the original training data, leading to more realistic images while maintaining a desired diversity. To further regularize GANs in generating high-quality images and to avoid the notorious mode-collapse problem, we introduce a third player in GANs, called the reconstructor. This player utilizes an auto-encoding scheme to ensure that, first, the input-output relation in the generator is injective and, second, that each real image corresponds to some input noise. 
We present a number of experiments in which the proposed algorithm shows a remarkably higher Inception Score compared to equivalent conventional GANs.}, journal={2019 IEEE/CVF INTERNATIONAL CONFERENCE ON COMPUTER VISION WORKSHOPS (ICCVW)}, author={Mahdizadehaghdam, Shahin and Panahi, Ashkan and Krim, Hamid}, year={2019}, pages={3063–3071} } @article{panahi_bian_krim_dai_2018, title={Robust Subspace Clustering by Bi-sparsity Pursuit: Guarantees and Sequential Algorithm}, ISSN={["2472-6737"]}, DOI={10.1109/wacv.2018.00147}, abstractNote={We consider subspace clustering under sparse noise, for which a non-convex optimization framework based on sparse data representations has been recently developed. This setup is suitable for a large variety of applications with high-dimensional data, such as image processing, where the data naturally decompose into a sparse unstructured foreground and a background residing in a union of low-dimensional subspaces. In this framework, we further discuss both the performance and the implementation of the key optimization problem. We provide an analysis of this optimization problem demonstrating that our approach is capable of recovering linear subspaces as a locally optimal solution for sufficiently large data sets and sparse noise vectors. We also propose a sequential algorithmic solution, which is particularly useful for extremely large data sets and online vision applications such as video processing.}, journal={2018 IEEE WINTER CONFERENCE ON APPLICATIONS OF COMPUTER VISION (WACV 2018)}, author={Panahi, Ashkan and Bian, Xiao and Krim, Hamid and Dai, Liyi}, year={2018}, pages={1302–1311} } @article{panahi_viberg_2017, title={Performance Analysis of Sparsity-Based Parameter Estimation}, volume={65}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2017.2755602}, abstractNote={Since the advent of the $\ell_1$-regularized least squares method (LASSO), a new line of research has emerged, geared toward the application of the LASSO to parameter estimation problems. Recent years have witnessed considerable progress in this area. The notorious difficulty with discretization has been settled in the recent literature, and an entirely continuous estimation method is now available. However, an adequate analysis of this approach is lacking in the current literature. This paper provides a novel analysis of the LASSO as an estimator of continuous parameters. This analysis differs from previous ones in that our parameters of interest are associated with the support of the LASSO solution. In other words, our analysis characterizes the error in the parameterization of the support. We provide a novel framework for our analysis by studying nearly ideal sparse solutions. In this framework, we quantify the error in the high signal-to-noise ratio regime. As the result depends on the choice of the regularization parameter, our analysis also provides new insight into its selection. Without loss of generality, the results are expressed in the context of the direction-of-arrival estimation problem.}, number={24}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Panahi, Ashkan and Viberg, Mats}, year={2017}, month={Dec}, pages={6478–6488} }