@article{huang_panahi_krim_dai_2020,
  title={Community Detection and Improved Detectability in Multiplex Networks},
  volume={7},
  ISSN={2327-4697},
  DOI={10.1109/TNSE.2019.2949036},
  abstractNote={We investigate the widely encountered problem of detecting communities in multiplex networks, such as social networks, with an unknown arbitrary heterogeneous structure. To improve detectability, we propose a generative model that leverages the multiplicity of a single community in multiple layers, with no prior assumption on the relation of communities among different layers. Our model relies on a novel idea of incorporating a large set of generic localized community label constraints across the layers, in conjunction with the celebrated Stochastic Block Model (SBM) in each layer. Accordingly, we build a probabilistic graphical model over the entire multiplex network by treating the constraints as Bayesian priors. We mathematically prove that these constraints/priors promote the existence of identical communities across layers without introducing further correlation between individual communities. The constraints are further tailored to render a sparse graphical model, and the numerically efficient Belief Propagation algorithm is subsequently employed. We further demonstrate by numerical experiments that in the presence of consistent communities between different layers, consistent communities are matched, and the detectability is improved over a single layer. We compare our model with a “correlated model” which exploits the prior knowledge of community correlation between layers. Similar detectability improvement is obtained under such a correlation, even though our model relies on much milder assumptions than the correlated model. Our model even shows better detection performance over a certain correlation and signal-to-noise ratio (SNR) range. In the absence of community correlation, the correlated model naturally fails, while ours maintains its performance.},
  number={3},
  journal={IEEE TRANSACTIONS ON NETWORK SCIENCE AND ENGINEERING},
  author={Huang, Yuming and Panahi, Ashkan and Krim, Hamid and Dai, Liyi},
  year={2020},
  pages={1697–1709}
}

@article{tang_panahi_krim_dai_2019,
  title={Analysis Dictionary Learning Based Classification: Structure for Robustness},
  volume={28},
  ISSN={1941-0042},
  DOI={10.1109/TIP.2019.2919409},
  abstractNote={A discriminative structured analysis dictionary is proposed for the classification task. A structure of the union of subspaces (UoS) is integrated into the conventional analysis dictionary learning to enhance the capability of discrimination. A simple classifier is also simultaneously included into the formulated function to ensure a more complete consistent classification. The solution of the algorithm is efficiently obtained by the linearized alternating direction method of multipliers. Moreover, a distributed structured analysis dictionary learning is also presented to address large-scale datasets. It can group-(class-) independently train the structured analysis dictionaries by different machines/cores/threads, and therefore avoid a high computational cost. A consensus structured analysis dictionary and a global classifier are jointly learned in the distributed approach to safeguard the discriminative power and the efficiency of classification. Experiments demonstrate that our method achieves a comparable or better performance than the state-of-the-art algorithms in a variety of visual classification tasks. In addition, the training and testing computational complexities are also greatly reduced.},
  number={12},
  journal={IEEE TRANSACTIONS ON IMAGE PROCESSING},
  author={Tang, Wen and Panahi, Ashkan and Krim, Hamid and Dai, Liyi},
  year={2019},
  month={Dec},
  pages={6035–6046}
}

@article{mahdizadehaghdam_panahi_krim_dai_2019,
  title={Deep Dictionary Learning: A PARametric NETwork Approach},
  volume={28},
  ISSN={1941-0042},
  DOI={10.1109/TIP.2019.2914376},
  abstractNote={Deep dictionary learning seeks multiple dictionaries at different image scales to capture complementary coherent characteristics. We propose a method for learning a hierarchy of synthesis dictionaries with an image classification goal. The dictionaries and classification parameters are trained by a classification objective, and the sparse features are extracted by reducing a reconstruction loss in each layer. The reconstruction objectives in some sense regularize the classification problem and inject source signal information into the extracted features. The performance of the proposed hierarchical method increases by adding more layers, which consequently makes this model easier to tune and adapt. The proposed algorithm furthermore shows a remarkably lower fooling rate in the presence of adversarial perturbation. The validation of the proposed approach is based on its classification performance using four benchmark datasets and is compared to a Convolutional Neural Network (CNN) of similar size.},
  number={10},
  journal={IEEE TRANSACTIONS ON IMAGE PROCESSING},
  author={Mahdizadehaghdam, Shahin and Panahi, Ashkan and Krim, Hamid and Dai, Liyi},
  year={2019},
  month={Oct},
  pages={4790–4802}
}

@inproceedings{skau_wohlberg_krim_dai_2016,
  title={Pansharpening via coupled triple factorization dictionary learning},
  DOI={10.1109/icassp.2016.7471873},
  abstractNote={Data fusion is the operation of integrating data from different modalities to construct a single consistent representation. This paper proposes variations of coupled dictionary learning through an additional factorization. One variation of this model is applicable to the pansharpening data fusion problem. Real-world pansharpening data were used to train and test our proposed formulation. The results demonstrate that the data fusion model can successfully be applied to the pansharpening problem.},
  booktitle={International Conference on Acoustics, Speech and Signal Processing},
  author={Skau, E. and Wohlberg, B. and Krim, H. and Dai, L. Y.},
  year={2016},
  pages={1234–1237}
}