% Bibliography database: publications by Wen Tang and collaborators (2016-2022).
% Cleaned from an auto-export: ISSN bracket/quote artifacts removed, en-dashes in
% page ranges replaced by --, month macros used, author typo fixed, and the
% ICASSP 2021 paper retyped from @article to @inproceedings.
@article{tang_chouzenoux_pesquet_krim_2022,
  title        = {Deep transform and metric learning network: Wedding deep dictionary learning and neural network},
  volume       = {509},
  issn         = {1872-8286},
  doi          = {10.1016/j.neucom.2022.08.069},
  abstractNote = {On account of its many successes in inference tasks and imaging applications, Dictionary Learning (DL) and its related sparse optimization problems have garnered a lot of research interest. In DL area, most solutions are focused on single-layer dictionaries, whose reliance on handcrafted features achieves a somewhat limited performance. With the rapid development of deep learning, improved DL methods called Deep DL (DDL), have been recently proposed an end-to-end flexible inference solution with a much higher performance. The proposed DDL techniques have, however, also fallen short on a number of issues, namely, computational cost and the difficulties in gradient updating and initialization. While a few differential programming solutions have been proposed to speed-up the single-layer DL, none of them could ensure an efficient, scalable, and robust solution for DDL methods. To that end, we propose herein, a novel differentiable programming approach, which yields an efficient, competitive and reliable DDL solution. The novel DDL method jointly learns deep transforms and deep metrics, where each DL layer is theoretically reformulated as a combination of one linear layer and a Recurrent Neural Network (RNN). The RNN is also shown to flexibly account for the layer-associated approximation together with a learnable metric. Additionally, our proposed work unveils new insights into Neural Network (NN) and DDL, bridging the combinations of linear and RNN layers with DDL methods. Extensive experiments on image classification problems are carried out to demonstrate that the proposed method can not only outperform existing DDL several counts including, efficiency, scaling and discrimination, but also achieve better accuracy and increased robustness against adversarial perturbations than CNNs.},
  journal      = {Neurocomputing},
  author       = {Tang, Wen and Chouzenoux, Emilie and Pesquet, Jean-Christophe and Krim, Hamid},
  year         = {2022},
  month        = oct,
  pages        = {244--256}
}

@article{tang_chakeri_krim_2022,
  title        = {Discovering urban functional zones from biased and sparse points of interests and sparse human activities},
  volume       = {207},
  issn         = {1873-6793},
  doi          = {10.1016/j.eswa.2022.118062},
  abstractNote = {With rapid development of socio-economics, the task of discovering functional zones becomes critical to better understand the interactions between social activities and spatial locations. In this paper, we propose a framework to discover the real functional zones from the biased and extremely sparse Point of Interests (POIs). To cope with the bias and sparsity of POIs, the unbiased inner influences between spatial locations and human activities are introduced to learn a balanced and dense latent region representation. In addition, a spatial location based clustering method is also included to enrich the spatial information for latent region representation and enhance the region functionality consistency for the fine-grained region segmentation. Moreover, to properly annotate the various and fine-grained region functionalities, we estimate the functionality of the regions and rank them by the differences between the normalized POI distributions to reduce the inconsistency caused by the fine-grained segmentation. Thus, our whole framework is able to properly address the biased categories in sparse POI data and explore the true functional zones with a fine-grained level. To validate the proposed framework, a case study is evaluated by using very large real-world users GPS and POIs data from city of Raleigh. The results demonstrate that the proposed framework can better identify functional zones than the benchmarks, and, therefore, enhance understanding of urban structures with a finer granularity under practical conditions.},
  journal      = {Expert Systems with Applications},
  author       = {Tang, Wen and Chakeri, Alireza and Krim, Hamid},
  year         = {2022},
  month        = nov
}

% Retyped from @article: this is an ICASSP conference paper, so the proceedings
% title belongs in booktitle, not journal. Title converted from ALL CAPS.
@inproceedings{tang_chouzenoux_pesquet_krim_2021,
  title        = {Deep Transform and Metric Learning Networks},
  doi          = {10.1109/ICASSP39728.2021.9414990},
  abstractNote = {Based on its great successes in inference and denosing tasks, Dictionary Learning (DL) and its related sparse optimization formulations have garnered a lot of research interest. While most solutions have focused on single layer dictionaries, the recently improved Deep DL methods have also fallen short on a number of issues. We hence propose a novel Deep DL approach where each DL layer can be formulated and solved as a combination of one linear layer and a Recurrent Neural Network, where the RNN is flexibly regraded as a layer-associated learned metric. Our proposed work unveils new insights between the Neural Networks and Deep DL, and provides a novel, efficient and competitive approach to jointly learn the deep transforms and metrics. Extensive experiments are carried out to demonstrate that the proposed method can not only outperform existing Deep DL, but also state-of-the-art generic Convolutional Neural Networks.},
  booktitle    = {2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  author       = {Tang, Wen and Chouzenoux, Emilie and Pesquet, Jean-Christophe and Krim, Hamid},
  year         = {2021},
  pages        = {2735--2739}
}

@article{chen_tang_2020,
  title        = {Analysis of Network Effect in the Competition of Self-Publishing Market},
  volume       = {15},
  issn         = {0718-1876},
  doi          = {10.4067/S0718-18762020000300105},
  abstractNote = {Self-publishing has become a popular e-commerce model. In the current study, we build a duopoly model to examine the impact of the network effect on competition in this emerging market. We investigate four cases depending on two factors: market size and the self-publishing platforms' compatibility strategy. Our results show that self-publishing writers receive higher royalties compared with the scenario of no network effect except in the case of both platforms choosing incompatibility strategy in the standard market. We also find that self-publishing platforms do not always benefit from the network effect. In the standard market, the platforms will be better off only when they choose incompatibility strategy and the network effect intensity is greater than a certain threshold. In the expanded market, our computational analysis shows that the revenue of the less-known self-publishing platform increases, but the revenue of the leading platform decreases when both platforms choose compatibility strategy. Our findings also show that both self-publishing platforms prefer incompatibility strategy under a strong network effect in the standard market. Otherwise, they prefer compatibility strategy under a weak network effect. In the expanded market, self-publishing platforms have an incentive to choose incompatibility strategy.},
  number       = {3},
  journal      = {Journal of Theoretical and Applied Electronic Commerce Research},
  author       = {Chen, Li and Tang, Wen},
  year         = {2020},
  month        = sep,
  pages        = {50--68}
}

% Author typo fixed: "Krim, Hainid" -> "Krim, Hamid" (consistent with all other entries).
@article{tang_panahi_krim_dai_2019,
  title        = {Analysis Dictionary Learning Based Classification: Structure for Robustness},
  volume       = {28},
  issn         = {1941-0042},
  doi          = {10.1109/TIP.2019.2919409},
  abstractNote = {A discriminative structured analysis dictionary is proposed for the classification task. A structure of the union of subspaces (UoS) is integrated into the conventional analysis dictionary learning to enhance the capability of discrimination. A simple classifier is also simultaneously included into the formulated function to ensure a more complete consistent classification. The solution of the algorithm is efficiently obtained by the linearized alternating direction method of multipliers. Moreover, a distributed structured analysis dictionary learning is also presented to address large-scale datasets. It can group-(class-) independently train the structured analysis dictionaries by different machines/cores/threads, and therefore avoid a high computational cost. A consensus structured analysis dictionary and a global classifier are jointly learned in the distributed approach to safeguard the discriminative power and the efficiency of classification. Experiments demonstrate that our method achieves a comparable or better performance than the state-of-the-art algorithms in a variety of visual classification tasks. In addition, the training and testing computational complexity are also greatly reduced.},
  number       = {12},
  journal      = {IEEE Transactions on Image Processing},
  author       = {Tang, Wen and Panahi, Ashkan and Krim, Hamid and Dai, Liyi},
  year         = {2019},
  month        = dec,
  pages        = {6035--6046}
}

@inproceedings{guan_tang_krim_keiser_rindos_sazdanovic_2016,
  title        = {A topological collapse for document summarization},
  volume       = {2016-August},
  url          = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84984653594&partnerID=MN8TOARS},
  doi          = {10.1109/spawc.2016.7536867},
  abstractNote = {As a useful tool to summarize documents, keyphrase extraction extracts a set of single or multiple words, called keyphrases, that capture the primary topics discussed in a document. In this paper we propose DoCollapse, a topological collapse-based unsupervised keyphrase extraction method that relies on networking document by semantic relatedness of candidate keyphrases. A semantic graph is built with candidates keyphrases as vertices and then reduced to its core using topological collapse algorithm to facilitate final keyphrase selection. Iteratively collapsing dominated vertices aids in removing noisy candidates and revealing important points. We conducted experiments on two standard evaluation datasets composed of scientific papers and found that DoCollapse outperforms state-of-the-art methods. Results show that simplifying a document graph by homology-preserving topological collapse benefits keyphrase extraction.},
  booktitle    = {2016 IEEE 17th International Workshop on Signal Processing Advances in Wireless Communications (SPAWC)},
  publisher    = {IEEE},
  author       = {Guan, Hui and Tang, Wen and Krim, Hamid and Keiser, James and Rindos, Andrew and Sazdanovic, Radmila},
  year         = {2016},
  month        = jul
}

@inproceedings{tang_otero_krim_dai_2016,
  title        = {Analysis dictionary learning for scene classification},
  doi          = {10.1109/ssp.2016.7551849},
  abstractNote = {This paper proposes a new framework for scene classification based on an analysis dictionary learning approach. Despite their tremendous success in various image processing tasks, synthesis-based and analysis-based sparse models fall short in classification tasks. It was hypothesized that this is partly due to the linear dependence of the dictionary atoms. In this work, we aim at improving classification performances by compensating for such dependence. The proposed methodology consists in grouping the atoms of the dictionary using clustering methods. This allows to sparsely model images from various scene classes and use such a model for classification. Experimental evidence shows the benefit of such an approach. Finally, we propose a supervised way to train the baseline representation for each class-specific dictionary, and achieve multiple classification by finding the minimum distance between the learned baseline representation and the data's sub-dictionary representation. Experiments seem to indicate that such approach achieves scene-classification performances that are comparable to the state of the art.},
  booktitle    = {2016 IEEE Statistical Signal Processing Workshop (SSP)},
  author       = {Tang, W. and Otero, I. R. and Krim, H. and Dai, L. Y.},
  year         = {2016}
}