@article{tang_chouzenoux_pesquet_krim_2022, title={Deep transform and metric learning network: Wedding deep dictionary learning and neural network}, volume={509}, ISSN={["1872-8286"]}, DOI={10.1016/j.neucom.2022.08.069}, abstractNote={• Reformulate Dictionary Learning (DL) as Transform and Metric learning. • The above reformulation is implemented as a tandem of a linear layer and an RNN. • This is the first work which bridges deep DL and the combination of linear and RNNs. • The proposed method demonstrates efficiency, scalability and discrimination power. • The combinations of CNN architectures achieve better accuracy and stronger robustness. On account of its many successes in inference tasks and imaging applications, Dictionary Learning (DL) and its related sparse optimization problems have garnered a lot of research interest. In DL area, most solutions are focused on single-layer dictionaries, whose reliance on handcrafted features achieves a somewhat limited performance. With the rapid development of deep learning, improved DL methods called Deep DL (DDL), have been recently proposed an end-to-end flexible inference solution with a much higher performance. The proposed DDL techniques have, however, also fallen short on a number of issues, namely, computational cost and the difficulties in gradient updating and initialization. While a few differential programming solutions have been proposed to speed-up the single-layer DL, none of them could ensure an efficient, scalable, and robust solution for DDL methods. To that end, we propose herein, a novel differentiable programming approach, which yields an efficient, competitive and reliable DDL solution. The novel DDL method jointly learns deep transforms and deep metrics, where each DL layer is theoretically reformulated as a combination of one linear layer and a Recurrent Neural Network (RNN). The RNN is also shown to flexibly account for the layer-associated approximation together with a learnable metric. 
Additionally, our proposed work unveils new insights into Neural Network (NN) and DDL, bridging the combinations of linear and RNN layers with DDL methods. Extensive experiments on image classification problems are carried out to demonstrate that the proposed method can not only outperform existing DDL several counts including, efficiency, scaling and discrimination, but also achieve better accuracy and increased robustness against adversarial perturbations than CNNs.}, journal={NEUROCOMPUTING}, author={Tang, Wen and Chouzenoux, Emilie and Pesquet, Jean-Christophe and Krim, Hamid}, year={2022}, month={Oct}, pages={244–256} }
@article{tang_chakeri_krim_2022, title={Discovering urban functional zones from biased and sparse points of interests and sparse human activities}, volume={207}, ISSN={["1873-6793"]}, DOI={10.1016/j.eswa.2022.118062}, abstractNote={With rapid development of socio-economics, the task of discovering functional zones becomes critical to better understand the interactions between social activities and spatial locations. In this paper, we propose a framework to discover the real functional zones from the biased and extremely sparse Point of Interests (POIs). To cope with the bias and sparsity of POIs, the unbiased inner influences between spatial locations and human activities are introduced to learn a balanced and dense latent region representation. In addition, a spatial location based clustering method is also included to enrich the spatial information for latent region representation and enhance the region functionality consistency for the fine-grained region segmentation. Moreover, to properly annotate the various and fine-grained region functionalities, we estimate the functionality of the regions and rank them by the differences between the normalized POI distributions to reduce the inconsistency caused by the fine-grained segmentation. Thus, our whole framework is able to properly address the biased categories in sparse POI data and explore the true functional zones with a fine-grained level. To validate the proposed framework, a case study is evaluated by using very large real-world users GPS and POIs data from city of Raleigh. The results demonstrate that the proposed framework can better identify functional zones than the benchmarks, and, therefore, enhance understanding of urban structures with a finer granularity under practical conditions.}, journal={EXPERT SYSTEMS WITH APPLICATIONS}, author={Tang, Wen and Chakeri, Alireza and Krim, Hamid}, year={2022}, month={Nov} }
@inproceedings{tran_sakla_krim_2022, title={{SAR} Self-Enhanced by Electro-optical Network ({SARSEEN})}, volume={12122}, ISBN={978-1-5106-5120-3}, ISSN={1996-756X}, DOI={10.1117/12.2618829}, abstractNote={We investigate the relationship between paired SAR and optical images. SAR sensors have the capabilities of penetrating clouds and capturing data at night, whereas optical sensors cannot. We are interested in the case where we have access to both modalities during training, but only the SAR during test time. To that end, we developed a framework that inputs a SAR image and predicts a Canny edge map of the optical image, which retains structural information, while removing superfluous details. Our experiments show that by additionally using this predicted edge map for downstream tasks, we can outperform the same model that only uses the SAR image.}, booktitle={SIGNAL PROCESSING, SENSOR/INFORMATION FUSION, AND TARGET RECOGNITION XXXI}, author={Tran, Kenneth and Sakla, Wesam and Krim, Hamid}, year={2022} }
@article{guan_shen_krim_2021, title={An Automatic Synthesizer of Advising Tools for High Performance Computing}, volume={32}, ISSN={1558-2183}, DOI={10.1109/TPDS.2020.3018636}, abstractNote={This article presents Egeria, the first automatic synthesizer of advising tools for High-Performance Computing (HPC). When one provides it with some HPC programming guides as inputs, Egeria automatically constructs a text retrieval tool that can advise on what to do to improve the performance of a given program. The advising tool provides a concise list of essential rules automatically extracted from the documents and can retrieve relevant optimization knowledge for optimization questions. Egeria is built based on a distinctive multi-layered design that leverages natural language processing (NLP) techniques and extends them with HPC-specific knowledge and considerations. This article presents the design, implementation, and both quantitative and qualitative evaluation results of Egeria.}, number={2}, journal={IEEE TRANSACTIONS ON PARALLEL AND DISTRIBUTED SYSTEMS}, author={Guan, Hui and Shen, Xipeng and Krim, Hamid}, year={2021}, month=feb, pages={330--341} }
@article{asthana_krim_sun_roheda_xie_2021, title={Atlantic Hurricane Activity Prediction: A Machine Learning Approach}, volume={12}, ISSN={["2073-4433"]}, url={https://doi.org/10.3390/atmos12040455}, DOI={10.3390/atmos12040455}, abstractNote={Long-term hurricane predictions have been of acute interest in order to protect the community from the loss of lives, and environmental damage. Such predictions help by providing an early warning guidance for any proper precaution and planning. In this paper, we present a machine learning model capable of making good preseason-prediction of Atlantic hurricane activity. The development of this model entails a judicious and non-linear fusion of various data modalities such as sea-level pressure (SLP), sea surface temperature (SST), and wind. A Convolutional Neural Network (CNN) was utilized as a feature extractor for each data modality. This is followed by a feature level fusion to achieve a proper inference. This highly non-linear model was further shown to have the potential to make skillful predictions up to 18 months in advance.}, number={4}, journal={ATMOSPHERE}, publisher={MDPI AG}, author={Asthana, Tanmay and Krim, Hamid and Sun, Xia and Roheda, Siddharth and Xie, Lian}, year={2021}, month={Apr} }
@inproceedings{tang_chouzenoux_pesquet_krim_2021, title={Deep Transform and Metric Learning Networks}, DOI={10.1109/ICASSP39728.2021.9414990}, abstractNote={Based on its great successes in inference and denoising tasks, Dictionary Learning (DL) and its related sparse optimization formulations have garnered a lot of research interest. While most solutions have focused on single layer dictionaries, the recently improved Deep DL methods have also fallen short on a number of issues. We hence propose a novel Deep DL approach where each DL layer can be formulated and solved as a combination of one linear layer and a Recurrent Neural Network, where the RNN is flexibly regarded as a layer-associated learned metric. Our proposed work unveils new insights between the Neural Networks and Deep DL, and provides a novel, efficient and competitive approach to jointly learn the deep transforms and metrics. Extensive experiments are carried out to demonstrate that the proposed method can not only outperform existing Deep DL, but also state-of-the-art generic Convolutional Neural Networks.}, booktitle={2021 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING (ICASSP 2021)}, author={Tang, Wen and Chouzenoux, Emilie and Pesquet, Jean-Christophe and Krim, Hamid}, year={2021}, pages={2735--2739} }
@article{jiang_yu_krim_smith_2021, title={DYNAMIC GRAPH LEARNING BASED ON GRAPH LAPLACIAN}, DOI={10.1109/ICASSP39728.2021.9413744}, abstractNote={The purpose of this paper is to infer a global (collective) model of time-varying responses of a set of nodes as a dynamic graph, where the individual time series are respectively observed at each of the nodes. The motivation of this work lies in the search for a connectome model which properly captures brain functionality upon observing activities in different regions of the brain and possibly of individual neurons. We formulate the problem as a quadratic objective functional of observed node signals over short time intervals, subjected to the proper regularization reflecting the graph smoothness and other dynamics involving the underlying graph’s Laplacian, as well as the time evolution smoothness of the underlying graph. The resulting joint optimization is solved by a continuous relaxation and an introduced novel gradient-projection scheme. We apply our algorithm to a real-world dataset comprising recorded activities of individual brain cells. The resulting model is shown to not only be viable but also efficiently computable.}, journal={2021 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING (ICASSP 2021)}, author={Jiang, Bo and Yu, Yiyi and Krim, Hamid and Smith, Spencer L.}, year={2021}, pages={1090–1094} }
@article{jiang_huang_panahi_yu_krim_smith_2021, title={Dynamic Graph Learning: A Structure-Driven Approach}, volume={9}, ISSN={["2227-7390"]}, DOI={10.3390/math9020168}, abstractNote={The purpose of this paper is to infer a dynamic graph as a global (collective) model of time-varying measurements at a set of network nodes. This model captures both pairwise as well as higher order interactions (i.e., more than two nodes) among the nodes. The motivation of this work lies in the search for a connectome model which properly captures brain functionality across all regions of the brain, and possibly at individual neurons. We formulate it as an optimization problem, a quadratic objective functional and tensor information of observed node signals over short time intervals. The proper regularization constraints reflect the graph smoothness and other dynamics involving the underlying graph’s Laplacian, as well as the time evolution smoothness of the underlying graph. The resulting joint optimization is solved by a continuous relaxation of the weight parameters and an introduced novel gradient-projection scheme. While the work may be applicable to any time-evolving data set (e.g., fMRI), we apply our algorithm to a real-world dataset comprising recorded activities of individual brain cells. The resulting model is shown to be not only viable but also efficiently computable.}, number={2}, journal={MATHEMATICS}, author={Jiang, Bo and Huang, Yuming and Panahi, Ashkan and Yu, Yiyi and Krim, Hamid and Smith, Spencer L.}, year={2021}, month={Jan} }
@article{roheda_krim_luo_wu_2021, title={Event driven sensor fusion}, volume={188}, ISSN={["1872-7557"]}, DOI={10.1016/j.sigpro.2021.108241}, abstractNote={• Targets of interest can be defined as combination of feature (velocity, weight) events. • Fusion of information from multiple sources leads to improved classification. • Important to explore extent of correlation between sensors when performing fusion. • A common subspace exists between various modalities observing the same target. • Such a common subspace can safeguard performance against sensor damage. Multi sensor fusion has long been of interest in target detection and tracking. Different sensors are capable of observing different characteristics about a target, hence, providing additional information toward determining a target’s identity. If used constructively, any additional information should have a positive impact on the performance of the system. In this paper, we consider such a scenario and present a principled approach toward ensuring constructive combination of the various sensors. We look at Decision Level Sensor Fusion under a different light wherein each sensor is said to make a decision on occurrence of certain events that it is capable of observing rather than making a decision on whether a certain target is present. These events are formalized to each sensor according to its potentially extracted attributes to define targets. The proposed technique also explores the extent of dependence between features/events being observed by the sensors, and hence generates more informed probability distributions over the events. In our case, we will study two different datasets. The first one, combines a Radar sensor with an optical sensor for detection of space debris, while the second one combines a seismic sensor with an acoustic sensor in order to detect human and vehicular targets in a field of interest. 
Provided some additional information about the features of the object, this fusion technique can outperform other existing decision level fusion approaches that may not take into account the relationship between different features. Furthermore, this paper also addresses the issue of coping with damaged sensors when using the model, by learning a hidden space between sensor modalities which can be exploited to safeguard detection performance.}, journal={SIGNAL PROCESSING}, author={Roheda, Siddharth and Krim, Hamid and Luo, Zhi-Quan and Wu, Tianfu}, year={2021}, month={Nov} }
@inproceedings{tran_sakla_krim_2021, title={Generative Information Fusion}, DOI={10.1109/ICASSP39728.2021.9414284}, abstractNote={In this work, we demonstrate the ability to exploit sensing modalities for mitigating an unrepresented modality or for potentially re-targeting resources. This is tantamount to developing proxy sensing capabilities for multi-modal learning. In classical fusion, multiple sensors are required to capture different information about the same target. Maintaining and collecting samples from multiple sensors can be financially demanding. Additionally, the effort necessary to ensure a logical mapping between the modalities may be prohibitively limiting. We examine the scenario where we have access to all modalities during training, but only a single modality at testing. In our approach, we initialize the parameters of our single modality inference network with weights learned from the fusion of multiple modalities through both classification and GANs losses. Our experiments show that emulating a multi-modal system by perturbing a single modality with noise can help us achieve competitive results compared to using multiple modalities.}, booktitle={2021 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH AND SIGNAL PROCESSING (ICASSP 2021)}, author={Tran, Kenneth and Sakla, Wesam and Krim, Hamid}, year={2021}, pages={3990--3994} }
@article{jaffard_krim_2021, title={Regularity properties of {Haar} Frames}, volume={359}, ISSN={1778-3569}, DOI={10.5802/crmath.228}, abstractNote={We prove that pointwise and global Hölder regularity can be characterized using the coefficients on the Haar tight frame obtained by using a finite union of shifted Haar bases, despite the fact that the elements composing the frame are discontinuous.}, number={9}, journal={COMPTES RENDUS MATHEMATIQUE}, author={Jaffard, Stephane and Krim, Hamid}, year={2021}, pages={1107--1117} }
@article{zhang_guan_ding_shen_krim_2021, title={Reuse-centric k-means configuration}, volume={100}, ISSN={["1873-6076"]}, url={https://doi.org/10.1016/j.is.2021.101787}, DOI={10.1016/j.is.2021.101787}, abstractNote={K-means configuration is to find a configuration of k-means (e.g., the number of clusters, feature sets) that maximize some objectives. It is a time-consuming process due to the iterative nature of k-means. This paper proposes reuse-centric k-means configuration to accelerate k-means configuration. It is based on the observation that the explorations of different configurations share lots of common or similar computations. Effectively reusing the computations from prior trials of different configurations could largely shorten the configuration time. To materialize the idea, the paper presents a set of novel techniques, including reuse-based filtering, center reuse, and a two-phase design to capitalize on the reuse opportunities on three levels: validation, number of clusters, and feature sets. Experiments on k-means–based data classification tasks show that reuse-centric k-means configuration can speed up a heuristic search-based configuration process by a factor of 5.8, and a uniform search-based attainment of classification error surfaces by a factor of 9.1. The paper meanwhile provides some important insights on how to effectively apply the acceleration techniques to tap into a full potential.}, journal={INFORMATION SYSTEMS}, author={Zhang, Lijun and Guan, Hui and Ding, Yufei and Shen, Xipeng and Krim, Hamid}, year={2021}, month={Sep} }
@article{roheda_krim_riggan_2021, title={Robust Multi-Modal Sensor Fusion: An Adversarial Approach}, volume={21}, ISSN={["1558-1748"]}, DOI={10.1109/JSEN.2020.3018698}, abstractNote={In recent years, multi-modal fusion has attracted a lot of research interest, both in academia, and in industry. Multimodal fusion entails the combination of information from a set of different types of sensors. Exploiting complementary information from different sensors, we show that target detection and classification problems can greatly benefit from this fusion approach and result in a performance increase. To achieve this gain, the information fusion from various sensors is shown to require some principled strategy to ensure that additional information is constructively used, and has a positive impact on performance. We subsequently demonstrate the viability of the proposed fusion approach by weakening the strong dependence on the functionality of all sensors, hence introducing additional flexibility in our solution and lifting the severe limitation in unconstrained surveillance settings with potential environmental impact. Our proposed data driven approach to multimodal fusion, exploits selected optimal features from an estimated latent space of data across all modalities. This hidden space is learned via a generative network conditioned on individual sensor modalities. The hidden space, as an intrinsic structure, is then exploited in detecting damaged sensors, and in subsequently safeguarding the performance of the fused sensor system. Experimental results show that such an approach can achieve automatic system robustness against noisy/damaged sensors.}, number={2}, journal={IEEE SENSORS JOURNAL}, author={Roheda, Siddharth and Krim, Hamid and Riggan, Benjamin S.}, year={2021}, month={Jan}, pages={1885–1896} }
@article{huang_panahi_krim_dai_2020, title={Community Detection and Improved Detectability in Multiplex Networks}, volume={7}, ISSN={["2327-4697"]}, DOI={10.1109/TNSE.2019.2949036}, abstractNote={We investigate the widely encountered problem of detecting communities in multiplex networks, such as social networks, with an unknown arbitrary heterogeneous structure. To improve detectability, we propose a generative model that leverages the multiplicity of a single community in multiple layers, with no prior assumption on the relation of communities among different layers. Our model relies on a novel idea of incorporating a large set of generic localized community label constraints across the layers, in conjunction with the celebrated Stochastic Block Model (SBM) in each layer. Accordingly, we build a probabilistic graphical model over the entire multiplex network by treating the constraints as Bayesian priors. We mathematically prove that these constraints/priors promote existence of identical communities across layers without introducing further correlation between individual communities. The constraints are further tailored to render a sparse graphical model and the numerically efficient Belief Propagation algorithm is subsequently employed. We further demonstrate by numerical experiments that in the presence of consistent communities between different layers, consistent communities are matched, and the detectability is improved over a single layer. We compare our model with a "correlated model" which exploits the prior knowledge of community correlation between layers. Similar detectability improvement is obtained under such a correlation, even though our model relies on much milder assumptions than the correlated model. Our model even shows a better detection performance over a certain correlation and signal to noise ratio (SNR) range. 
In the absence of community correlation, the correlation model naturally fails, while ours maintains its performance.}, number={3}, journal={IEEE TRANSACTIONS ON NETWORK SCIENCE AND ENGINEERING}, author={Huang, Yuming and Panahi, Ashkan and Krim, Hamid and Dai, Liyi}, year={2020}, pages={1697–1709} }
@article{ghanem_panahi_krim_kerekes_2020, title={Robust Group Subspace Recovery: A New Approach for Multi-Modality Data Fusion}, volume={20}, ISSN={["1558-1748"]}, DOI={10.1109/JSEN.2020.2999461}, abstractNote={Robust Subspace Recovery (RoSuRe) algorithm was recently introduced as a principled and numerically efficient algorithm that unfolds underlying Unions of Subspaces (UoS) structure, present in the data. The union of Subspaces (UoS) is capable of identifying more complex trends in data sets than simple linear models. We build on and extend RoSuRe to prospect the structure of different data modalities individually. We propose a novel multi-modal data fusion approach based on group sparsity which we refer to as Robust Group Subspace Recovery (RoGSuRe). Relying on a bi-sparsity pursuit paradigm and non-smooth optimization techniques, the introduced framework learns a new joint representation of the time series from different data modalities, respecting an underlying UoS model. We subsequently integrate the obtained structures to form a unified subspace structure. The proposed approach exploits the structural dependencies between the different modalities data to cluster the associated target objects. The resulting fusion of the unlabeled sensors' data from experiments on audio and magnetic data has shown that our method is competitive with other state of the art subspace clustering methods. The resulting UoS structure is employed to classify newly observed data points, highlighting the abstraction capacity of the proposed method.}, number={20}, journal={IEEE SENSORS JOURNAL}, author={Ghanem, Sally and Panahi, Ashkan and Krim, Hamid and Kerekes, Ryan A.}, year={2020}, pages={12307–12316} }
@article{mahdizadehaghdam_panahi_krim_dai_2019, title={Deep Dictionary Learning: A PARametric NETwork Approach}, volume={28}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2019.2914376}, abstractNote={Deep dictionary learning seeks multiple dictionaries at different image scales to capture complementary coherent characteristics. We propose a method for learning a hierarchy of synthesis dictionaries with an image classification goal. The dictionaries and classification parameters are trained by a classification objective, and the sparse features are extracted by reducing a reconstruction loss in each layer. The reconstruction objectives in some sense regularize the classification problem and inject source signal information in the extracted features. The performance of the proposed hierarchical method increases by adding more layers, which consequently makes this model easier to tune and adapt. The proposed algorithm furthermore shows a remarkably lower fooling rate in the presence of adversarial perturbation. The validation of the proposed approach is based on its classification performance using four benchmark datasets and is compared to a Convolutional Neural Network (CNN) of similar size.}, number={10}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Mahdizadehaghdam, Shahin and Panahi, Ashkan and Krim, Hamid and Dai, Liyi}, year={2019}, month={Oct}, pages={4790–4802} }
@article{mahdizadehaghdam_panahi_krim_2019, title={Sparse Generative Adversarial Network}, ISSN={["2473-9936"]}, DOI={10.1109/ICCVW.2019.00369}, abstractNote={We propose a new approach to Generative Adversarial Networks (GANs) to achieve an improved performance with additional robustness to its so-called and well-recognized mode collapse. We first proceed by mapping the desired data onto a frame-based space for a sparse representation to lift any limitation of small support features prior to learning the structure. To that end, we start by dividing an image into multiple patches and modifying the role of the generative network from producing an entire image, at once, to creating a sparse representation vector for each image patch. We synthesize an entire image by multiplying generated sparse representations to a pre-trained dictionary and assembling the resulting patches. This approach restricts the output of the generator to a particular structure, obtained by imposing a Union of Subspaces (UoS) model to the original training data, leading to more realistic images, while maintaining a desired diversity. To further regularize GANs in generating high-quality images and to avoid the notorious mode-collapse problem, we introduce a third player in GANs, called reconstructor. This player utilizes an auto-encoding scheme to ensure that first, the input-output relation in the generator is injective and second each real image corresponds to some input noise. We present a number of experiments, where the proposed algorithm shows a remarkably higher inception score compared to the equivalent conventional GANs.}, journal={2019 IEEE/CVF INTERNATIONAL CONFERENCE ON COMPUTER VISION WORKSHOPS (ICCVW)}, author={Mahdizadehaghdam, Shahin and Panahi, Ashkan and Krim, Hamid}, year={2019}, pages={3063–3071} }
@article{thomaz_jardim_silva_silva_netto_krim_2018, title={Anomaly Detection in Moving-Camera Video Sequences Using Principal Subspace Analysis}, volume={65}, ISSN={["1558-0806"]}, DOI={10.1109/tcsi.2017.2758379}, abstractNote={This paper presents a family of algorithms based on sparse decompositions that detect anomalies in video sequences obtained from slow moving cameras. These algorithms start by computing the union of subspaces that best represents all the frames from a reference (anomaly free) video as a low-rank projection plus a sparse residue. Then, they perform a low-rank representation of a target (possibly anomalous) video by taking advantage of both the union of subspaces and the sparse residue computed from the reference video. Such algorithms provide good detection results while at the same time obviating the need for previous video synchronization. However, this is obtained at the cost of a large computational complexity, which hinders their applicability. Another contribution of this paper approaches this problem by using intrinsic properties of the obtained data representation in order to restrict the search space to the most relevant subspaces, providing computational complexity gains of up to two orders of magnitude. The developed algorithms are shown to cope well with videos acquired in challenging scenarios, as verified by the analysis of 59 videos from the VDAO database that comprises videos with abandoned objects in a cluttered industrial scenario.}, number={3}, journal={IEEE TRANSACTIONS ON CIRCUITS AND SYSTEMS I-REGULAR PAPERS}, author={Thomaz, Lucas A. and Jardim, Eric and Silva, Allan F. and Silva, Eduardo A. B. and Netto, Sergio L. and Krim, Hamid}, year={2018}, month={Mar}, pages={1003–1015} }
@article{bian_panahi_krim_2018, title={Bi-sparsity pursuit: A paradigm for robust subspace recovery}, volume={152}, ISSN={0165-1684}, url={https://doi.org/10.1016/j.sigpro.2018.05.024}, DOI={10.1016/j.sigpro.2018.05.024}, abstractNote={The success of sparse models in computer vision and machine learning is due to the fact that, high dimensional data is distributed in a union of low dimensional subspaces in many real-world applications. The underlying structure may, however, be adversely affected by sparse errors. In this paper, we propose a bi-sparse model as a framework to analyze this problem, and provide a novel algorithm to recover the union of subspaces in the presence of sparse corruptions. We further show the effectiveness of our method by experiments on real-world vision data.}, journal={Signal Processing}, publisher={Elsevier BV}, author={Bian, Xiao and Panahi, Ashkan and Krim, Hamid}, year={2018}, month=nov, pages={148--159} }
@article{wang_skau_krim_cervone_2018, title={Fusing Heterogeneous Data: A Case for Remote Sensing and Social Media}, volume={56}, ISSN={["1558-0644"]}, DOI={10.1109/TGRS.2018.2846199}, abstractNote={Data heterogeneity can pose a great challenge to process and systematically fuse low-level data from different modalities with no recourse to heuristics and manual adjustments and refinements. In this paper, a new methodology is introduced for the fusion of measured data for detecting and predicting weather-driven natural hazards. The proposed research introduces a robust theoretical and algorithmic framework for the fusion of heterogeneous data in near real time. We establish a flexible information-based fusion framework with a target optimality criterion of choice, which for illustration, is specialized to a maximum entropy principle and a least effort principle for semisupervised learning with noisy labels. We develop a methodology to account for multimodality data and a solution for addressing inherent sensor limitations. In our case study of interest, namely, that of flood density estimation, we further show that by fusing remote sensing and social media data, we can develop well founded and actionable flood maps. This capability is valuable in situations where environmental hazards, such as hurricanes or severe weather, affect very large areas. Relative to the state of the art working with such data, our proposed information-theoretic solution is principled and systematic, while offering a joint exploitation of any set of heterogeneous sensor modalities with minimally assuming priors. This flexibility is coupled with the ability to quantitatively and clearly state the fusion principles with very reasonable computational costs. 
The proposed method is tested and substantiated with the multimodality data of a 2013 Boulder Colorado flood event.}, number={12}, journal={IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING}, author={Wang, Han and Skau, Erik and Krim, Hamid and Cervone, Guido}, year={2018}, month={Dec}, pages={6956–6968} }
@article{ghanem_krim_clouse_sakla_2018, title={Metric Driven Classification: A Non-Parametric Approach Based on the Henze-Penrose Test Statistic}, volume={27}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2018.2862352}, abstractNote={Entropy-based divergence measures have proven their effectiveness in many areas of computer vision and pattern recognition. However, the complexity of their implementation might be prohibitive in resource-limited applications, as they require estimates of probability densities which are expensive to compute directly for high-dimensional data. In this paper, we investigate the usage of a non-parametric distribution-free metric, known as the Henze-Penrose test statistic to obtain bounds for the $k$ -nearest neighbors ( $k$ -NN) classification accuracy. Simulation results demonstrate the effectiveness and the reliability of this metric in estimating the inter-class separability. In addition, the proposed bounds on the $k$ -NN classification are exploited for evaluating the efficacy of different pre-processing techniques as well as selecting the least number of features that would achieve the desired classification performance.}, number={12}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Ghanem, Sally and Krim, Hamid and Clouse, Hamilton Scott and Sakla, Wesam}, year={2018}, month={Dec}, pages={5947–5956} }
@inproceedings{guan_ding_shen_krim_2018, title={Reuse-Centric K-Means Configuration}, ISSN={["1084-4627"]}, DOI={10.1109/ICDE.2018.00116}, abstractNote={K-means configuration is a time-consuming process due to the iterative nature of k-means. This paper proposes reuse-centric k-means configuration to accelerate k-means configuration. It is based on the observation that the explorations of different configurations share lots of common or similar computations. Effectively reusing the computations from prior trials of different configurations could largely shorten the configuration time. The paper presents a set of novel techniques to materialize the idea, including reuse-based filtering, center reuse, and a two-phase design to capitalize on the reuse opportunities on three levels: validation, k, and feature sets. Experiments show that our approach can accelerate some common configuration tuning methods by 5-9X.}, booktitle={2018 IEEE 34TH INTERNATIONAL CONFERENCE ON DATA ENGINEERING (ICDE)}, author={Guan, Hui and Ding, Yufei and Shen, Xipeng and Krim, Hamid}, year={2018}, pages={1224–1227} }
@inproceedings{panahi_bian_krim_dai_2018, title={Robust Subspace Clustering by Bi-sparsity Pursuit: Guarantees and Sequential Algorithm}, ISSN={["2472-6737"]}, DOI={10.1109/wacv.2018.00147}, abstractNote={We consider subspace clustering under sparse noise, for which a non-convex optimization framework based on sparse data representations has been recently developed. This setup is suitable for a large variety of applications with high dimensional data, such as image processing, which is naturally decomposed into a sparse unstructured foreground and a background residing in a union of low-dimensional subspaces. In this framework, we further discuss both performance and implementation of the key optimization problem. We provide an analysis of this optimization problem demonstrating that our approach is capable of recovering linear subspaces as a local optimal solution for sufficiently large data sets and sparse noise vectors. We also propose a sequential algorithmic solution, which is particularly useful for extremely large data sets and online vision applications such as video processing.}, booktitle={2018 IEEE WINTER CONFERENCE ON APPLICATIONS OF COMPUTER VISION (WACV 2018)}, author={Panahi, Ashkan and Bian, Xiao and Krim, Hamid and Dai, Liyi}, year={2018}, pages={1302–1311} }
@article{lee_krim_2017, title={3D face recognition in the Fourier domain using deformed circular curves}, volume={28}, ISSN={0923-6082 1573-0824}, url={http://dx.doi.org/10.1007/S11045-015-0334-7}, DOI={10.1007/S11045-015-0334-7}, number={1}, journal={Multidimensional Systems and Signal Processing}, publisher={Springer Science and Business Media LLC}, author={Lee, Deokwoo and Krim, Hamid}, year={2017}, month={Jan}, pages={105–127} }
@inproceedings{thomaz_silva_silva_netto_krim_2017, title={Detection of abandoned objects using robust subspace recovery with intrinsic video alignment}, DOI={10.1109/iscas.2017.8050385}, abstractNote={The detection of abandoned objects in videos from moving cameras is of great importance to automatic surveillance systems that monitor large and visually complex areas. This paper proposes a new method based on sparse decompositions to identify video anomalies associated with abandoned objects. The proposed scheme inherently incorporates synchronization between the reference (anomaly-free) and target (under analysis) sequences thus reducing the implementation complexity of the overall surveillance system. Results indicate that the proposed video-processing scheme can lead to 95% complexity reduction while maintaining excellent detection capability of foreground objects.}, booktitle={2017 ieee international symposium on circuits and systems (iscas)}, author={Thomaz, L. A. and Silva, A. F. and Silva, E. A. B. and Netto, S. L. and Krim, H.}, year={2017}, pages={599–602} }
@article{lee_krim_2017b, title={Determination of a Sampling Criterion for 3D Reconstruction}, volume={61}, ISSN={["1943-3522"]}, DOI={10.2352/j.imagingsci.technol.2017.61.4.040501}, number={4}, journal={JOURNAL OF IMAGING SCIENCE AND TECHNOLOGY}, author={Lee, Deokwoo and Krim, Hamid}, year={2017}, month={Jul} }
@inproceedings{lee_krim_2017c, title={Sampling Density Criterion for Circular Structured Light 3D Imaging}, DOI={10.5220/0006147504780483}, booktitle={PROCEEDINGS OF THE 12TH INTERNATIONAL JOINT CONFERENCE ON COMPUTER VISION, IMAGING AND COMPUTER GRAPHICS THEORY AND APPLICATIONS (VISIGRAPP 2017), VOL 6}, author={Lee, Deokwoo and Krim, Hamid}, year={2017}, pages={478–483} }
@article{lee_krim_2017d, title={System input-output theoretic three-dimensional measurement based on circular-shaped structured light patterns}, volume={56}, ISSN={["1560-2303"]}, DOI={10.1117/1.oe.56.7.073104}, abstractNote={Alternative approaches to three-dimensional (3-D) reconstruction by employing the concepts of “system identification” and “communication systems” based on structured light patterns are proposed. In addition, a sampling criterion of the light source is derived in the case of using multiple projectors because 3-D reconstruction sometimes employs multiple viewpoints (cameras) and multiple structured light sources (or projectors). To reformulate a reconstruction problem, an input–output (I/O) system theoretic is adopted, and camera(s) and light source(s) that are located at different positions are defined as the output and the input, respectively. Akin to the system identification problem, the ratio of an output to an input, the “system function,” is defined as a 3-D measurement result. Alternatively, the reconstruction work can employ the concept of the “modulation and demodulation theory,” and the reconstruction work can be reinterpreted as an “input estimation problem.” This contribution chiefly deals with approximate reconstruction results that are sufficient for practical applications, such as 3-D object detection, clarification, recognition, and classification, rather than a perfect 3-D reconstruction itself. To that end, the development of an efficient and fast 3-D imaging system framework is proposed.}, number={7}, journal={OPTICAL ENGINEERING}, author={Lee, Deokwoo and Krim, Hamid}, year={2017}, month={Jul} }
@inproceedings{liang_wang_krim_2016, title={A behavior-based evaluation of product quality}, DOI={10.1109/icassp.2016.7472010}, abstractNote={In the pharmaceutical industry, quality is often measured by the impact of a product on a population. Knowledge about the behaviour of mosquitos responding to a repellent is a case in point in helping to improve the effect of insect repellent. It is ideally carried out using 3D videos which require a stereoscopic apparatus. To do so using 2D video and effectively evaluate the repellent is an difficult problem as is known in the biotechnology research field. In this paper, we propose a general framework for the swarm motion analysis of multiple mosquitos based on 2D videos. The effectiveness and robustness of our algorithm are verified by multiple 2D videos capturing mosquitos behavior in different experimental conditions.}, booktitle={International conference on acoustics speech and signal processing}, author={Liang, W. and Wang, H. and Krim, H.}, year={2016}, pages={1916–1920} }
@inproceedings{guan_tang_krim_keiser_rindos_sazdanovic_2016, title={A topological collapse for document summarization}, volume={2016-August}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-84984653594&partnerID=MN8TOARS}, DOI={10.1109/spawc.2016.7536867}, abstractNote={As a useful tool to summarize documents, keyphrase extraction extracts a set of single or multiple words, called keyphrases, that capture the primary topics discussed in a document. In this paper we propose DoCollapse, a topological collapse-based unsupervised keyphrase extraction method that relies on networking document by semantic relatedness of candidate keyphrases. A semantic graph is built with candidates keyphrases as vertices and then reduced to its core using topological collapse algorithm to facilitate final keyphrase selection. Iteratively collapsing dominated vertices aids in removing noisy candidates and revealing important points. We conducted experiments on two standard evaluation datasets composed of scientific papers and found that DoCollapse outperforms state-of-the-art methods. Results show that simplifying a document graph by homology-preserving topological collapse benefits keyphrase extraction.}, booktitle={2016 IEEE 17th International Workshop on Signal Processing Advances in Wireless Communications (SPAWC)}, publisher={IEEE}, author={Guan, Hui and Tang, Wen and Krim, Hamid and Keiser, James and Rindos, Andrew and Sazdanovic, Radmila}, year={2016}, month={Jul} }
@inproceedings{tang_otero_krim_dai_2016, title={Analysis dictionary learning for scene classification}, DOI={10.1109/ssp.2016.7551849}, abstractNote={This paper proposes a new framework for scene classification based on an analysis dictionary learning approach. Despite their tremendous success in various image processing tasks, synthesis-based and analysis-based sparse models fall short in classification tasks. It was hypothesized that this is partly due to the linear dependence of the dictionary atoms. In this work, we aim at improving classification performances by compensating for such dependence. The proposed methodology consists in grouping the atoms of the dictionary using clustering methods. This allows to sparsely model images from various scene classes and use such a model for classification. Experimental evidence shows the benefit of such an approach. Finally, we propose a supervised way to train the baseline representation for each class-specific dictionary, and achieve multiple classification by finding the minimum distance between the learned baseline representation and the data's sub-dictionary representation. Experiments seem to indicate that such approach achieves scene-classification performances that are comparable to the state of the art.}, booktitle={2016 IEEE Statistical Signal Processing Workshop (SSP)}, author={Tang, W. and Otero, I. R. and Krim, H. and Dai, L. Y.}, year={2016} }
@inproceedings{shen_krim_gu_2016, title={Beyond union of subspaces: Subspace pursuit on grassmann manifold for data representation}, DOI={10.1109/icassp.2016.7472444}, abstractNote={Discovering the underlying structure of a high-dimensional signal or big data has always been a challenging topic, and has become harder to tackle especially when the observations are exposed to arbitrary sparse perturbations. In this paper, built on the model of a union of subspaces (UoS) with sparse outliers and inspired by a basis pursuit strategy, we exploit the fundamental structure of a Grassmann manifold, and propose a new technique of pursuing the subspaces systematically by solving a non-convex optimization problem using the alternating direction method of multipliers. This problem as noted is further complicated by non-convex constraints on the Grassmann manifold, as well as the bilinearity in the penalty caused by the subspace bases and coefficients. Nevertheless, numerical experiments verify that the proposed algorithm, which provides elegant solutions to the sub-problems in each step, is able to de-couple the subspaces and pursue each of them under time-efficient parallel computation.}, booktitle={International conference on acoustics speech and signal processing}, author={Shen, X. Y. and Krim, H. and Gu, Y. T.}, year={2016}, pages={4079–4083} }
@article{krim_gentimis_chintakunta_2016, title={DISCOVERING THE WHOLE BY THE COARSE A topological paradigm for data analysis}, volume={33}, ISSN={["1558-0792"]}, DOI={10.1109/msp.2015.2510703}, abstractNote={The increasing interest in big data applications is ushering in a large effort in seeking new, efficient, and adapted data models to reduce complexity, while preserving maximal intrinsic information. Graph-based models have recently been getting a lot of attention on account of their intuitive and direct connection to the data [43]. The cost of these models, however, is to some extent giving up geometric insight as well as algebraic flexibility.}, number={2}, journal={IEEE SIGNAL PROCESSING MAGAZINE}, author={Krim, Hamid and Gentimis, Thanos and Chintakunta, Harish}, year={2016}, month={Mar}, pages={95–104} }
@article{steenbock_shultz_kirk_herrmann_2016, title={Influence of Radical Bridges on Electron Spin Coupling}, volume={121}, ISSN={1089-5639 1520-5215}, url={http://dx.doi.org/10.1021/ACS.JPCA.6B07270}, DOI={10.1021/acs.jpca.6b07270}, abstractNote={Increasing interactions between spin centers in molecules and molecular materials is a desirable goal for applications such as single-molecule magnets for information storage or magnetic metal–organic frameworks for adsorptive separation and targeted drug delivery and release. To maximize these interactions, introducing unpaired spins on bridging ligands is a concept used in several areas where such interactions are otherwise quite weak, in particular, lanthanide-based molecular magnets and magnetic metal–organic frameworks. Here, we use Kohn–Sham density functional theory to study how much the ground spin state is stabilized relative to other low-lying spin states by creating an additional spin center on the bridge for a series of simple model compounds. The di- and triradical structures consist of nitronyl nitroxide (NNO) and semiquinone (SQ) radicals attached to a meta-phenylene(R) bridge (where R = −NH•/–NH2, −O•/OH, −CH2•/CH2). These model compounds are based on a fully characterized SQ–meta-phenylene–NNO diradical with moderately strong antiferromagnetic coupling. Replacing closed-shell substituents CH3 and NH2 with their radical counterparts CH2• and NH• leads to an increase in stabilization of the ground state with respect to other low-lying spin states by a factor of 3–6, depending on the exchange–correlation functional. For OH compared with O• substituents, no conclusions can be drawn as the spin state energetics depend strongly on the functional. This could provide a basis for constructing sensitive test systems for benchmarking theoretical methods for spin state energy splittings. 
Reassuringly, the stabilization found for a potentially synthesizable complex (up to a factor of 3.5) is in line with the simple model systems (where a stabilization of up to a factor of 6.2 was found). Absolute spin state energy splittings are considerably smaller for the potentially stable system than those for the model complexes, which points to a dependence on the spin delocalization from the radical substituent on the bridge.}, number={1}, journal={The Journal of Physical Chemistry A}, publisher={American Chemical Society (ACS)}, author={Steenbock, Torben and Shultz, David A. and Kirk, Martin L. and Herrmann, Carmen}, year={2016}, month={Dec}, pages={216–225} }
@article{mahdizadehaghdam_wang_krim_dai_2016, title={Information Diffusion of Topic Propagation in Social Media}, volume={2}, ISSN={["2373-776X"]}, DOI={10.1109/tsipn.2016.2618324}, abstractNote={Real-world social and/or operational networks consist of agents with associated states, whose connectivity forms complex topologies. This complexity is further compounded by interconnected information layers, consisting, for instance, documents/resources of the agents which mutually share topical similarities. Our goal in this work is to predict the specific states of the agents, as their observed resources evolve in time and get updated. The information diffusion among the agents and the publications themselves effectively result in a dynamic process which we capture by an interconnected system of networks (i.e. layered). More specifically, we use a notion of a supra-Laplacian matrix to address such a generalized diffusion of an interconnected network starting with the classical "graph Laplacian". The auxiliary and external input update is modeled by a multidimensional Brownian process, yielding two contributions to the variations in the states of the agents: one that is due to the intrinsic interactions in the network system, and the other due to the external inputs or innovations. A variation on this theme, a priori knowledge of a fraction of the agents' states is shown to lead to a Kalman predictor problem. This helps us refine the predicted states exploiting the error in estimating the states of agents. Three real-world datasets are used to evaluate and validate the information diffusion process in this novel layered network approach. Our results demonstrate a lower prediction error when using the interconnected network rather than the single connectivity layer between the agents. 
The prediction error is further improved by using the estimated diffusion connection and by applying the Kalman approach with partial observations.}, number={4}, journal={IEEE TRANSACTIONS ON SIGNAL AND INFORMATION PROCESSING OVER NETWORKS}, author={Mahdizadehaghdam, Shahin and Wang, Han and Krim, Hamid and Dai, Liyi}, year={2016}, month={Dec}, pages={569–581} }
@inproceedings{chintakunta_robinson_krim_2016, title={Introduction to the special session on topological data Analysis, ICASSP 2016}, DOI={10.1109/icassp.2016.7472911}, abstractNote={Topological Data Analysis (TDA) is a topic which has recently seen many applications. The goal of this special session is to highlight the bridge between signal processing, machine learning and techniques in topological data analysis. In this way, we hope to encourage more engineers to start exploring TDA and its applications. This paper briefly introduces the standard techniques used in this area, delineates the common theme connecting the works presented in this session, and concludes with a brief summary of each of the papers presented.}, booktitle={International conference on acoustics speech and signal processing}, author={Chintakunta, H. and Robinson, M. and Krim, H.}, year={2016}, pages={6410–6414} }
@article{gamble_chintakunta_wilkerson_krim_2016, title={Node Dominance: Revealing Community and Core-Periphery Structure in Social Networks}, volume={2}, ISSN={["2373-776X"]}, DOI={10.1109/tsipn.2016.2527923}, abstractNote={This study relates the local property of node dominance to local and global properties of a network. Iterative removal of dominated nodes yields a distributed algorithm for computing a core-periphery decomposition of a social network, where nodes in the network core are seen to be essential in terms of network flow and global structure. Additionally, the connected components in the periphery give information about the community structure of the network, aiding in community detection. A number of explicit results are derived, relating the core and periphery to network flow, community structure, and global network structure, which are corroborated by observational results. The method is illustrated using a real world network (DBLP co-authorship network), with ground-truth communities.}, number={2}, journal={IEEE TRANSACTIONS ON SIGNAL AND INFORMATION PROCESSING OVER NETWORKS}, author={Gamble, Jennifer and Chintakunta, Harish and Wilkerson, Adam and Krim, Hamid}, year={2016}, month={Jun}, pages={186–199} }
@inproceedings{ghanem_skau_krim_clouse_sakla_2016, title={Non-parametric bounds on the nearest neighbor classification accuracy based on the Henze-Penrose metric}, DOI={10.1109/icip.2016.7532581}, abstractNote={Analysis procedures for higher-dimensional data are generally computationally costly; thereby justifying the high research interest in the area. Entropy-based divergence measures have proven their effectiveness in many areas of computer vision and pattern recognition. However, the complexity of their implementation might be prohibitive in resource-limited applications, as they require estimates of probability densities which are very difficult to compute directly for high-dimensional data. In this paper, we investigate the usage of a non-parametric distribution-free metric, known as the Henze-Penrose test statistic, to estimate the divergence between different classes of vehicles. In this regard, we apply some common feature extraction techniques to further characterize the distributional separation relative to the original data. Moreover, we employ the Henze-Penrose metric to obtain bounds for the Nearest Neighbor (NN) classification accuracy. Simulation results demonstrate the effectiveness and the reliability of this metric in estimating the inter-class separability. In addition, the proposed bounds are exploited for selecting the least number of features that would retain sufficient discriminative information.}, booktitle={2016 ieee international conference on image processing (icip)}, author={Ghanem, S. and Skau, E. and Krim, H. and Clouse, H. S. and Sakla, W.}, year={2016}, pages={1364–1368} }
@inproceedings{skau_wohlberg_krim_dai_2016, title={Pansharpening via coupled triple factorization dictionary learning}, DOI={10.1109/icassp.2016.7471873}, abstractNote={Data fusion is the operation of integrating data from different modalities to construct a single consistent representation. This paper proposes variations of coupled dictionary learning through an additional factorization. One variation of this model is applicable to the pansharpening data fusion problem. Real world pansharpening data was applied to train and test our proposed formulation. The results demonstrate that the data fusion model can successfully be applied to the pan-sharpening problem.}, booktitle={International conference on acoustics speech and signal processing}, author={Skau, E. and Wohlberg, B. and Krim, H. and Dai, L. Y.}, year={2016}, pages={1234–1237} }
@article{bian_krim_bronstein_dai_2016, title={Sparsity and Nullity: Paradigms for Analysis Dictionary Learning}, volume={9}, ISSN={["1936-4954"]}, DOI={10.1137/15m1030376}, abstractNote={Sparse models in dictionary learning have been successfully applied in a wide variety of machine learning and computer vision problems, and as a result have recently attracted increased research interest. Another interesting related problem based on linear equality constraints, namely the sparse null space (SNS) problem, first appeared in 1986 and has since inspired results on sparse basis pursuit. In this paper, we investigate the relation between the SNS problem and the analysis dictionary learning (ADL) problem, and show that the SNS problem plays a central role, and may be utilized to solve dictionary learning problems. Moreover, we propose an efficient algorithm of sparse null space basis pursuit (SNS-BP) and extend it to a solution of ADL. Experimental results on numerical synthetic data and real-world data are further presented to validate the performance of our method.}, number={3}, journal={SIAM JOURNAL ON IMAGING SCIENCES}, author={Bian, Xiao and Krim, Hamid and Bronstein, Alex and Dai, Liyi}, year={2016}, pages={1107–1126} }
@article{emrani_saponas_morris_krim_2015, title={A Novel Framework for Pulse Pressure Wave Analysis Using Persistent Homology}, volume={22}, ISSN={["1558-2361"]}, DOI={10.1109/lsp.2015.2441068}, abstractNote={Four characteristic points of pulse pressure waves–the systolic peak, the anacrotic notch, the dicrotic notch, and the diastolic foot–are used to estimate various aspects of cardiovascular function, such as heart rate and augmentation index. We propose a novel approach to extracting these characteristic points using a topological signal processing framework. We characterize the topology of the signals using a collection of persistence intervals, which are encapsulated in a persistence diagram. The characteristic points are identified based on their time of occurrence and their distance from the identity line in the persistence diagram. We validate this approach by collecting radial pulse pressure data from twenty-eight participants using a wearable tonometer, and computing the peripheral augmentation index using a traditional derivative-based method and our novel persistence-based method. The augmentation index values computed using the two methods are statistically indistinguishable, suggesting that this representation merits further exploration as a tool for analyzing pulse pressure waves.}, number={11}, journal={IEEE SIGNAL PROCESSING LETTERS}, author={Emrani, Saba and Saponas, T. Scott and Morris, Dan and Krim, Hamid}, year={2015}, month={Nov} }
@article{chintakunta_gentimis_gonzalez-diaz_jimenez_krim_2015, title={An entropy-based persistence barcode}, volume={48}, ISSN={0031-3203}, url={http://dx.doi.org/10.1016/J.PATCOG.2014.06.023}, DOI={10.1016/J.PATCOG.2014.06.023}, abstractNote={In persistent homology, the persistence barcode encodes pairs of simplices meaning birth and death of homology classes. Persistence barcodes depend on the ordering of the simplices (called a filter) of the given simplicial complex. In this paper, we define the notion of minimal barcodes in terms of entropy. Starting from a given filtration of a simplicial complex K, an algorithm for computing a filter (a total ordering of the simplices preserving the partial ordering imposed by the filtration as well as achieving a persistence barcode with small entropy) is detailed, by way of computation, and subsequent modification, of maximum matchings on subgraphs of the Hasse diagram associated to K. Examples demonstrating the utility of computing such a proper ordering on the simplices are given. HighlightsWe define the notion of minimal barcodes in terms of entropy.Given a simplicial complex, an algorithm for computing a proper filter F is detailed.F preserves the partial ordering imposed by the filtration.F achieves a persistence barcode with small entropy.Examples demonstrating the utility of computing such a proper filter are given.}, number={2}, journal={Pattern Recognition}, publisher={Elsevier BV}, author={Chintakunta, Harish and Gentimis, Thanos and Gonzalez-Diaz, Rocio and Jimenez, Maria-Jose and Krim, Hamid}, year={2015}, month={Feb}, pages={391–401} }
@inproceedings{bian_krim_2015, title={Bi-sparsity pursuit for robust subspace recovery}, DOI={10.1109/icip.2015.7351462}, abstractNote={The success of sparse models in computer vision and machine learning in many real-world applications, may be attributed in large part, to the fact that many high dimensional data are distributed in a union of low dimensional subspaces. The underlying structure may, however, be adversely affected by sparse errors, thus inducing additional complexity in recovering it. In this paper, we propose a bi-sparse model as a framework to investigate and analyze this problem, and provide as a result, a novel algorithm to recover the union of subspaces in presence of sparse corruptions. We additionally demonstrate the effectiveness of our method by experiments on real-world vision data.}, booktitle={2015 ieee international conference on image processing (icip)}, author={Bian, X. and Krim, H.}, year={2015}, pages={3535–3539} }
@article{gamble_chintakunta_krim_2015, title={Coordinate-free quantification of coverage in dynamic sensor networks}, volume={114}, ISSN={["1872-7557"]}, DOI={10.1016/j.sigpro.2015.02.013}, abstractNote={We present a methodology for analyzing coverage properties in dynamic sensor networks. The dynamic sensor network under consideration is studied through a series of snapshots, and is represented by a sequence of simplicial complexes, built from the communication graph at each time point. A method from computational topology called zigzag persistent homology takes this sequence of simplicial complexes as input, and returns a 'barcode' containing the birth and death times of homological features in this sequence. We derive useful statistics from this output for analyzing time-varying coverage properties.In addition, we propose a method which returns specific representative cycles for these homological features, at each point along the birth-death intervals. These representative cycles are then used to track coverage holes in the network, and obtain size estimates for individual holes at each time point. A weighted barcode, incorporating the size information, is then used as a visual and quantitative descriptor of the dynamic network coverage. Graphical abstractDisplay Omitted HighlightsEach sensor has only a list of its neighboring sensors, with no coordinates, or inter-sensor distance information.Using these snapshots of local information, we describe the dynamic coverage properties of the network.Quantitative output is a weighted barcode, obtained using zigzag persistent homology.Estimated hole size and duration are encoded in this barcode.Method is able to distinguish between different sensor network mobility patterns.}, journal={SIGNAL PROCESSING}, author={Gamble, Jennifer and Chintakunta, Harish and Krim, Hamid}, year={2015}, month={Sep}, pages={1–18} }
@inproceedings{gamble_chintakunta_krim_2015, title={Emergence of core-periphery structure from local node dominance in social networks}, DOI={10.1109/eusipco.2015.7362716}, abstractNote={There has been growing evidence recently for the view that social networks can be divided into a well connected core, and a sparse periphery. This paper describes how such a global description can be obtained from local dominance relation ships between vertices, to naturally yield a distributed algo rithm for such a decomposition. It is shown that the resulting core describes the global structure of the network, while also preserving shortest paths, and displaying expander-like properties. Moreover, the periphery obtained from this de composition consists of a large number of connected com ponents, which can be used to identify communities in the network. These are used for a ‘divide-and-conquer’ strategy for community detection, where the peripheral components are obtained as a pre-processing step to identify the small sets most likely to contain communities. The method is illustrated using a real world network (DBLP co-authorship network), with ground-truth communities.}, booktitle={2015 23rd european signal processing conference (eusipco)}, author={Gamble, J. and Chintakunta, H. and Krim, H.}, year={2015}, pages={1910–1914} }
@book{krim_hamza_2015, title={Geometric methods in signal and image analysis}, ISBN={9781107033900}, DOI={10.1017/cbo9781139523967}, abstractNote={This comprehensive guide offers a new approach for developing and implementing robust computational methodologies that uncover the key geometric and topological information from signals and images. With the help of detailed real-world examples and applications, readers will learn how to solve complex signal and image processing problems in fields ranging from remote sensing to medical imaging, bioinformatics, robotics, security, and defence. With an emphasis on intuitive and application-driven arguments, this text covers not only a range of methods in use today, but also introduces promising new developments for the future, bringing the reader up-to-date with the state of the art in signal and image analysis. Covering basic principles as well as advanced concepts and applications, and with examples and homework exercises, this is an invaluable resource for graduate students, researchers, and industry practitioners in a range of fields including signal and image processing, biomedical engineering, and computer graphics.}, publisher={Cambridge, United Kingdom: Cambridge University Press}, author={Krim, H. and Hamza, A. B.}, year={2015} }
@inproceedings{jardim_bian_silva_netto_krim_2015, title={On the detection of abandoned objects with a moving camera using robust subspace recovery and sparse representation}, DOI={10.1109/icassp.2015.7178179}, abstractNote={We consider the application of sparse-representation and robust-subspace-recovery techniques to detect abandoned objects in a target video acquired with a moving camera. In the proposed framework, the target video is compared to a previously acquired reference video, which is assumed to have no abandoned objects. The detection method explores the low-rank similarities among the reference and target videos, as well as the sparsity of the differences between the two video sequences caused by the unexpected object in the target video. A three-step procedure is then presented adapting a previous low-rank and sparse image representation to the problem at hand. Performance of the proposed technique is verified using a large video database for abandoned-object detection in a cluttered environment. Results demonstrate the technique effectiveness even in the presence of some significant camera shake along its trajectory.}, booktitle={International conference on acoustics speech and signal processing}, author={Jardim, E. and Bian, X. and Silva, E. A. B. and Netto, S. L. and Krim, H.}, year={2015}, pages={1295–1299} }
@inproceedings{ayllon_gil-pita_rosa-zurera_krim_2015, title={Real-time multiple DOA estimation of speech sources in wireless acoustic sensor networks}, DOI={10.1109/icassp.2015.7178463}, abstractNote={Indoor localization of multiple speech sources in wireless acoustic sensor networks (WASNs) is an open and interesting problem with many practical applications, but the presence of noise and reverberations complicates the problem. In this paper, a distributed algorithm for multiple DOA estimation of speech sources in WASNs is presented. The method exploits the sparsity of speech sources in the time-frequency domain to obtain DOA estimations locally in each node of the network. The DOA estimations of different nodes are further combined to increase the accuracy of the local DOA estimations. Since the local DOAs are estimated using only the microphones of the same node, the synchronization between input channels and localization of the microphones from different nodes are not an issue.}, booktitle={International conference on acoustics speech and signal processing}, author={Ayllon, D. and Gil-Pita, R. and Rosa-Zurera, M. and Krim, H.}, year={2015}, pages={2709–2713} }
@inproceedings{bian_krim_bronstein_dai_2015, title={Sparse null space basis pursuit and analysis dictionary learning for high-dimensional data analysis}, DOI={10.1109/icassp.2015.7178678}, abstractNote={Sparse models in dictionary learning have been successfully applied in a wide variety of machine learning and computer vision problems, and have also recently been of increasing research interest. Another interesting related problem based on a linear equality constraint, namely the sparse null space problem (SNS), first appeared in 1986, and has since inspired results on sparse basis pursuit. In this paper, we investigate the relation between the SNS problem and the analysis dictionary learning problem, and show that the SNS problem plays a central role, and may be utilized to solve dictionary learning problems. Moreover, we propose an efficient algorithm of sparse null space basis pursuit, and extend it to a solution of analysis dictionary learning. Experimental results on numerical synthetic data and real-world data are further presented to validate the performance of our method.}, booktitle={International conference on acoustics speech and signal processing}, author={Bian, X. and Krim, H. and Bronstein, A. and Dai, L. Y.}, year={2015}, pages={3781–3785} }
@inproceedings{emrani_krim_2015, title={Spectral estimation in highly transient data}, DOI={10.1109/eusipco.2015.7362678}, abstractNote={We propose a new framework for estimating different frequencies in piece-wise periodic signals with time varying amplitude and phase. Through a 3-dimensional delay embedding of the introduced model, we construct a union of intersecting planes where each plane corresponds to one frequency. The equations of each of these planes only depend on the associated frequency, and are used to calculate the tone in each segment. A sparse subspace clustering technique is utilized to find the segmentation of the data, and the points in each cluster are used to compute the normal vectors. In the presence of white Gaussian noise, principal component analysis is used to robustly perform this computation. Experimental results demonstrate the effectiveness of the proposed framework.}, booktitle={2015 23rd european signal processing conference (eusipco)}, author={Emrani, S. and Krim, H.}, year={2015}, pages={1721–1725} }
@article{wang_krim_viniotis_2014, title={Analysis and Control of Beliefs in Social Networks}, volume={62}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2014.2352591}, abstractNote={In this paper, we investigate the problem of how beliefs diffuse among members of social networks. We propose an information flow model (IFM) of belief that captures how interactions among members affect the diffusion and eventual convergence of a belief. The IFM model includes a generalized Markov Graph (GMG) model as a social network model, which reveals that the diffusion of beliefs depends heavily on two characteristics of the social network characteristics, namely degree centralities and clustering coefficients. We apply the IFM to both converged belief estimation and belief control strategy optimization. The model is compared with an IFM including the Barabasi-Albert model, and is evaluated via experiments with published real social network data.}, number={21}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Wang, Tian and Krim, Hamid and Viniotis, Yannis}, year={2014}, month={Nov}, pages={5552–5564} }
@inproceedings{wilkerson_chintakunta_krim_2014, title={Computing persistent features in big data: A distributed dimension reduction approach}, DOI={10.1109/icassp.2014.6853548}, abstractNote={Persistent homology has become one of the most popular tools used in topological data analysis for analyzing big data sets. In an effort to minimize the computational complexity of finding the persistent homology of a data set, we develop a simplicial collapse algorithm called the selective collapse. This algorithm works by representing the previously developed strong collapse as a forest and uses that forest data to improve the speed of both the strong collapse and of persistent homology. Finally, we demonstrate the savings in computational complexity using geometric random graphs.}, booktitle={International conference on acoustics speech and signal processing}, author={Wilkerson, A. C. and Chintakunta, H. and Krim, H.}, year={2014} }
@article{chintakunta_krim_2014, title={Distributed Localization of Coverage Holes Using Topological Persistence}, volume={62}, ISSN={["1941-0476"]}, DOI={10.1109/tsp.2014.2314063}, abstractNote={We develop distributed algorithms to detect and localize coverage holes in sensor networks. We neither assume coordinate information of the nodes, neither any distances between the nodes. We use algebraic topological methods to define a coverage hole, and develop provably correct algorithm to detect a hole. We then partition the network into smaller subnetworks, while ensuring that the holes are preserved, and checking for holes in each. We show that repeating this process leads to localizing the coverage holes. We demonstrate the improved complexity of our algorithm using simulations.}, number={10}, journal={IEEE TRANSACTIONS ON SIGNAL PROCESSING}, author={Chintakunta, Harish and Krim, Hamid}, year={2014}, month={May}, pages={2531–2541} }
@article{emrani_gentimis_krim_2014, title={Persistent Homology of Delay Embeddings and its Application to Wheeze Detection}, volume={21}, ISSN={["1558-2361"]}, DOI={10.1109/lsp.2014.2305700}, abstractNote={We propose a new approach to detect and quantify the periodic structure of dynamical systems using topological methods. We propose to use delay-coordinate embedding as a tool to detect the presence of harmonic structures by using persistent homology for robust analysis of point clouds of delay-coordinate embeddings. To discover the proper delay, we propose an autocorrelation like (ACL) function of the signals, and apply the introduced topological approach to analyze breathing sound signals for wheeze detection. Experiments have been carried out to substantiate the capabilities of the proposed method.}, number={4}, journal={IEEE SIGNAL PROCESSING LETTERS}, author={Emrani, Saba and Gentimis, Thanos and Krim, Hamid}, year={2014}, month={Apr}, pages={459–463} }
@inproceedings{emrani_chintakunta_krim_2014, title={Real time detection of harmonic structure: A case for topological signal analysis}, DOI={10.1109/icassp.2014.6854240}, abstractNote={The goal of this study is to find evidence of cyclicity or periodicity in data with low computational complexity and high accuracy. Using delay embeddings, we transform the timedomain signal into a point cloud, whose topology reflects the periodic behavior of the signal. Persistent homology is employed to determine the underlying manifold of the point cloud, and the Euler characteristic provides for a fast computation of topology of the resulting manifold. We apply the introduced approach to breathing sound signals for wheeze detection. Our experiments substantiate the capabilities of the proposed method.}, booktitle={International conference on acoustics speech and signal processing}, author={Emrani, S. and Chintakunta, H. and Krim, H.}, year={2014} }
@article{yi_krim_2014, title={Subspace Learning of Dynamics on a Shape Manifold: A Generative Modeling Approach}, volume={23}, ISSN={["1941-0042"]}, DOI={10.1109/tip.2014.2358200}, abstractNote={In this paper, we propose a novel subspace learning algorithm of shape dynamics. Compared to the previous works, our method is invertible and better characterizes the nonlinear geometry of a shape manifold while retaining a good computational efficiency. In this paper, using a parallel moving frame on a shape manifold, each path of shape dynamics is uniquely represented in a subspace spanned by the moving frame, given an initial condition (the starting point and starting frame). Mathematically, such a representation may be formulated as solving a manifold-valued differential equation, which provides a generative modeling of high-dimensional shape dynamics in a lower dimensional subspace. Given the parallelism and a path on a shape manifold, the parallel moving frame along the path is uniquely determined up to the choice of the starting frame. With an initial frame, we minimize the reconstruction error from the subspace to shape manifold. Such an optimization characterizes well the Riemannian geometry of the manifold by imposing parallelism (equivalent as a Riemannian metric) constraints on the moving frame. The parallelism in this paper is defined by a Levi-Civita connection, which is consistent with the Riemannian metric of the shape manifold. In the experiments, the performance of the subspace learning is extensively evaluated using two scenarios: 1) how the high dimensional geometry is characterized in the subspace and 2) how the reconstruction compares with the original shape dynamics. The results demonstrate and validate the theoretical advantages of the proposed approach.}, number={11}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Yi, Sheng and Krim, Hamid}, year={2014}, month={Nov}, pages={4907–4919} }
@article{wang_krim_viniotis_2013, title={A Generalized Markov Graph Model: Application to Social Network Analysis}, volume={7}, ISSN={["1941-0484"]}, DOI={10.1109/jstsp.2013.2246767}, abstractNote={In this paper we propose a generalized Markov Graph model for social networks and evaluate its application in social network synthesis, and in social network classification. The model reveals that the degree distribution, the clustering coefficient distribution as well as a newly discovered feature, a crowding coefficient distribution, are fundamental to characterizing a social network. The application of this model to social network synthesis leads to a capacity to generate networks dominated by the degree distribution and the clustering coefficient distribution. Another application is a new social network classification method based on comparing the statistics of their degree distributions and clustering coefficient distributions as well as their crowding coefficient distributions. In contrast to the widely held belief that a social network graph is solely defined by its degree distribution, the novelty of this paper consists in establishing the strong dependence of social networks on the degree distribution, the clustering coefficient distribution and the crowding coefficient distribution, and in demonstrating that they form minimal information to classify social networks as well as to design a new social network synthesis tool. We provide numerous experiments with published data and demonstrate very good performance on both counts.}, number={2}, journal={IEEE JOURNAL OF SELECTED TOPICS IN SIGNAL PROCESSING}, author={Wang, Tian and Krim, Hamid and Viniotis, Yannis}, year={2013}, month={Apr}, pages={318–332} }
@inbook{yi_krim_2013, title={A Subspace Learning of Dynamics on a Shape Manifold: A Generative Modeling Approach}, ISBN={9783642400193 9783642400209}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-40020-9_8}, DOI={10.1007/978-3-642-40020-9_8}, abstractNote={In this paper, we propose a novel subspace learning of shape dynamics. In comparison with the previous works, our method is invertible and better characterises the nonlinear geometry of a shape manifold while being computationally more efficient. In this work, with a parallel moving frame on a shape manifold, each path of shape dynamics is uniquely represented in a subspace spanned by the moving frame, given an initial condition (the starting point and the starting frame). Given the parallelism of the frame and ensured by a Levi-Civita connection, and a path on a shape manifold, the parallel moving frame along the path is uniquely determined up to the choice of the starting frame.}, booktitle={Lecture Notes in Computer Science}, publisher={Springer Berlin Heidelberg}, author={Yi, Sheng and Krim, Hamid}, year={2013}, pages={84–91} }
@inbook{bian_krim_2013, title={Activity Video Analysis via Operator-Based Local Embedding}, ISBN={9783642400193 9783642400209}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-40020-9_95}, DOI={10.1007/978-3-642-40020-9_95}, booktitle={Lecture Notes in Computer Science}, publisher={Springer Berlin Heidelberg}, author={Bian, Xiao and Krim, Hamid}, year={2013}, pages={845–852} }
@inbook{bian_krim_2013a, title={Optimal Operator Space Pursuit: A Framework for Video Sequence Data Analysis}, ISBN={9783642374432 9783642374449}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-37444-9_59}, DOI={10.1007/978-3-642-37444-9_59}, booktitle={Computer Vision – ACCV 2012}, publisher={Springer Berlin Heidelberg}, author={Bian, Xiao and Krim, Hamid}, year={2013}, pages={760–769} }
@inproceedings{wilkerson_moore_swami_krim_2013, title={Simplifying the homology of networks via strong collapses}, DOI={10.1109/icassp.2013.6638666}, abstractNote={There has recently been increased interest in applications of topology to areas ranging from control and sensing, to social network analysis, to high-dimensional point cloud data analysis. Here we use simplicial complexes to represent the group relationship structure in a network. We detail a novel algorithm for simplifying homology and “hole location” computations on a complex by reducing it to its core using a strong collapse. We show that the homology and hole locations are preserved and provide motivation for interest in this reduction technique with applications in sensor and social networks. Since the complexity of finding “holes” is quintic in the number of simplices, the proposed reduction leads to significant savings in complexity.}, booktitle={International conference on acoustics speech and signal processing}, author={Wilkerson, A. C. and Moore, T. J. and Swami, A. and Krim, H.}, year={2013}, pages={5258–5262} }
@inbook{gonzález-díaz_jiménez_krim_2013, title={Towards Minimal Barcodes}, ISBN={9783642382208 9783642382215}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-38221-5_20}, DOI={10.1007/978-3-642-38221-5_20}, abstractNote={AbstractIn the setting of persistent homology computation, a useful tool is the persistence barcode representation in which pairs of birth and death times of homology classes are encoded in the form of intervals. Starting from a polyhedral complex K (an object subdivided into cells which are polytopes) and an initial order of the set of vertices, we are concerned with the general problem of searching for filters (an order of the rest of the cells) that provide a minimal barcode representation in the sense of having minimal number of “k-significant” intervals, which correspond to homology classes with life-times longer than a fixed number k. As a first step, in this paper we provide an algorithm for computing such a filter for k = 1 on the Hasse diagram of the poset of faces of K.KeywordsPersistent homologypersistence barcodesgraphspolyhedral complexes}, booktitle={Graph-Based Representations in Pattern Recognition}, publisher={Springer Berlin Heidelberg}, author={González-Díaz, Rocío and Jiménez, María-José and Krim, Hamid}, year={2013}, pages={184–193} }
@article{emrani_krim_2013, title={Wheeze detection and location using spectro-temporal analysis of lung sounds}, ISSN={["1086-4105"]}, DOI={10.1109/sbec.2013.27}, abstractNote={Wheezes are abnormal lung sounds, which usually imply obstructive airways diseases. The objective of this study is to design an automatic wheeze detector for a wearable health monitoring system, which is able to locate the wheezes inside the respiratory cycle with high accuracy, and low computational complexity. We compute important features of wheezes, which we classify as temporal and spectral characteristics and employed to analyze recorded lung sounds including wheezes from patients with asthma. Time-frequency (TF) technique as well as wavelet packet decomposition (WPD) is used for this purpose. Experimental results verify the promising performance of described methods.}, journal={29TH SOUTHERN BIOMEDICAL ENGINEERING CONFERENCE (SBEC 2013)}, author={Emrani, Saba and Krim, Hamid}, year={2013}, pages={37–38} }
@inproceedings{lee_krim_2012, title={A sampling theorem for a 2D surface}, volume={6667}, DOI={10.1007/978-3-642-24785-9_47}, abstractNote={AbstractThe sampling rate for signal reconstruction has been and remains an important and central criterion in numerous applications. We propose, in this paper, a new approach to determining an optimal sampling rate for a 2D-surface reconstruction using the so-called Two-Thirds Power Law. This paper first introduces an algorithm of a 2D surface reconstruction from a 2D image of circular light patterns projected on the surface. Upon defining the Two-Thirds Power Law we show how the extracted spectral information helps define an optimal sampling rate of the surface, reflected in the number of projected circular patterns required for its reconstruction. This result is of interest in a number of applications such as 3D face recognition and development of new efficient 3D cameras. Substantive examples are provided.KeywordsSampling rateReconstructionThe Two-Thirds Power Law Structured light patterns}, booktitle={Scale space and variational methods in computer vision}, author={Lee, D. and Krim, H.}, year={2012}, pages={556–567} }
@article{yi_krim_norris_2012, title={Human Activity as a Manifold-Valued Random Process}, volume={21}, ISSN={["1941-0042"]}, DOI={10.1109/tip.2012.2197008}, abstractNote={Most of previous shape based human activity models were built with either a linear assumption or an extrinsic interpretation of the nonlinear geometry of the shape space, both of which proved to be problematic on account of the nonlinear intrinsic geometry of the associated shape spaces. In this paper we propose an intrinsic stochastic modeling of human activity on a shape manifold. More importantly, within an elegant and theoretically sound framework, our work effectively bridges the nonlinear modeling of human activity on a nonlinear space, with the classic stochastic modeling in a Euclidean space, and thereby provides a foundation for a more effective and accurate analysis of the nonlinear feature space of activity models. From a video sequence, human activity is extracted as a sequence of shapes. Such a sequence is considered as one realization of a random process on a shape manifold. Different activities are then modeled as manifold valued random processes with different distributions. To address the problem of stochastic modeling on a manifold, we first construct a nonlinear invertible map of a manifold valued process to a Euclidean process. The resulting process is then modeled as a global or piecewise Brownian motion. The mapping from a manifold to a Euclidean space is known as a stochastic development. The advantage of such a technique is that it yields a one-one correspondence, and the resulting Euclidean process intrinsically captures the curvature on the original manifold. The proposed algorithm is validated on two activity databases [15], [5] and compared with the related works on each of these. 
The substantiating results demonstrate the viability and high accuracy of our modeling technique in characterizing and classifying different activities.}, number={8}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Yi, Sheng and Krim, Hamid and Norris, Larry K.}, year={2012}, month={Aug}, pages={3416–3428} }
@inproceedings{yi_krim_norris_2012a, title={Human activity modeling as Brownian motion on shape manifold}, volume={6667}, DOI={10.1007/978-3-642-24785-9_53}, abstractNote={In this paper we propose a stochastic modeling of human activity on a shape manifold. From a video sequence, human activity is extracted as a sequence of shape. Such a sequence is considered as one realization of a random process on shape manifold. Then Different activities are modeled by manifold valued random processes with different distributions. To solve the problem of stochastic modeling on a manifold, we first regress a manifold values process to a Euclidean process. The resulted process then could be modeled by linear models such as a stationary incremental process and a piecewise stationary incremental process. The mapping from manifold to Euclidean space is known as a stochastic development. The idea is to parallelly transport the tangent along curve on manifold to a single tangent space. The advantage of such technique is the one to one correspondence between the process in Euclidean space and the one on manifold. The proposed algorithm is tested on database [5] and compared with the related work in [5]. The result demonstrate the high accuracy of our modeling in characterizing different activities.}, booktitle={Scale space and variational methods in computer vision}, author={Yi, S. and Krim, H. and Norris, L. K.}, year={2012}, pages={628–639} }
@inbook{lee_krim_2012a, title={System Identification: 3D Measurement Using Structured Light System}, ISBN={9783642331398 9783642331404}, ISSN={0302-9743 1611-3349}, url={http://dx.doi.org/10.1007/978-3-642-33140-4_1}, DOI={10.1007/978-3-642-33140-4_1}, booktitle={Advanced Concepts for Intelligent Vision Systems}, publisher={Springer Berlin Heidelberg}, author={Lee, Deokwoo and Krim, Hamid}, year={2012}, pages={1–11} }
@article{clouse_krim_mendoza-schrock_2011, title={A Scaled, Performance Driven Evaluation of the Layered Sensing Framework Utilizing Polarimetric Infrared Imagery}, volume={8059}, ISSN={["1996-756X"]}, DOI={10.1117/12.886510}, abstractNote={The layered sensing framework, in application, provides a useful, but complex integration of information sources,
e.g. multiple sensing modalities and operating conditions. It is the implied trade-off between sensor fidelity
and system complexity that we address here. Abstractly, each sensor/source of information in a layered sensing
application can be viewed as a node in the network of constituent sensors. Regardless of the sensing modality,
location, scope, etc., each sensor collects information locally to be utilized by the system as a whole for further
exploitation. Consequently, the information may be distributed throughout the network and not necessarily
coalesced in a central node/location. We present, initially, an analysis of polarimetric infrared data, with two
novel features, as one of the input modalities to such a system. We then proceed with statistical and geometric
analyses of an example network, thus quantifying the advantages and drawbacks of a specific application of the
layered sensing framework.}, journal={EVOLUTIONARY AND BIO-INSPIRED COMPUTATION: THEORY AND APPLICATIONS V}, author={Clouse, Hamilton Scott and Krim, Hamid and Mendoza-Schrock, Olga}, year={2011} }
@inproceedings{yi_krim_norris_2011, title={An invertible dimension reduction of curves on a manifold}, DOI={10.1109/iccvw.2011.6130412}, abstractNote={In this paper, we propose a novel lower dimensional representation of a shape sequence. The proposed dimension reduction is invertible and computationally more efficient in comparison to other related works. Theoretically, the differential geometry tools such as moving frame and parallel transportation are successfully adapted into the dimension reduction problem of high dimensional curves. Intuitively, instead of searching for a global flat subspace for curve embedding, we deployed a sequence of local flat subspaces adaptive to the geometry of both of the curve and the manifold it lies on. In practice, the experimental results of the dimension reduction and reconstruction algorithms well illustrate the advantages of the proposed theoretical innovation.}, booktitle={2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)}, author={Yi, S. and Krim, H. and Norris, L. K.}, year={2011} }
@inproceedings{miao_krim_2011, title={Robustness and expression independence in 3D face recognition}, DOI={10.1109/sips.2011.6088991}, abstractNote={We describe a robust method for 3D face recognition under variance of facial expressions. The method utilizes the identical areas on two facial images as the measurement of distance. To segment the identical area, partial shape matching is performed using closest point registration and level set method. The segmentation problem is formalized into an Eikonal equation, which can be efficiently solved by fast marching method. The presented 3D face recognition method shows a very promising recognition rate.}, booktitle={2011 ieee workshop on signal processing systems (sips)}, author={Miao, S. and Krim, H.}, year={2011}, pages={289–292} }
@article{clouse_krim_sakla_mendoza-schrock_2011, title={Vehicle Tracking Through the Exploitation of Remote Sensing and LWIR Polarization Science}, volume={8160}, ISSN={["1996-756X"]}, DOI={10.1117/12.901556}, abstractNote={Vehicle tracking is an integral component in layered sensing exploitation applications. The utilization of a combination of sensing modalities and processing techniques provides better insight about a situation than can be achieved with a single sensing modality. In this work, several robust features are explored for vehicle tracking using data captured in a remote sensing setting. A target area is surveyed by a sensor capturing polarization information in the longwave infrared (LWIR) band. We here extend our previous work ([1]) to experimental analysis of several feature sets including three classic features (Stokes images, DoLP, the Degree of Linear Polarization, and AoP, the Angle of Polarization) and several geometry inspired features. Keywords: layered sensing, distributed sensing, polarimetric, infrared, tracking, feature-aided, fusion, multi-sensor. 1. INTRODUCTION Polarimetry information allows a more complete description of observed waveforms. In the thermal or long-wave infrared (LWIR) the polarization information makes an already robust observation mode even more so. LWIR sensors, while dependent upon material properties of the objects in the observed scene, are independent of the scene intensity. The geometry afforded by the polarimetry gains a further removal from the confusers in a scene by generating intensity invariant feature sets. These features are inter-related and therefore can be represented as functions of each other. Feature selection is a process by which the most useful, in some sense, can be singled out and utilized alone. The process followed in this work was: 1. Feature selection, 2. Fusion, 3. Tracking. Using polarimetric infrared data as an example, feature selection was used to generate feature sets from the geometric properties of the polarization ellipse. The generated feature sets were fused with two different techniques. Tracking was performed with the input being both the fused feature sets and the features independently. For the tracks resulting from independent elements of the feature sets, the tracks were fused via the method described in [1] and reiterated in section 4.3.2. The automatically generated tracks were compared to truth data generated by human-in-the-loop techniques for accuracy evaluation. In Section 2 we discuss, in general, the polarization phenomena of electromagnetic waves and the associated mathematical representations as well as the methodologies of feature selection, data fusion and tracking via Kalman filters.}, journal={POLARIZATION SCIENCE AND REMOTE SENSING V}, author={Clouse, Hamilton Scott and Krim, Hamid and Sakla, Wesam and Mendoza-Schrock, Olga}, year={2011} }
@article{miao_krim_2010, title={3D FACE RECOGNITION BASED ON EVOLUTION OF ISO-GEODESIC DISTANCE CURVES}, ISSN={["1520-6149"]}, DOI={10.1109/icassp.2010.5495363}, abstractNote={This paper presents a novel 3D face recognition method by means of the evolution of iso-geodesic distance curves. Specifically, the proposed method compares two neighboring iso-geodesic distance curves, and formalizes the evolution between them as a one-dimensional function, named evolution angle function, which is Euclidean invariant. The novelty of this paper consists in formalizing 3D face by an evolution angle functions, and in computing the distance between two faces by that of two functions. Experiments on Face Recognition Grand Challenge (FRGC) ver2.0 shows that our approach works very well on both neutral faces and non-neutral faces. By introducing a weight function, we also show a very promising result on non-neutral face database.}, journal={2010 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH, AND SIGNAL PROCESSING}, author={Miao, Shun and Krim, Hamid}, year={2010}, pages={1134–1137} }
@inproceedings{lee_krim_2010, title={3D surface reconstruction using structured circular light patterns}, volume={6474}, DOI={10.1007/978-3-642-17688-3_27}, booktitle={Advanced concepts for intelligent vision systems, pt i}, author={Lee, D. and Krim, H.}, year={2010}, pages={279–289} }
@article{feng_kogan_krim_2010, title={Classification of Curves in 2D and 3D via Affine Integral Signatures}, volume={109}, ISSN={["1572-9036"]}, url={http://www.scopus.com/inward/record.url?eid=2-s2.0-77949264986&partnerID=MN8TOARS}, DOI={10.1007/s10440-008-9353-9}, abstractNote={We propose new robust classification algorithms for planar and spatial curves subjected to affine transformations. Our motivation comes from the problems in computer image recognition. To each planar or spatial curve, we assign a planar signature curve. Curves, equivalent under an affine transformation, have the same signature. The signatures are based on integral invariants, which are significantly less sensitive to small perturbations of curves and noise than classically known differential invariants. Affine invariants are derived in terms of Euclidean invariants. We present two types of signatures: the global and the local signature. Both signatures are independent of curve parameterization. The global signature depends on a choice of the initial point and, therefore, cannot be used for local comparison. The local signature, albeit being slightly more sensitive to noise, is independent of the choice of the initial point and can be used to solve local equivalence problem. An experiment that illustrates robustness of the proposed signatures is presented.}, number={3}, journal={ACTA APPLICANDAE MATHEMATICAE}, author={Feng, Shuo and Kogan, Irina and Krim, Hamid}, year={2010}, month={Mar}, pages={903–937} }
@article{yang_lichtenwalner_morris_krim_kingon_2010, title={Contact degradation in hot/cold operation of direct contact micro-switches}, volume={20}, ISSN={0960-1317 1361-6439}, url={http://dx.doi.org/10.1088/0960-1317/20/10/105028}, DOI={10.1088/0960-1317/20/10/105028}, abstractNote={Degradation of gold contacts in micro-switches was studied under an extensive range of operation conditions including high-electric-field ac/dc hot switching, low-electric-field hot switching and cold switching. Tests were conducted in a unique experimental switching operation set-up. Gold micro-contacts were characterized by an atomic force microscope. It was found that a unique material transfer/distribution feature was correlated with specific switching operations. New insights into contact degradation mechanisms of micro-switches were achieved by analyzing contact degradation features under varied operation conditions. Our results indicate that for high-electric-field hot switching, gold atoms are transferred from the 'anode' contact to the 'cathode' contact via field evaporation; for low-electric-field hot switching, material transfer is induced by transient heat; and for cold switching, contact resistance can be jeopardized by surface contamination and surface roughness if contact force is low.}, number={10}, journal={Journal of Micromechanics and Microengineering}, publisher={IOP Publishing}, author={Yang, Z and Lichtenwalner, D and Morris, A and Krim, J and Kingon, A I}, year={2010}, month={Sep}, pages={105028} }
@article{poliannikov_zhizhina_krim_2010, title={Global optimization by adapted diffusion}, volume={58}, DOI={10.1109/tsp.2010.2071867}, abstractNote={In this paper, we study a diffusion stochastic dynamics with a general diffusion coefficient. The main result is that adapting the diffusion coefficient to the Hamiltonian allows to escape local wide minima and to speed up the convergence of the dynamics to the global minima. We prove the convergence of the invariant measure of the modified dynamics to a measure concentrated on the set of global minima and show how to choose a diffusion coefficient for a certain class of Hamiltonians.}, number={12}, journal={IEEE Transactions on Signal Processing}, author={Poliannikov, O. V. and Zhizhina, E. and Krim, H.}, year={2010}, pages={6119–6125} }
@article{chen_krim_mendoza_2010, title={Multiphase Joint Segmentation-Registration and Object Tracking for Layered Images}, volume={19}, ISSN={["1941-0042"]}, DOI={10.1109/tip.2010.2045164}, abstractNote={In this paper we propose to jointly segment and register objects of interest in layered images. Layered imaging refers to imageries taken from different perspectives and possibly by different sensors. Registration and segmentation are therefore the two main tasks which contribute to the bottom level, data alignment, of the multisensor data fusion hierarchical structures. Most exploitations of two layered images assumed that scanners are at very high altitudes and that only one transformation ties the two images. Our data are however taken at mid-range and therefore requires segmentation to assist us examining different object regions in a divide-and-conquer fashion. Our approach is a combination of multiphase active contour method with a joint segmentation-registration technique (which we called MPJSR) carried out in a local moving window prior to a global optimization. To further address layered video sequences and tracking objects in frames, we propose a simple adaptation of optical flow calculations along the active contours in a pair of layered image sequences. The experimental results show that the whole integrated algorithm is able to delineate the objects of interest, align them for a pair of layered frames and keep track of the objects over time.}, number={7}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Chen, Ping-Feng and Krim, Hamid and Mendoza, Olga L.}, year={2010}, month={Jul}, pages={1706–1719} }
@article{baloch_krim_2010, title={Object Recognition Through Topo-Geometric Shape Models Using Error-Tolerant Subgraph Isomorphisms}, volume={19}, ISSN={["1941-0042"]}, DOI={10.1109/tip.2009.2039372}, abstractNote={We propose a method for 3-D shape recognition based on inexact subgraph isomorphisms, by extracting topological and geometric properties of a shape in the form of a shape model, referred to as topo-geometric shape model (TGSM). In a nutshell, TGSM captures topological information through a rigid transformation invariant skeletal graph that is constructed in a Morse theoretic framework with distance function as the Morse function. Geometric information is then retained by analyzing the geometric profile as viewed through the distance function. Modeling the geometric profile through elastic yields a weighted skeletal representation, which leads to a complete shape signature. Shape recognition is carried out through inexact subgraph isomorphisms by determining a sequence of graph edit operations on model graphs to establish subgraph isomorphisms with a test graph. Test graph is recognized as a shape that yields the largest subgraph isomorphism with minimal cost of edit operations. In this paper, we propose various cost assignments for graph edit operations for error correction that takes into account any shape variations arising from noise and measurement errors.}, number={5}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Baloch, Sajjad and Krim, Hamid}, year={2010}, month={May}, pages={1191–1200} }
@article{aouada_krim_2010, title={Squigraphs for Fine and Compact Modeling of 3-D Shapes}, volume={19}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2009.2034693}, abstractNote={We propose to superpose global topological and local geometric 3-D shape descriptors in order to define one compact and discriminative representation for a 3-D object. While a number of available 3-D shape modeling techniques yield satisfactory object classification rates, there is still a need for a refined and efficient identification/recognition of objects among the same class. In this paper, we use Morse theory in a two-phase approach. To ensure the invariance of the final representation to isometric transforms, we choose the Morse function to be a simple and intrinsic global geodesic function defined on the surface of a 3-D object. The first phase is a coarse representation through a reduced topological Reeb graph. We use it for a meaningful decomposition of shapes into primitives. During the second phase, we add detailed geometric information by tracking the evolution of Morse function's level curves along each primitive. We then embed the manifold of these curves into [Formula: see text], and obtain a single curve. By combining phase one and two, we build new graphs rich in topological and geometric information that we refer to as squigraphs. Our experiments show that squigraphs are more general than existing techniques. They achieve similar classification rates to those achieved by classical shape descriptors. Their performance, however, becomes clearly superior when finer classification and identification operations are targeted. Indeed, while other techniques see their performances dropping, squigraphs maintain a performance rate of the order of 97%.}, number={2}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Aouada, Djamila and Krim, Hamid}, year={2010}, month={Feb}, pages={306–321} }
@article{yi_labate_easley_krim_2009, title={A Shearlet Approach to Edge Analysis and Detection}, volume={18}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2009.2013082}, abstractNote={It is well known that the wavelet transform provides a very effective framework for analysis of multiscale edges. In this paper, we propose a novel approach based on the shearlet transform: a multiscale directional transform with a greater ability to localize distributed discontinuities such as edges. Indeed, unlike traditional wavelets, shearlets are theoretically optimal in representing images with edges and, in particular, have the ability to fully capture directional and other geometrical features. Numerical examples demonstrate that the shearlet approach is highly effective at detecting both the location and orientation of edges, and outperforms methods based on wavelets as well as other standard methods. Furthermore, the shearlet approach is useful to design simple and effective algorithms for the detection of corners and junctions}, number={5}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Yi, Sheng and Labate, Demetrio and Easley, Glenn R. and Krim, Hamid}, year={2009}, month={May}, pages={929–941} }
@article{aouada_krim_2009, title={NOVEL SIMILARITY INVARIANT FOR SPACE CURVES USING TURNING ANGLES AND ITS APPLICATION TO OBJECT RECOGNITION}, ISBN={["978-1-4244-2353-8"]}, ISSN={["1520-6149"]}, DOI={10.1109/icassp.2009.4959824}, abstractNote={We present a new similarity invariant signature for space curves. This signature is based on the information contained in the turning angles of both the tangent and the binormal vectors at each point on the curve. For an accurate comparison of these signatures, we define a Riemannian metric on the space of the invariant. We show through relevant examples that, unlike classical invariants, the one we define in this paper enjoys multiple important properties at the same time, namely, a high discrimination level, independence of any reference point, uniqueness property, as well as a good preservation of the correspondence between curves. Moreover, we illustrate how to match 3D objects by extracting and comparing the invariant signatures of their curved skeletons.}, journal={2009 IEEE INTERNATIONAL CONFERENCE ON ACOUSTICS, SPEECH, AND SIGNAL PROCESSING, VOLS 1- 8, PROCEEDINGS}, author={Aouada, Djamila and Krim, Hamid}, year={2009}, pages={1277–1280} }
@inproceedings{chen_steen_yezzi_krim_2009, title={Brain {MRI} {T1}-map and {T1}-weighted image segmentation in a variational framework}, DOI={10.1109/icassp.2009.4959609}, abstractNote={In this paper we propose a constrained version of Mumford-Shah's [1] segmentation with an information-theoretic point of view [2] in order to devise a systematic procedure to segment brain MRI data for two modalities of parametric T1-map and T1-weighted images in both 2-D and 3-D settings. The incorporation of a tuning weight in particular adds a probabilistic flavor to our segmentation method, and makes the three-tissue segmentation possible. Our method uses region based active contours which have proven to be robust. The method is validated by two real objects which were used to generate T1-maps and also by two simulated brains of T1-weighted data from the BrainWeb [3] public database.}, booktitle={International conference on acoustics speech and signal processing}, author={Chen, P. F. and Steen, R. G. and Yezzi, A. and Krim, H.}, year={2009}, pages={417–420} }
@article{el_ouafdi_ziou_krim_2008, title={A smart stochastic approach for manifolds smoothing}, volume={27}, ISSN={["1467-8659"]}, DOI={10.1111/j.1467-8659.2008.01275.x}, abstractNote={In this paper, we present a probabilistic approach for 3D object's smoothing. The core idea behind the proposed method is to relate the problem of smoothing objects to that of tracking the transition probability density functions of an underlying random process. We show that such an approach allows for additional insight and sufficient flexibility compared with existing standard smoothing techniques. In particular, we are able to propose a newer, faster, and simpler smoothing approach that retains and enhances important manifold features. Furthermore, it is demonstrated to improve performance over existing smoothing techniques.}, number={5}, journal={COMPUTER GRAPHICS FORUM}, author={El Ouafdi, A. F. and Ziou, D. and Krim, H.}, year={2008}, month={Jul}, pages={1357–1364} }
@article{aouada_dreisigmeyer_krim_2008, title={Geometric modeling of rigid and non-rigid 3D shapes using the global geodesic function}, DOI={10.1109/cvprw.2008.4563075}, abstractNote={In this paper, we present a novel intrinsic geometric representation of 3D objects. We add the proposed modeling of objects to their topological graphs to ensure a full and compact description necessary for shape-based retrieval, recognition and analysis of 3D models. In our approach, we address the challenges due to pose variability, computational complexity and noisy data by intrinsically and simply describing a 3D object by a global geodesic function. We exploit the geometric features contained in the dense set of iso-levels of this function. Using Whitney easy embedding theorem, we embed the manifold of the geodesic iso-levels in Ropf ^{3} and obtain a single space curve as our geometry descriptor. 3D shape comparison is then reduced to comparing the resulting modeling curves. To quantify the dissimilarities between them we simply compute an L ^{2} distance between classical Euclidian invariants applied to space curves. The experimental results show that in addition to being straightforward and easy to compute, our modeling technique achieves a high level of discrimination, and appears to be robust to both noise and decimation.}, journal={Pattern Recognition}, author={Aouada, D. and Dreisigmeyer, D. W. and Krim, H.}, year={2008}, pages={935–942} }
@article{wu_an_krim_lin_2007, title={An independent component analysis approach for minimizing effects of recirculation in dynamic susceptibility contrast magnetic resonance imaging}, volume={27}, ISSN={["1559-7016"]}, DOI={10.1038/sj.jcbfm.9600374}, abstractNote={In dynamic susceptibility contrast (DSC) perfusion-weighted imaging, effects of recirculation are normally minimized by a gamma-variate fitting procedure of the concentration curves before estimating hemodynamic parameters. The success of this method, however, hinges largely on the extent to which magnetic resonance signal is altered in the presence of a contrast agent and a temporal separation between the first and subsequent passages of the contrast agent. Moreover, important physiologic information might be compromised by imposing an analytic equation to all measured concentration curves. This investigation proposes to exploit independent component analysis to minimize effects of recirculation in DSC. Results obtained from simulation, normal healthy volunteers, and acute stroke patients show that such a technique can greatly minimize the effects of recirculation despite a substantial overlap between the first passage and recirculation. This in turn should improve estimation of cerebral hemodynamics particularly when an overlap between the first passage and recirculation is suspected as in an ischemic lesion.}, number={3}, journal={JOURNAL OF CEREBRAL BLOOD FLOW AND METABOLISM}, author={Wu, Yang and An, Hongyu and Krim, Hamid and Lin, Weili}, year={2007}, month={Mar}, pages={632–645} }
@article{baloch_krim_2007, title={Flexible skew-symmetric shape model for shape representation, classification, and sampling}, volume={16}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2006.888348}, abstractNote={Skewness of shape data often arises in applications (e.g., medical image analysis) and is usually overlooked in statistical shape models. In such cases, a Gaussian assumption is unrealistic and a formulation of a general shape model which accounts for skewness is in order. In this paper, we present a novel statistical method for shape modeling, which we refer to as the flexible skew-symmetric shape model (FSSM). The model is sufficiently flexible to accommodate a departure from Gaussianity of the data and is fairly general to learn a "mean shape" (template), with a potential for classification and random generation of new realizations of a given shape. Robustness to skewness results from deriving the FSSM from an extended class of flexible skew-symmetric distributions. In addition, we demonstrate that the model allows us to extract principal curves in a point cloud. The idea is to view a shape as a realization of a spatial random process and to subsequently learn a shape distribution which captures the inherent variability of realizations, provided they remain, with high probability, within a certain neighborhood range around a mean. Specifically, given shape realizations, FSSM is formulated as a joint bimodal distribution of angle and distance from the centroid of an aggregate of random points. Mean shape is recovered from the modes of the distribution, while the maximum likelihood criterion is employed for classification.}, number={2}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Baloch, Sajjad H. and Krim, Hamid}, year={2007}, month={Feb}, pages={317–328} }
@article{ben_hamza_krim_2006, title={Geodesic matching of triangulated surfaces}, volume={15}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2006.875250}, abstractNote={Recognition of images and shapes has long been the central theme of computer vision. Its importance is increasing rapidly in the field of computer graphics and multimedia communication because it is difficult to process information efficiently without its recognition. In this paper, we propose a new approach for object matching based on a global geodesic measure. The key idea behind our methodology is to represent an object by a probabilistic shape descriptor that measures the global geodesic distance between two arbitrary points on the surface of an object. In contrast to the Euclidean distance which is more suitable for linear spaces, the geodesic distance has the advantage to be able to capture the intrinsic geometric structure of the data. The matching task therefore becomes a one-dimensional comparison problem between probability distributions which is clearly much simpler than comparing three-dimensional structures. Object matching can then be carried out by an information-theoretic dissimilarity measure calculations between geodesic shape distributions, and is additionally computationally efficient and inexpensive.}, number={8}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Ben Hamza, A. and Krim, Hamid}, year={2006}, month={Aug}, pages={2249–2258} }
@book{statistics_and_analysis_of_shapes_2006, title={Statistics and Analysis of Shapes}, ISBN={0817643761}, DOI={10.1007/0-8176-4481-4}, abstractNote={Introduction S. Bouix, K. Siddiqi, A. Tannenbaum, and S.W. Zucker: Medial Axis Computation and Evolution P.T. Fletcher, S.M. Pizer, and S.C. Joshi: Shape Variation of Medial Axis Representations via Principal Geodesic Analysis of Symmetric Spaces S.H. Baloch and H. Krim: 2D Shape Modeling Using Skeletal Graphs in a Morse Theoretic Framework S. Belongie, G. Mori, and J. Malik: Matching with Shape Contexts P. Muse, F. Sur, F. Cao, Y. Gousseau, and J.-M. Morel: Shape Recognition Based on a Contrario Methodology S. Manay, D. Cremers, B.-W. Hong, A. Yezzi, Jr., and S. Soatto: Integral Invariants and Shape Matching N. Paragios, M. Taron, X. Huang, M. Rousson, and D. Metaxas: On the Representation of Shapes Using Implicit Functions F. Memoli and G. Sapiro: Computing with Point Cloud Data J.A. Costa and A.O. Hero III: Determining Intrinsic Dimension and Entropy of High-Dimensional Shape Spaces G. Arnold, P.F. Stiller, and K. Sturtz: Object-Image Metrics for Generalized Weak Perspective Projection X. Descombes and E. Pechersky: Wulff Shapes at Zero Temperature for Some Models Used in Image Processing S. Angenent, A. Tannenbaum, A. Yezzi, Jr., and O. Zeitouni: Curve Shortening and Interacting Particle Systems S. Joshi, D. Kaziska, A. Srivastava, and W. Mio: Riemannian Structures on Shape Spaces: A Framework for Statistical Inferences J. Glaunes, A. Trouve, and L. Younes: Modeling Planar Shape Variation via Hamiltonian Flows of Curves G. Charpiat, O. Faugeras, R. Keriven, and P. Maurel: Approximations of Shape Metrics and Application to Shape Warping and Empirical Shape Statistics}, publisher={Boston: Birkhauser}, year={2006} }
@article{ben_hamza_he_krim_willsky_2005, title={A multiscale approach to pixel-level image fusion}, volume={12}, DOI={10.3233/ica-2005-12201}, abstractNote={Pixel level image fusion refers to the processing and synergistic combination of information gathered by various imaging sources to provide a better understanding of a scene. We formulate the image fusion as an optimization problem and propose an inf}, number={2}, journal={Integrated Computer-aided Engineering}, author={Ben Hamza, A. and He, Y. and Krim, H. and Willsky, A.}, year={2005}, pages={135–146} }
@article{unal_krim_yezzi_2005, title={Fast incorporation of optical flow into active polygons}, volume={14}, ISSN={["1941-0042"]}, DOI={10.1109/TIP.2005.847286}, abstractNote={In this paper, we first reconsider, in a different light, the addition of a prediction step to active contour-based visual tracking using an optical flow and clarify the local computation of the latter along the boundaries of continuous active contours with appropriate regularizers. We subsequently detail our contribution of computing an optical flow-based prediction step directly from the parameters of an active polygon, and of exploiting it in object tracking. This is in contrast to an explicitly separate computation of the optical flow and its ad hoc application. It also provides an inherent regularization effect resulting from integrating measurements along polygon edges. As a result, we completely avoid the need of adding ad hoc regularizing terms to the optical flow computations, and the inevitably arbitrary associated weighting parameters. This direct integration of optical flow into the active polygon framework distinguishes this technique from most previous contour-based approaches, where regularization terms are theoretically, as well as practically, essential. The greater robustness and speed due to a reduced number of parameters of this technique are additional and appealing features.}, number={6}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Unal, G and Krim, H and Yezzi, A}, year={2005}, month={Jun}, pages={745–759} }
@article{poliannikov_krim_2005, title={Identification of a discrete planar symmetric shape from a single noisy view}, volume={14}, ISSN={["1057-7149"]}, DOI={10.1109/TIP.2005.859387}, abstractNote={In this paper, we propose a method for identifying a discrete planar symmetric shape from an arbitrary viewpoint. Our algorithm is based on a newly proposed notion of a view's skeleton. We show that this concept yields projective invariants which facilitate the identification procedure. It is, furthermore, shown that the proposed method may be extended to the case of noisy data to yield an optimal estimate of a shape in question. Substantiating examples are provided.}, number={12}, journal={IEEE TRANSACTIONS ON IMAGE PROCESSING}, author={Poliannikov, OV and Krim, H}, year={2005}, month={Dec}, pages={2051–2059} }
@article{unal_yezzi_krim_2005, title={Information-theoretic active polygons for unsupervised texture segmentation}, volume={62}, ISSN={["1573-1405"]}, DOI={10.1007/s11263-005-4880-6}, abstractNote={Curve evolution models used in image segmentation and based on image region information usually utilize simple statistics such as means and variances, hence can not account for higher order nature of the textural characteristics of image regions. In addition, the object delineation by active contour methods, results in a contour representation which still requires a substantial amount of data to be stored for subsequent multimedia applications such as visual information retrieval from databases. Polygonal approximations of the extracted continuous curves are required to reduce the amount of data since polygons are powerful approximators of shapes for use in later recognition stages such as shape matching and coding. The key contribution of this paper is the development of a new active contour model which nicely ties the desirable polygonal representation of an object directly to the image segmentation process. This model can robustly capture texture boundaries by way of higher-order statistics of the data and using an information-theoretic measure and with its nature of the ordinary differential equations. This new variational texture segmentation model, is unsupervised since no prior knowledge on the textural properties of image regions is used. Another contribution in this sequel is a new polygon regularizer algorithm which uses electrostatics principles. This is a global regularizer and is more consistent than a local polygon regularization in preserving local features such as corners.}, number={3}, journal={INTERNATIONAL JOURNAL OF COMPUTER VISION}, author={Unal, G and Yezzi, A and Krim, H}, year={2005}, pages={199–220} }
@article{wu_an_krim_vo_lee_lin_2005, title={Intracranial vascular transfer function in acute stroke patients}, volume={25}, ISSN={0271-678X 1559-7016}, url={http://dx.doi.org/10.1038/sj.jcbfm.9591524.0394}, DOI={10.1038/sj.jcbfm.9591524.0394}, abstractNote={Vascular transfer function (VTF) could potentially provide highly relevant physiological information, particularly in patients with cerebrovascular diseases. In this study, we aim to investigate potential alterations of intracranial VTF in patients with acute stroke. The widely employed dynamic susceptibility contrast (DSC) MR approach was employed to acquire images and spatial independent component analysis (ICA) was used to determine local arterial function (LAF) 1, reflecting MR signal changes resulted in the passage of the injected contrast. Subsequently, pixel-by-pixel VTF was derived through the deconvolution of the LAFs with a global artery function (GAF) obtained from the middle cerebral artery (MCA) using singular value decomposition (SVD). The ability to non-invasively depict VTF may offer new insights into blood flow related alterations in acute stroke patients. Perfusion images (PWI) were acquired using DSC from three healthy volunteers at 3 T and five acute stroke patients within 3-6 hrs from symptom onset at 1.5 T using a single shot T2*-weighted EPI sequence. In addition, diffusion-weighted (DWI) images were also acquired. GAF, Cga(t), was obtained through averaging contrast induced signal changes in the contralateral MCA with recirculation effects removed. The susceptibility related signal changes were converted to concentration curves. ICA analysis (ISP group, DTU, http://isp.imm.dtu.dk/toolbox) was applied to the concentration time curves throughout the entire brain 1. LAFs, Cla(x,t), were constructed based on both the spatial mappings and the temporal characteristics of the components, similar to that proposed in reference 1. 
Finally, VTF (T(x,t)) was obtained through SVD by deconvolving LAFs with GAF. In order to characterize how VTF differs between brain regions, DWI and PWI images were employed to define two region-of-interests (ROIs), namely, DWI-defined lesions and PWI/DWI mismatched regions while a normal ROI was defined in the contralateral hemisphere. In contrast, two ROIs were placed in the two hemispheres for the normal volunteers. Finally, the full-width-half-maximum (FWHM) and the power (EVTF) of the first harmonic of VTF were used to quantitatively determine the discrepancies between different ROIs. For comparison purposes, the ETVF obtained in stroke patients was normalized to that obtained from the normal volunteers. The FWHM obtained from normal volunteers is 5.8+/-0.2 s and 6.0+/-0.02 s, respectively, in the two ROIs. In contrast, for the stroke patients the DWI-defined lesions exhibit a much larger FWHM (9.0+/-8.8 s) while a similar FWHM was obtained for both the PWI/DWI mismatched regions (5.5+/-1.7 s) and the contralateral hemisphere (4.9+/-1.4 s) when compared with that obtained in normal subjects. In addition, the normalized power of the first harmonic of the VTF demonstrates that the DWI-defined lesion, PWI/DWI mismatched regions, and the contralateral hemisphere is 24.1+/-31.1%, 43.5+/-35.4%, and 153.5+/-103.8% with respect to that obtained in normal subjects, respectively. These findings suggest that the DWI-defined lesions exhibit the largest bolus dispersion and smallest power when compared with that obtained in the normal subjects as well as other brain regions in stroke patients. Although our study has a limited sample size, we have demonstrated a novel tool for obtaining VTF in acute stroke patients.}, number={1_suppl}, journal={Journal of Cerebral Blood Flow & Metabolism}, publisher={SAGE Publications}, author={Wu, Yang and An, Hongyu and Krim, Hamid and Vo, Katie and Lee, Jin-Moo and Lin, Weili}, year={2005}, month={Aug}, pages={S394–S394} }
@article{ben_hamza_krim_zerubia_2004, title={A nonlinear entropic variational model for image filtering}, volume={2004}, ISSN={["1687-0433"]}, DOI={10.1155/S1110865704407197}, abstractNote={We propose an information-theoretic variational filter for image denoising. It is a result of minimizing a functional subject to some noise constraints, and takes a hybrid form of a negentropy variational integral for small gradient magnitudes and a total variational integral for large gradient magnitudes. The core idea behind this approach is to use geometric insight in helping to construct regularizing functionals and avoiding a subjective choice of a prior in maximum a posteriori estimation. Illustrative experimental results demonstrate a much improved performance of the approach in the presence of Gaussian and heavy-tailed noise.}, number={16}, journal={EURASIP JOURNAL ON APPLIED SIGNAL PROCESSING}, author={Ben Hamza, A and Krim, H and Zerubia, J}, year={2004}, month={Nov}, pages={2408–2422} }
@inbook{krim_abdelmaksoud_borovsky_winder_2004, title={Scanning tunneling microscope-quartz crystal microbalance studies of "real world" and model lubricants}, volume={882}, DOI={10.1021/bk-2004-0882.ch001}, booktitle={Dynamics and friction of submicrometer confining systems}, publisher={Washington, D.C.: American Chemical Society}, author={Krim, J. and Abdelmaksoud, M. and Borovsky, B. and Winder, S. M.}, year={2004} }
@inbook{baloch_krim_genton_2004, title={Shape representation with flexible skew-symmetric distributions}, ISBN={1584884312}, DOI={10.1201/9780203492000.ch17}, booktitle={Skew-elliptical distributions and their applications: A journey beyond normality}, publisher={Boca Raton, FL: Chapman \& Hall/CRC}, author={Baloch, S. H. and Krim, H. and Genton, M. G.}, year={2004} }
@article{bao_krim_2004, title={Smart nonlinear diffusion: A probabilistic approach}, volume={26}, DOI={10.1109/TPAMI.2004.1261079}, abstractNote={In this paper, a stochastic interpretation of nonlinear diffusion equations used for image filtering is proposed. This is achieved by relating the problem of evolving/smoothing images to that of tracking the transition probability density functions of an underlying random process. We show that such an interpretation of, e.g., Perona-Malik equation, in turn allows additional insight and sufficient flexibility to further investigate some outstanding problems of nonlinear diffusion techniques. In particular, upon unraveling the limitations as well as the advantages of such an equation, we are able to propose a new approach which is demonstrated to improve performance over existing approaches and, more importantly, to lift the longstanding problem of a stopping criterion for a nonlinear evolution equation with no data term constraint. Substantiating examples in image enhancement and segmentation are provided.}, number={1}, journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, author={Bao, Y. F. and Krim, H.}, year={2004}, pages={63–72} }
@article{he_hamza_krim_2003, title={A generalized divergence measure for robust image registration}, volume={51}, DOI={10.1109/TSP.2003.810305}, abstractNote={Entropy-based divergence measures have shown promising results in many areas of engineering and image processing. We define a new generalized divergence measure, namely, the Jensen-Renyi (1996, 1976) divergence. Some properties such as convexity and its upper bound are derived. Based on the Jensen-Renyi divergence, we propose a new approach to the problem of image registration. Some appealing advantages of registration by Jensen-Renyi divergence are illustrated, and its connections to mutual information-based registration techniques are analyzed. As the key focus of this paper, we apply Jensen-Renyi divergence for inverse synthetic aperture radar (ISAR) image registration. The goal is to estimate the target motion during the imaging time. Our approach applies Jensen-Renyi divergence to measure the statistical dependence between consecutive ISAR image frames, which would be maximal if the images are geometrically aligned. Simulation results demonstrate that the proposed method is efficient and effective.}, number={5}, journal={IEEE Transactions on Signal Processing}, author={He, Y. and Hamza, A. B. and Krim, H.}, year={2003}, pages={1211–1220} }
@article{benazza-benyahia_pesquet_krim_2003, title={A nonlinear diffusion-based three-band filter bank}, volume={10}, ISSN={["1558-2361"]}, DOI={10.1109/LSP.2003.818864}, abstractNote={In this letter, we revisit a number of concepts that have recently proven to be useful in multiresolution signal analysis, specifically by replacing the now classical linear-scale transition operators by nonlinear ones. More precisely, we address the problem of designing appropriate operators associated to nonlinear filter banks using multiscale analysis. We first establish a connection between nonlinear filter banks and partial differential equations operators used in scale-space theory. Toward this end, we propose specific structures of nonlinear three-band decompositions ensuring a perfect reconstruction. The behavior of the proposed structures is analyzed for a step-like signal in a high SNR scenario, and a simulation is proposed for a more complex scenario.}, number={12}, journal={IEEE SIGNAL PROCESSING LETTERS}, author={Benazza-Benyahia, A and Pesquet, JC and Krim, H}, year={2003}, month={Dec}, pages={360–363} }
@article{karacali_krim_2003,
  title        = {Fast minimization of structural risk by nearest neighbor rule},
  volume       = {14},
  issn         = {1941-0093},
  doi          = {10.1109/TNN.2002.804315},
  abstractNote = {In this paper, we present a novel nearest neighbor rule-based implementation of the structural risk minimization principle to address a generic classification problem. We propose a fast reference set thinning algorithm on the training data set similar to a support vector machine (SVM) approach. We then show that the nearest neighbor rule based on the reduced set implements the structural risk minimization principle, in a manner which does not involve selection of a convenient feature space. Simulation results on real data indicate that this method significantly reduces the computational cost of the conventional SVMs, and achieves a nearly comparable test error performance.},
  number       = {1},
  journal      = {IEEE Transactions on Neural Networks},
  author       = {Karacali, B. and Krim, H.},
  year         = {2003},
  month        = jan,
  pages        = {127--137}
}
@inproceedings{ben_hamza_krim_2003a,
  title        = {Geodesic object representation and recognition},
  volume       = {2886},
  isbn         = {3540204997},
  doi          = {10.1007/978-3-540-39966-7_36},
  abstractNote = {This paper describes a shape signature that captures the intrinsic geometric structure of 3D objects. The primary motivation of the proposed approach is to encode a 3D shape into a one-dimensional geodesic distribution function. This compact and computationally simple representation is based on a global geodesic distance defined on the object surface, and takes the form of a kernel density estimate. To gain further insight into the geodesic shape distribution and its practicality in 3D computer imagery, some numerical experiments are provided to demonstrate the potential and the much improved performance of the proposed methodology in 3D object matching. This is carried out using an information-theoretic measure of dissimilarity between probabilistic shape distributions.},
  booktitle    = {Discrete geometry for computer imagery: 11th International Conference, DGCI 2003, Naples, Italy, November 19-21, 2003},
  publisher    = {Springer},
  address      = {Berlin; New York},
  author       = {Ben Hamza, A. and Krim, H.},
  year         = {2003},
  pages        = {378--387}
}
@inproceedings{ben_hamza_krim_2003b,
  title     = {Image registration and segmentation by maximizing the {Jensen-Renyi} divergence},
  volume    = {2683},
  isbn      = {3540404988},
  doi       = {10.1007/978-3-540-45063-4_10},
  booktitle = {Energy minimization methods in computer vision and pattern recognition},
  publisher = {Springer},
  address   = {Berlin; New York},
  author    = {Ben Hamza, A. and Krim, H.},
  editor    = {Rangarajan, A. and Figueiredo, M. and Zerubia, J.},
  year      = {2003},
  pages     = {147--163}
}
@article{zhang_zhang_krim_walter_2003,
  title        = {Object representation and recognition in shape spaces},
  volume       = {36},
  issn         = {0031-3203},
  doi          = {10.1016/S0031-3203(02)00226-1},
  abstractNote = {In this paper, we describe a shape space based approach for invariant object representation and recognition. In this approach, an object and all its similarity transformed versions are identified with a single point in a high-dimensional manifold called the shape space. Object recognition is achieved by measuring the geodesic distance between an observed object and a model in the shape space. This approach produced promising results in 2D object recognition experiments: it is invariant to similarity transformations and is relatively insensitive to noise and occlusion. Potentially, it can also be used for 3D object recognition.},
  number       = {5},
  journal      = {Pattern Recognition},
  author       = {Zhang, J. and Zhang, X. and Krim, H. and Walter, G. G.},
  year         = {2003},
  month        = may,
  pages        = {1143--1154}
}
@article{hero_krim_2002,
  title   = {Mathematical methods in imaging},
  volume  = {19},
  issn    = {1053-5888},
  doi     = {10.1109/MSP.2002.1028348},
  number  = {5},
  journal = {IEEE Signal Processing Magazine},
  author  = {Hero, A. O. and Krim, H.},
  year    = {2002},
  month   = sep,
  pages   = {13--14}
}
@article{he_krim_2002,
  title        = {Multiscale signal enhancement: Beyond the normality and independence assumption},
  volume       = {11},
  issn         = {1057-7149},
  doi          = {10.1109/TIP.2002.999676},
  abstractNote = {Current approaches to denoising or signal enhancement in a wavelet-based framework have generally relied on the assumption of normally distributed perturbations. In practice, this assumption is often violated and sometimes prior information of the probability distribution of a noise process is not even available. To relax this assumption, we propose a novel nonlinear filtering technique in this paper. The key idea is to project a noisy signal onto a wavelet domain and to suppress wavelet coefficients by a mask derived from curvature extrema in its scale space representation. For a piecewise smooth signal, it can be shown that filtering by this curvature mask is equivalent to preserving the signal pointwise H{\"o}lder exponents at the singular points and lifting its smoothness elsewhere.},
  number       = {4},
  journal      = {IEEE Transactions on Image Processing},
  author       = {He, Y. and Krim, H.},
  year         = {2002},
  month        = apr,
  pages        = {423--433}
}
@article{unal_krim_yezzi_2002,
  title        = {Stochastic differential equations and geometric flows},
  volume       = {11},
  issn         = {1057-7149},
  doi          = {10.1109/TIP.2002.804568},
  abstractNote = {In previous years, curve evolution, applied to a single contour or to the level sets of an image via partial differential equations, has emerged as an important tool in image processing and computer vision. Curve evolution techniques have been utilized in problems such as image smoothing, segmentation, and shape analysis. We give a local stochastic interpretation of the basic curve smoothing equation, the so called geometric heat equation, and show that this evolution amounts to a tangential diffusion movement of the particles along the contour. Moreover, assuming that a priori information about the shapes of objects in an image is known, we present modifications of the geometric heat equation designed to preserve certain features in these shapes while removing noise. We also show how these new flows may be applied to smooth noisy curves without destroying their larger scale features, in contrast to the original geometric heat flow which tends to circularize any closed curve.},
  number       = {12},
  journal      = {IEEE Transactions on Image Processing},
  author       = {Unal, G. and Krim, H. and Yezzi, A.},
  year         = {2002},
  month        = dec,
  pages        = {1405--1416}
}
@article{hamza_krim_unal_2002,
  title        = {Unifying probabilistic and variational estimation},
  volume       = {19},
  issn         = {1053-5888},
  doi          = {10.1109/MSP.2002.1028351},
  abstractNote = {A maximum a posteriori (MAP) estimator using a Markov or a maximum entropy random field model for a prior distribution may be viewed as a minimizer of a variational problem. Using notions from robust statistics, a variational filter referred to as a Huber gradient descent flow is proposed. It is a result of optimizing a Huber functional subject to some noise constraints and takes a hybrid form of a total variation diffusion for large gradient magnitudes and of a linear diffusion for small gradient magnitudes. Using the gained insight, and as a further extension, we propose an information-theoretic gradient descent flow which is a result of minimizing a functional that is a hybrid between a negentropy variational integral and a total variation. Illustrating examples demonstrate a much improved performance of the approach in the presence of Gaussian and heavy tailed noise. In this article, we present a variational approach to MAP estimation with a more qualitative and tutorial emphasis. The key idea behind this approach is to use geometric insight in helping construct regularizing functionals and avoiding a subjective choice of a prior in MAP estimation. Using tools from robust statistics and information theory, we show that we can extend this strategy and develop two gradient descent flows for image denoising with a demonstrated performance.},
  number       = {5},
  journal      = {IEEE Signal Processing Magazine},
  author       = {Hamza, A. B. and Krim, H. and Unal, G. B.},
  year         = {2002},
  month        = sep,
  pages        = {37--47}
}
@inproceedings{ben_hamza_krim_2001a,
  title        = {A variational approach to maximum a posteriori estimation for image denoising},
  volume       = {2134},
  doi          = {10.1007/3-540-44745-8_2},
  abstractNote = {Using first principles, we establish in this paper a connection between the maximum a posteriori (MAP) estimator and the variational formulation of optimizing a given functional subject to some noise constraints. A MAP estimator which uses a Markov or a maximum entropy random field model for a prior distribution can be viewed as a minimizer of a variational problem. Using notions from robust statistics, a variational filter called Huber gradient descent flow is proposed. It yields the solution to a Huber type functional subject to some noise constraints, and the resulting filter behaves like a total variation anisotropic diffusion for large gradient magnitudes and like an isotropic diffusion for small gradient magnitudes. Using some of the gained insight, we are also able to propose an information-theoretic gradient descent flow whose functional turns out to be a compromise between a neg-entropy variational integral and a total variation. Illustrating examples demonstrate a much improved performance of the proposed filters in the presence of Gaussian and heavy tailed noise.},
  booktitle    = {Energy minimization methods in computer vision and pattern recognition: Third International Workshop, EMMCVPR 2001, Sophia Antipolis, France, September 3-5, 2001: Proceedings},
  publisher    = {Springer},
  address      = {Berlin; New York},
  author       = {Ben Hamza, A. and Krim, H.},
  editor       = {Figueiredo, M. and Zerubia, J. and Jain, A. K.},
  year         = {2001},
  pages        = {19--33}
}
@article{ben_hamza_krim_2001b,
  title        = {Image denoising: A nonlinear robust statistical approach},
  volume       = {49},
  issn         = {1053-587X},
  doi          = {10.1109/78.969512},
  abstractNote = {Nonlinear filtering techniques based on the theory of robust estimation are introduced. Some deterministic and asymptotic properties are derived. The proposed denoising methods are optimal over the Huber {$\epsilon$}-contaminated normal neighborhood and are highly resistant to outliers. Experimental results showing a much improved performance of the proposed filters in the presence of Gaussian and heavy-tailed noise are analyzed and illustrated.},
  number       = {12},
  journal      = {IEEE Transactions on Signal Processing},
  author       = {Ben Hamza, A. and Krim, H.},
  year         = {2001},
  month        = dec,
  pages        = {3045--3054}
}
@article{pollak_willsky_krim_2000,
  title        = {Image segmentation and edge enhancement with stabilized inverse diffusion equations},
  volume       = {9},
  issn         = {1941-0042},
  doi          = {10.1109/83.821738},
  abstractNote = {We introduce a family of first-order multidimensional ordinary differential equations (ODEs) with discontinuous right-hand sides and demonstrate their applicability in image processing. An equation belonging to this family is an inverse diffusion everywhere except at local extrema, where some stabilization is introduced. For this reason, we call these equations ``stabilized inverse diffusion equations'' (SIDEs). Existence and uniqueness of solutions, as well as stability, are proven for SIDEs. A SIDE in one spatial dimension may be interpreted as a limiting case of a semi-discretized Perona-Malik equation (1990, 1994). In an experiment, SIDE's are shown to suppress noise while sharpening edges present in the input signal. Their application to image segmentation is also demonstrated.},
  number       = {2},
  journal      = {IEEE Transactions on Image Processing},
  author       = {Pollak, I. and Willsky, A. S. and Krim, H.},
  year         = {2000},
  month        = feb,
  pages        = {256--266}
}
@inproceedings{poliannikov_bao_krim_1999,
  title        = {{Levy} processes for image modeling},
  isbn         = {0-7695-0141-9},
  doi          = {10.1109/host.1999.778732},
  abstractNote = {Nonhomogenous random fields are known to be well adapted to modeling a wide class of images. Their computational complexity generally causes their lack of appeal, we propose a more efficient model capable of capturing textures, shapes, as well as jumps typically encountered in infra-red images. The so-called Levy random fields as we show, can indeed represent a very well adapted alternative for inference applications and the like.},
  booktitle    = {Proceedings of the IEEE Signal Processing Workshop on Higher-Order Statistics},
  author       = {Poliannikov, O. V. and Bao, Y. F. and Krim, H.},
  year         = {1999},
  pages        = {233--236}
}
@article{krim_schick_1999,
  title        = {Minimax description length for signal denoising and optimized representation},
  volume       = {45},
  issn         = {0018-9448},
  doi          = {10.1109/18.761331},
  abstractNote = {Approaches to wavelet-based denoising (or signal enhancement) have generally relied on the assumption of normally distributed perturbations. To relax this assumption, which is often violated in practice, we derive a robust wavelet thresholding technique based on the minimax description length (MMDL) principle. We first determine the least favorable distribution in the {$\varepsilon$}-contaminated normal family as the member that maximizes the entropy. We show that this distribution, and the best estimate based upon it, namely the maximum-likelihood estimate, together constitute a saddle point. The MMDL approach results in a thresholding scheme that is resistant to heavy tailed noise. We further extend this framework and propose a novel approach to selecting an adapted or best basis (BB) that results in optimal signal reconstruction. Finally, we address the practical case where the underlying signal is known to be bounded, and derive a two-sided thresholding technique that is resistant to outliers and has bounded error.},
  number       = {3},
  journal      = {IEEE Transactions on Information Theory},
  author       = {Krim, H. and Schick, I. C.},
  year         = {1999},
  month        = apr,
  pages        = {898--908}
}
@article{krim_willinger_juditski_tse_1999,
  title   = {Multiscale statistical signal analysis and its applications - Introduction},
  volume  = {45},
  issn    = {0018-9448},
  doi     = {10.1109/TIT.1999.761320},
  number  = {3},
  journal = {IEEE Transactions on Information Theory},
  author  = {Krim, H. and Willinger, W. and Juditski, A. and Tse, D. N. C.},
  year    = {1999},
  month   = apr,
  pages   = {825--827}
}
@article{krim_tucker_mallat_donoho_1999,
  title        = {On denoising and best signal representation},
  volume       = {45},
  issn         = {1557-9654},
  doi          = {10.1109/18.796365},
  abstractNote = {We propose a best basis algorithm for signal enhancement in white Gaussian noise. The best basis search is performed in families of orthonormal bases constructed with wavelet packets or local cosine bases. We base our search for the ``best'' basis on a criterion of minimal reconstruction error of the underlying signal. This approach is intuitively appealing, because the enhanced or estimated signal has an associated measure of performance, namely, the resulting mean-square error. Previous approaches in this framework have focused on obtaining the most ``compact'' signal representations, which consequently contribute to effective denoising. These approaches, however, do not possess the inherent measure of performance which our algorithm provides. We first propose an estimator of the mean-square error, based on a heuristic argument and subsequently compare the reconstruction performance based upon it to that based on the Stein (1981) unbiased risk estimator. We compare the two proposed estimators by providing both qualitative and quantitative analyses of the bias term. Having two estimators of the mean-square error, we incorporate these cost functions into the search for the ``best'' basis, and subsequently provide a substantiating example to demonstrate their performance.},
  number       = {7},
  journal      = {IEEE Transactions on Information Theory},
  author       = {Krim, H. and Tucker, D. and Mallat, S. and Donoho, D.},
  year         = {1999},
  month        = nov,
  pages        = {2225--2238}
}
@article{chen_hero_djuric_messer_goldberg_thomson_amin_krim_pesquet_giannakis_et_al_1998,
  title        = {Highlights of statistical signal and array processing},
  volume       = {15},
  issn         = {1558-0792},
  doi          = {10.1109/79.708539},
  abstractNote = {The Statistical Signal and Array Processing Technical Committee (SSAP-TC) deals with signals that are random and processes an array of signals simultaneously. The field of SSAP represents both solid theory and practical applications. Starting with research in spectrum estimation and statistical modeling, study in this field is always full of elegant mathematical tools such as statistical analysis and matrix theory. The area of statistical signal processing expands into estimation and detection algorithms, time-frequency domain analysis, system identification, and channel modeling and equalization. The area of array signal processing also extends into multichannel filtering, source localization and separation, and so on. This article represents an endeavor by the members of the SSAT-TC to review all the significant developments in the field of SSAP. To provide readers with pointers for further study of the field, this article includes a very impressive bibliography-close to 500 references are cited. This is just one of the indications that the field of statistical signals has been an extremely active one in the signal processing community. The article also introduces the recent reorganization of three technical committees of the Signal Processing Society.},
  number       = {5},
  journal      = {IEEE Signal Processing Magazine},
  author       = {Chen, T. H. and Hero, A. and Djuric, P. M. and Messer, H. and Goldberg, J. and Thomson, D. J. and Amin, M. G. and Krim, H. and Pesquet, J. C. and Giannakis, G. and others},
  year         = {1998},
  month        = sep,
  pages        = {21--64}
}
@article{krim_viberg_1996,
  title        = {Two decades of array signal processing research: the parametric approach},
  volume       = {13},
  issn         = {1053-5888},
  url          = {http://dx.doi.org/10.1109/79.526899},
  doi          = {10.1109/79.526899},
  abstractNote = {The quintessential goal of sensor array signal processing is the estimation of parameters by fusing temporal and spatial information, captured via sampling a wavefield with a set of judiciously placed antenna sensors. The wavefield is assumed to be generated by a finite number of emitters, and contains information about signal parameters characterizing the emitters. A review of the area of array processing is given. The focus is on parameter estimation methods, and many relevant problems are only briefly mentioned. We emphasize the relatively more recent subspace-based methods in relation to beamforming. The article consists of background material and of the basic problem formulation. Then we introduce spectral-based algorithmic solutions to the signal parameter estimation problem. We contrast these suboptimal solutions to parametric methods. Techniques derived from maximum likelihood principles as well as geometric arguments are covered. Later, a number of more specialized research topics are briefly reviewed. Then, we look at a number of real-world problems for which sensor array processing methods have been applied. We also include an example with real experimental data involving closely spaced emitters and highly correlated signals, as well as a manufacturing application example.},
  number       = {4},
  journal      = {IEEE Signal Processing Magazine},
  publisher    = {Institute of Electrical and Electronics Engineers (IEEE)},
  author       = {Krim, H. and Viberg, M.},
  year         = {1996},
  month        = jul,
  pages        = {67--94}
}