@article{muhammad_clark_haque_williams_sozzani_long_2022,
  title     = {{POPEYE} intercellular localization mediates cell-specific iron deficiency responses},
  volume    = {8},
  issn      = {1532-2548},
  url       = {https://doi.org/10.1093/plphys/kiac357},
  doi       = {10.1093/plphys/kiac357},
  abstract  = {Plants must tightly regulate iron (Fe) sensing, acquisition, transport, mobilization, and storage to ensure sufficient levels of this essential micronutrient. POPEYE (PYE) is an iron responsive transcription factor that positively regulates the iron deficiency response, while also repressing genes essential for maintaining iron homeostasis. However, little is known about how PYE plays such contradictory roles. Under iron-deficient conditions, pPYE:GFP accumulates in the root pericycle while pPYE:PYE-GFP is localized to the nucleus in all Arabidopsis (Arabidopsis thaliana) root cells, suggesting that PYE may have cell-specific dynamics and functions. Using scanning fluorescence correlation spectroscopy and cell-specific promoters, we found that PYE-GFP moves between different cells and that the tendency for movement corresponds with transcript abundance. While localization to the cortex, endodermis, and vasculature is required to manage changes in iron availability, vasculature and endodermis localization of PYE-GFP protein exacerbated pye-1 defects and elicited a host of transcriptional changes that are detrimental to iron mobilization. Our findings indicate that PYE acts as a positive regulator of iron deficiency response by regulating iron bioavailability differentially across cells, which may trigger iron uptake from the surrounding rhizosphere and impact root energy metabolism.},
  journal   = {Plant Physiology},
  publisher = {Oxford University Press (OUP)},
  author    = {Muhammad, DurreShahwar and Clark, Natalie M. and Haque, Samiul and Williams, Cranos M. and Sozzani, Rosangela and Long, Terri A.},
  year      = {2022},
  month     = aug
}

@article{haque_lobaton_nelson_yencho_pecota_mierop_kudenov_boyette_williams_2021,
  title     = {Computer vision approach to characterize size and shape phenotypes of horticultural crops using high-throughput imagery},
  volume    = {182},
  issn      = {0168-1699},
  url       = {http://dx.doi.org/10.1016/j.compag.2021.106011},
  doi       = {10.1016/j.compag.2021.106011},
  abstract  = {For many horticultural crops, variation in quality (e.g., shape and size) contributes significantly to the crop's market value. Metrics characterizing less subjective harvest quantities (e.g., yield and total biomass) are routinely monitored. In contrast, metrics quantifying more subjective crop quality characteristics such as ideal size and shape remain difficult to characterize objectively at the production-scale due to the lack of modular technologies for high-throughput sensing and computation. Several horticultural crops are sent to packing facilities after having been harvested, where they are sorted into boxes and containers using high-throughput scanners. These scanners capture images of each fruit or vegetable being sorted and packed, but the images are typically used solely for sorting purposes and promptly discarded. With further analysis, these images could offer unparalleled insight on how crop quality metrics vary at the industrial production-scale and provide further insight into how these characteristics translate to overall market value. At present, methods for extracting and quantifying quality characteristics of crops using images generated by existing industrial infrastructure have not been developed. Furthermore, prior studies that investigated horticultural crop quality metrics, specifically of size and shape, used a limited number of samples, did not incorporate deformed or non-marketable samples, and did not use images captured from high-throughput systems. In this work, using sweetpotato (SP) as a use case, we introduce a computer vision algorithm for quantifying shape and size characteristics in a high-throughput manner. This approach generates 3D model of SPs from two 2D images captured by an industrial sorter 90 degrees apart and extracts 3D shape features in a few hundred milliseconds. We applied the 3D reconstruction and feature extraction method to thousands of image samples to demonstrate how variations in shape features across SP cultivars can be quantified. We created a SP shape dataset containing SP images, extracted shape features, and qualitative shape types (U.S. No. 1 or Cull). We used this dataset to develop a neural network-based shape classifier that was able to predict Cull vs. U.S. No. 1 SPs with 84.59% accuracy. In addition, using univariate Chi-squared tests and random forest, we identified the most important features for determining qualitative shape type (U.S. No. 1 or Cull) of the SPs. Our study serves as a key step towards enabling big data analytics for industrial SP agriculture. The methodological framework is readily transferable to other horticultural crops, particularly those that are sorted using commercial imaging equipment.},
  journal   = {Computers and Electronics in Agriculture},
  publisher = {Elsevier BV},
  author    = {Haque, Samiul and Lobaton, Edgar and Nelson, Natalie and Yencho, G. Craig and Pecota, Kenneth V. and Mierop, Russell and Kudenov, Michael W. and Boyette, Mike and Williams, Cranos M.},
  year      = {2021},
  month     = mar,
  pages     = {106011}
}

@inproceedings{enan_rahman_haque_howlader_hatzinakos_2020,
  title     = {Object Labeling in {3D} from Multi-view Scenes Using {Gaussian-Hermite} Moment-Based Depth Map},
  volume    = {1024},
  isbn      = {978-981-32-9290-1},
  issn      = {2194-5365},
  doi       = {10.1007/978-981-32-9291-8_8},
  abstract  = {Depth as well as intensity of a pixel plays a significant role in labeling objects in 3D environments. This paper presents a novel approach of labeling objects from multi-view video sequences by incorporating rich depth information. The depth map of a scene is estimated from focus-cues using the Gaussian-Hermite moments (GHMs) of local neighboring pixels. It is expected that the depth map obtained from GHMs provides robust features as compared to that provided by other popular depth maps such as those obtained from Kinect and defocus cue. We use the rich depth and intensity values of a pixel to score every point of a video frame for generating labeled probability maps in a 3D environment. These maps are then used to create a 3D scene wherein available objects are labeled distinctively. Experimental results reveal that our proposed approach yields excellent performance of object labeling for different multi-view scenes taken from RGB-D object dataset, in particular showing significant improvements in precision-recall characteristics and F1-score.},
  booktitle = {Proceedings of 3rd International Conference on Computer Vision and Image Processing, {CVIP} 2018, Vol 2},
  author    = {Enan, Sadman Sakib and Rahman, S. M. Mahbubur and Haque, Samiul and Howlader, Tamanna and Hatzinakos, Dimitrios},
  year      = {2020},
  pages     = {87--99}
}

@article{haque_foster_keeney_boys_narayanan_2019,
  title     = {Output and input bias effects of {U.S.} direct payments},
  volume    = {50},
  issn      = {0169-5150},
  url       = {http://dx.doi.org/10.1111/agec.12479},
  doi       = {10.1111/agec.12479},
  abstract  = {This study examines the national effect of U.S. direct payments on the extent and direction of biased technical change on U.S. agriculture. We also assess the economic significance of the estimated bias effects for economic policy modeling endeavors involving a reduction of domestic support payments. A two outputs (livestock and crops) and four inputs (labor, capital, land, and material) translog cost function was estimated from national time series (1948--2011) data. Results indicate that payments do not induce output-biased technical change. We do find evidence of Hicksian bias that is land using and material input saving attributable to support payments. Global computable general equilibrium simulations suggest that price and output effects of discontinuing direct payments are 1/4 to 1/3 the size once the bias effects are incorporated.},
  number    = {2},
  journal   = {Agricultural Economics},
  publisher = {Wiley},
  author    = {Haque, Samiul and Foster, Kenneth A. and Keeney, Roman and Boys, Kathryn A. and Narayanan, Badri G.},
  year      = {2019},
  month     = jan,
  pages     = {229--236}
}

@inproceedings{haque_kindrat_zhang_mikheev_kim_liu_chung_kuian_massad_smith_2018,
  title     = {Uncertainty-enabled design of electromagnetic reflectors with integrated shape control},
  volume    = {10596},
  issn      = {1996-756X},
  doi       = {10.1117/12.2300396},
  abstract  = {We implemented a computationally efficient model for a corner-supported, thin, rectangular, orthotropic polyvinylidene fluoride (PVDF) laminate membrane, actuated by a two-dimensional array of segmented electrodes. The laminate can be used as shape-controlled electromagnetic reflector and the model estimates the reflector's shape given an array of control voltages. In this paper, we describe a model to determine the shape of the laminate for a given distribution of control voltages. Then, we investigate the surface shape error and its sensitivity to the model parameters. Subsequently, we analyze the simulated deflection of the actuated bimorph using a Zernike polynomial decomposition. Finally, we provide a probabilistic description of reflector performance using statistical methods to quantify uncertainty. We make design recommendations for nominal parameter values and their tolerances based on optimization under uncertainty using multiple methods.},
  booktitle = {Behavior and Mechanics of Multifunctional Materials and Composites XII},
  author    = {Haque, Samiul and Kindrat, Laszlo P. and Zhang, Li and Mikheev, Vikenty and Kim, Daewa and Liu, Sijing and Chung, Jooyeon and Kuian, Mykhailo and Massad, Jordan E. and Smith, Ralph C.},
  year      = {2018}
}

@article{haque_rahman_hatzinakos_2016,
  title     = {{Gaussian-Hermite} moment-based depth estimation from single still image for stereo vision},
  volume    = {41},
  issn      = {1095-9076},
  doi       = {10.1016/j.jvcir.2016.10.008},
  abstract  = {Depth information of objects plays a significant role in image-based rendering. Traditional depth estimation techniques use different visual cues including the disparity, motion, geometry, and defocus of objects. This paper presents a novel approach of focus cue-based depth estimation for still images using the Gaussian-Hermite moments (GHMs) of local neighboring pixels. The GHMs are chosen due to their superior reconstruction ability and invariance properties to intensity and geometric distortions of objects as compared to other moments. Since depths of local neighboring pixels are significantly correlated, the Laplacian matting is employed to obtain final depth map from the moment-based focus map. Experiments are conducted on images of indoor and outdoor scenes having objects with varying natures of resolution, edge, occlusion, and blur contents. Experimental results reveal that the depth estimated from GHMs can provide anaglyph images with stereo quality better than that provided by existing methods using traditional visual cues.},
  journal   = {Journal of Visual Communication and Image Representation},
  author    = {Haque, Samiul and Rahman, S. M. Mahbubur and Hatzinakos, Dimitrios},
  year      = {2016},
  month     = nov,
  pages     = {281--295}
}