@article{cai_dong_friedrich_rozsa_pnevmatikakis_giovannucci_2023,
  title        = {{FIOLA}: an accelerated pipeline for fluorescence imaging online analysis},
  volume       = {20},
  issn         = {1548-7105},
  doi          = {10.1038/s41592-023-01964-2},
  abstractNote = {Optical microscopy methods such as calcium and voltage imaging enable fast activity readout of large neuronal populations using light. However, the lack of corresponding advances in online algorithms has slowed progress in retrieving information about neural activity during or shortly after an experiment. This gap not only prevents the execution of real-time closed-loop experiments, but also hampers fast experiment–analysis–theory turnover for high-throughput imaging modalities. Reliable extraction of neural activity from fluorescence imaging frames at speeds compatible with indicator dynamics and imaging modalities poses a challenge. We therefore developed FIOLA, a framework for fluorescence imaging online analysis that extracts neuronal activity from calcium and voltage imaging movies at speeds one order of magnitude faster than state-of-the-art methods. FIOLA exploits algorithms optimized for parallel processing on GPUs and CPUs. We demonstrate reliable and scalable performance of FIOLA on both simulated and real calcium and voltage imaging datasets. Finally, we present an online experimental scenario to provide guidance in setting FIOLA parameters and to highlight the trade-offs of our approach. FIOLA is a pipeline for processing calcium or voltage imaging data. Its advantages include the fast speed and online processing.},
  number       = {9},
  journal      = {Nature Methods},
  author       = {Cai, Changjia and Dong, Cynthia and Friedrich, Johannes and Rozsa, Marton and Pnevmatikakis, Eftychios A. and Giovannucci, Andrea},
  year         = {2023},
  month        = sep,
  pages        = {1417-+},
}

@article{zhu_grier_tandon_cai_agarwal_giovannucci_kaufman_pandarinath_2022,
  title        = {A deep learning framework for inference of single-trial neural population dynamics from calcium imaging with subframe temporal resolution},
  issn         = {1546-1726},
  doi          = {10.1038/s41593-022-01189-0},
  abstractNote = {In many areas of the brain, neural populations act as a coordinated network whose state is tied to behavior on a millisecond timescale. Two-photon (2p) calcium imaging is a powerful tool to probe such network-scale phenomena. However, estimating the network state and dynamics from 2p measurements has proven challenging because of noise, inherent nonlinearities and limitations on temporal resolution. Here we describe Recurrent Autoencoder for Discovering Imaged Calcium Latents (RADICaL), a deep learning method to overcome these limitations at the population level. RADICaL extends methods that exploit dynamics in spiking activity for application to deconvolved calcium signals, whose statistics and temporal dynamics are quite distinct from electrophysiologically recorded spikes. It incorporates a new network training strategy that capitalizes on the timing of 2p sampling to recover network dynamics with high temporal precision. In synthetic tests, RADICaL infers the network state more accurately than previous methods, particularly for high-frequency components. In 2p recordings from sensorimotor areas in mice performing a forelimb reach task, RADICaL infers network state with close correspondence to single-trial variations in behavior and maintains high-quality inference even when neuronal populations are substantially reduced.},
  journal      = {Nature Neuroscience},
  author       = {Zhu, Feng and Grier, Harrison A. and Tandon, Raghav and Cai, Changjia and Agarwal, Anjali and Giovannucci, Andrea and Kaufman, Matthew T. and Pandarinath, Chethan},
  year         = {2022},
  month        = nov,
}

@article{liu_lu_villette_gou_colbert_lai_guan_land_lee_assefa_et al._2022,
  title        = {Sustained deep-tissue voltage recording using a fast indicator evolved for two-photon microscopy},
  volume       = {185},
  issn         = {1097-4172},
  doi          = {10.1016/j.cell.2022.07.013},
  abstractNote = {Genetically encoded voltage indicators are emerging tools for monitoring voltage dynamics with cell-type specificity. However, current indicators enable a narrow range of applications due to poor performance under two-photon microscopy, a method of choice for deep-tissue recording. To improve indicators, we developed a multiparameter high-throughput platform to optimize voltage indicators for two-photon microscopy. Using this system, we identified JEDI-2P, an indicator that is faster, brighter, and more sensitive and photostable than its predecessors. We demonstrate that JEDI-2P can report light-evoked responses in axonal termini of Drosophila interneurons and the dendrites and somata of amacrine cells of isolated mouse retina. JEDI-2P can also optically record the voltage dynamics of individual cortical neurons in awake behaving mice for more than 30 min using both resonant-scanning and ULoVE random-access microscopy. Finally, ULoVE recording of JEDI-2P can robustly detect spikes at depths exceeding 400 μm and report voltage correlations in pairs of neurons.},
  number       = {18},
  journal      = {Cell},
  author       = {Liu, Zhuohe and Lu, Xiaoyu and Villette, Vincent and Gou, Yueyang and Colbert, Kevin L. and Lai, Shujuan and Guan, Sihui and Land, Michelle A. and Lee, Jihwan and Assefa, Tensae and others},
  year         = {2022},
  month        = sep,
  pages        = {3408-+},
}

@article{grienberger_giovannucci_zeiger_portera-cailliau_2022,
  title        = {Two-photon calcium imaging of neuronal activity},
  volume       = {2},
  issn         = {2662-8449},
  doi          = {10.1038/s43586-022-00147-1},
  abstractNote = {In vivo two-photon calcium imaging (2PCI) is a technique used for recording neuronal activity in the intact brain. It is based on the principle that, when neurons fire action potentials, intracellular calcium levels rise, which can be detected using fluorescent molecules that bind to calcium. This Primer is designed for scientists who are considering embarking on experiments with 2PCI. We provide the reader with a background on the basic concepts behind calcium imaging and on the reasons why 2PCI is an increasingly powerful and versatile technique in neuroscience. The Primer explains the different steps involved in experiments with 2PCI, provides examples of what ideal preparations should look like and explains how data are analysed. We also discuss some of the current limitations of the technique, and the types of solutions to circumvent them. Finally, we conclude by anticipating what the future of 2PCI might look like, emphasizing some of the analysis pipelines that are being developed and international efforts for data sharing.},
  number       = {1},
  journal      = {Nature Reviews Methods Primers},
  author       = {Grienberger, Christine and Giovannucci, Andrea and Zeiger, William and Portera-Cailliau, Carlos},
  year         = {2022},
  month        = sep,
}

@article{kline_aponte_tsukano_giovannucci_kato_2021,
  title        = {Inhibitory gating of coincidence-dependent sensory binding in secondary auditory cortex},
  volume       = {12},
  issn         = {2041-1723},
  doi          = {10.1038/s41467-021-24758-6},
  number       = {1},
  journal      = {Nature Communications},
  author       = {Kline, Amber M. and Aponte, Destinee A. and Tsukano, Hiroaki and Giovannucci, Andrea and Kato, Hiroyuki K.},
  year         = {2021},
  month        = jul,
}

@article{cai_friedrich_singh_eybposh_pnevmatikakis_podgorski_giovannucci_2021,
  title        = {{VolPy}: Automated and scalable analysis pipelines for voltage imaging datasets},
  volume       = {17},
  issn         = {1553-7358},
  doi          = {10.1371/journal.pcbi.1008806},
  abstractNote = {Voltage imaging enables monitoring neural activity at sub-millisecond and sub-cellular scale, unlocking the study of subthreshold activity, synchrony, and network dynamics with unprecedented spatio-temporal resolution. However, high data rates (>800MB/s) and low signal-to-noise ratios create bottlenecks for analyzing such datasets. Here we present VolPy, an automated and scalable pipeline to pre-process voltage imaging datasets. VolPy features motion correction, memory mapping, automated segmentation, denoising and spike extraction, all built on a highly parallelizable, modular, and extensible framework optimized for memory and speed. To aid automated segmentation, we introduce a corpus of 24 manually annotated datasets from different preparations, brain areas and voltage indicators. We benchmark VolPy against ground truth segmentation, simulations and electrophysiology recordings, and we compare its performance with existing algorithms in detecting spikes. Our results indicate that VolPy’s performance in spike extraction and scalability are state-of-the-art.},
  number       = {4},
  journal      = {PLOS Computational Biology},
  author       = {Cai, Changjia and Friedrich, Johannes and Singh, Amrita and Eybposh, M. Hossein and Pnevmatikakis, Eftychios A. and Podgorski, Kaspar and Giovannucci, Andrea},
  year         = {2021},
  month        = apr,
}

@article{abbas_masip_giovannucci_2020,
  title        = {Limbs Detection and Tracking of Head-Fixed Mice for Behavioral Phenotyping Using Motion Tubes and Deep Learning},
  volume       = {8},
  issn         = {2169-3536},
  doi          = {10.1109/ACCESS.2020.2975926},
  abstractNote = {The broad accessibility of affordable and reliable recording equipment and its relative ease of use has enabled neuroscientists to record large amounts of neurophysiological and behavioral data. Given that most of this raw data is unlabeled, great effort is required to adapt it for behavioral phenotyping or signal extraction, for behavioral and neurophysiological data, respectively. Traditional methods for labeling datasets rely on human annotators which is a resource and time intensive process, which often produce data that that is prone to reproducibility errors. Here, we propose a deep learning-based image segmentation framework to automatically extract and label limb movements from movies capturing frontal and lateral views of head-fixed mice. The method decomposes the image into elemental regions (superpixels) with similar appearance and concordant dynamics and stacks them following their partial temporal trajectory. These 3D descriptors (referred as motion cues) are used to train a deep convolutional neural network (CNN). We use the features extracted at the last fully connected layer of the network for training a Long Short Term Memory (LSTM) network that introduces spatio-temporal coherence to the limb segmentation. We tested the pipeline in two video acquisition settings. In the first, the camera is installed on the right side of the mouse (lateral setting). In the second, the camera is installed facing the mouse directly (frontal setting). We also investigated the effect of the noise present in the videos and the amount of training data needed, and we found that reducing the number of training samples does not result in a drop of more than 5% in detection accuracy even when as little as 10% of the available data is used for training.},
  journal      = {IEEE Access},
  author       = {Abbas, Waseem and Masip, David and Giovannucci, Andrea},
  year         = {2020},
  pages        = {37891--37901},
}