@article{wang_zhou_yu_wang_zu_lalush_lin_wu_zhou_shen_2019,
  title={3D Auto-Context-Based Locality Adaptive Multi-Modality GANs for PET Synthesis},
  volume={38},
  ISSN={1558-254X},
  DOI={10.1109/TMI.2018.2884053},
  abstractNote={Positron emission tomography (PET) has been substantially used recently. To minimize the potential health risk caused by the tracer radiation inherent to PET scans, it is of great interest to synthesize the high-quality PET image from the low-dose one to reduce the radiation exposure. In this paper, we propose a 3D auto-context-based locality adaptive multi-modality generative adversarial networks model (LA-GANs) to synthesize the high-quality FDG PET image from the low-dose one with the accompanying MRI images that provide anatomical information. Our work has four contributions. First, different from the traditional methods that treat each image modality as an input channel and apply the same kernel to convolve the whole image, we argue that the contributions of different modalities could vary at different image locations, and therefore a unified kernel for a whole image is not optimal. To address this issue, we propose a locality adaptive strategy for multi-modality fusion. Second, we utilize a $1 \times 1 \times 1$ kernel to learn this locality adaptive fusion so that the number of additional parameters incurred by our method is kept to a minimum. Third, the proposed locality adaptive fusion mechanism is learned jointly with the PET image synthesis in a 3D conditional GANs model, which generates high-quality PET images by employing large-sized image patches and hierarchical features. Fourth, we apply the auto-context strategy to our scheme and propose an auto-context LA-GANs model to further refine the quality of synthesized images. Experimental results show that our method outperforms the traditional multi-modality fusion methods used in deep networks, as well as the state-of-the-art PET estimation approaches.},
  number={6},
  journal={IEEE Transactions on Medical Imaging},
  author={Wang, Yan and Zhou, Luping and Yu, Biting and Wang, Lei and Zu, Chen and Lalush, David S. and Lin, Weili and Wu, Xi and Zhou, Jiliu and Shen, Dinggang},
  year={2019},
  month={Jun},
  pages={1328–1339}
}

@article{wang_zhou_wang_yu_zu_lalush_lin_wu_zhou_shen_2018,
  title={Locality Adaptive Multi-modality GANs for High-Quality PET Image Synthesis},
  volume={11070},
  ISBN={978-3-030-00927-4},
  ISSN={1611-3349},
  DOI={10.1007/978-3-030-00928-1_38},
  abstractNote={Positron emission tomography (PET) has been substantially used in recent years. To minimize the potential health risks caused by the tracer radiation inherent to PET scans, it is of great interest to synthesize the high-quality full-dose PET image from the low-dose one to reduce the radiation exposure while maintaining the image quality. In this paper, we propose a locality adaptive multi-modality generative adversarial networks model (LA-GANs) to synthesize the full-dose PET image from both the low-dose one and the accompanying T1-weighted MRI to incorporate anatomical information for better PET image synthesis. This paper has the following contributions. First, we propose a new mechanism to fuse multi-modality information in deep neural networks.
Different from the traditional methods that treat each image modality as an input channel and apply the same kernel to convolve the whole image, we argue that the contributions of different modalities could vary at different image locations, and therefore a unified kernel for a whole image is not appropriate. To address this issue, we propose a locality adaptive method for multi-modality fusion. Second, to learn this locality adaptive fusion, we utilize a 1 × 1 × 1 kernel so that the number of additional parameters incurred by our method is kept to a minimum. This also naturally produces a fused image which acts as a pseudo input for the subsequent learning stages. Third, the proposed locality adaptive fusion mechanism is learned jointly with the PET image synthesis in an end-to-end trained 3D conditional GANs model. Our 3D GANs model generates high-quality PET images by employing large-sized image patches and hierarchical features. Experimental results show that our method outperforms the traditional multi-modality fusion methods used in deep networks, as well as the state-of-the-art PET estimation approaches.},
  journal={Medical Image Computing and Computer Assisted Intervention – MICCAI 2018, Part I},
  author={Wang, Yan and Zhou, Luping and Wang, Lei and Yu, Biting and Zu, Chen and Lalush, David S. and Lin, Weili and Wu, Xi and Zhou, Jiliu and Shen, Dinggang},
  year={2018},
  pages={329–337}
}

@article{wang_zhang_an_ma_kang_shi_wu_zhou_lalush_lin_et al._2016,
  title={Predicting standard-dose PET image from low-dose PET and multimodal MR images using mapping-based sparse representation},
  volume={61},
  ISSN={0031-9155, 1361-6560},
  url={http://dx.doi.org/10.1088/0031-9155/61/2/791},
  DOI={10.1088/0031-9155/61/2/791},
  abstractNote={Positron emission tomography (PET) has been widely used in clinical diagnosis of diseases and disorders. Obtaining high-quality PET images requires a standard-dose radionuclide (tracer) injection into the human body, which inevitably increases the risk of radiation exposure. One possible solution to this problem is to predict the standard-dose PET image from its low-dose counterpart and its corresponding multimodal magnetic resonance (MR) images. Inspired by the success of patch-based sparse representation (SR) in super-resolution image reconstruction, we propose a mapping-based SR (m-SR) framework for standard-dose PET image prediction. Compared with the conventional patch-based SR, our method uses a mapping strategy to ensure that the sparse coefficients, estimated from the multimodal MR images and the low-dose PET image, can be applied directly to the prediction of the standard-dose PET image. As the mapping between multimodal MR images (or the low-dose PET image) and standard-dose PET images can be particularly complex, one step of mapping is often insufficient. To this end, an incremental refinement framework is proposed. Specifically, the predicted standard-dose PET image is further mapped to the target standard-dose PET image, and then the SR is performed again to predict a new standard-dose PET image. This procedure can be repeated iteratively to further refine the prediction. In addition, a patch-selection-based dictionary construction method is used to speed up the prediction process. The proposed method is validated on a human brain dataset.
The experimental results show that our method can outperform benchmark methods in both qualitative and quantitative measures.},
  number={2},
  journal={Physics in Medicine and Biology},
  publisher={IOP Publishing},
  author={Wang, Yan and Zhang, Pei and An, Le and Ma, Guangkai and Kang, Jiayin and Shi, Feng and Wu, Xi and Zhou, Jiliu and Lalush, David S. and Lin, Weili and et al.},
  year={2016},
  month={Jan},
  pages={791–812}
}