@inproceedings{sun_wu_2019,
  title        = {Image Synthesis From Reconfigurable Layout and Style},
  booktitle    = {2019 IEEE/CVF International Conference on Computer Vision (ICCV)},
  author       = {Sun, Wei and Wu, Tianfu},
  year         = {2019},
  pages        = {10530–10539},
  DOI          = {10.1109/ICCV.2019.01063},
  ISSN         = {1550-5499},
  abstractNote = {Despite remarkable recent progress on both unconditional and conditional image synthesis, it remains a long-standing problem to learn generative models capable of synthesizing realistic and sharp images from reconfigurable spatial layout (i.e., bounding boxes + class labels in an image lattice) and style (i.e., structural and appearance variations encoded by latent vectors), especially at high resolution. By reconfigurable, we mean that a model can preserve the intrinsic one-to-many mapping from a given layout to multiple plausible images with different styles, and is adaptive with respect to perturbations of a layout and style latent code. In this paper, we present a layout- and style-based architecture for generative adversarial networks (termed LostGANs) that can be trained end-to-end to generate images from reconfigurable layout and style. Inspired by the vanilla StyleGAN, the proposed LostGAN consists of two new components: (i) learning fine-grained mask maps in a weakly-supervised manner to bridge the gap between layouts and images, and (ii) learning object instance-specific layout-aware feature normalization (ISLA-Norm) in the generator to realize multi-object style generation. In experiments, the proposed method is tested on the COCO-Stuff dataset and the Visual Genome dataset, obtaining state-of-the-art performance. The code and pretrained models are available at https://github.com/iVMCL/LostGANs.},
}
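
The second component named in the abstract, ISLA-Norm, can be illustrated with a short sketch. The code below is a minimal PyTorch approximation, not the authors' implementation (that is in the linked repository); the class and parameter names (ISLANormSketch, to_gamma, to_beta, style_dim) are hypothetical, and the particular way per-instance affine parameters are broadcast through soft masks and aggregated over instances is an assumption about how a layout-aware normalization of this kind could be wired up.

import torch
import torch.nn as nn

class ISLANormSketch(nn.Module):
    """Sketch of an instance-specific, layout-aware feature normalization.

    Assumption: batch statistics are computed as in standard BatchNorm
    (with learned affine disabled), while gamma/beta are derived per object
    instance from style latent codes and spread spatially via soft masks.
    """

    def __init__(self, num_features, style_dim):
        super().__init__()
        # Standard batch normalization without its own affine parameters.
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        # Hypothetical projections from per-instance style codes to
        # per-channel gamma and beta for each instance.
        self.to_gamma = nn.Linear(style_dim, num_features)
        self.to_beta = nn.Linear(style_dim, num_features)

    def forward(self, x, styles, masks):
        # x:      (B, C, H, W) feature map from the generator
        # styles: (B, N, style_dim) one latent style code per object instance
        # masks:  (B, N, H, W) soft instance masks derived from the layout
        h = self.bn(x)
        gamma = self.to_gamma(styles)   # (B, N, C)
        beta = self.to_beta(styles)     # (B, N, C)
        # Normalize overlapping masks so each pixel's affine parameters are a
        # convex-ish combination of the instances covering it (assumption).
        m = masks / (masks.sum(dim=1, keepdim=True) + 1e-6)
        gamma_map = torch.einsum('bnc,bnhw->bchw', gamma, m)
        beta_map = torch.einsum('bnc,bnhw->bchw', beta, m)
        return h * (1.0 + gamma_map) + beta_map

# Example usage with random tensors (2 images, 3 object instances each):
layer = ISLANormSketch(num_features=64, style_dim=128)
x = torch.randn(2, 64, 32, 32)
styles = torch.randn(2, 3, 128)
masks = torch.rand(2, 3, 32, 32)
out = layer(x, styles, masks)   # (2, 64, 32, 32)

Resampling the styles tensor while keeping x and masks fixed changes only the per-instance appearance, which mirrors the one-to-many layout-to-image mapping the abstract describes.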