@comment{ Normalized export: ISSN array residue removed, en-dashes in pages
          replaced with --, month macros used, conference papers retyped as
          @inproceedings, title acronyms brace-protected. Data otherwise
          preserved verbatim from the original export. }

@article{lin_lin_chu_2024,
  title        = {A Low-Overhead Dynamic Formation Method for {LEO} Satellite Swarm Using Imperfect {CSI}},
  author       = {Lin, Chia-Hung and Lin, Shih-Chun and Chu, Liang C.},
  journal      = {IEEE Transactions on Vehicular Technology},
  volume       = {73},
  number       = {5},
  pages        = {6923--6936},
  year         = {2024},
  month        = may,
  issn         = {1939-9359},
  doi          = {10.1109/TVT.2023.3347077},
  abstractNote = {In 6G systems, non-terrestrial networks (NTNs) are poised to address the limitations of terrestrial systems, particularly in unserved or underserved areas, by providing infrastructure with mobility that enhances reliability, availability, and responsiveness. Among various types of mobile infrastructures, low earth orbit (LEO) satellite communication (SATCOM) has the potential to offer extended coverage that supports numerous devices simultaneously with low latency. Consequently, LEO SATCOM attracts significant attention from academia, government, and industry. The dynamic formation problem must be solved to form a swarm connecting to the ground station with the most appropriate satellites to achieve LEO SATCOM systems with higher throughput. Existing solutions use computationally demanding methods to solve the NP-hard problem and cannot be employed for SATCOM systems with short coherence time. Furthermore, precise channel state information (CSI) between the ground station and all candidate satellites is required for formation designs, resulting in significant signaling overheads. To overcome these drawbacks, we propose a learning-based dynamic formation method for real-time dynamic formation capability. Specifically, motivated by the channel features of LEO SATCOM, we develop a CSI estimation method to provide coarse CSI (i.e., imperfect CSI) solely based on available geometrical information of LEO SATCOM and without any signaling overhead. Then, our approach can utilize the obtained coarse CSI as inputs and provide valuable guidelines as priorities to access specific satellites for fine-grained CSI (i.e., precise CSI). The prediction results are validated using a small-scale brute force method to determine the final formation. Our intensive simulation results suggest that the proposed method can aid current LEO SATCOM by providing real-time formation results, particularly in low-transmit power regions. Specifically, the proposed method can achieve 90\% of full capacity with only 32\% signaling overhead to build high-throughput LEO SATCOM.},
}

@article{lin_lin_lee_2024,
  title        = {Privacy-Preserving Serverless Edge Learning With Decentralized Small-Scale Mobile Data},
  author       = {Lin, Shih-Chun and Lin, Chia-Hung and Lee, Myungjin},
  journal      = {IEEE Network},
  volume       = {38},
  number       = {2},
  pages        = {264--271},
  year         = {2024},
  month        = mar,
  issn         = {1558-156X},
  doi          = {10.1109/MNET.135.2200611},
  abstractNote = {In next-generation (i.e., 6G) networking systems, the data-driven approach will play an essential role, being an efficient tool for networking system management and bringing popular user applications. With those unprecedented and novel usages, existing frameworks fail to consider the complex nature of the next-generation networking system and consequently fail to be applied to future communication systems directly. Moreover, existing frameworks also fail to support popular privacy-preserving learning strategies efficiently by presenting special designs to respond to the resource-demanding nature of the aforementioned strategies. To fill this gap, this paper extends conventional serverless platforms with serverless edge learning architectures, providing a mature and efficient distributed training framework by fully exploiting limited wireless communication and edge computation resources in the considered networking system with the following three features. Firstly, this framework dynamically orchestrates resources among heterogeneous physical units to efficiently fulfill privacy-preserving learning objectives. The design jointly considers learning task requests and underlying infrastructure heterogeneity, including last-mile transmissions, computation abilities of edge and cloud computing centers, and loading status of infrastructure. Secondly, the proposed framework can easily work with data-driven approaches to improve network management efficiency, realizing AI for network promise of next-generation networking systems to provide efficient network automation. Lastly, to significantly reduce distributed training overheads, small-scale data training is proposed by integrating with a general, simple data classifier. This low-load enhancement can seamlessly work with various distributed deep models in the proposed framework to improve communications and computation efficiencies during the training phase. Based on the above innovations, open challenges, and future research directions encourage the research community to develop efficient privacy-preserving learning techniques.},
}

@article{lin_rohit_lin_chu_2023,
  title         = {{6G-AUTOR}: Autonomic Transceiver via Realtime On-Device Signal Analytics},
  author        = {Lin, Chia-Hung and Rohit, K. V. S. and Lin, Shih-Chun and Chu, Liang C.},
  journal       = {Journal of Signal Processing Systems for Signal Image and Video Technology},
  volume        = {5},
  year          = {2023},
  month         = may,
  issn          = {1939-8115},
  doi           = {10.1007/s11265-023-01858-8},
  internal-note = {volume=5 looks inconsistent with this journal's 2023 volume numbering -- verify against the publisher record},
}

@inproceedings{lin_lin_wang_chase_2021,
  title        = {A {C-V2X} Platform Using Transportation Data and Spectrum-Aware Sidelink Access},
  author       = {Lin, Chia-Hung and Lin, Shih-Chun and Wang, Chien-Yuan and Chase, Thomas},
  booktitle    = {2021 IEEE International Conference on Systems, Man, and Cybernetics (SMC)},
  pages        = {1293--1298},
  year         = {2021},
  issn         = {1062-922X},
  doi          = {10.1109/SMC52423.2021.9659109},
  abstractNote = {Intelligent transportation systems and autonomous vehicles are expected to bring new experiences with enhanced efficiency and safety to road users in the near future. However, an efficient and robust vehicular communication system should act as a strong backbone to offer the needed infrastructure connectivity. Deep learning (DL)-based algorithms are widely adopted recently in various vehicular communication applications due to their achieved low latency and fast reconfiguration properties. Yet, collecting actual and sufficient transportation data to train DL-based vehicular communication models is costly and complex. This paper introduces a cellular vehicle-to-everything (C-V2X) verification platform based on an actual traffic simulator and spectrum-aware access. This integrated platform can generate realistic transportation and communication data, benefiting the development and adaptivity of DL-based solutions. Accordingly, vehicular spectrum recognition and management are further investigated to demonstrate the potentials of dynamic slidelink access. Numerical results show that our platform can effectively train and realize DL-based C-V2X algorithms. The developed slidelink communication scheme can adopt different operating bands with remarkable spectrum detection performance, validating its practicality in real-world vehicular environments.},
}

@article{lin_lin_wu_chung_lee_2021,
  title   = {A Survey on Deep Learning-Based Vehicular Communication Applications},
  author  = {Lin, Chia-Hung and Lin, Yu-Chien and Wu, Yen-Jung and Chung, Wei-Ho and Lee, Ta-Sung},
  journal = {Journal of Signal Processing Systems for Signal Image and Video Technology},
  volume  = {93},
  number  = {4},
  pages   = {369--388},
  year    = {2021},
  month   = apr,
  issn    = {1939-8115},
  doi     = {10.1007/s11265-020-01587-2},
}

@article{lin_gu_lin_lee_2021,
  title        = {Deep-Learning Based Decentralized Frame-to-Frame Trajectory Prediction Over Binary Range-Angle Maps for Automotive Radars},
  author       = {Lin, Yu-Chien and Gu, Meng-Xun and Lin, Chia-Hung and Lee, Ta-Sung},
  journal      = {IEEE Transactions on Vehicular Technology},
  volume       = {70},
  number       = {7},
  pages        = {6385--6398},
  year         = {2021},
  month        = jul,
  issn         = {1939-9359},
  doi          = {10.1109/TVT.2021.3082213},
  abstractNote = {Reliable trajectory prediction methods are critical in providing predictive safety intelligence for vital decision making in intelligent transportation systems to further enhance the safety of drivers and passengers. To tackle complicated maneuvering and interactions between objects, learning-based algorithms were used to replace classic model-based trajectory prediction algorithms. However, most algorithms implicitly presume that they are executed at centralized processing units after gathering data from edge sensors and delivering the results back to the on-board units. This causes an increase in computation time and latency; thus, reducing the reaction time of the drivers. To reduce the computation time and latency and consider the robustness of local sensors, we propose a decentralized radar-dedicated framework with a deep-learning (DL) model, called predictive RadarNet, to predict future trajectories over binary range angle (RA) maps with a probabilistic representation according to the original radar RA maps for presenting the uncertainty of the estimated trajectories. In addition, to reduce the model size for low-complexity, we designed a prepossessing technique that can largely reduce the size of the input tensors without losing information. Moreover, we found that the functions of the DL-model consist of two operations: future inference of original radar RA maps and transformation to binary RA maps. Thus, we designed two models with different kernels that are suitable for dealing with the two operations. Simulations show that the proposed decentralized framework using predictive RadarNet can provide reliable prediction results with a low computation time.},
}

@article{lin_fang_chang_lin_chung_lin_lee_2021,
  title        = {{GCN-CNVPS}: Novel Method for Cooperative Neighboring Vehicle Positioning System Based on Graph Convolution Network},
  author       = {Lin, Chia-Hung and Fang, Yo-Hui and Chang, Hsin-Yuan and Lin, Yu-Chien and Chung, Wei-Ho and Lin, Shih-Chun and Lee, Ta-Sung},
  journal      = {IEEE Access},
  volume       = {9},
  pages        = {153429--153441},
  year         = {2021},
  issn         = {2169-3536},
  doi          = {10.1109/ACCESS.2021.3127914},
  abstractNote = {To provide coordinate information for the use of intelligent transportation systems (ITSs) and autonomous vehicles (AVs), the global positioning system (GPS) is commonly used in vehicle localization as a cheap and easily accessible solution for global positioning. However, several factors contribute to GPS errors, decreasing the safety and precision of AV and ITS applications, respectively. Extensive research has been conducted to address this problem. More specifically, several optimization-based cooperative vehicle localization algorithms have been developed to improve the localization results by exchanging information with neighboring vehicles to acquire additional information. Nevertheless, existing optimization-based algorithms still suffer from an unacceptable performance and poor scalability. In this study, we investigated the development of deep learning (DL) based cooperative vehicle localization algorithms to provide GPS refinement solutions with low complexity, high performance, and flexibility. Specifically, we propose three DL models to address the problem of interest by emphasizing the temporal and spatial correlations of the extra given information. The simulation results confirm that the developed algorithms outperform existing optimization-based algorithms in terms of refined error statistics. Moreover, a comparison of the three proposed algorithms also demonstrates that the proposed graph convolution network-based cooperative vehicle localization algorithm can effectively utilize temporal and spatial correlations in the extra information, leading to a better performance and lower training overhead.},
}

@inproceedings{lin_lin_blasch_2021,
  title        = {{TULVCAN}: Terahertz Ultra-broadband Learning Vehicular Channel-Aware Networking},
  author       = {Lin, Chia-Hung and Lin, Shih-Chun and Blasch, Erik},
  booktitle    = {IEEE Conference on Computer Communications Workshops (IEEE INFOCOM WKSHPS 2021)},
  year         = {2021},
  issn         = {2159-4228},
  doi          = {10.1109/INFOCOMWKSHPS51825.2021.9484613},
  abstractNote = {Due to spectrum scarcity and increasing wireless capacity demands, terahertz (THz) communications at 0.1-10THz and the corresponding spectrum characterization have emerged to meet diverse service requirements in future 5G and 6G wireless systems. However, conventional compressed sensing techniques to reconstruct the original wideband spectrum with under-sampled measurements become inefficient as local spectral correlation is deliberately omitted. Recent works extend communication methods with deep learning-based algorithms but lack strong ties to THz channel properties. This paper introduces novel THz channel-aware spectrum learning solutions that fully disclose the uniqueness of THz channels when performing such ultra-broadband sensing in vehicular environments. Specifically, a joint design of spectrum compression and reconstruction is proposed through a structured sensing matrix and two-phase reconstruction based on high spreading loss and molecular absorption at THz frequencies. An end-to-end learning framework, namely compression and reconstruction network (CRNet), is further developed with the mean-square-error loss function to improve sensing accuracy while significantly reducing computational complexity. Numerical results show that the CRNet solutions outperform the latest generative adversarial network (GAN) realization with a much higher cosine and structure similarity measures, smaller learning errors, and 56\% less required training overheads. This THz Ultra-broadband Learning Vehicular Channel-Aware Networking (TULVCAN) work successfully achieves effective THz spectrum learning and hence allows frequency-agile access.},
}

@inproceedings{lin_wu_chen_lee_2020,
  title        = {A Variational Autoencoder-Based Secure Transceiver Design Using Deep Learning},
  author       = {Lin, Chia-Hung and Wu, Chao-Chin and Chen, Kuan-Fu and Lee, Ta-Sung},
  booktitle    = {2020 IEEE Global Communications Conference (GLOBECOM)},
  year         = {2020},
  issn         = {2576-6813},
  doi          = {10.1109/GLOBECOM42002.2020.9348041},
  abstractNote = {To achieve new applications for 5G communications, physical layer security has recently drawn significant attention. In a wiretap channel system, our goal is to minimize information leakage to an eavesdropper while maximizing the performance of transmission to the desired or legitimate receiver. Complicated systems or channel models make it difficult to design secrecy systems based on the information theory. In this paper, we propose a deep learning-based transceiver design for secrecy systems as an alternative. Specifically, we modify the loss function design of a variational autoencoder, which is a special type of neural network, making it possible to provide both robust data transmission and security in an unsupervised fashion. We further investigate the impact of an imperfect channel state information and use simulation results to prove that our approach can outperform the existing learning-based methods.},
}

@inproceedings{lin_lee_chung_lin_lee_2020,
  title        = {Unsupervised {ResNet}-Inspired Beamforming Design Using Deep Unfolding Technique},
  author       = {Lin, Chia-Hung and Lee, Yen-Ting and Chung, Wei-Ho and Lin, Shih-Chun and Lee, Ta-Sung},
  booktitle    = {2020 IEEE Global Communications Conference (GLOBECOM)},
  year         = {2020},
  issn         = {2576-6813},
  doi          = {10.1109/GLOBECOM42002.2020.9322638},
  abstractNote = {Beamforming is a key technology in communication systems of the fifth generation and beyond. However, traditional optimization-based algorithms are often computationally prohibited from performing in a real-time manner. On the other hand, the performance of existing deep learning (DL)-based algorithms can be further improved. As an alternative, we propose an unsupervised ResNet-inspired beamforming (RI-BF) algorithm in this paper that inherits the advantages of both pure optimization-based and DL-based beamforming for efficiency. In particular, a deep unfolding technique is introduced to reference the optimization process of the gradient ascent beamforming algorithm for the design of our neural network (NN) architecture. Moreover, the proposed RI-BF has three features. First, unlike the existing DL-based beamforming method, which employs a regularization term for the loss function or an output scaling mechanism to satisfy system power constraints, a novel NN architecture is introduced in RI-BF to generate initial beamforming with a promising performance. Second, inspired by the success of residual neural network (ResNet)-based DL models, a deep unfolding module is constructed to mimic the residual block of the ResNet-based model, further improving the performance of RI-BF based on the initial beamforming. Third, the entire RI-BF is trained in an unsupervised manner; as a result, labelling efforts are unnecessary. The simulation results demonstrate that the performance and computational complexity of our RI-BF improves significantly compared to the existing DL-based and optimization-based algorithms.},
}