% NOTE(review): key had embedded spaces (invalid in \cite); spaces replaced with
% underscores — update any existing \cite{dal fabbro_mitra_pappas_2023} callers.
@article{dal_fabbro_mitra_pappas_2023,
  author   = {Dal Fabbro, Nicolo and Mitra, Aritra and Pappas, George J.},
  title    = {Federated {TD} Learning Over Finite-Rate Erasure Channels: Linear Speedup Under {Markovian} Sampling},
  journal  = {IEEE Control Systems Letters},
  volume   = {7},
  year     = {2023},
  pages    = {2461--2466},
  doi      = {10.1109/LCSYS.2023.3287499},
  issn     = {2475-1456},
  abstract = {Federated learning (FL) has recently gained much attention due to its effectiveness in speeding up supervised learning tasks under communication and privacy constraints. However, whether similar speedups can be established for reinforcement learning remains much less understood theoretically. Towards this direction, we study a federated policy evaluation problem where agents communicate via a central aggregator to expedite the evaluation of a common policy. To capture typical communication constraints in FL, we consider finite capacity up-link channels that can drop packets based on a Bernoulli erasure model. Given this setting, we propose and analyze QFedTD - a quantized federated temporal difference learning algorithm with linear function approximation. Our main technical contribution is to provide a finite-sample analysis of QFedTD that (i) highlights the effect of quantization and erasures on the convergence rate; and (ii) establishes a linear speedup w.r.t. the number of agents under Markovian sampling. Notably, while different quantization mechanisms and packet drop models have been extensively studied in the FL, distributed optimization, and networked control systems literature, our work is the first to provide a non-asymptotic analysis of their effects in multi-agent and federated reinforcement learning.},
}