@article{forbes_wang_villalobos-arias_jhala_roberts_2025,
  title={Action-Dependent Optimality-Preserving Reward Shaping},
  volume={5},
  DOI={10.65109/yupb3982},
  abstractNote={Recent RL research has utilized reward shaping--particularly complex shaping rewards such as intrinsic motivation (IM)--to encourage agent exploration in sparse-reward environments. While often effective, 'reward hacking' can lead to the shaping reward being optimized at the expense of the extrinsic reward. Prior techniques have mitigated this, allowing for implementing IM without altering optimal policies, but have thus far only been tested in simple environments. In this work we show that they are effectively unsuitable for complex, exploration-heavy environments with long episodes. To remedy this, we introduce Action-Dependent Optimality-Preserving Shaping (ADOPS), a method of converting arbitrary intrinsic rewards to an optimality-preserving form that allows agents to utilize them more effectively in the extremely sparse environment of Montezuma's Revenge. We demonstrate significant improvement over prior SOTA optimality-preserving IM-conversion methods, and argue that these improvements come from ADOPS's ability to preserve 'action-dependent' IM terms.},
  author={Forbes, Grant C. and Wang, Jianxun and Villalobos-Arias, Leonardo and Jhala, Arnav and Roberts, David L.},
  year={2025},
  month={May}
}

@article{villalobos-arias_forbes_wang_roberts_jhala_2025,
  title={Minding Motivation: The Effect of Intrinsic Motivation on Agent Behaviors},
  DOI={10.1609/aiide.v21i1.36838},
  abstractNote={Games are challenging for Reinforcement Learning (RL) agents due to their reward sparsity, as rewards are only obtainable after long sequences of deliberate actions. Intrinsic Motivation (IM) methods--which introduce exploration rewards--are an effective solution to reward sparsity. However, IM also causes an issue known as 'reward hacking', where the agent optimizes for the new reward at the expense of properly playing the game. The larger problem is that the extent of reward hacking is largely unknown; there is no answer to whether, and to what degree, IM rewards change the behavior of RL agents. This study takes a first step by empirically evaluating the impact on behavior of three IM techniques in the MiniGrid game-like environment. We compare these IM models with Generalized Reward Matching (GRM), a method that can be used with any intrinsic reward function to guarantee optimality. Our results suggest that IM causes noticeable change by increasing the initial rewards, but also by altering the way the agent plays, and that GRM mitigated reward hacking in some scenarios.},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment},
  author={Villalobos-Arias, Leonardo and Forbes, Grant and Wang, Jianxun and Roberts, David L. and Jhala, Arnav},
  year={2025},
  month={Nov}
}