@inproceedings{tarun_du_kannan_gehringer_2025,
  author        = {Tarun, Bhavishya and Du, Haoze and Kannan, Dinesh and Gehringer, Edward F.},
  title         = {{Human-in-the-Loop} Systems for Adaptive Learning Using Generative {AI}},
  booktitle     = {2025 {IEEE} Frontiers in Education Conference ({FIE})},
  year          = {2025},
  month         = nov,
  doi           = {10.1109/fie63693.2025.11328658},
  abstract      = {A Human-in-the-Loop (HITL) approach leverages generative AI to enhance personalized learning by directly integrating student feedback into AI-generated solutions. Students critique and modify AI responses using predefined feedback tags, fostering deeper engagement and understanding. This empowers students to actively shape their learning, with AI serving as an adaptive partner. The system uses a tagging technique and prompt engineering to personalize content, informing a Retrieval-Augmented Generation (RAG) system to retrieve relevant educational material and adjust explanations in real time. This builds on existing research in adaptive learning, demonstrating how student-driven feedback loops can modify AI-generated responses for improved student retention and engagement, particularly in STEM education. Preliminary findings from a study with STEM students indicate improved learning outcomes and confidence compared to traditional AI tools. This work highlights AI's potential to create dynamic, feedback-driven, and personalized learning environments through iterative refinement.},
  internal-note = {Converted from article type (which lacked the required journal field): the DOI prefix 10.1109/fie identifies an IEEE FIE conference paper. Booktitle inferred from the DOI -- verify against the published record. The original export also carried a spurious volume of 11, removed here.},
}

@inproceedings{du_kannan_tarun_gehringer_2025,
  author        = {Du, Haoze and Kannan, Dinesh and Tarun, Bhavishya and Gehringer, Edward F.},
  title         = {Leveraging Large Language Models and {Human-In-The-Loop} for Interactive Learning Pipelines},
  booktitle     = {2025 {IEEE} Frontiers in Education Conference ({FIE})},
  year          = {2025},
  month         = nov,
  doi           = {10.1109/fie63693.2025.11328405},
  abstract      = {This research-to-practice paper introduces a human-in-the-loop learning framework that utilizes large language models (LLMs) to enhance student engagement, critical thinking, and learning retention. Traditional AI tutoring systems often lack interactivity, limiting their ability to dynamically engage students. Our framework transforms LLMs into interactive learning companions by incorporating a first-guess approach: the model provides an initial step-by-step solution, enabling students to critique, refine, and enhance their understanding through a guided feedback loop. A controlled study in a software engineering class evaluated this framework. Data collected included engagement metrics (e.g., frequency of feedback interactions and response modifications), as well as surveys and interviews to assess student perceptions, confidence, and learning effectiveness. The preliminary simulated results indicated that students using the framework demonstrated greater learning gains, improved problem solving confidence, and deeper comprehension compared to those relying on standard LLM-generated responses. This work underscores the potential of human-in-the-loop mechanisms to complement traditional instruction, fostering personalized and dynamic educational experiences. Future research will explore refining the feedback process, adaptive learning techniques, and applications across various academic disciplines.},
  internal-note = {Converted from article type (which lacked the required journal field): the DOI prefix 10.1109/fie identifies an IEEE FIE conference paper. Booktitle inferred from the DOI -- verify against the published record. The original export also carried a spurious volume of 11, removed here.},
}