@inproceedings{shimmei_bier_matsuda_2023,
  author        = {Shimmei, Machi and Bier, Norman and Matsuda, Noboru},
  title         = {Machine-Generated Questions Attract Instructors When Acquainted with Learning Objectives},
  booktitle     = {Artificial Intelligence in Education (AIED 2023)},
  series        = {Lecture Notes in Computer Science},
  publisher     = {Springer},
  year          = {2023},
  doi           = {10.1007/978-3-031-36272-9_1},
  abstract      = {Answering questions is an essential learning activity on online courseware. It has been shown that merely answering questions facilitates learning. However, generating pedagogically effective questions is challenging. Although there have been studies on automated question generation, the primary research concern thus far is about if and how those question generation techniques can generate answerable questions and their anticipated effectiveness. We propose Quadl, a pragmatic method for generating questions that are aligned with specific learning objectives. We applied Quadl to an existing online course and conducted an evaluation study with in-service instructors. The results showed that questions generated by Quadl were evaluated as on-par with human-generated questions in terms of their relevance to the learning objectives. The instructors also expressed that they would be equally likely to adapt Quadl-generated questions to their course as they would human-generated questions. The results further showed that Quadl-generated questions were better than those generated by a state-of-the-art question generation model that generates questions without taking learning objectives into account.},
  internal-note = {Entry type, booktitle, series, and publisher inferred from the Springer chapter DOI (978-3-031-36272-9 prefix); verify and add volume and pages.},
}

@inproceedings{shimmei_matsuda_2021,
  author        = {Shimmei, Machi and Matsuda, Noboru},
  title         = {Learning Association Between Learning Objectives and Key Concepts to Generate Pedagogically Valuable Questions},
  booktitle     = {Artificial Intelligence in Education (AIED 2021), Part II},
  series        = {Lecture Notes in Computer Science},
  volume        = {12749},
  publisher     = {Springer International Publishing},
  year          = {2021},
  pages         = {320--324},
  doi           = {10.1007/978-3-030-78270-2_57},
  isbn          = {978-3-030-78269-6},
  issn          = {1611-3349},
  abstract      = {It has been shown that answering questions contributes to students learning effectively. However, generating questions is an expensive task and requires a lot of effort. Although there has been research reported on the automation of question generation in the literature of Natural Language Processing, these technologies do not necessarily generate questions that are useful for educational purposes. To fill this gap, we propose QUADL, a method for generating questions that are aligned with a given learning objective. The learning objective reflects the skill or concept that students need to learn. The QUADL method first identifies a key concept, if any, in a given sentence that has a strong connection with the given learning objective. It then converts the given sentence into a question for which the predicted key concept becomes the answer. The results from the survey using Amazon Mechanical Turk suggest that the QUADL method can be a step towards generating questions that effectively contribute to students' learning.},
}

@inproceedings{shimmei_matsuda_2019,
  author        = {Shimmei, Machi and Matsuda, Noboru},
  title         = {Evidence-Based Recommendation for Content Improvement Using Reinforcement Learning},
  booktitle     = {Artificial Intelligence in Education (AIED 2019), Part II},
  series        = {Lecture Notes in Computer Science},
  volume        = {11626},
  publisher     = {Springer},
  year          = {2019},
  pages         = {369--373},
  doi           = {10.1007/978-3-030-23207-8_68},
  isbn          = {978-3-030-23206-1},
  issn          = {1611-3349},
  abstract      = {One of the most challenging issues for online-courseware engineering is to maintain the quality of instructional elements. However, it is hard to know how each instructional element on the courseware contributes to students' learning. To address this challenge, an evidence-based learning-engineering method for validating the quality of instructional elements on online courseware is proposed. Students' learning trajectories on particular online courseware and their final learning outcomes are consolidated into a state transition graph. The value iteration technique is applied to compute the worst actions taken (a converse policy) to yield the least successful learning. We hypothesize that the converse policy reflects the quality of instructional elements. As a proof of concept, this paper describes an evaluation study where we simulated online learning data on three hypothetical pieces of online courseware. The result showed that our method can detect more than a half of the ineffective instructional elements on three types of courseware containing various ratios of ineffective instructional elements.},
  internal-note = {Publisher inferred from the LNCS series; Scopus tracking URL dropped in favor of the DOI.},
}