phd-interview/references.bib
2026-04-23 13:08:25 +01:00

969 lines
87 KiB
BibTeX
Executable file
Raw Permalink Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

@InCollection{EdwardL.Deci2011,
  author     = {Deci, Edward L. and Ryan, Richard M.},
  title      = {Self-Determination Theory},
  booktitle  = {Handbook of {Theories} of {Social} {Psychology}},
  publisher  = {SAGE Publications Ltd},
  year       = {2011},
  pages      = {416--432},
  abstract   = {Self-determination theory (SDT) is an empirically derived theory of human motivation and personality in social contexts that differentiates motivation in terms of being autonomous and controlled. Work leading to the theory began with experiments examining the effects of extrinsic rewards on intrinsic motivation. During more than thirty years since the initial studies, we have developed five mini-theories to address different, though related, issues: the effects of social environments on intrinsic motivation; the development of autonomous extrinsic motivation and self-regulation through internalization and integration; individual differences in general motivational orientations; the functioning of fundamental universal psychological needs that are essential for growth, integrity, and wellness; and the effects of different goal contents on well-being and performance. We have subsequently used SDT and its mini-theories to guide and interpret research on many new issues, including motivation and wellness across cultures, close relationships, enhancement and depletion of energy and vitality, and the roles of both mindful awareness and nonconscious processes in behavioral regulation. Although much of SDT was developed through laboratory experiments, it is also supported by a great deal of applied research using both field studies and clinical trials to address significant social issues. We briefly mention some of that work, especially related to health behavior change, education, psychotherapy, work motivation, sport and exercise, and prosocial behaviors.},
  file       = {Full Text PDF:http\://www.torrossa.com/gs/resourceProxy?an=5017496&publisher=FZ7200:application/pdf},
  groups     = {Motivation/Engagement},
  language   = {en},
  ranking    = {rank5},
  readstatus = {skimmed},
  url        = {https://www.torrossa.com/en/resources/an/5017496},
  urldate    = {2025-10-22},
}
@InCollection{Malone1987,
  author     = {Malone, Thomas W. and Lepper, Mark R.},
  title      = {Making {Learning} {Fun}: {A} {Taxonomy} of {Intrinsic} {Motivations} for {Learning}},
  booktitle  = {Aptitude, Learning, and Instruction: {Conative} and {Affective} {Process} {Analyses}},
  editor     = {Snow, Richard E. and Farr, Marshall J.},
  publisher  = {Routledge},
  year       = {1987},
  abstract   = {Over the past 2 decades, great strides have been made in analyzing the cognitive processes involved in learning and instruction. During the same period,},
  file       = {:MakingLearningFun-ATaxonomyOfIntrinsicMotivationsForLearning.pdf:PDF:https\://ocw.metu.edu.tr/mod/resource/view.php?id=1311},
  groups     = {Pedagodgy, Motivation/Engagement},
  ranking    = {rank5},
  readstatus = {skimmed},
  shorttitle = {Making {Learning} {Fun}},
}
@Article{Ross2022,
  author     = {Ross, Robert and Bennett, Artemisia},
  journal    = {IEEE Transactions on Games},
  title      = {Increasing {Engagement} {With} {Engineering} {Escape} {Rooms}},
  year       = {2022},
  issn       = {2475-1510},
  month      = jun,
  number     = {2},
  pages      = {161--169},
  volume     = {14},
  abstract   = {Although games are intrinsically engaging, standard educational practices are not necessarily. So, gamification and game-based learning within education is a topic of interest. In this article, we use the popular escape room concept within an engineering education environment using a physical hardware decoder box. We outline a collection of four sets of puzzles, which we have used to increase engagement, with four different sets of participants within the engineering department: faculty, international students, high school students, and undergraduate students. Our results indicate that the escape room learning paradigm is well received by participants. They report high levels of engagement and motivation as a result of the activity.},
  doi        = {10.1109/TG.2020.3025003},
  file       = {:Increasing_Engagement_With_Engineering_Escape_Rooms.pdf:PDF:https\://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9200567},
  groups     = {Motivation/Engagement},
  keywords   = {Games, Decoding, Writing, Data analysis, Engineering education, Task analysis, escape rooms, game-based learning, gamification, student engagement},
  publisher  = {Institute of Electrical and Electronics Engineers (IEEE)},
  ranking    = {rank3},
  readstatus = {read},
  url        = {https://ieeexplore.ieee.org/document/9200567},
  urldate    = {2025-10-22},
}
@Article{Kantharaju2022,
  author     = {Kantharaju, Pavan and Alderfer, Katelyn and Zhu, Jichen and Char, Bruce and Smith, Brian and Ontañón, Santiago},
  journal    = {IEEE Transactions on Games},
  title      = {Modeling {Player} {Knowledge} in a {Parallel} {Programming} {Educational} {Game}},
  year       = {2022},
  issn       = {2475-1510},
  month      = mar,
  number     = {1},
  pages      = {64--75},
  volume     = {14},
  abstract   = {This article focuses on tracing player knowledge in educational games. Specifically, given a set of concepts or skills required to master a game, the goal is to estimate the likelihood with which the current player has mastery of each of those concepts or skills. The main contribution of the work is an approach that integrates machine learning and domain knowledge rules to find when the player applied a certain skill and either succeeded or failed. This is then given as input to a standard knowledge tracing module (such as those from intelligent tutoring systems) to perform knowledge tracing. We evaluate our approach in the context of an educational game called Parallel to teach parallel and concurrent programming with data collected from real users, showing our approach can predict students' skills with a low mean-squared error. We also provide results from deployment of our system in a classroom environment.},
  doi        = {10.1109/TG.2020.3037505},
  file       = {:Modeling_Player_Knowledge_in_a_Parallel_Programming_Educational_Game.pdf:PDF:https\://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9257367},
  groups     = {Pedagodgy},
  keywords   = {Games, Hidden Markov models, Knowledge engineering, Programming profession, Mathematical model, Concurrent computing, Bayes methods, Collaborative learning, intelligent tutoring systems (ITS), student modeling},
  publisher  = {Institute of Electrical and Electronics Engineers (IEEE)},
  ranking    = {rank3},
  readstatus = {read},
  url        = {https://ieeexplore.ieee.org/document/9257367},
  urldate    = {2025-10-22},
}
@Article{Lin2003,
  author     = {Lin, Yi-Guang and McKeachie, Wilbert J. and Kim, Yung Che},
  journal    = {Learning and Individual Differences},
  title      = {College student intrinsic and/or extrinsic motivation and learning},
  year       = {2003},
  issn       = {1041-6080},
  month      = jan,
  number     = {3},
  pages      = {251--258},
  volume     = {13},
  abstract   = {How do extrinsic and intrinsic motives affect learning? We predicted that they would not be additive but rather that there would be interactive or curvilinear effects. Extrinsic and intrinsic goal orientation scales were administered to 13 classes—six psychology classes (two in Korea), three biology classes, three English classes, and one sociology class in a liberal arts college, a comprehensive university, and a community college. As predicted, students in the mid-third of the distribution in motivation for grades (extrinsic) who were high in intrinsic motivation achieved better grades than students with higher or lower extrinsic motivation.},
  doi        = {10.1016/S1041-6080(02)00092-4},
  file       = {ScienceDirect Full Text PDF:https\://www.sciencedirect.com/science/article/abs/pii/S1041608002000924/pdfft?download=true:application/pdf;:College student intrinsic andor extrinsic motivation and learning.pdf:PDF:https\://files.eric.ed.gov/fulltext/ED435954.pdf},
  groups     = {Pedagodgy, Motivation/Engagement},
  keywords   = {Intrinsic motivation, Extrinsic motivation, Learning},
  publisher  = {Elsevier BV},
  ranking    = {rank4},
  readstatus = {skimmed},
  url        = {https://www.sciencedirect.com/science/article/pii/S1041608002000924},
  urldate    = {2025-10-22},
}
@InProceedings{Saraswat2022,
  author     = {Saraswat, Mala and Gupta, Swapnil and Satwik, Satwik and Kohli, Vrinda},
  booktitle  = {2022 4th International Conference on Advances in Computing, Communication Control and Networking (ICAC3N)},
  title      = {{STUDY} {BUDDY}-{Making} {Learning} {Easy}},
  year       = {2022},
  month      = dec,
  pages      = {2215--2220},
  publisher  = {IEEE},
  abstract   = {Study buddy is an all-in-one package for your studies. We provide an automatically generated timetable as well as pdfs and video links all for Free for our students of class 10th, 12th and, those appearing for JEE. The aim is to increase job opportunities for college students with expertise in a particular subject as well as retired teachers to help school students divide their time and stay motivated. Kids nowadays are always stressed during exam days because they can't make a schedule to study. So we wanted to bring an end to those days of anxiety and fear. We decided to make a website, "Study Buddy" where a student will have the following 4 options: a) Generate Time table b) Reference Material c) Discussion Forum d) Mentor Help. In our research, we would walk through the above-mentioned points and also compare how our proposed method and software package stands out from the already existing software. This will be followed by our system design, result and future scope. This software is a buddy for the students during their hard times. It will not only help the students but will also provide an employment opportunity to retired teachers and college students. We also plan to implement machine learning in order to understand the basic trend of student psychology during exams.},
  doi        = {10.1109/ICAC3N56670.2022.10074209},
  keywords   = {Schedules, Discussion forums, Software packages, Employment, Anxiety disorders, Psychology, Process control, E-learning, Online education, online platform, student guide, COVID-19, sockets, robotic processing automation, machine learning},
  ranking    = {rank1},
  readstatus = {read},
  url        = {https://ieeexplore.ieee.org/abstract/document/10074209},
  urldate    = {2025-10-22},
}
@InCollection{Albayrak2023,
  author     = {Albayrak, Özlem and Albayrak, Duygu},
  booktitle  = {Handbook of {Research} on {Decision}-{Making} {Capabilities} {Improvement} {With} {Serious} {Games}},
  publisher  = {IGI Global Scientific Publishing},
  title      = {Instructor-{Related} {Factors} {Affecting} {Game} {Utilization} in {Software} {Engineering} {Education}: {A} {Replication} {Study}},
  year       = {2023},
  isbn       = {9781668491669},
  month      = jun,
  pages      = {151--175},
  abstract   = {Software engineering education is challenging. To cope with various challenges of software engineering education, instructors at universities utilize different ways. One of these ways is to use games in education. In this study, a replication of a previous survey was conducted to check factors that...},
  copyright  = {Access limited to members},
  doi        = {10.4018/978-1-6684-9166-9.ch007},
  file       = {Full Text PDF:https\://www.igi-global.com/ViewTitle.aspx?TitleId=326420&isxn=9781668491669:application/pdf},
  groups     = {Pedagodgy},
  issn       = {2327-1833},
  language   = {en},
  ranking    = {rank3},
  readstatus = {skimmed},
  shorttitle = {Instructor-{Related} {Factors} {Affecting} {Game} {Utilization} in {Software} {Engineering} {Education}},
  url        = {https://www.igi-global.com/chapter/instructor-related-factors-affecting-game-utilization-in-software-engineering-education/326420},
  urldate    = {2025-10-22},
}
@Article{Baylor2011,
  author    = {Baylor, Amy L.},
  journal   = {Educational Technology Research and Development},
  title     = {The design of motivational agents and avatars},
  year      = {2011},
  issn      = {1556-6501},
  month     = apr,
  number    = {2},
  pages     = {291--300},
  volume    = {59},
  abstract  = {While the addition of an anthropomorphic interface agent to a learning system generally has little direct impact on learning, it potentially has a huge impact on learner motivation. As such agents become increasingly ubiquitous on the Internet, in virtual worlds, and as interfaces for learning and gaming systems, it is important to design them to optimally impact motivation. The focus of this paper is on the design of agents and avatars (one's self-representation as a visual agent) for enhancing motivational and affective outcomes, such as improving self-efficacy, engagement and satisfaction, moderating frustration, and/or improving stereotypes. Together with motivational messages and dialogue (which are not discussed here), the agent's appearance is the most important design feature as it dictates the learner's perception of the agent as a virtual social model, in the Bandurian sense. The message delivery, through a human-like voice with appropriate and relevant emotional expressions, is also a key motivational design feature. More research is needed to determine the specifics with respect to the ideal agent voice and the role of other nonverbal communication (e.g., deictic gestures) that may contribute to the agent's role as an embodied motivator, particularly in the long-term.},
  doi       = {10.1007/s11423-011-9196-3},
  file      = {:Baylor2011 - The Design of Motivational Agents and Avatars.pdf:PDF:https\://amybaylor.com/Articles/2011etrd.pdf},
  groups    = {Avatars, Motivation/Engagement},
  keywords  = {Interface agents, Pedagogical agents, Avatars, Motivation, Persuasion, Attitudes, Self-efficacy},
  language  = {en},
  publisher = {Springer Science and Business Media LLC},
  url       = {https://doi.org/10.1007/s11423-011-9196-3},
  urldate   = {2025-10-22},
}
@Article{Hu2013,
  author     = {Hu, Helen H. and Shepherd, Tricia D.},
  journal    = {ACM Transactions on Computing Education},
  title      = {Using {POGIL} to help students learn to program},
  year       = {2013},
  issn       = {1946-6226},
  month      = aug,
  number     = {3},
  pages      = {13:1--13:23},
  volume     = {13},
  abstract   = {POGIL has been successfully implemented in a scientific computing course to teach science students how to program in Python. Following POGIL guidelines, the authors have developed guided inquiry activities that lead student teams to discover and understand programming concepts. With each iteration of the scientific computing course, the authors have refined the activities and learned how to better adapt POGIL for the computer science classroom. This article details how POGIL activities differ from both traditional computer science labs and other active-learning pedagogies. Background is provided on POGIL's effectiveness. The article then includes a full description of how POGIL activities were used in the scientific computing course, as well as an example POGIL activity on recursion. Discussion is provided on how to facilitate and develop POGIL activities. Quotes from student evaluations and an assessment on how well students learned to program are provided.},
  doi        = {10.1145/2499947.2499950},
  file       = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/2499947.2499950:application/pdf},
  groups     = {Pedagodgy},
  publisher  = {Association for Computing Machinery (ACM)},
  ranking    = {rank5},
  readstatus = {skimmed},
  url        = {https://dl.acm.org/doi/10.1145/2499947.2499950},
  urldate    = {2025-10-29},
}
@InCollection{DeLiema2019,
  author    = {DeLiema, David and Dahn, Maggie and Flood, Virginia J. and Asuncion, Ana and Abrahamson, Dor and Enyedy, Noel and Steen, Francis},
  title     = {Debugging as a context for fostering reflection on critical thinking and emotion},
  booktitle = {Deeper Learning, Dialogic Learning, and Critical Thinking: Research-based Strategies for the Classroom},
  editor    = {Manalo, Emmanuel},
  publisher = {Routledge},
  year      = {2019},
  pages     = {209--228},
  file      = {:DeLiema2019 - Debugging As a Context for Fostering Reflection on Critical Thinking and Emotion.pdf:PDF:https\://library.oapen.org/bitstream/handle/20.500.12657/102736/9781000617443.pdf?sequence=1#page=232},
  groups    = {Emotions, Debugging, Self efficacy},
}
@inproceedings{Pozzan2026,
  author    = {Pozzan, Gabriele and Bollin, Andreas and Vardanega, Tullio},
  title     = {A {Teaching} and {Learning} {Oriented} {Decomposition} of {Debugging} {Subskills} {Informed} by {Cognitive} {Load} {Theory}},
  booktitle = {Informatics in {Schools}. {Fostering} {Problem}-{Solving}, {Creativity}, and {Critical} {Thinking} {Through} {Computer} {Science} {Education}},
  editor    = {Staub, Jacqueline and Singla, Adish},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  year      = {2026},
  month     = aug,
  pages     = {153--166},
  isbn      = {9783032012227},
  issn      = {1611-3349},
  doi       = {10.1007/978-3-032-01222-7_12},
  abstract  = {Debugging is a natural part of the programming process. It comes into play as soon as novices make their first mistakes in creating programming artifacts. It is also consistently reported to be a skill that is difficult to learn and teach effectively. Research in Computer Science Education has often focused on breaking down debugging in steps connected by temporal and causal dependencies. We refer to this as decomposition of the debugging process. In this work, we look at debugging from the standpoint of Cognitive Load theory, and break it down into a tree-shaped model of subskills that enable and are prerequisite to one another. Our decomposition of debugging in subskills complements the work done on the debugging process and suggests viable learning trajectories that take into account the Cognitive Load of learners.},
  keywords  = {Debugging, Cognitive Load theory, 4C/ID},
  language  = {en},
  groups    = {Pedagodgy, Cognitive Load, Debugging},
}
@article{Sweller1988,
  author     = {Sweller, John},
  title      = {Cognitive {Load} {During} {Problem} {Solving}: {Effects} on {Learning}},
  shorttitle = {Cognitive {Load} {During} {Problem} {Solving}},
  journal    = {Cognitive Science},
  year       = {1988},
  month      = apr,
  volume     = {12},
  number     = {2},
  pages      = {257--285},
  issn       = {1551-6709},
  doi        = {10.1207/s15516709cog1202_4},
  abstract   = {Considerable evidence indicates that domain specific knowledge in the form of schemas is the primary factor distinguishing experts from novices in problem-solving skill. Evidence that conventional problem-solving activity is not effective in schema acquisition is also accumulating. It is suggested that a major reason for the ineffectiveness of problem solving as a learning device, is that the cognitive processes required by the two activities overlap insufficiently, and that conventional problem solving in the form of means-ends analysis requires a relatively large amount of cognitive processing capacity which is consequently unavailable for schema acquisition. A computational model and experimental evidence provide support for this contention. Theoretical and practical implications are discussed.},
  copyright  = {© 1988 Cognitive Science Society, Inc.},
  publisher  = {Wiley},
  language   = {en},
  file       = {Full Text PDF:https\://onlinelibrary.wiley.com/doi/pdfdirect/10.1207/s15516709cog1202_4:application/pdf},
  url        = {https://onlinelibrary.wiley.com/doi/abs/10.1207/s15516709cog1202_4},
  urldate    = {2025-11-05},
  groups     = {Cognitive Load, Pedagodgy},
  ranking    = {rank5},
}
@Article{Kalyuga2010,
  author     = {Kalyuga, Slava and Renkl, Alexander and Paas, Fred},
  journal    = {Educational Psychology Review},
  title      = {Facilitating {Flexible} {Problem} {Solving}: {A} {Cognitive} {Load} {Perspective}},
  year       = {2010},
  issn       = {1573-336X},
  month      = jun,
  number     = {2},
  pages      = {175--186},
  volume     = {22},
  abstract   = {The development of flexible, transferable problem-solving skills is an important aim of contemporary educational systems. Since processing limitations of our mind represent a major factor influencing any meaningful learning, the acquisition of flexible problem-solving skills needs to be based on known characteristics of our cognitive architecture in order to be effective and efficient. This paper takes a closer look at the processes involved in the acquisition of flexible problem-solving skills within a cognitive load framework. It concludes that (1) cognitive load theory can benefit from putting more emphasis on generalized knowledge structures; (2) there are tradeoffs between generality and power with respect to specific versus generalized knowledge structures; (3) generalized knowledge structures of “medium” generality are essential for flexible expertise; and (4) cognitive load theory could provide a valuable framework for considering essential attributes of flexible expertise.},
  doi        = {10.1007/s10648-010-9132-9},
  file       = {Full Text PDF:Kalyuga2010 - Facilitating Flexible Problem Solving_ a Cognitive Load Perspective.pdf:PDF:https\://link.springer.com/content/pdf/10.1007%2Fs10648-010-9132-9.pdf},
  groups     = {Cognitive Load, Debugging},
  keywords   = {Cognitive load, Flexible problem solving, Generalized knowledge structures},
  language   = {en},
  publisher  = {Springer Science and Business Media LLC},
  ranking    = {rank4},
  shorttitle = {Facilitating {Flexible} {Problem} {Solving}},
}
@Article{Kinnunen2012,
  author     = {Kinnunen, Päivi and Simon, Beth},
  journal    = {Computer Science Education},
  title      = {My program is ok -- am {I}? {Computing} freshmen's experiences of doing programming assignments},
  year       = {2012},
  issn       = {0899-3408},
  month      = mar,
  number     = {1},
  pages      = {1--28},
  volume     = {22},
  abstract   = {This article provides insight into how computing majors experience the process of doing programming assignments in their first programming course. This grounded theory study sheds light on the various processes and contexts through which students constantly assess their self-efficacy as a programmer. The data consists of a series of four interviews conducted with a purposeful sample of nine computer science majors in a research intensive state university in the United States. Use of the constant comparative method elicited two forms of results. First, we identified six stages of doing a programming assignment. Analysis captures the dimensional variation in students' experiences with programming assignments on a detailed level. We identified a core category resulting from students' reflected emotions in conjunction with self-efficacy assessment. We provide a descriptive model of how computer science majors build their self-efficacy perceptions, reported via four narratives. Our key findings are that some students reflect negative views of their efficacy, even after having a positive programming experience and that in other situations, students having negative programming experiences still have a positive outlook on their efficacy. We consider these findings in light of possible languages and support structures for introductory programming courses.},
  doi        = {10.1080/08993408.2012.655091},
  file       = {Full Text PDF:https\://www.tandfonline.com/doi/pdf/10.1080/08993408.2012.655091:application/pdf;:My program is ok am I Computing freshmen s experiences of doing programming assignments.pdf:PDF},
  groups     = {Debugging, Emotions},
  keywords   = {computing majors, CS1, self-efficacy, experience, programming assignment},
  publisher  = {Routledge},
  ranking    = {rank5},
  readstatus = {skimmed},
  shorttitle = {My program is ok -- am {I}?},
  url        = {https://doi.org/10.1080/08993408.2012.655091},
  urldate    = {2025-11-10},
}
@Article{Guener2025,
  author     = {Güner, Hacer and Er, Erkan},
  journal    = {Education and Information Technologies},
  title      = {{AI} in the classroom: {Exploring} students' interaction with {ChatGPT} in programming learning},
  year       = {2025},
  issn       = {1573-7608},
  month      = jun,
  number     = {9},
  pages      = {12681--12707},
  volume     = {30},
  abstract   = {As being more prevalent in educational settings, understanding the impact of artificial intelligence tools on student behaviors and interactions has become crucial. In this regard, this study investigates the dynamic interactions between students and ChatGPT in programming learning, focusing on how different instructional interventions influence their learning and AI-interaction. Conducted over three sessions, students were allowed to use ChatGPT to complete programming tasks. The first session had no guidance, the second included hands-on training in prompt writing and effective ChatGPT use, and the third provided a lab guide with sample prompts. After each session, students took a post-test on the activity's subject. Analyzing students' prompting behaviors, five AI interaction profiles were identified: AI-Reliant Code Generators, AI-Reliant Code Generator \& Refiners, AI-Collaborative Coders, AI-Assisted Code Refiners, and AI-Independent Coders. These profiles were examined to understand their evolution across interventions and their relationship with students' learning performance. Findings revealed significant changes in profile distribution across interventions, and a notable difference between students' post-test scores and their AI interaction profiles. Besides, training in prompting skills and effective use of AI significantly impacted students' interactions with AI. These insights can contribute to the knowledge of integrating generative AI tools in education, highlighting how AI can enhance teaching practices. Understanding student-AI interaction dynamics can allow educators to tailor instructional strategies for optimal learning. This study also underscores the importance of guidance on effective AI use and prompting skills, which can lead students to use AI more meaningfully for their learning.},
  doi        = {10.1007/s10639-025-13337-7},
  file       = {Full Text PDF:Guener2025 - AI in the Classroom_ Exploring Students Interaction with ChatGPT in Programming Learning.pdf:PDF:https\://link.springer.com/content/pdf/10.1007%2Fs10639-025-13337-7.pdf},
  groups     = {Debugging, Pedagodgy, Motivation/Engagement, Generative AI},
  keywords   = {Student-AI interaction, ChatGPT integration, Programming education, AI in education},
  language   = {en},
  publisher  = {Springer Science and Business Media LLC},
  shorttitle = {{AI} in the classroom},
  url        = {https://doi.org/10.1007/s10639-025-13337-7},
  urldate    = {2025-11-10},
}
@article{Lo2024,
  author     = {Lo, Chung Kwan and Hew, Khe Foon and Jong, Morris Siu-yung},
  title      = {The influence of {ChatGPT} on student engagement: {A} systematic review and future research agenda},
  shorttitle = {The influence of {ChatGPT} on student engagement},
  journal    = {Computers \& Education},
  year       = {2024},
  month      = oct,
  volume     = {219},
  pages      = {105100},
  issn       = {0360-1315},
  doi        = {10.1016/j.compedu.2024.105100},
  abstract   = {ChatGPT, a state-of-the-art artificial intelligence (AI) chatbot, has gained considerable attention as a transformative yet controversial tool for enhancing teaching and learning experiences. Several reviews and numerous articles have been written about harnessing ChatGPT in education since its release on November 30, 2022. Besides summarising its strengths, weaknesses, opportunities, and threats (SWOT) as identified in previous systematic reviews of ChatGPT research, this systematic review aims to develop a new understanding of its influence on student engagement by synthesising the existing related research using a three-dimensional framework comprising behavioural, emotional, and cognitive aspects. We searched relevant databases and included 72 empirical studies published within one year of ChatGPT's initial release. The findings reveal robust but narrowly focused evidence related to behavioural engagement (i.e., work with ChatGPT) and disengagement (i.e., academic dishonesty). The evidence related to the emotional aspect is mixed, with instances of both engagement (e.g., satisfaction and interest/fun) and disengagement (e.g., disappointment and worry/anxiety). There is broad but weak evidence regarding cognitive engagement (e.g., increased understanding and positive self-perception) and disengagement (e.g., reduced critical thinking and overreliance). Our review uncovers several under-explored indicators of student engagement, pointing to the need for further research. Specifically, future studies could focus on students' study habits and attendance (behavioural engagement), social interaction (emotional engagement), and self-regulation and critical thinking (cognitive engagement) in ChatGPT-supported learning environments.},
  keywords   = {ChatGPT, OpenAI, Student engagement, Systematic review, Artificial intelligence},
  publisher  = {Elsevier BV},
  file       = {ScienceDirect Full Text PDF:https\://www.sciencedirect.com/science/article/pii/S0360131524001143/pdfft?download=true:application/pdf},
  url        = {https://www.sciencedirect.com/science/article/pii/S0360131524001143},
  urldate    = {2025-11-10},
  groups     = {Pedagodgy, Generative AI, Motivation/Engagement},
}
@article{Philbin2023,
  author    = {Philbin, Carrie Anne},
  title     = {Exploring the {Potential} of {Artificial} {Intelligence} {Program} {Generators} in {Computer} {Programming} {Education} for {Students}},
  journal   = {ACM Inroads},
  year      = {2023},
  month     = aug,
  volume    = {14},
  number    = {3},
  pages     = {30--38},
  issn      = {2153-2184},
  doi       = {10.1145/3610406},
  publisher = {Association for Computing Machinery (ACM)},
  url       = {https://doi.org/10.1145/3610406},
  urldate   = {2025-11-10},
  groups    = {Pedagodgy, Generative AI},
}
@InProceedings{Thang2025,
  author     = {Thang, Ha Viet and Vi, Truong Loi},
  booktitle  = {2025 10th International STEM Education Conference (iSTEM-Ed)},
  title      = {Impact of {AI} {Debugging} on {Student} {Code} {Complexity} and {Readability}: {A} {Literature} {Review}},
  year       = {2025},
  month      = jul,
  pages      = {1--6},
  publisher  = {IEEE},
  abstract   = {Debugging is a critical yet challenging skill for novice programmers. This literature review examines how AI-assisted debugging influences the complexity and readability of student-written code, compared to traditional human-guided debugging. We present a historical overview of instructor-led debugging pedagogy and contrast it with emerging AI debugging assistants. We then analyze the general impacts of AI-assisted debugging tools on coding practices, focusing on qualitative factors such as code maintainability, clarity, and student perceptions. The review spans various educational contexts, from university courses to high school classes and coding bootcamps, to assess how different learner populations are affected. We find that AI assistants can accelerate the debugging process and sometimes produce code of similar complexity and readability to human-developed code. However, concerns remain about overreliance, superficial understanding, and maintaining code clarity. Comparisons with traditional debugging highlight trade-offs between efficiency and learning depth. This review sheds light on the current findings and offers an analytical perspective on integrating AI-assisted debugging into programming education.},
  doi        = {10.1109/iSTEM-Ed65612.2025.11129285},
  groups     = {Generative AI, Debugging},
  keywords   = {Codes, Scalability, Education, Refining, Debugging, Encoding, Complexity theory, Artificial intelligence, Programming profession, Systematic literature review, AI-assisted debugging, code complexity, readability, programming education, literature review},
  shorttitle = {Impact of {AI} {Debugging} on {Student} {Code} {Complexity} and {Readability}},
  url        = {https://ieeexplore.ieee.org/abstract/document/11129285},
  urldate    = {2025-11-10},
}
@MastersThesis{Sajland2025,
  author     = {Sajland, Nelly and Collins, Elliot},
  title      = {Human {Work} and {AI} {Delegation}: {Understanding} {Programming} {Students}' {Debugging} {Process} in the {Era} of {Generative} {AI}: {The} {Influence} of {Generative} {AI} on {Undergraduate} {Students}' {Competence} and {Perceptions} on their {Studies} and {Future} {Work}},
  school     = {Malm{\"o} University},
  year       = {2025},
  file       = {Full Text PDF:Sajland2025 - Human Work and AI Delegation_ Understanding Programming Students Debugging Process in the Era of Generative AI _ the Influence of Generative AI on Undergraduate Students Competence and Perceptions on Their Studies and Future Work.pdf:PDF:https\://www.diva-portal.org/smash/get/diva2\:1977937/FULLTEXT02},
  groups     = {Generative AI, Debugging},
  language   = {eng},
  shorttitle = {Human {Work} and {AI} {Delegation}},
  url        = {https://urn.kb.se/resolve?urn=urn:nbn:se:mau:diva-78150},
  urldate    = {2025-11-10},
}
@inproceedings{Yang2024,
  author = {Yang, Stephanie and Zhao, Hanzhang and Xu, Yudian and Brennan, Karen and Schneider, Bertrand},
  title = {Debugging with an {AI} {Tutor}: {Investigating} {Novice} {Help}-seeking {Behaviors} and {Perceived} {Learning}},
  shorttitle = {Debugging with an {AI} {Tutor}},
  booktitle = {Proceedings of the 2024 {ACM} {Conference} on {International} {Computing} {Education} {Research} - {Volume} 1},
  series = {{ICER} '24},
  collection = {ICER 2024},
  volume = {1},
  pages = {84--94},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  month = aug,
  year = {2024},
  isbn = {9798400704758},
  doi = {10.1145/3632620.3671092},
  url = {https://doi.org/10.1145/3632620.3671092},
  urldate = {2025-11-10},
  abstract = {Debugging is a crucial skill for programmers, yet it can be challenging for novices to learn. The introduction of large language models (LLMs) has opened up new possibilities for providing personalized debugging support to students. However, concerns have been raised about potential student over-reliance on LLM-based tools. This mixed-methods study investigates how a pedagogically-designed LLM-based chatbot supports students debugging efforts in an introductory programming course. We conducted interviews and debugging think-aloud tasks with 20 students at three points throughout the semester. We specifically focused on characterizing when students initiate help from the chatbot during debugging, how they engage with the chatbots responses, and how they describe their learning experiences with the chatbot. By analyzing data from the debugging tasks, we identified varying help-seeking behaviors and levels of engagement with the chatbots responses, depending on students familiarity with the suggested strategies. Interviews revealed that students appreciated the content and experiential knowledge provided by the chatbot, but did not view it as a primary source for learning debugging strategies. Additionally, students self-identified certain chatbot usage behaviors as negative, “non-ideal” engagement and others as positive, “learning-oriented” usage. Based on our findings, we discuss pedagogical implications and future directions for designing pedagogical chatbots to support debugging.},
  groups = {Generative AI, Debugging},
  ranking = {rank5},
  readstatus = {skimmed},
}
@article{Hagendorff2024,
  author = {Hagendorff, Thilo},
  title = {Mapping the {Ethics} of {Generative} {AI}: {A} {Comprehensive} {Scoping} {Review}},
  shorttitle = {Mapping the {Ethics} of {Generative} {AI}},
  journal = {Minds and Machines},
  volume = {34},
  number = {4},
  pages = {39},
  month = sep,
  year = {2024},
  publisher = {Springer Science and Business Media LLC},
  issn = {1572-8641},
  doi = {10.1007/s11023-024-09694-w},
  url = {https://doi.org/10.1007/s11023-024-09694-w},
  urldate = {2025-11-10},
  language = {en},
  abstract = {The advent of generative artificial intelligence and the widespread adoption of it in society engendered intensive debates about its ethical implications and risks. These risks often differ from those associated with traditional discriminative machine learning. To synthesize the recent discourse and map its normative concepts, we conducted a scoping review on the ethics of generative artificial intelligence, including especially large language models and text-to-image models. Our analysis provides a taxonomy of 378 normative issues in 19 topic areas and ranks them according to their prevalence in the literature. The study offers a comprehensive overview for scholars, practitioners, or policymakers, condensing the ethical debates surrounding fairness, safety, harmful content, hallucinations, privacy, interaction risks, security, alignment, societal impacts, and others. We discuss the results, evaluate imbalances in the literature, and explore unsubstantiated risk scenarios.},
  keywords = {Generative artificial intelligence, Large language models, Image generation models, Ethics},
  file = {Full Text PDF:Hagendorff2024 - Mapping the Ethics of Generative AI_ a Comprehensive Scoping Review.pdf:PDF:https\://link.springer.com/content/pdf/10.1007%2Fs11023-024-09694-w.pdf},
  groups = {Generative AI, Ethics/Sustainability},
}
@InCollection{2016,
  author    = {Aleven, Vincent and McLaughlin, Elizabeth A.},
  title     = {Instruction {Based} on {Adaptive} {Learning} {Technologies}},
  booktitle = {Handbook of {Research} on {Learning} and {Instruction}},
  publisher = {Routledge},
  year      = {2016},
  edition   = {2},
  abstract  = {A common intuition is that instruction is most effective if it takes into account (a) that
learners are different, and (b) that they change as they learn. But},
  groups    = {Motivation/Engagement, Pedagodgy},
}
@article{Blake2010,
  author = {Anne M. Blake and James L. Moseley},
  title = {The Emerging Technology of Avatars: Some Educational Considerations},
  journal = {Educational Technology},
  volume = {50},
  number = {2},
  pages = {13--20},
  year = {2010},
  publisher = {Educational Technology Publications, Inc.},
  issn = {00131962},
  url = {http://www.jstor.org/stable/44429772},
  urldate = {2026-01-03},
  abstract = {Avatars are gaining popularity as an emerging technology to facilitate learning and instruction. Avatars can be used as agents of e-learning applications or as part of immersive learning environments such as Second Life. Research indicates that avatar use has numerous potential benefits, including increased student engagement and opportunities for quality interaction among online participants. Educators should consider effective design principles and criteria when deciding to include avatars in course design and development.},
  file = {:Blake-EmergingTechnologyAvatars-2010.pdf:PDF},
  groups = {Avatars, Pedagodgy},
}
@inproceedings{Scott2014,
  author = {Scott, Michael James and Ghinea, Gheorghita},
  title = {Measuring enrichment: the assembly and validation of an instrument to assess student self-beliefs in {CS1}},
  shorttitle = {Measuring enrichment},
  booktitle = {Proceedings of the tenth annual conference on {International} computing education research},
  series = {{ICER} '14},
  pages = {123--130},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  month = jul,
  year = {2014},
  isbn = {9781450327558},
  doi = {10.1145/2632320.2632350},
  url = {https://dl.acm.org/doi/10.1145/2632320.2632350},
  urldate = {2026-01-27},
  abstract = {Educational research has shown that self-beliefs can have profound influences on learning behaviour and achievement. It follows, then, that beliefs about the nature of programming aptitude (e.g., students' mindset) and the way in which individuals perceive themselves as programmers (e.g., students' self-concept) could also have a salient impact on programming practice behaviour and the development of programming expertise. However, in order to test this hypothesis, a valid and reliable measurement instrument is needed. This paper draws upon the Control-Value Theory of Achievement Emotion to assemble such a measurement instrument. An evaluation of the proposed measurement instrument with three cohorts of undergraduate computing students (N=239) then demonstrates that reliability and construct validity are adequate, while the concurrent validity of the conceptual framework is satisfactory. This suggests that the measurement instrument is suitable for further research into students' self-beliefs within the introductory programming context. However, it is important to note that this work represents only a first step and further validation is required to establish whether the measurement instrument is valid across different contexts and populations.},
  file = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/2632320.2632350?download=true:application/pdf},
  groups = {Motivation/Engagement},
}
@article{Bandura1977,
  author = {Bandura, Albert},
  title = {Self-efficacy: {Toward} a unifying theory of behavioral change},
  shorttitle = {Self-efficacy},
  journal = {Psychological Review},
  volume = {84},
  number = {2},
  pages = {191--215},
  year = {1977},
  publisher = {American Psychological Association},
  address = {US},
  issn = {1939-1471},
  doi = {10.1037/0033-295X.84.2.191},
  abstract = {Presents an integrative theoretical framework to explain and to predict psychological changes achieved by different modes of treatment. This theory states that psychological procedures, whatever their form, alter the level and strength of self-efficacy. It is hypothesized that expectations of personal efficacy determine whether coping behavior will be initiated, how much effort will be expended, and how long it will be sustained in the face of obstacles and aversive experiences. Persistence in activities that are subjectively threatening but in fact relatively safe produces, through experiences of mastery, further enhancement of self-efficacy and corresponding reductions in defensive behavior. In the proposed model, expectations of personal efficacy are derived from 4 principal sources of information: performance accomplishments, vicarious experience, verbal persuasion, and physiological states. Factors influencing the cognitive processing of efficacy information arise from enactive, vicarious, exhortative, and emotive sources. The differential power of diverse therapeutic procedures is analyzed in terms of the postulated cognitive mechanism of operation. Findings are reported from microanalyses of enactive, vicarious, and emotive modes of treatment that support the hypothesized relationship between perceived self-efficacy and behavioral changes. (21/2 p ref) (PsycInfo Database Record (c) 2025 APA, all rights reserved)},
  keywords = {Behavior Change, Cognitive Processes, Personality Processes, Personality Theory, Self-Concept, Treatment},
  groups = {Self efficacy},
}
@article{Honicke2016,
  author = {Honicke, Toni and Broadbent, Jaclyn},
  title = {The influence of academic self-efficacy on academic performance: {A} systematic review},
  shorttitle = {The influence of academic self-efficacy on academic performance},
  journal = {Educational Research Review},
  volume = {17},
  pages = {63--84},
  month = feb,
  year = {2016},
  issn = {1747-938X},
  doi = {10.1016/j.edurev.2015.11.002},
  url = {https://www.sciencedirect.com/science/article/pii/S1747938X15000639},
  urldate = {2026-01-27},
  abstract = {This review integrates 12 years of research on the relationship between academic self-efficacy and university student's academic performance, and known cognitive and motivational variables that explain this relationship. Previous reviews report moderate correlations between these variables, but few discuss mediating and moderating factors that impact this relationship. Systematic searches were conducted in April 2015 of psychological, educational, and relevant online databases for studies investigating academic self-efficacy and performance in university populations published between September 2003 and April 2015. Fifty-nine papers were eligible. Academic self-efficacy moderately correlated with academic performance. Several mediating and moderating factors were identified, including effort regulation, deep processing strategies and goal orientations. Given the paucity of longitudinal studies identified in this review, further research into how these variables relate over time is necessary in order to establish causality and uncover the complex interaction between academic self-efficacy, performance, and motivational and cognitive variables that impact it.},
  keywords = {Academic self-efficacy, Academic performance, University, Student, Systematic review},
  file = {ScienceDirect Full Text PDF:https\://www.sciencedirect.com/science/article/abs/pii/S1747938X15000639/pdfft?download=true:application/pdf},
  groups = {Self efficacy},
}
@InProceedings{Lee2014,
  author     = {Lee, Michael J.},
  title      = {Gidget: {An} online debugging game for learning and engagement in computing education},
  booktitle  = {2014 {IEEE} {Symposium} on {Visual} {Languages} and {Human}-{Centric} {Computing} ({VL}/{HCC})},
  year       = {2014},
  month      = jul,
  pages      = {193--194},
  publisher  = {IEEE},
  abstract   = {As interest in acquiring programming skills continue to increase, many are turning to discretionary online resources to learn programming. However, researchers and educators need more data to better understand who these learners are and what their needs are to create useful and sustainable learning technologies to support them. In my work, I investigate the factors that make a learning game engaging for users, and examine if playing through the game shows measurable learning outcomes. The game will be released the public, giving us the opportunity to collect large amounts of data. This data can be shared with other researchers to improve discretionary online tools such as educational games to support large-scale computing education efforts designed for a wide-range of users.},
  doi        = {10.1109/VLHCC.2014.6883051},
  groups     = {Self efficacy, Avatars},
  issn       = {1943-6106},
  keywords   = {Games, Programming profession, Debugging, Computers, Educational institutions},
  shorttitle = {Gidget},
  url        = {https://ieeexplore.ieee.org/abstract/document/6883051},
  urldate    = {2026-01-27},
}
@inproceedings{Lee2011,
  author = {Lee, Michael J. and Ko, Amy J.},
  title = {Personifying programming tool feedback improves novice programmers' learning},
  booktitle = {Proceedings of the seventh international workshop on {Computing} education research},
  series = {{ICER} '11},
  pages = {109--116},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  month = aug,
  year = {2011},
  isbn = {9781450308298},
  doi = {10.1145/2016911.2016934},
  url = {https://dl.acm.org/doi/10.1145/2016911.2016934},
  urldate = {2026-01-28},
  abstract = {Many novice programmers view programming tools as all-knowing, infallible authorities about what is right and wrong about code. This misconception is particularly detrimental to beginners, who may view the cold, terse, and often judgmental errors from compilers as a sign of personal failure. It is possible, however, that attributing this failure to the computer, rather than the learner, may improve learners' motivation to program. To test this hypothesis, we present Gidget, a game where the eponymous robot protagonist is cast as a fallible character that blames itself for not being able to correctly write code to complete its missions. Players learn programming by working with Gidget to debug its problematic code. In a two-condition controlled experiment, we manipulated Gidget's level of personification in: communication style, sound effects, and image. We tested our game with 116 self-described novice programmers recruited on Amazon's Mechanical Turk and found that, when given the option to quit at any time, those in the experimental condition (with a personable Gidget) completed significantly more levels in a similar amount of time. Participants in the control and experimental groups played the game for an average time of 39.4 minutes (SD=34.3) and 50.1 minutes (SD=42.6) respectively. These finding suggest that how programming tool feedback is portrayed to learners can have a significant impact on motivation to program and learning success.},
  file = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/2016911.2016934?download=true:application/pdf},
  groups = {Avatars, Debugging},
}
@inproceedings{Pechorina2023,
  author = {Pechorina, Yulia and Anderson, Keith and Denny, Paul},
  title = {Metacodenition: {Scaffolding} the {Problem}-{Solving} {Process} for {Novice} {Programmers}},
  shorttitle = {Metacodenition},
  booktitle = {Proceedings of the 25th {Australasian} {Computing} {Education} {Conference}},
  series = {{ACE} '23},
  pages = {59--68},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  month = jan,
  year = {2023},
  isbn = {9781450399418},
  doi = {10.1145/3576123.3576130},
  url = {https://dl.acm.org/doi/10.1145/3576123.3576130},
  urldate = {2026-01-28},
  abstract = {Problem-solving is a central activity to computing, and thus a key skill that novices develop when learning programming. While most programming-related concepts are taught explicitly in introductory courses, it is common for problem-solving to be taught implicitly usually through having students perform programming drills. This approach can be problematic for some learners and may lead to the development of unproductive problem solving strategies that leave students feeling lost when faced with new problems. A substantial body of work has investigated the explicit teaching of problem-solving and related metacognitive skills. This prior literature has shown that teaching students a model for the problem-solving process and how to track their progress within that model leads to greater self-efficacy and productivity. Interventions targeting isolated steps in these models have been shown to work, but there have been few efforts to combine such interventions into a single coherent system. Our contribution is a novel tool called Metacodenition, which is a programming environment for novices that provides metacognitive scaffolding around an existing problem-solving framework. We find that Metacodenitions scaffolding improves performance on code-writing tasks and that students view Metacodenition to be a helpful tool they would use voluntarily.},
  file = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/3576123.3576130?download=true:application/pdf},
  groups = {Debugging},
}
@inproceedings{AbuDeeb2021,
  author = {Abu Deeb, Fatima and Hickey, Timothy},
  title = {Reflective {Debugging} in {Spinoza} {V3}.0},
  booktitle = {Proceedings of the 23rd {Australasian} {Computing} {Education} {Conference}},
  series = {{ACE} '21},
  pages = {125--130},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  month = mar,
  year = {2021},
  isbn = {9781450389761},
  doi = {10.1145/3441636.3442313},
  url = {https://dl.acm.org/doi/10.1145/3441636.3442313},
  urldate = {2026-01-28},
  abstract = {In this paper we present an online IDE (Spinoza 3.0) for teaching Python programming in which the students are (sometimes) required to verbally reflect on their error messages and unit test failures before being allowed to modify their code. This system was designed to be used in large synchronous in-person, remote, or hybrid classes for either in-class problem solving or out-of-class homework problems. For each student and problem, the system makes a random choice about whether to require reflection on all debugging steps. If the student/problem pair required reflection, then after each time the student ran the program and received feedback as an error message or a set of unit test results, they were required to type in a description of the bug and a plan for how to modify the program to eliminate the bug. The main result is that the number of debugging steps to reach a correct solution was statistically significantly less for problems where the students were required to reflect on each debugging step. We suggest that future developers of pedagogical IDEs consider adding features which require students to reflect frequently during the debugging process.},
  file = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/3441636.3442313?download=true:application/pdf},
  groups = {Debugging},
}
@article{Nooijen2024,
  author = {van Nooijen, Christine C. A. and de Koning, Bjorn B. and Bramer, Wichor M. and Isahakyan, Anna and Asoodar, Maryam and Kok, Ellen and van Merrienboer, Jeroen J. G. and Paas, Fred},
  title = {A {Cognitive} {Load} {Theory} {Approach} to {Understanding} {Expert} {Scaffolding} of {Visual} {Problem}-{Solving} {Tasks}: {A} {Scoping} {Review}},
  shorttitle = {A {Cognitive} {Load} {Theory} {Approach} to {Understanding} {Expert} {Scaffolding} of {Visual} {Problem}-{Solving} {Tasks}},
  journal = {Educational Psychology Review},
  volume = {36},
  number = {1},
  pages = {12},
  month = jan,
  year = {2024},
  issn = {1573-336X},
  doi = {10.1007/s10648-024-09848-3},
  url = {https://doi.org/10.1007/s10648-024-09848-3},
  urldate = {2026-01-28},
  language = {en},
  abstract = {Visual problem-solving is an essential skill for professionals in various visual domains. Novices in these domains acquire such skills through interactions with experts (e.g., apprenticeships). Experts guide novice visual problem-solving with scaffolding behaviours. However, there is little consensus about the description and classification of scaffolding behaviours in practice, and to our knowledge, no framework connects scaffolding to underlying cognitive mechanisms. Understanding effective scaffolding is particularly relevant to domain-specific expert-novice research regarding visual problem-solving, where in-person scaffolding by an expert is a primary teaching method. Scaffolding regulates the flow of information within the learners working memory, thereby reducing cognitive load. By examining scaffolding research from the perspective of cognitive load theory, we aspire to classify scaffolding behaviours as cognitive behaviours of cueing (which involves attention allocation) and chunking (the practice of grouping information, often in conjunction with prior knowledge), into a cohesive and unified framework. In this scoping review, 6533 articles were considered, from which 18 were included. From these 18 articles, 164 excerpts describing expert-novice interaction were examined and categorised based on cognitive strategy (cueing or chunking) and method of expression (verbal or nonverbal). An inductive category (active or passive) was also identified and coded. Most scaffolding behaviours were categorised as active verbal cueing and active verbal chunking. Qualitative patterns in excerpts were collated into 12 findings. Our framework may help to integrate existing and new scaffolding research, form the basis for future expert-novice interaction research, and provide insights into the fine-grained processes that comprise scaffolded visual problem-solving.},
  keywords = {Expert-novice interaction, Visual problem-solving, Scaffolding, Cognitive load theory, Scoping review},
  file = {Full Text PDF:Nooijen2024 - A Cognitive Load Theory Approach to Understanding Expert Scaffolding of Visual Problem Solving Tasks_ a Scoping Review.pdf:PDF:https\://link.springer.com/content/pdf/10.1007%2Fs10648-024-09848-3.pdf},
  groups = {Debugging},
  ranking = {rank4},
}
@inproceedings{Miljanovic2017,
  author = {Michael A. Miljanovic and Jeremy S. Bradbury},
  title = {RoboBUG: {A} Serious Game for Learning Debugging Techniques},
  booktitle = {Proceedings of the 2017 {ACM} Conference on International Computing Education Research, {ICER} 2017, Tacoma, WA, USA, August 18-20, 2017},
  editor = {Josh Tenenberg and Donald Chinn and Judy Sheard and Lauri Malmi},
  pages = {93--100},
  publisher = {{ACM}},
  year = {2017},
  doi = {10.1145/3105726.3106173},
  biburl = {https://dblp.org/rec/conf/icer/MiljanovicB17.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  groups = {Evaluation},
}
@Article{Davis1989,
  author    = {Fred D. Davis},
  journal   = {{MIS} Quarterly},
  title     = {Perceived Usefulness, Perceived Ease of Use, and User Acceptance of Information Technology},
  year      = {1989},
  number    = {3},
  pages     = {319--340},
  volume    = {13},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/journals/misq/Davis89.bib},
  doi       = {10.2307/249008},
  groups    = {Evaluation},
  url       = {http://misq.org/perceived-usefulness-perceived-ease-of-use-and-user-acceptance-of-information-technology.html},
}
@Article{likert1932,
  author  = {Likert, Rensis},
  journal = {Archives of Psychology},
  title   = {A Technique for the Measurement of Attitudes},
  year    = {1932},
  pages   = {1--55},
  volume  = {140},
  url     = {https://psycnet.apa.org/record/1933-01885-001},
}
@book{Coe2025,
  editor = {Robert Coe and Michael Waring and Larry Hedges and Laura Day Ashley},
  title = {Research Methods and Methodologies in Education},
  edition = {4},
  publisher = {SAGE Publications},
  year = {2025},
  pages = {448},
  isbn = {9781529685336},
  url = {https://uk.sagepub.com/en-gb/eur/research-methods-and-methodologies-in-education/book287326#description},
  groups = {Evaluation, Method},
  qualityassured = {qualityAssured},
}
@Article{Yang2024a,
  author    = {Stephanie Yang and Miles Baird and Eleanor O'Rourke and Karen Brennan and Bertrand Schneider},
  journal   = {{ACM} Transactions on Computing Education},
  title     = {Decoding Debugging Instruction: {A} Systematic Literature Review of Debugging Interventions},
  year      = {2024},
  number    = {4},
  pages     = {1--44},
  volume    = {24},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/journals/toce/YangBOBS24.bib},
  doi       = {10.1145/3690652},
  groups    = {Debugging, Self efficacy},
}
@Article{Liu2020,
  author   = {Xiaoxuan Liu and Samantha {Cruz Rivera} and David Moher and Melanie J Calvert and Alastair K Denniston and Hutan Ashrafian and Andrew L Beam and An-Wen Chan and Gary S Collins and Ara Darzi and Jonathan J Deeks and M Khair ElZarrad and Cyrus Espinoza and Andre Esteva and Livia Faes and Lavinia {Ferrante di Ruffano} and John Fletcher and Robert Golub and Hugh Harvey and Charlotte Haug and Christopher Holmes and Adrian Jonas and Pearse A Keane and Christopher J Kelly and Aaron Y Lee and Cecilia S Lee and Elaine Manna and James Matcham and Melissa McCradden and Joao Monteiro and Cynthia Mulrow and Luke Oakden-Rayner and Dina Paltoo and Maria Beatrice Panico and Gary Price and Samuel Rowley and Richard Savage and Rupa Sarkar and Sebastian J Vollmer and Christopher Yau},
  journal  = {The Lancet Digital Health},
  title    = {Reporting guidelines for clinical trial reports for interventions involving artificial intelligence: the {CONSORT}-{AI} extension},
  year     = {2020},
  issn     = {2589-7500},
  number   = {10},
  pages    = {e537--e548},
  volume   = {2},
  abstract = {The CONSORT 2010 statement provides minimum guidelines for reporting randomised trials. Its widespread use has been instrumental in ensuring transparency in the evaluation of new interventions. More recently, there has been a growing recognition that interventions involving artificial intelligence (AI) need to undergo rigorous, prospective evaluation to demonstrate impact on health outcomes. The CONSORT-AI (Consolidated Standards of Reporting Trials-Artificial Intelligence) extension is a new reporting guideline for clinical trials evaluating interventions with an AI component. It was developed in parallel with its companion statement for clinical trial protocols: SPIRIT-AI (Standard Protocol Items: Recommendations for Interventional Trials-Artificial Intelligence). Both guidelines were developed through a staged consensus process involving literature review and expert consultation to generate 29 candidate items, which were assessed by an international multi-stakeholder group in a two-stage Delphi survey (103 stakeholders), agreed upon in a two-day consensus meeting (31 stakeholders), and refined through a checklist pilot (34 participants). The CONSORT-AI extension includes 14 new items that were considered sufficiently important for AI interventions that they should be routinely reported in addition to the core CONSORT 2010 items. CONSORT-AI recommends that investigators provide clear descriptions of the AI intervention, including instructions and skills required for use, the setting in which the AI intervention is integrated, the handling of inputs and outputs of the AI intervention, the human--AI interaction and provision of an analysis of error cases. CONSORT-AI will help promote transparency and completeness in reporting clinical trials for AI interventions. It will assist editors and peer reviewers, as well as the general readership, to understand, interpret, and critically appraise the quality of clinical trial design and risk of bias in the reported outcomes.},
  doi      = {10.1016/S2589-7500(20)30218-1},
  groups   = {Method},
  url      = {https://www.sciencedirect.com/science/article/pii/S2589750020302181},
}
@inproceedings{Mitchell2021,
  author = {Mitchell, Alexander and Greer, Terry and New, Warwick and Walton-Rivers, Joseph and Watkins, Matt and Brown, Douglas and Scott, Michael James},
  title = {Student Perspectives on the Purpose of Peer Evaluation During Group Game Development Projects},
  booktitle = {Proceedings of the 2021 Conference on United Kingdom \& Ireland Computing Education Research},
  series = {UKICER '21},
  articleno = {7},
  numpages = {7},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  location = {Glasgow, United Kingdom},
  year = {2021},
  isbn = {9781450385688},
  doi = {10.1145/3481282.3481294},
  url = {https://doi.org/10.1145/3481282.3481294},
  abstract = {Being able to work well in a team is valued in industry and beyond. As such, many university educators strive to help their students to collaborate effectively. However, it is typically the case that more than ad-hoc experience is needed to master teamwork. Often, students need to become reflective practitioners who learn from their experiences and enact change. Self and peer evaluation can help evoke such reflection. However, the facilitating conditions for effective learning from peer evaluation during group projects in computing are not yet well-defined. This research is an initial step in identifying these conditions. In this study, students engaged in a long-term multidisciplinary software engineering project in which they produced a digital game. They completed regular exercises in which they reflected upon and wrote about their contributions to the project as well as those of their peers. Thematic analysis of 200 responses to an open-ended question about the purpose of these exercises illustrated student perspectives: giving and receiving feedback; prompting personal reflection and improvement; supporting supervision; aiding marking; informing project planning and management; exploring and reshaping group dynamics; improving project outputs; providing a system to hold group members accountable; and giving a sense of safety to raise issues without repercussion. Giving consideration to these differing perceptions will help educators to address student concerns about group projects, notably standardisation, workload efficiency, and fairness, and will lay the foundations for a model of peer evaluation which improves teamwork.},
  keywords = {Teamwork, Student Team Projects, Software Development, Project-based Learning, Peer Review, Peer Rating, Peer Evaluation, Peer Assessment, Group Work, Collaborative Learning},
  groups = {My Papers},
}
@InProceedings{Mitchell2022,
author = {Mitchell, Alexander and Scott, Michael and Walton-Rivers, Joseph and Watkins, Matt and New, Warwick and Brown, Douglas},
booktitle = {Proceedings of the 2022 Conference on United Kingdom \& Ireland Computing Education Research},
title = {An Exploratory Analysis of Student Experiences with Peer Evaluation in Group Game Development Projects},
year = {2022},
address = {New York, NY, USA},
publisher = {Association for Computing Machinery},
series = {UKICER '22},
  abstract    = {Collaborative projects are commonplace in computing education. They typically enable students to gain experience building software in teams, equipping them with the teamwork skills they need to be competitive in the labour market. However, students often need encouragement to reflect upon and synthesise their experience to attain the most learning. Peer evaluation offers one such approach, but the conditions which facilitate effective peer evaluation have not yet been established. This paper seeks to provide insight into student experiences with peer evaluation. It builds upon prior qualitative work, analysing quantitative data collected through a questionnaire taken by undergraduate students on a collaborative digital game development module. An exploratory factor analysis identifies seven dimensions of variance in the student experience: perceived impact; arbitrary influence; inconsistency; team cohesiveness; assessment pressure; ease and professionalism. Correlation analysis suggests some factors such as arbitrary influence, team cohesion, assessment pressure, and professionalism are associated with attained learning, whilst factors such as inconsistency and onerousness are not. This informs the development of a conceptual framework, suggesting focuses which facilitate effective peer evaluation. Expanding this conceptual framework and validating it across different demographics, contexts, and project types are suggested as avenues for further investigation.},
articleno = {11},
doi = {10.1145/3555009.3555021},
groups = {My Papers},
isbn = {9781450397421},
keywords = {Assessment, Collaboration, Evaluation, Peer, Project-based Learning, Rating, Review, Software Development},
location = {Dublin, Ireland},
numpages = {7},
url = {https://doi.org/10.1145/3555009.3555021},
}
@Misc{AmutriLtd2025,
author = {{Amutri Ltd}},
  note      = {Accessed: 2026-04-14},
title = {{Amutri}},
year = {2025},
address = {{Amutri Ltd, Launchpad, Penryn, Cornwall, UK, TR10 9FE}},
url = {https://www.amutri.com/},
  urldate   = {2026-04-14},
}
@InProceedings{Renzella2025,
author = {Renzella, Jake and Vassar, Alexandra and Lee Solano, Lorenzo and Taylor, Andrew},
booktitle = {Proceedings of the 56th ACM Technical Symposium on Computer Science Education V. 1},
title = {Compiler-Integrated, Conversational AI for Debugging CS1 Programs},
year = {2025},
address = {New York, NY, USA},
  pages       = {994--1000},
publisher = {Association for Computing Machinery},
series = {SIGCSETS 2025},
abstract = {Large Language Models (LLMs) present a transformative opportunity to address longstanding challenges in computing education. This paper presents a conversational AI extension to an LLM-enhanced C/C++ compiler which generates pedagogically sound programming error explanations. Our new tool, DCC Sidekick, retains compiler integration, allowing students to see their code, error messages, and stack frames alongside a conversational AI interface. Compiler context improves error explanations, and provides a seamless development experience. We present quantitative analyses of Sidekick's usage and engagement patterns in a large CS1 course. In the first seven weeks of use, 959 students initiated 11,222 DCC Sidekick sessions, generating 17,982 error explanations. Over half of all conversations occur outside of business hours, highlighting the value of these always-available tools. Early results indicate strong adoption of conversational AI debugging tools, demonstrating scalability in supporting large CS1 courses. We share implementation details and lessons learned, offering guidance to educators considering integrating AI tools with pedagogical guardrails.},
doi = {10.1145/3641554.3701827},
groups = {Generative AI},
isbn = {9798400705311},
keywords = {ai in education, cs1, generative ai, programming error messages},
location = {Pittsburgh, PA, USA},
numpages = {7},
url = {https://doi.org/10.1145/3641554.3701827},
}
@Article{Prof.Dr.YoesoepEdhieRachmad2024,
  author       = {Rachmad, Yoesoep Edhie},
title = {Adaptive {Learning} {Theory}},
year = {2024},
abstract = {Rachmad, Yoesoep Edhie. 2022. Adaptive Learning Theory. La Paz Costanera Publicaciones Internacionales, Edición Especial 2022. https://doi.org/10.17605/osf.io/vfz38
Adaptive Learning Theory, introduced by Yoesoep Edhie Rachmad in 2022, aims to understand how adaptive learning can enhance educational outcomes and skills in various environments. In an era of rapidly evolving information, the ability to learn and adapt is crucial. This theory provides guidelines on creating a flexible and responsive learning environment tailored to individual needs.
Adaptive Learning Theory defines adaptive learning as an approach that utilizes technology and methodologies to tailor the learning process according to the needs, pace, and preferences of each individual. The core concept of this theory is that personalized and adaptive learning can enhance engagement and learning effectiveness. It emphasizes the importance of flexibility, timely feedback, and using data to adjust learning strategies.
The theory is based on the phenomenon that traditional learning methods often fail to meet the needs of all learners effectively. For instance, in large classrooms, it is challenging to tailor lesson material to each student's learning pace. This phenomenon highlights the need for a more adaptive and personalized approach to improve learning outcomes.
The working principles of Adaptive Learning Theory involve several key steps. First, it is essential to identify the learning needs and preferences of each individual through initial assessments. Second, developing content and learning strategies that can be dynamically adjusted based on data and feedback obtained during the learning process is crucial. Third, implementing technology that supports adaptive learning, such as e-learning platforms using algorithms to tailor lesson materials, is necessary. The theory also stresses the importance of continuous monitoring and evaluation to ensure the effectiveness of adaptive learning strategies.
Key indicators of this theory include student engagement levels, improvement in learning outcomes, and student satisfaction with the learning methods used. Other indicators involve the ability to adjust learning pace and paths and the effectiveness of technology in supporting learning. Measuring these indicators can be done through surveys, learning data analysis, and performance evaluations. These indicators help evaluate how well adaptive learning improves engagement and learning outcomes.
Operational variables of Adaptive Learning Theory include measuring student engagement levels, the effectiveness of adaptive strategies applied, and achieved learning outcomes. These variables can be measured through surveys, performance assessments, and learning data analysis. This data provides insights into how adaptive learning is managed and integrated into the learning process and how interventions can be designed to improve learning effectiveness.
The theory can be applied in various fields such as Human Resource Management (HRM), Psychology, Education, Social Communication, and Health. In HRM, this theory helps develop adaptive training programs to enhance employee skills. In Psychology, it is useful for understanding how adaptive learning methods can support mental and emotional development. In Education, the theory supports developing flexible and responsive curricula to meet students' needs. In Social Communication, it guides strategies to increase awareness and adoption of adaptive learning methods in society. In Health, the theory helps design training programs that support health professionals in keeping up with advancements in knowledge and technology.
The success of applying this theory is determined by several key factors. Support from leaders and an organizational culture that supports adaptive learning is vital. Additionally, having adequate resources to develop and implement adaptive learning technology is a key factor. Success is also influenced by the ability to utilize feedback and adjust learning strategies based on evaluation results and individual needs.
Implementing this theory requires a structured and continuous approach. The first step is identifying needs and barriers in adopting adaptive learning and developing appropriate programs. Effective strategies include providing training on the importance of adaptive learning, developing policies that support the use of adaptive learning technology, and creating mechanisms for feedback and evaluation. Continuous evaluation and adjusting strategies based on stakeholder feedback are crucial to ensure long-term success.
Challenges in applying this theory include resistance to change, lack of understanding of the importance of adaptive learning, and limited resources to develop and implement adaptive learning programs. However, with strong support from leaders and an organizational culture that supports, along with continuous education, these challenges can be overcome. Success also depends on the ability to create an environment that supports active engagement and effective adaptive learning management, as well as the ability to manage disruptions and barriers that may arise in the adaptive learning process.
Adaptive Learning Theory emphasizes the importance of flexibility and personalization in achieving optimal learning outcomes. This theory provides a comprehensive framework for understanding and facilitating adaptive learning in various fields. With proper application, this theory can help individuals and organizations improve their ability to manage and implement adaptive learning, better face challenges, and create positive impacts on performance and well-being.
Table of Contents
Adaptive Learning Theory
By Yoesoep Edhie Rachmad
Published by La Paz Costanera Publicaciones Internacionales, Edición Especial 2022
DOI: https://doi.org/10.17605/osf.io/vfz38
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
Chapter 1: Introduction to Adaptive Learning
1.1 Defining Adaptive Learning and Its Importance ............. 3
1.2 Historical Overview of Learning Theories ............. 9
1.3 How Adaptive Learning Differs from Traditional Methods ............. 15
Chapter 2: The Foundations of Adaptive Learning Theory
2.1 The Role of Technology in Adaptive Learning ............. 21
2.2 Personalized Learning: The Core of Adaptivity ............. 27
2.3 Key Concepts: Flexibility, Feedback, and Data-Driven Learning ............. 33
Chapter 3: Implementing Adaptive Learning in Education
3.1 Developing Adaptive Learning Models for Classrooms ............. 41
3.2 Tools and Technologies Supporting Adaptive Learning ............. 47
3.3 Designing Curriculum for Adaptive Learning Systems ............. 53
Chapter 4: Adaptive Learning in Online and Digital Platforms
4.1 E-learning and Adaptive Systems: An Overview ............. 61
4.2 How Algorithms Tailor Learning Paths for Students ............. 69
4.3 The Future of Digital Education: Adaptive Learning Trends ............. 75
Chapter 5: Data Analytics in Adaptive Learning
5.1 Using Data to Shape Individual Learning Experiences ............. 81
5.2 Key Metrics for Measuring Success in Adaptive Learning ............. 87
5.3 The Impact of Learning Analytics on Student Outcomes ............. 93
Chapter 6: Psychological Aspects of Adaptive Learning
6.1 Cognitive Development and Adaptive Learning ............. 101
6.2 Motivation and Engagement in Personalized Learning ............. 109
6.3 Emotional Responses to Adaptive Learning Systems ............. 115
Chapter 7: Adaptive Learning in Professional Training
7.1 Customizing Corporate Learning for Employee Growth ............. 121
7.2 Adaptive Learning in Human Resource Management ............. 127
7.3 Case Studies: Successful Adaptive Training Programs ............. 133
Chapter 8: Challenges in Implementing Adaptive Learning
8.1 Overcoming Resistance to Change in Education ............. 141
8.2 Addressing Technological Barriers in Learning Systems ............. 149
8.3 Strategies for Scaling Adaptive Learning Programs ............. 155
Chapter 9: Measuring the Effectiveness of Adaptive Learning
9.1 Indicators of Learning Success and Student Satisfaction ............. 161
9.2 Evaluating the Long-Term Impact of Adaptive Learning ............. 167
9.3 Adaptive Learning and its Role in Lifelong Education ............. 173
Chapter 10: Adaptive Learning and Social Equity
10.1 Closing the Achievement Gap through Personalization ............. 181
10.2 Making Adaptive Learning Accessible to All Learners ............. 187
10.3 Adaptive Learning as a Tool for Social Change ............. 193
Chapter 11: Future Directions in Adaptive Learning
11.1 Emerging Technologies in Adaptive Learning Systems ............. 201
11.2 How Artificial Intelligence Will Transform Adaptive Education ............. 209
11.3 Vision for the Future: A Global Approach to Personalized Learning ............. 215
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
Appendices
• Appendix A: Adaptive Learning Tools and Platforms ............. 221
• Appendix B: Case Studies of Adaptive Learning Applications ............. 229
• Appendix C: International Standards for Adaptive Learning Programs ............. 237
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
References .......................................................... 243
Index ...................................................................... 251
Acknowledgments ................................................... 259
AUTHOR PROFILE
In 2016, the author earned the title of Doctor of Humanity, hold a Ph.D. in Information Technology and a DBA in General Management. Since 2016, the author has been teaching at international universities in Malaysia, Singapore, Thailand, and the USA. In 1999, the author founded the Education Training Centre (ETC), an organization dedicated to providing educational services and social support for the underprivileged. This organization offers shelter homes for children in need of a safe place to live and drop-in schools for those who need to continue their education. The ETC is also involved in research aimed at advancing science, which led to the author earning the title of Professor and joining the WPF. Additionally, the author is actively involved in global social development programs through the United Nations. They are a member of the UN Global Compact (id-137635), the UN Global Market (id-709131), and the UN ECOSOC (id-677556). The author has served as a reviewer for several international journals and book chapters, and has written numerous books and articles on a wide range of topics including Philosophy, Economics, Management, Arts and Culture, Anthropology, Law, Psychology, Education, Sociology, Health, Technology, Tourism, and Communication.},
collaborator = {{Center For Open Science}},
doi = {10.17605/OSF.IO/VFZ38},
groups = {Motivation/Engagement, Pedagodgy},
publisher = {OSF},
url = {https://osf.io/vfz38/},
urldate = {2025-11-10},
}
@InProceedings{Hou2024,
author = {Hou, Xinying and Ericson, Barbara Jane and Wang, Xu},
booktitle = {Proceedings of the 23rd {Koli} {Calling} {International} {Conference} on {Computing} {Education} {Research}},
title = {Understanding the {Effects} of {Using} {Parsons} {Problems} to {Scaffold} {Code} {Writing} for {Students} with {Varying} {CS} {Self}-{Efficacy} {Levels}},
year = {2024},
address = {New York, NY, USA},
month = feb,
pages = {1--12},
publisher = {Association for Computing Machinery},
series = {Koli {Calling} '23},
abstract = {Introductory programming courses aim to teach students to write code independently. However, transitioning from studying worked examples to generating their own code is often difficult and frustrating for students, especially those with lower CS self-efficacy in general. Therefore, we investigated the impact of using Parsons problems as a code-writing scaffold for students with varying levels of CS self-efficacy. Parsons problems are programming tasks where students arrange mixed-up code blocks in the correct order. We conducted a between-subjects study with undergraduate students (N=89) on a topic where students have limited code-writing expertise. Students were randomly assigned to one of two conditions. Students in one condition practiced writing code without any scaffolding, while students in the other condition were provided with scaffolding in the form of an equivalent Parsons problem. We found that, for students with low CS self-efficacy levels, those who received scaffolding achieved significantly higher practice performance and in-practice problem-solving efficiency compared to those without any scaffolding. Furthermore, when given Parsons problems as scaffolding during practice, students with lower CS self-efficacy were more likely to solve them. In addition, students with higher pre-practice knowledge on the topic were more likely to effectively use the Parsons scaffolding. This study provides evidence for the benefits of using Parsons problems to scaffold students write-code activities. It also has implications for optimizing the Parsons scaffolding experience for students, including providing personalized and adaptive Parsons problems based on the students current problem-solving status.},
doi = {10.1145/3631802.3631832},
file = {Full Text PDF:https\://dl.acm.org/doi/pdf/10.1145/3631802.3631832?download=true:application/pdf},
groups = {Self efficacy, Debugging},
isbn = {9798400716539},
url = {https://dl.acm.org/doi/10.1145/3631802.3631832},
urldate = {2026-01-28},
}
@InProceedings{Lewis2011,
author = {Lewis, Colleen M. and Yasuhara, Ken and Anderson, Ruth E.},
booktitle = {Proceedings of the Seventh International Workshop on Computing Education Research},
title = {Deciding to major in computer science: a grounded theory of students' self-assessment of ability},
year = {2011},
address = {New York, NY, USA},
  pages     = {3--10},
publisher = {Association for Computing Machinery},
series = {ICER '11},
abstract = {There is great interest in understanding and influencing students' attraction to computing-related majors. This qualitative study is based on interviews with 31 students enrolled in introductory programming courses at two public universities in the United States. This paper presents a model of five factors that influence student decisions to major in CS and elaborates on our grounded theory analysis of one of these factors: how students assess their CS-related ability. We describe how students measure their ability in terms of speed, grades, and previous experience and how students make interpretations and decisions based upon these measurements. We found that students' interpretations were influenced by experiences in their environments and beliefs about ability as being fixed or malleable.},
doi = {10.1145/2016911.2016915},
groups = {Motivation/Engagement},
isbn = {9781450308298},
keywords = {major choice, grounded theory, ability},
location = {Providence, Rhode Island, USA},
numpages = {8},
url = {https://doi.org/10.1145/2016911.2016915},
}
@Book{Vickers2008,
author = {Paul Vickers},
publisher = {Thomson Learning},
  title     = {How to think like a programmer: problem solving for the bewildered},
year = {2008},
isbn = {9781844809004},
abstract = {How to Think Like a Programmer is a bright, accessible, fun read describing the mindset and mental methods of programmers. Anticipating the problems that students have through the character of Brian the Bewildered Wildebeest, the slower pace required for this approach is made interesting and engaging by hand-drawn sketches, frequent (paper-based) activities and the everyday tasks (e.g. coffee making) used as a basis of worked examples. How to Think Like a Programmer provides a fun and accessible way to learn the mental models needed to approach computational programmable problems.},
groups = {Pedagodgy},
language = {English},
}
@Article{Braun2006,
  author    = {Braun, Virginia and Clarke, Victoria},
journal = {Qualitative Research in Psychology},
title = {Using thematic analysis in psychology},
year = {2006},
number = {2},
pages = {77--101},
volume = {3},
abstract = {Thematic analysis is a poorly demarcated, rarely acknowledged, yet widely used qualitative analytic method within psychology. In this paper, we argue that it offers an accessible and theoretically flexible approach to analysing qualitative data. We outline what thematic analysis is, locating it in relation to other qualitative analytic methods that search for themes or patterns, and in relation to different epistemological and ontological positions. We then provide clear guidelines to those wanting to start thematic analysis, or conduct it in a more deliberate and rigorous way, and consider potential pitfalls in conducting thematic analysis. Finally, we outline the disadvantages and advantages of thematic analysis. We conclude by advocating thematic analysis as a useful and flexible method for qualitative research in and beyond psychology.},
doi = {10.1191/1478088706qp063oa},
eprint = {https://doi.org/10.1191/1478088706qp063oa},
groups = {Method},
publisher = {Routledge},
url = {https://doi.org/10.1191/1478088706qp063oa},
}
@Comment{jabref-meta: databaseType:bibtex;}
@Comment{jabref-meta: grouping:
0 AllEntriesGroup:;
1 StaticGroup:Emotions\;0\;0\;0x7d60bfff\;\;\;;
1 StaticGroup:Cognitive Load\;0\;1\;0xa2bf60ff\;\;\;;
1 StaticGroup:Pedagodgy\;0\;1\;0x60bfa2ff\;\;\;;
2 StaticGroup:Debugging\;0\;1\;0x56ac92ff\;\;\;;
2 StaticGroup:Generative AI\;0\;1\;0xac5670ff\;\;\;;
1 StaticGroup:Motivation/Engagement\;0\;0\;0xbf607dff\;\;\;;
1 StaticGroup:Avatars\;0\;1\;0x72bf60ff\;\;\;;
1 StaticGroup:Ethics/Sustainability\;0\;1\;\;\;\;;
1 StaticGroup:Self efficacy\;0\;1\;\;\;\;;
1 StaticGroup:Evaluation\;0\;1\;0xad60bfff\;\;\;;
1 StaticGroup:Method\;0\;1\;0xbf60a0ff\;\;\;;
1 StaticGroup:My Papers\;0\;1\;0x60bf7fff\;\;\;;
}