From 85774f1ed7db810527b91bc0a92c3d899834f8b5 Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Tue, 16 Sep 2025 11:41:52 -0400
Subject: [PATCH 01/21] add temporary endpoint peer.get_gpt_response()
---
.../applications/runestone/controllers/peer.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index d298360cf..88d4e2e80 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -849,3 +849,13 @@ def send_lti_scores():
_try_to_send_lti_grade(sid, assignment_id, force=True)
return json.dumps("success")
+
+
+def get_gpt_response():
+ """
+ Temporary endpoint to verify routing and JSON response.
+ Expects ?message=... and echoes it back.
+ Replace with per-course LLM call once wiring is confirmed.
+ """
+ msg = request.vars.message or ''
+ return response.json(dict(ok=True, echo=msg))
\ No newline at end of file
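
To exercise this temporary endpoint, a minimal smoke test — host and port are assumptions for a local compose setup; the route follows web2py's app/controller/function convention:

    # Hypothetical smoke test for the echo endpoint (patch 01).
    import requests

    r = requests.get(
        "http://localhost/runestone/peer/get_gpt_response",
        params={"message": "hello"},
    )
    print(r.json())  # expected shape: {"ok": true, "echo": "hello"}
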
From a32a0c04218ff4c4e98cc3bfcc558867379c320e Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Tue, 16 Sep 2025 11:50:55 -0400
Subject: [PATCH 02/21] add ping()
---
.../applications/runestone/controllers/peer.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index 88d4e2e80..2d3e3c207 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -858,4 +858,10 @@ def get_gpt_response():
Replace with per-course LLM call once wiring is confirmed.
"""
msg = request.vars.message or ''
- return response.json(dict(ok=True, echo=msg))
\ No newline at end of file
+ return response.json(dict(ok=True, echo=msg))
+
+def ping():
+ """
+ Public probe. Returns plain text.
+ """
+ return "test"
\ No newline at end of file
From ef1fc93ced8378d645354006ced247fc942e9d11 Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Tue, 16 Sep 2025 15:43:57 -0400
Subject: [PATCH 03/21] add dummy page for WoZ chat
---
.../runestone/controllers/peer.py | 46 ++++++++++++++++---
.../runestone/static/js/llmtest.js | 46 +++++++++++++++++++
.../runestone/views/peer/llm_test.html | 14 ++++++
docker-compose.override.yml | 6 +++
4 files changed, 106 insertions(+), 6 deletions(-)
create mode 100644 bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
create mode 100644 bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
create mode 100644 docker-compose.override.yml
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index 2d3e3c207..d9d3cf5a0 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -853,15 +853,49 @@ def send_lti_scores():
def get_gpt_response():
"""
- Temporary endpoint to verify routing and JSON response.
- Expects ?message=... and echoes it back.
- Replace with per-course LLM call once wiring is confirmed.
+ Proto endpoint for PI chat.
+ Accepts:
+ - GET ?message=... (quick test)
+ - POST JSON { "messages": [ { "role": "user"/"assistant"/"system", "content": "..." }, ... ] }
+
+ Returns JSON { ok: bool, reply: str, tokens_used: int, echo?: str }
"""
- msg = request.vars.message or ''
- return response.json(dict(ok=True, echo=msg))
+ if request.env.request_method == "GET":
+ msg = request.vars.message or ""
+ return response.json(dict(ok=True, echo=msg, reply="(echo mode) " + msg, tokens_used=0))
+
+ try:
+ data = request.body.read().decode("utf-8") if hasattr(request, "body") else request.post_vars.get("payload")
+ except Exception:
+ data = None
+
+ import json
+ try:
+ payload = json.loads(data or "{}")
+ except Exception:
+ payload = {}
+
+ messages = payload.get("messages", [])
+ if not isinstance(messages, list) or not messages:
+ return response.json(dict(ok=False, error="messages[] required"))
+
+ user_last = ""
+ for m in reversed(messages):
+ if m.get("role") == "user":
+ user_last = m.get("content", "")
+ break
+
+ reply = f"Okay, here's how I'd think about it: {user_last}"
+ return response.json(dict(ok=True, reply=reply, tokens_used=0))
def ping():
"""
Public probe. Returns plain text.
"""
- return "test"
\ No newline at end of file
+ return "test"
+
+def llm_test():
+ """
+ Simple page to test LLM endpoint with JS.
+ """
+ return dict()
\ No newline at end of file
diff --git a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
new file mode 100644
index 000000000..f023dfee2
--- /dev/null
+++ b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
@@ -0,0 +1,46 @@
+const chatDiv = document.getElementById("chat");
+const inputEl = document.getElementById("msg");
+
+const messages = [
+ { role: "system", content: "You are a helpful CS2 student who explains step-by-step without giving full code." }
+];
+
+function render() {
+ chatDiv.innerHTML = "";
+ for (const m of messages) {
+ const line = document.createElement("div");
+ line.textContent = `${m.role}> ${m.content}`;
+ chatDiv.appendChild(line);
+ }
+ chatDiv.scrollTop = chatDiv.scrollHeight;
+}
+
+function sendMessage() {
+ const msg = inputEl.value.trim();
+ if (!msg) return;
+
+ messages.push({ role: "user", content: msg });
+ inputEl.value = "";
+ render();
+
+ fetch("/runestone/peer/get_gpt_response", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ messages })
+ })
+ .then(res => {
+ if (!res.ok) throw new Error(`HTTP ${res.status}`);
+ return res.json();
+ })
+ .then(data => {
+ if (!data.ok) throw new Error(data.error || "unknown error");
+ messages.push({ role: "assistant", content: data.reply });
+ render();
+ })
+ .catch(err => {
+ messages.push({ role: "assistant", content: `Error: ${err}` });
+ render();
+ });
+}
+
+render();
\ No newline at end of file
diff --git a/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html b/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
new file mode 100644
index 000000000..d0c61ca7e
--- /dev/null
+++ b/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
@@ -0,0 +1,14 @@
+{{extend 'layout.html'}}
+
+<h1>LLM Test Page</h1>
+
+<div id="chat"></div>
+
+<input id="msg" type="text" placeholder="Type a message" />
+<button onclick="sendMessage()">Send</button>
+
+<script src="/runestone/static/js/llmtest.js"></script>
\ No newline at end of file
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
new file mode 100644
index 000000000..66582f945
--- /dev/null
+++ b/docker-compose.override.yml
@@ -0,0 +1,6 @@
+services:
+ runestone:
+ volumes:
+ - ./bases/rsptx/web2py_server/applications/runestone/controllers:/usr/local/lib/python3.10/site-packages/rsptx/web2py_server/applications/runestone/controllers
+ - ./bases/rsptx/web2py_server/applications/runestone/views:/usr/local/lib/python3.10/site-packages/rsptx/web2py_server/applications/runestone/views
+ - ./bases/rsptx/web2py_server/applications/runestone/static/js:/usr/local/lib/python3.10/site-packages/rsptx/web2py_server/applications/runestone/static/js
\ No newline at end of file
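
With patch 03 applied, the POST path can be exercised the same way — a sketch assuming the same local server; the messages[] shape mirrors what llmtest.js sends, and no LLM is involved yet:

    # Hypothetical POST against the stub endpoint from patch 03.
    import requests

    payload = {
        "messages": [
            {"role": "system", "content": "You are a helpful CS2 student."},
            {"role": "user", "content": "What does this loop do?"},
        ]
    }
    r = requests.post(
        "http://localhost/runestone/peer/get_gpt_response",
        json=payload,
        timeout=10,
    )
    print(r.json())  # {"ok": true, "reply": "Okay, here's how I'd think about it: ..."}
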
From 64658efa5adf67c251135f1234691cece72a9ed4 Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Wed, 17 Sep 2025 16:30:47 -0400
Subject: [PATCH 04/21] add initial llm functionality
---
.../runestone/controllers/peer.py | 70 ++++++++++++++-----
.../runestone/static/js/llmtest.js | 4 +-
docker-compose.override.yml | 2 +
3 files changed, 56 insertions(+), 20 deletions(-)
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index d9d3cf5a0..0b617683c 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -851,27 +851,57 @@ def send_lti_scores():
return json.dumps("success")
+
+
+import os, json
+import requests
+
+def _get_openai_key():
+ return os.environ.get("OPENAI_API_KEY", "").strip()
+
+def _call_openai(messages):
+ """
+ Minimal HTTP call. No SDK needed.
+ messages: list of {role, content}
+ returns reply string
+ """
+ api_key = _get_openai_key()
+ if not api_key:
+ return None
+
+ url = "https://api.openai.com/v1/chat/completions"
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ }
+ payload = {
+ "model": "gpt-4o-mini",
+ "messages": messages,
+ "temperature": 0.4,
+ "max_tokens": 300,
+ }
+ resp = requests.post(url, headers=headers, data=json.dumps(payload), timeout=30)
+ resp.raise_for_status()
+ data = resp.json()
+ return data["choices"][0]["message"]["content"].strip()
+
def get_gpt_response():
"""
- Proto endpoint for PI chat.
- Accepts:
- - GET ?message=... (quick test)
- - POST JSON { "messages": [ { "role": "user"/"assistant"/"system", "content": "..." }, ... ] }
- Returns JSON { ok: bool, reply: str, tokens_used: int, echo?: str }
+ GET ?message=... -> echo mode
+ POST JSON {"messages":[...]} -> calls OpenAI if OPENAI_API_KEY is set, else stub
"""
if request.env.request_method == "GET":
msg = request.vars.message or ""
- return response.json(dict(ok=True, echo=msg, reply="(echo mode) " + msg, tokens_used=0))
+ return response.json(dict(ok=True, echo=msg, reply="(echo) " + msg, tokens_used=0))
try:
- data = request.body.read().decode("utf-8") if hasattr(request, "body") else request.post_vars.get("payload")
+ raw = request.body.read().decode("utf-8")
except Exception:
- data = None
+ raw = "{}"
- import json
try:
- payload = json.loads(data or "{}")
+ payload = json.loads(raw or "{}")
except Exception:
payload = {}
@@ -879,15 +909,17 @@ def get_gpt_response():
if not isinstance(messages, list) or not messages:
return response.json(dict(ok=False, error="messages[] required"))
- user_last = ""
- for m in reversed(messages):
- if m.get("role") == "user":
- user_last = m.get("content", "")
- break
-
- reply = f"Okay, here's how I'd think about it: {user_last}"
- return response.json(dict(ok=True, reply=reply, tokens_used=0))
-
+ try:
+ reply = _call_openai(messages)
+ if reply is None:
+ user_last = next((m.get("content","") for m in reversed(messages) if m.get("role")=="user"), "")
+ reply = f"(stub) Here is how to think about it: {user_last}"
+ return response.json(dict(ok=True, reply=reply, tokens_used=0))
+ except requests.HTTPError as e:
+ return response.json(dict(ok=False, error=f"HTTP {e.response.status_code}: {e.response.text[:200]}"))
+ except Exception as e:
+ return response.json(dict(ok=False, error=str(e)[:200]))
+
def ping():
"""
Public probe. Returns plain text.
diff --git a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
index f023dfee2..fb722b00e 100644
--- a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
+++ b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
@@ -1,13 +1,15 @@
const chatDiv = document.getElementById("chat");
const inputEl = document.getElementById("msg");
+const SHOW_SYSTEM = /[?&]debug=1/.test(location.search);
const messages = [
- { role: "system", content: "You are a helpful CS2 student who explains step-by-step without giving full code." }
+ { role: "system", content: "Peer instruction is defined as: an opportunity for peers to discuss ideas or to share answers to questions in an in-class environment, where they also have opportunities for further interactions with their instructor. You are a helpful CS2 student whos job is to faciliate peer instruction by explaining step-by-step without giving full code. Your interactions should be conversational, you do not need to include a full answer in your repsonse, just respond as a peer." }
];
function render() {
chatDiv.innerHTML = "";
for (const m of messages) {
+ if (m.role === "system" && !SHOW_SYSTEM) continue; // hide system prompt by default
const line = document.createElement("div");
line.textContent = `${m.role}> ${m.content}`;
chatDiv.appendChild(line);
diff --git a/docker-compose.override.yml b/docker-compose.override.yml
index 66582f945..8dd210d22 100644
--- a/docker-compose.override.yml
+++ b/docker-compose.override.yml
@@ -1,5 +1,7 @@
services:
runestone:
+ env_file:
+ - .env
volumes:
- ./bases/rsptx/web2py_server/applications/runestone/controllers:/usr/local/lib/python3.10/site-packages/rsptx/web2py_server/applications/runestone/controllers
- ./bases/rsptx/web2py_server/applications/runestone/views:/usr/local/lib/python3.10/site-packages/rsptx/web2py_server/applications/runestone/views
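
The env_file entry means the runestone container loads variables from a local .env file at the repo root. A minimal sketch — only OPENAI_API_KEY is actually read by _get_openai_key(); the key value is a placeholder:

    # .env (repo root; keep out of version control)
    OPENAI_API_KEY=sk-...

If the variable is unset or empty, _call_openai() returns None and get_gpt_response() falls back to the stub reply.
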
From 9862df219781893fd34d585a43a58b4f1d5df6fe Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Fri, 26 Sep 2025 14:02:48 -0400
Subject: [PATCH 05/21] give llm question context
---
.../runestone/controllers/peer.py | 50 ++++++-
.../runestone/static/js/llmtest.js | 135 +++++++++++++++++-
2 files changed, 181 insertions(+), 4 deletions(-)
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index 0b617683c..ce733d5da 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -853,9 +853,21 @@ def send_lti_scores():
+
import os, json
import requests
+# --- helper to sanitize long, messy text we include in prompts ---
+def clean_text(s):
+ try:
+ s = s or ""
+ s = str(s)
+ # collapse whitespace per-line, avoid huge payloads
+ s = "\n".join(line.strip() for line in s.splitlines())
+ return s[:2000]
+ except Exception:
+ return ""
+
def _get_openai_key():
return os.environ.get("OPENAI_API_KEY", "").strip()
@@ -906,11 +918,47 @@ def get_gpt_response():
payload = {}
messages = payload.get("messages", [])
+ context = payload.get("context") or {}
+
+ messages_with_context = messages
+ try:
+ if isinstance(context, dict) and context.get("ok") is True:
+ course = context.get("course")
+ basecourse = context.get("basecourse")
+ username = context.get("username")
+ comp_id = context.get("id")
+ comp_type = context.get("type")
+ prompt_txt = clean_text(context.get("prompt"))
+ code_txt = clean_text(context.get("code"))
+ choices = context.get("choices") or []
+ selected = context.get("selected")
+ out_txt = clean_text(context.get("output"))
+ err_txt = clean_text(context.get("error"))
+ coach_txt = clean_text(context.get("coach"))
+
+ sys_ctx = (
+ "You are a friendly peer in a Runestone ebook helping with the CURRENT exercise. "
+ "Be concise, guide reasoning, and avoid giving full solutions. If the user asks 'what is this asking me to do', "
+ "summarize the task in plain language, then suggest first steps.\n\n"
+ f"Course: {course} | Basecourse: {basecourse} | User: {username}\n"
+ f"Component: {comp_type} | ID: {comp_id}\n"
+ f"Prompt:\n{prompt_txt}\n\n"
+ + (f"Starter/Current Code:\n{code_txt}\n\n" if code_txt else "")
+ + ("Choices:\n- " + "\n- ".join(map(str, choices)) + (f"\n\nSelected: {selected}" if selected else "") + "\n\n" if choices else "")
+ + (f"Last Run Output:\n{out_txt}\n\n" if out_txt else "")
+ + (f"Last Run Error:\n{err_txt}\n\n" if err_txt else "")
+ + (f"Coach/Guidance:\n{coach_txt}\n\n" if coach_txt else "")
+ + "Rules: Never reveal solutions verbatim. Encourage peer-instruction style hints."
+ )
+
+ messages_with_context = [{"role": "system", "content": sys_ctx}] + messages
+ except Exception:
+ messages_with_context = messages
if not isinstance(messages, list) or not messages:
return response.json(dict(ok=False, error="messages[] required"))
try:
- reply = _call_openai(messages)
+ reply = _call_openai(messages_with_context)
if reply is None:
user_last = next((m.get("content","") for m in reversed(messages) if m.get("role")=="user"), "")
reply = f"(stub) Here is how to think about it: {user_last}"
diff --git a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
index fb722b00e..3d5a33709 100644
--- a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
+++ b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
@@ -2,10 +2,130 @@ const chatDiv = document.getElementById("chat");
const inputEl = document.getElementById("msg");
const SHOW_SYSTEM = /[?&]debug=1/.test(location.search);
+function readSavedContext() {
+ try { return JSON.parse(localStorage.getItem('pi_context') || 'null'); } catch { return null; }
+}
+function saveContext(ctx) {
+ try { localStorage.setItem('pi_context', JSON.stringify(ctx)); } catch {}
+}
+function readContextFromURL() {
+ try {
+ const p = new URLSearchParams(location.search);
+ const v = p.get('ctx');
+ if (!v) return null;
+ const json = atob(decodeURIComponent(v));
+ return JSON.parse(json);
+ } catch {
+ return null;
+ }
+}
+
const messages = [
- { role: "system", content: "Peer instruction is defined as: an opportunity for peers to discuss ideas or to share answers to questions in an in-class environment, where they also have opportunities for further interactions with their instructor. You are a helpful CS2 student whos job is to faciliate peer instruction by explaining step-by-step without giving full code. Your interactions should be conversational, you do not need to include a full answer in your repsonse, just respond as a peer." }
+ { role: "system", content: "You will never give the answer, your job is to guide the student in the correct instruction. Focus on asking the student questions before giving any code. Peer instruction is defined as: an opportunity for peers to discuss ideas or to share answers to questions in an in-class environment, where they also have opportunities for further interactions with their instructor. You are a helpful CS2 student whos job is to faciliate peer instruction by explaining step-by-step without giving full code. Your interactions should be conversational, you do not need to include a full answer in the repsonse, just respond as a peer. You only engage with CS topics, if the user mentions something else bring it back to the topic." }
];
+
+
+function getCurrentComponentContext(explicitComp) {
+ const el = document.activeElement;
+ const comp = explicitComp || (el && el.closest && el.closest('[data-component]'));
+ if (!comp) return { ok:false, reason:'no focused component' };
+
+ const id = comp.id || null;
+ const type = comp.dataset.component || null;
+
+ const clean = s => (s || '').replace(/\s+\n/g, '\n').replace(/\s{2,}/g, ' ').trim();
+ const textOf = sel => clean(comp.querySelector(sel)?.innerText || '');
+ const valueOf = sel => comp.querySelector(sel)?.value || '';
+
+ const ctx = { ok:true, id, type, prompt:'', code:'', choices:[], selected:null, output:'', error:'', coach:'' };
+
+ if (type === 'activecode') {
+ const cm = comp.querySelector('.CodeMirror')?.CodeMirror;
+ ctx.code = cm ? cm.getValue() : valueOf('textarea');
+
+ ctx.prompt =
+ textOf('.ac_caption, .ac_statement, .ac_question, .runestone_directive') ||
+ (comp.previousElementSibling ? clean(comp.previousElementSibling.innerText) : '') ||
+ clean(comp.closest('.section')?.querySelector('h1,h2,h3,h4')?.innerText || '');
+
+ const outEl = comp.querySelector('.ac_output, .ac_output pre, .stdout, .output, pre.out, .runestone_output');
+ const errEl = comp.querySelector('.ac_error, .alert-danger, .traceback, .runestone_error');
+
+ ctx.output = clean(outEl?.innerText || '');
+ ctx.error = clean(errEl?.innerText || '');
+
+ let coachText = '';
+ const coachEl = comp.querySelector('.codecoach, .ac_codecoach, .coach, .guidance, .helptext');
+ if (coachEl) {
+ coachText = clean(coachEl.innerText);
+ } else {
+ let sib = comp.nextElementSibling;
+ let steps = 0;
+ while (sib && steps < 6 && coachText === '') {
+ const m = sib.querySelector?.('.codecoach, .ac_codecoach, .coach, .guidance, .helptext, .alert, .panel');
+ if (m) coachText = clean(m.innerText);
+ sib = sib.nextElementSibling;
+ steps += 1;
+ }
+ }
+ ctx.coach = coachText;
+ }
+ else if (type === 'mchoice') {
+ ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
+ ctx.choices = Array.from(comp.querySelectorAll('li, .choice, .option')).map(li => clean(li.innerText));
+ const checked = comp.querySelector('input[type=radio]:checked');
+ if (checked) {
+ const li = checked.closest('li, .choice, .option');
+ ctx.selected = li ? clean(li.innerText) : '(selected)';
+ }
+ }
+ else if (type === 'shortanswer') {
+ ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
+ ctx.code = valueOf('textarea, input[type=text]');
+ }
+ else if (type === 'parsons') {
+ ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
+ ctx.code = Array.from(comp.querySelectorAll('.parsons-source .line')).map(n => clean(n.innerText)).join('\n');
+ } else {
+ ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
+ }
+
+ ctx.course = window.eBookConfig?.course || null;
+ ctx.basecourse = window.eBookConfig?.basecourse || null;
+ ctx.username = window.eBookConfig?.username || null;
+
+ saveContext(ctx);
+
+ return ctx;
+}
+
+function openPeerForComponent(comp) {
+ const ctx = getCurrentComponentContext(comp);
+ if (!ctx || ctx.ok === false) { alert('Click inside a question first.'); return; }
+ saveContext(ctx);
+ const json = JSON.stringify(ctx);
+ const b64 = btoa(unescape(encodeURIComponent(json)));
+ const url = `/runestone/peer/llm_test?ctx=${encodeURIComponent(b64)}`;
+ window.open(url, '_blank');
+}
+
+function attachPeerButtons() {
+ const selector = '[data-component="activecode"], [data-component="mchoice"], [data-component="shortanswer"], [data-component="parsons"]';
+ const comps = Array.from(document.querySelectorAll(selector));
+ for (const comp of comps) {
+ if (comp.dataset.piBtnAttached === '1') continue; // already added
+ const btn = document.createElement('button');
+ btn.type = 'button';
+ btn.className = 'pi-ask-btn';
+ btn.textContent = 'Ask a peer about this';
+ btn.style.margin = '8px 0';
+ btn.onclick = () => openPeerForComponent(comp);
+ comp.dataset.piBtnAttached = '1';
+ comp.appendChild(btn);
+ }
+}
+
function render() {
chatDiv.innerHTML = "";
for (const m of messages) {
@@ -25,10 +145,15 @@ function sendMessage() {
inputEl.value = "";
render();
+ let ctx = getCurrentComponentContext();
+ if (!ctx || ctx.ok === false) {
+ ctx = readContextFromURL() || readSavedContext() || { ok:false, reason:'no context available' };
+ }
+
fetch("/runestone/peer/get_gpt_response", {
method: "POST",
headers: { "Content-Type": "application/json" },
- body: JSON.stringify({ messages })
+ body: JSON.stringify({ messages, context: ctx })
})
.then(res => {
if (!res.ok) throw new Error(`HTTP ${res.status}`);
@@ -45,4 +170,8 @@ function sendMessage() {
});
}
-render();
\ No newline at end of file
+if (chatDiv && inputEl) {
+ render();
+} else {
+ attachPeerButtons();
+}
\ No newline at end of file
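
The context object the client now sends alongside messages looks roughly like this for an activecode component — field names come from getCurrentComponentContext(); the values here are invented:

    {
      "ok": true,
      "id": "ac_example_1",
      "type": "activecode",
      "prompt": "Write a function that sums a list.",
      "code": "def total(xs):\n    ...",
      "choices": [],
      "selected": null,
      "output": "",
      "error": "TypeError: unsupported operand type(s)",
      "coach": "",
      "course": "cs2-f25",
      "basecourse": "cs2",
      "username": "student1"
    }

On the server, clean_text() caps prompt, code, output, error, and coach at 2000 characters each before they are folded into the system message.
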
From 4149a7dc03a15b1a21be90d4f94b0585f921c6e1 Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Sun, 11 Jan 2026 10:35:19 -0500
Subject: [PATCH 06/21] add llm into peer screen; llm can see questions and
 somewhat acts like a student; cannot yet move to second question
---
.../runestone/controllers/peer.py | 415 ++++++++++++++----
.../runestone/static/js/llmtest.js | 177 --------
.../runestone/views/peer/llm_test.html | 14 -
.../runestone/views/peer/peer_async.html | 239 +++++++++-
4 files changed, 564 insertions(+), 281 deletions(-)
delete mode 100644 bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
delete mode 100644 bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index ce733d5da..cfd8452af 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -15,6 +15,9 @@
import logging
import os
import random
+import re
+import datetime
+
# Third Party
# -----------
@@ -25,8 +28,12 @@
from rsptx.db.crud import fetch_lti_version
from rs_grading import _try_to_send_lti_grade
-logger = logging.getLogger(settings.logger)
-logger.setLevel(settings.log_level)
+try:
+ logger = logging.getLogger(settings.logger)
+ logger.setLevel(settings.log_level)
+except Exception:
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.INFO)
peerjs = os.path.join("applications", request.application, "static", "js", "peer.js")
try:
@@ -37,6 +44,7 @@
request.peer_mtime = str(mtime)
+
@auth.requires(
lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
requires_login=True,
@@ -78,6 +86,7 @@ def dashboard():
next = False
current_question, done, idx = _get_current_question(assignment_id, next)
+ has_vote1 = _has_vote1(current_question.name, auth.user.username)
all_questions = _get_assignment_questions(assignment_id)
num_questions = len(all_questions)
current_qnum = idx + 1
@@ -133,6 +142,7 @@ def dashboard():
is_instructor=True,
is_last=done,
lti=is_lti,
+ has_vote1=has_vote1,
**course_attrs,
)
@@ -168,7 +178,7 @@ def _get_current_question(assignment_id, get_next):
db(db.assignments.id == assignment_id).update(current_index=idx)
else:
idx = assignment.current_index
- db.commit() # commit changes to current question to prevent race condition.
+ db.commit()
question, done = _get_numbered_question(assignment_id, idx)
return question, done, idx
@@ -177,9 +187,15 @@ def _get_numbered_question(assignment_id, qnum):
all_questions = _get_assignment_questions(assignment_id)
total_questions = len(all_questions)
- done = "false"
- if qnum > total_questions - 1:
+ if total_questions == 0:
+ return None, "true"
+
+ if qnum < 0:
+ qnum = 0
+ elif qnum > total_questions - 1:
qnum = total_questions - 1
+
+ done = "false"
if qnum == total_questions - 1:
done = "true"
@@ -699,10 +715,39 @@ def log_peer_rating():
# --------------------------------------------
+# @auth.requires_login()
+# def peer_async():
+# if "access_token" not in request.cookies:
+# logger.error(f"Missing Access Token: {auth.user.username} adding one Now")
+# create_rs_token()
+
+# assignment_id = request.vars.assignment_id
+
+# qnum = 0
+# if request.vars.question_num:
+# qnum = int(request.vars.question_num)
+
+# current_question, all_done = _get_numbered_question(assignment_id, qnum)
+# course = db(db.courses.course_name == auth.user.course_name).select().first()
+# course_attrs = getCourseAttributesDict(course.id, course.base_course)
+# if "latex_macros" not in course_attrs:
+# course_attrs["latex_macros"] = ""
+
+# return dict(
+# course_id=auth.user.course_name,
+# course=get_course_row(db.courses.ALL),
+# current_question=current_question,
+# assignment_id=assignment_id,
+# nextQnum=qnum + 1,
+# all_done=all_done,
+# **course_attrs,
+# )
@auth.requires_login()
def peer_async():
- if "access_token" not in request.cookies:
- return redirect(URL("default", "accessIssue"))
+ if "access_token" not in request.cookies or not request.cookies.get("access_token"):
+ logger.error(f"Missing Access Token: {auth.user.username} adding one Now")
+ create_rs_token()
+ return redirect(URL("peer", "peer_async", vars=request.vars))
assignment_id = request.vars.assignment_id
@@ -711,8 +756,19 @@ def peer_async():
qnum = int(request.vars.question_num)
current_question, all_done = _get_numbered_question(assignment_id, qnum)
+
+ has_vote1 = False
+ has_reflection = False
+
+ if current_question:
+ div_id = current_question.name
+ sid = auth.user.username
+ has_vote1 = _has_vote1(div_id, sid)
+ has_reflection = _has_reflection(div_id, sid)
+
course = db(db.courses.course_name == auth.user.course_name).select().first()
course_attrs = getCourseAttributesDict(course.id, course.base_course)
+
if "latex_macros" not in course_attrs:
course_attrs["latex_macros"] = ""
@@ -723,82 +779,259 @@ def peer_async():
assignment_id=assignment_id,
nextQnum=qnum + 1,
all_done=all_done,
+ has_vote1=has_vote1,
+ has_reflection=has_reflection,
+ llm_reply=None,
**course_attrs,
)
+# def peer_async():
+# if "access_token" not in request.cookies or not request.cookies.get("access_token"):
+# logger.error(f"Missing Access Token: {auth.user.username} adding one Now")
+# create_rs_token()
+# return redirect(URL("peer", "peer_async", vars=request.vars))
+
+# assignment_id = request.vars.assignment_id
+
+# qnum = 0
+# if request.vars.question_num:
+# qnum = int(request.vars.question_num)
+
+# current_question, all_done = _get_numbered_question(assignment_id, qnum)
+
+# has_vote1 = False
+# has_reflection = False
+
+# if current_question:
+# div_id = current_question.name
+# sid = auth.user.username
+# has_vote1 = _has_vote1(div_id, sid)
+# has_reflection = _has_reflection(div_id, sid)
+
+# llm_reply = None
+# if has_vote1 and has_reflection:
+# try:
+# messages = [
+# {
+# "role": "user",
+# "content": (
+# "I answered this question and wrote a justification. "
+# "How might another student reason differently about this?"
+# )
+# }
+# ]
+# llm_reply = _call_openai(messages)
+# except Exception as e:
+# llm_reply = f"(LLM error: {e})"
+
+# course = db(db.courses.course_name == auth.user.course_name).select().first()
+# course_attrs = getCourseAttributesDict(course.id, course.base_course)
+
+# if "latex_macros" not in course_attrs:
+# course_attrs["latex_macros"] = ""
+
+# return dict(
+# course_id=auth.user.course_name,
+# course=get_course_row(db.courses.ALL),
+# current_question=current_question,
+# assignment_id=assignment_id,
+# nextQnum=qnum + 1,
+# all_done=all_done,
+# has_vote1=has_vote1,
+# has_reflection=has_reflection,
+# llm_reply=llm_reply,
+# **course_attrs,
+# )
+# @auth.requires_login()
+# def get_async_explainer():
+
+# if request.vars.get("disable_llm_peer") == "1":
+# return json.dumps({
+# "mess": "",
+# "user": "",
+# "answer": "",
+# "responses": {}
+# })
+
+# course_name = request.vars.course
+# sid = auth.user.username
+# div_id = request.vars.div_id
+
+# this_answer = _get_user_answer(div_id, sid)
+
+
+# # Messages are in useinfo with an event of "sendmessage" and a div_id corresponding to the div_id of the question.
+# # The act field is to:user:message
+# # Ratings of messages are in useinfo with an event of "ratepeer"
+# # the act field is rateduser:rating (excellent, good, poor)
+# ratings = []
+# for rate in ["excellent", "good"]:
+# ratings = db(
+# (db.useinfo.event == "ratepeer")
+# & (db.useinfo.act.like(f"%{rate}"))
+# & (db.useinfo.div_id == div_id)
+# & (db.useinfo.course_id == course_name)
+# ).select()
+# if len(ratings) > 0:
+# break
+
+# if len(ratings) > 0:
+# done = False
+# tries = 0
+# while not done and tries < 10:
+# idx = random.randrange(len(ratings))
+# act = ratings[idx].act
+# user = act.split(":")[0]
+# peer_answer = _get_user_answer(div_id, user)
+# if peer_answer != this_answer:
+# done = True
+# else:
+# tries += 1
+# mess, participants = _get_user_messages(user, div_id, course_name)
+# # This is the easy solution, but may result in a one-sided conversation.
+# if user in participants:
+# participants.remove(user)
+# else:
+# messages = db(
+# (db.useinfo.event == "sendmessage")
+# & (db.useinfo.div_id == div_id)
+# & (db.useinfo.course_id == course_name)
+# ).select(db.useinfo.sid)
+# if len(messages) > 0:
+# senders = set((row.sid for row in messages))
+# done = False
+# tries = 0
+# while not done and tries < 10:
+# user = random.choice(list(senders))
+# peer_answer = _get_user_answer(div_id, user)
+# if peer_answer != this_answer:
+# done = True
+
+# else:
+# tries += 1
+# mess, participants = _get_user_messages(user, div_id, course_name)
+# else:
+# mess = "Sorry there were no good explanations for you."
+# user = "nobody"
+# participants = set()
+
+# responses = {}
+# for p in participants:
+# responses[p] = _get_user_answer(div_id, p)
+# logger.debug(f"Get message for {div_id}")
+# return json.dumps(
+# {"mess": mess, "user": user, "answer": peer_answer, "responses": responses}
+# )
+
@auth.requires_login()
def get_async_explainer():
- course_name = request.vars.course
- sid = auth.user.username
- div_id = request.vars.div_id
+ return json.dumps({
+ "mess": "",
+ "user": "",
+ "answer": "",
+ "responses": {}
+ })
+
+
+def _get_mcq_context(div_id):
+ q = db(db.questions.name == div_id).select().first()
+ if not q:
+ logger.error(f"_get_mcq_context: no question row for {div_id}")
+ return "", "", []
+
+ question = (q.question or "").strip()
+
+ code = ""
+ if hasattr(q, "code") and q.code:
+ code = q.code.strip()
+ choices = []
+ try:
+ if hasattr(q, "answers") and q.answers:
+ opts = json.loads(q.answers)
+ for i, opt in enumerate(opts):
+ choices.append(f"{chr(65+i)}. {opt.strip()}")
+ except Exception as e:
+ logger.warning(f"Could not parse choices for {div_id}: {e}")
+ return question, code, choices
+
+@auth.requires_login()
+def get_async_llm_reflection():
+ try:
+ data = json.loads(request.body.read().decode("utf-8"))
+ except Exception:
+ return response.json(dict(ok=False, error="invalid json"))
+
+ div_id = (data.get("div_id") or "").strip()
+ selected = (data.get("selected_answer") or "").strip()
+ messages = data.get("messages")
+
+ if not div_id:
+ return response.json(dict(ok=False, error="missing div_id"))
+
+ question, code, choices = _get_mcq_context(div_id)
+
+ sys_content = (
+ "only speak in lower case.\n"
+ "you are a student talking to another student during peer instruction.\n"
+ "you are both looking at the SAME multiple choice question.\n"
+ "you remember the question, code, and answer choices.\n"
+ "do not say which answer is correct.\n"
+ "do not teach.\n"
+ "focus on reasoning and interpretation.\n\n"
+ )
+
+ if question:
+ sys_content += f"question:\n{question}\n\n"
+
+ if code:
+ sys_content += f"code:\n{code}\n\n"
+
+ if choices:
+ sys_content += "answer choices:\n" + "\n".join(choices) + "\n\n"
+
+ if selected:
+ sys_content += f"the other student chose: {selected}\n\n"
+
+ system_msg = {"role": "system", "content": sys_content}
+
+ if not messages:
+ reflection = (data.get("reflection") or "").strip()
+ if not reflection:
+ return response.json(dict(ok=False, error="missing reflection"))
+
+ messages = [
+ system_msg,
+ {
+ "role": "user",
+ "content": (
+ f"i chose answer {selected}. "
+ f"my explanation was:\n\n{reflection}"
+ ),
+ },
+ ]
- this_answer = _get_user_answer(div_id, sid)
-
- # Messages are in useinfo with an event of "sendmessage" and a div_id corresponding to the div_id of the question.
- # The act field is to:user:message
- # Ratings of messages are in useinfo with an event of "ratepeer"
- # the act field is rateduser:rating (excellent, good, poor)
- ratings = []
- for rate in ["excellent", "good"]:
- ratings = db(
- (db.useinfo.event == "ratepeer")
- & (db.useinfo.act.like(f"%{rate}"))
- & (db.useinfo.div_id == div_id)
- & (db.useinfo.course_id == course_name)
- ).select()
- if len(ratings) > 0:
- break
-
- if len(ratings) > 0:
- done = False
- tries = 0
- while not done and tries < 10:
- idx = random.randrange(len(ratings))
- act = ratings[idx].act
- user = act.split(":")[0]
- peer_answer = _get_user_answer(div_id, user)
- if peer_answer != this_answer:
- done = True
- else:
- tries += 1
- mess, participants = _get_user_messages(user, div_id, course_name)
- # This is the easy solution, but may result in a one-sided conversation.
- if user in participants:
- participants.remove(user)
else:
- messages = db(
- (db.useinfo.event == "sendmessage")
- & (db.useinfo.div_id == div_id)
- & (db.useinfo.course_id == course_name)
- ).select(db.useinfo.sid)
- if len(messages) > 0:
- senders = set((row.sid for row in messages))
- done = False
- tries = 0
- while not done and tries < 10:
- user = random.choice(list(senders))
- peer_answer = _get_user_answer(div_id, user)
- if peer_answer != this_answer:
- done = True
-
- else:
- tries += 1
- mess, participants = _get_user_messages(user, div_id, course_name)
+ if not isinstance(messages, list):
+ return response.json(dict(ok=False, error="messages must be a list"))
+
+ if len(messages) == 0 or messages[0].get("role") != "system":
+ messages = [system_msg] + messages
else:
- mess = "Sorry there were no good explanations for you."
- user = "nobody"
- participants = set()
-
- responses = {}
- for p in participants:
- responses[p] = _get_user_answer(div_id, p)
- logger.debug(f"Get message for {div_id}")
- return json.dumps(
- {"mess": mess, "user": user, "answer": peer_answer, "responses": responses}
- )
+ messages[0] = system_msg
+ try:
+ reply = _call_openai(messages)
+ if not reply:
+ return response.json(
+ dict(ok=False, error="llm returned empty reply (missing api key?)")
+ )
+ return response.json(dict(ok=True, reply=reply))
+ except Exception as e:
+ logger.exception("LLM reflection failed")
+ return response.json(dict(ok=False, error=str(e)))
+
def _get_user_answer(div_id, s):
ans = (
db(
@@ -810,12 +1043,35 @@ def _get_user_answer(div_id, s):
.select(orderby=~db.useinfo.id)
.first()
)
- # act is answer:0[,x]+:correct:voteN
if ans:
return ans.act.split(":")[1]
else:
return ""
+def _has_reflection(div_id, sid):
+ row = (
+ db(
+ (db.useinfo.event == "reflection")
+ & (db.useinfo.sid == sid)
+ & (db.useinfo.div_id == div_id)
+ )
+ .select(orderby=~db.useinfo.id)
+ .first()
+ )
+ return row is not None
+def _has_vote1(div_id, sid):
+ row = (
+ db(
+ (db.useinfo.event == "mChoice")
+ & (db.useinfo.sid == sid)
+ & (db.useinfo.div_id == div_id)
+ & (db.useinfo.act.like("%vote1"))
+ )
+ .select(orderby=~db.useinfo.id)
+ .first()
+ )
+ return row is not None
+
def _get_user_messages(user, div_id, course_name):
# this gets both sides of the conversation -- thus the | in the query below.
@@ -967,15 +1223,4 @@ def get_gpt_response():
return response.json(dict(ok=False, error=f"HTTP {e.response.status_code}: {e.response.text[:200]}"))
except Exception as e:
return response.json(dict(ok=False, error=str(e)[:200]))
-
-def ping():
- """
- Public probe. Returns plain text.
- """
- return "test"
-
-def llm_test():
- """
- Simple page to test LLM endpoint with JS.
- """
- return dict()
\ No newline at end of file
+
\ No newline at end of file
diff --git a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js b/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
deleted file mode 100644
index 3d5a33709..000000000
--- a/bases/rsptx/web2py_server/applications/runestone/static/js/llmtest.js
+++ /dev/null
@@ -1,177 +0,0 @@
-const chatDiv = document.getElementById("chat");
-const inputEl = document.getElementById("msg");
-const SHOW_SYSTEM = /[?&]debug=1/.test(location.search);
-
-function readSavedContext() {
- try { return JSON.parse(localStorage.getItem('pi_context') || 'null'); } catch { return null; }
-}
-function saveContext(ctx) {
- try { localStorage.setItem('pi_context', JSON.stringify(ctx)); } catch {}
-}
-function readContextFromURL() {
- try {
- const p = new URLSearchParams(location.search);
- const v = p.get('ctx');
- if (!v) return null;
- const json = atob(decodeURIComponent(v));
- return JSON.parse(json);
- } catch {
- return null;
- }
-}
-
-const messages = [
- { role: "system", content: "You will never give the answer, your job is to guide the student in the correct instruction. Focus on asking the student questions before giving any code. Peer instruction is defined as: an opportunity for peers to discuss ideas or to share answers to questions in an in-class environment, where they also have opportunities for further interactions with their instructor. You are a helpful CS2 student whos job is to faciliate peer instruction by explaining step-by-step without giving full code. Your interactions should be conversational, you do not need to include a full answer in the repsonse, just respond as a peer. You only engage with CS topics, if the user mentions something else bring it back to the topic." }
-];
-
-
-
-function getCurrentComponentContext(explicitComp) {
- const el = document.activeElement;
- const comp = explicitComp || (el && el.closest && el.closest('[data-component]'));
- if (!comp) return { ok:false, reason:'no focused component' };
-
- const id = comp.id || null;
- const type = comp.dataset.component || null;
-
- const clean = s => (s || '').replace(/\s+\n/g, '\n').replace(/\s{2,}/g, ' ').trim();
- const textOf = sel => clean(comp.querySelector(sel)?.innerText || '');
- const valueOf = sel => comp.querySelector(sel)?.value || '';
-
- const ctx = { ok:true, id, type, prompt:'', code:'', choices:[], selected:null, output:'', error:'', coach:'' };
-
- if (type === 'activecode') {
- const cm = comp.querySelector('.CodeMirror')?.CodeMirror;
- ctx.code = cm ? cm.getValue() : valueOf('textarea');
-
- ctx.prompt =
- textOf('.ac_caption, .ac_statement, .ac_question, .runestone_directive') ||
- (comp.previousElementSibling ? clean(comp.previousElementSibling.innerText) : '') ||
- clean(comp.closest('.section')?.querySelector('h1,h2,h3,h4')?.innerText || '');
-
- const outEl = comp.querySelector('.ac_output, .ac_output pre, .stdout, .output, pre.out, .runestone_output');
- const errEl = comp.querySelector('.ac_error, .alert-danger, .traceback, .runestone_error');
-
- ctx.output = clean(outEl?.innerText || '');
- ctx.error = clean(errEl?.innerText || '');
-
- let coachText = '';
- const coachEl = comp.querySelector('.codecoach, .ac_codecoach, .coach, .guidance, .helptext');
- if (coachEl) {
- coachText = clean(coachEl.innerText);
- } else {
- let sib = comp.nextElementSibling;
- let steps = 0;
- while (sib && steps < 6 && coachText === '') {
- const m = sib.querySelector?.('.codecoach, .ac_codecoach, .coach, .guidance, .helptext, .alert, .panel');
- if (m) coachText = clean(m.innerText);
- sib = sib.nextElementSibling;
- steps += 1;
- }
- }
- ctx.coach = coachText;
- }
- else if (type === 'mchoice') {
- ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
- ctx.choices = Array.from(comp.querySelectorAll('li, .choice, .option')).map(li => clean(li.innerText));
- const checked = comp.querySelector('input[type=radio]:checked');
- if (checked) {
- const li = checked.closest('li, .choice, .option');
- ctx.selected = li ? clean(li.innerText) : '(selected)';
- }
- }
- else if (type === 'shortanswer') {
- ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
- ctx.code = valueOf('textarea, input[type=text]');
- }
- else if (type === 'parsons') {
- ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
- ctx.code = Array.from(comp.querySelectorAll('.parsons-source .line')).map(n => clean(n.innerText)).join('\n');
- } else {
- ctx.prompt = textOf('.question, .caption, .runestone_caption, .runestone') || textOf(':scope');
- }
-
- ctx.course = window.eBookConfig?.course || null;
- ctx.basecourse = window.eBookConfig?.basecourse || null;
- ctx.username = window.eBookConfig?.username || null;
-
- saveContext(ctx);
-
- return ctx;
-}
-
-function openPeerForComponent(comp) {
- const ctx = getCurrentComponentContext(comp);
- if (!ctx || ctx.ok === false) { alert('Click inside a question first.'); return; }
- saveContext(ctx);
- const json = JSON.stringify(ctx);
- const b64 = btoa(unescape(encodeURIComponent(json)));
- const url = `/runestone/peer/llm_test?ctx=${encodeURIComponent(b64)}`;
- window.open(url, '_blank');
-}
-
-function attachPeerButtons() {
- const selector = '[data-component="activecode"], [data-component="mchoice"], [data-component="shortanswer"], [data-component="parsons"]';
- const comps = Array.from(document.querySelectorAll(selector));
- for (const comp of comps) {
- if (comp.dataset.piBtnAttached === '1') continue; // already added
- const btn = document.createElement('button');
- btn.type = 'button';
- btn.className = 'pi-ask-btn';
- btn.textContent = 'Ask a peer about this';
- btn.style.margin = '8px 0';
- btn.onclick = () => openPeerForComponent(comp);
- comp.dataset.piBtnAttached = '1';
- comp.appendChild(btn);
- }
-}
-
-function render() {
- chatDiv.innerHTML = "";
- for (const m of messages) {
- if (m.role === "system" && !SHOW_SYSTEM) continue; // hide system prompt by default
- const line = document.createElement("div");
- line.textContent = `${m.role}> ${m.content}`;
- chatDiv.appendChild(line);
- }
- chatDiv.scrollTop = chatDiv.scrollHeight;
-}
-
-function sendMessage() {
- const msg = inputEl.value.trim();
- if (!msg) return;
-
- messages.push({ role: "user", content: msg });
- inputEl.value = "";
- render();
-
- let ctx = getCurrentComponentContext();
- if (!ctx || ctx.ok === false) {
- ctx = readContextFromURL() || readSavedContext() || { ok:false, reason:'no context available' };
- }
-
- fetch("/runestone/peer/get_gpt_response", {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({ messages, context: ctx })
- })
- .then(res => {
- if (!res.ok) throw new Error(`HTTP ${res.status}`);
- return res.json();
- })
- .then(data => {
- if (!data.ok) throw new Error(data.error || "unknown error");
- messages.push({ role: "assistant", content: data.reply });
- render();
- })
- .catch(err => {
- messages.push({ role: "assistant", content: `Error: ${err}` });
- render();
- });
-}
-
-if (chatDiv && inputEl) {
- render();
-} else {
- attachPeerButtons();
-}
\ No newline at end of file
diff --git a/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html b/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
deleted file mode 100644
index d0c61ca7e..000000000
--- a/bases/rsptx/web2py_server/applications/runestone/views/peer/llm_test.html
+++ /dev/null
@@ -1,14 +0,0 @@
-{{extend 'layout.html'}}
-
-<h1>LLM Test Page</h1>
-
-<div id="chat"></div>
-
-<input id="msg" type="text" placeholder="Type a message" />
-<button onclick="sendMessage()">Send</button>
-
-<script src="/runestone/static/js/llmtest.js"></script>
\ No newline at end of file
diff --git a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
index d482fcd97..ff0c7314d 100644
--- a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
+++ b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
@@ -1,3 +1,35 @@
+{{extend 'runestone.html'}}
+
+
+
+
+
+{{ if has_vote1: }}
+  <p>Vote 1 detected</p>
+{{ else: }}
+  <p>No vote yet</p>
+{{ pass }}
+
+
+
{{extend 'layout.html'}}
{{block moreincludes}}
{{include '_sphinx_static_files.html'}}
@@ -22,7 +54,7 @@ Peer Instruction Question (After Class)
Answer the question again. Even if you are not changing your answer from the first time.
-
+
{{ if all_done == "false": }}
@@ -40,16 +72,44 @@
Congratulations, you have completed this assignment!
__
-
-
-
A discussion for you to consider
+
+
+
+
+
+
+
+
+
+
+
+
+
{{pass}}
+{{ if has_vote1 and has_reflection and llm_reply: }}
+
+  <h3>An LLM peer said:</h3>
+  <p>{{=llm_reply}}</p>
+
+{{ pass }}
{{ if all_done == "false": }}
@@ -60,7 +120,171 @@ A discussion for you to consider
{{ pass }}
+
+
+
+
+
+
+
+
+
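
The new get_async_llm_reflection endpoint expects a JSON body like the following on the first turn (before any messages exist) — values are hypothetical; div_id must match a row in the questions table:

    {
      "div_id": "mcq_example_1",
      "selected_answer": "B",
      "reflection": "I picked B because the loop runs n-1 times."
    }

On later turns the client sends a messages list instead, and the server prepends (or overwrites) the system message rebuilt from _get_mcq_context().
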
From 7a89de2178968de8d59c14055279a1142137662f Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Tue, 27 Jan 2026 10:19:21 -0500
Subject: [PATCH 07/21] fix second vote
---
.../applications/runestone/static/js/peer.js | 89 ++++++++++++++++++-
.../runestone/views/peer/peer_async.html | 76 +++++++++++++---
2 files changed, 150 insertions(+), 15 deletions(-)
diff --git a/bases/rsptx/web2py_server/applications/runestone/static/js/peer.js b/bases/rsptx/web2py_server/applications/runestone/static/js/peer.js
index 40162a91b..5ea5f80d5 100644
--- a/bases/rsptx/web2py_server/applications/runestone/static/js/peer.js
+++ b/bases/rsptx/web2py_server/applications/runestone/static/js/peer.js
@@ -28,6 +28,9 @@ const STEP_CONFIG = {
const CHAT_MODALITIES = ['makep', 'facechat', 'makeabgroups'];
var currentStep = null;
+const REQUIRED_LLM_MESSAGES = 1;
+let llmMessageCount = 0;
+let readyForVote2 = false;
function disableButton(btn) {
if (btn) btn.disabled = true;
@@ -282,7 +285,7 @@ function connect(event) {
"Submit";
window.componentMap[currentQuestion].enableInteraction();
if (typeof studentVoteCount !== "undefined") {
- studentVoteCount += 1;
+ // studentVoteCount += 1;
if (studentVoteCount > 2) {
studentVoteCount = 2;
console.log("WARNING: resetting studentVoteCount to 2");
@@ -484,6 +487,15 @@ async function sendMessage(event) {
// Disable the send button after sending a message
sendButton.classList.add("disabled");
+ llmMessageCount += 1;
+ console.log("LLM message count:", llmMessageCount);
+
+ if (llmMessageCount >= REQUIRED_LLM_MESSAGES) {
+ const btn = document.getElementById("readyVote2Btn");
+ if (btn) {
+ btn.style.display = "inline-block";
+ }
+ }
}
function warnAndStopVote(event) {
@@ -712,9 +724,23 @@ async function showPeerEnableVote2() {
let peerEl = document.getElementById("peerJust");
peerEl.innerHTML = peerMess;
let nextStep = document.getElementById("nextStep");
+
+if (llmMessageCount < REQUIRED_LLM_MESSAGES) {
nextStep.innerHTML =
- "Please answer the question again, even if you do not wish to change your answer. After answering, click the button to go on to the next question.";
+ `Please continue the discussion. You must send at least ${REQUIRED_LLM_MESSAGES} message(s) before voting again.`;
nextStep.style.color = "red";
+ return;
+}
+
+nextStep.innerHTML =
+ "Please answer the question again, even if you do not wish to change your answer. After answering, click the button to go on to the next question.";
+nextStep.style.color = "red";
+voteStopped = false;
+
+let qq = window.componentMap[currentQuestion];
+qq.submitButton.disabled = false;
+qq.enableInteraction();
+
let cq = document.getElementById(`${currentQuestion}_feedback`);
cq.style.display = "none";
@@ -731,6 +757,42 @@ async function showPeerEnableVote2() {
}
}
+
+function enableSecondVoteAsync() {
+ console.log("Enabling second vote (async)");
+
+ voteStopped = false;
+
+ let qq = window.componentMap[currentQuestion];
+ if (!qq) {
+ console.error("No component found for currentQuestion");
+ return;
+ }
+
+ qq.submitButton.disabled = false;
+ qq.enableInteraction();
+
+ let feedbackDiv = document.getElementById(`${currentQuestion}_feedback`);
+ if (feedbackDiv) {
+ feedbackDiv.style.display = "none";
+ }
+
+ $(".runestone [type=radio]").prop("checked", false);
+ $(".runestone [type=checkbox]").prop("checked", false);
+
+ document.getElementById("readyVote2Btn").style.display = "none";
+
+ let nextStep = document.getElementById("nextStep");
+ if (nextStep) {
+ nextStep.innerHTML =
+ "please submit your answer again, even if you keep the same choice.";
+ nextStep.style.color = "red";
+ }
+
+ console.log("Second vote enabled");
+}
+
+
async function setupPeerGroup() {
let jsonHeaders = new Headers({
"Content-type": "application/json; charset=utf-8",
@@ -786,10 +848,31 @@ async function setupPeerGroup() {
}
+function insertReadyVote2Button() {
+ if (document.getElementById("readyVote2Btn")) return;
+
+ const container = document.getElementById("discussion_panel")
+ || document.getElementById("imessage");
+
+ if (!container) return;
+
+ const btn = document.createElement("button");
+ btn.id = "readyVote2Btn";
+ btn.className = "btn btn-warning";
+ btn.style.display = "none";
+ btn.style.marginTop = "10px";
+ btn.textContent = "I'm ready to vote again";
+
+ btn.addEventListener("click", enableSecondVoteAsync);
+
+ container.appendChild(btn);
+}
+
$(function () {
+ insertReadyVote2Button();
+
let tinput = document.getElementById("messageText");
let sendButton = document.getElementById("sendpeermsg");
-
if (tinput && sendButton) {
tinput.addEventListener("input", function () {
let message = this.value.trim();
diff --git a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
index 5b9831bdb..a365175c3 100644
--- a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
+++ b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
@@ -100,6 +100,14 @@ Congratulations, you have completed this assignment!
+
{{pass}}
@@ -124,6 +132,32 @@ Congratulations, you have completed this assignment!
window.DISABLE_ASYNC_EXPLAINER = true;
+
@@ -316,16 +356,28 @@ Congratulations, you have completed this assignment!
course_name: eBookConfig.course,
})
setTimeout(function () {
- $(window.componentMap[currentQuestion].submitButton).click(function () {
- if (studentVoteCount == 1) {
- let currAnswer = window.componentMap[currentQuestion].answer;
- console.log(`current answer is ${currAnswer}`);
- let sAnswer = answerToString(currAnswer);
- console.log(sAnswer);
- $("#current_answer").html(sAnswer)
- }
- })
- }, 2000)
+ const btn = window.componentMap[currentQuestion]?.submitButton;
+ if (!btn) return;
+
+ $(btn).off("click.pi_async").on("click.pi_async", function () {
+ const currAnswer = window.componentMap[currentQuestion].answer;
+ const sAnswer = answerToString(currAnswer);
+ $("#current_answer").html(sAnswer);
+
+ if (studentVoteCount === 1 && !window._vote2Enabled) {
+ console.log("vote 1 submitted");
+ studentVoteCount = 1;
+ return;
+ }
+
+ if (window._vote2Enabled && !studentSubmittedVote2) {
+ console.log("vote 2 submitted");
+ studentVoteCount = 2;
+ studentSubmittedVote2 = true;
+ window._vote2Enabled = false;
+ }
+ });
+ }, 2000);
});
-->
-
+
@@ -141,32 +141,61 @@ Congratulations, you have completed this assignment!
-
+
+
\( {{=XML(latex_macros)}} \)
@@ -298,65 +297,109 @@ Congratulations, you have completed this assignment!
discussion.style.display = "block";
chat.innerHTML = "
Thinking about your explanation…
";
-const mcq = document.querySelector('.mchoice');
-
-const questionText = mcq ? mcq.innerText : "";
+ if (window.PI_LLM_MODE !== true) {
+ const resp = await fetch("/runestone/peer/get_async_explainer", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ div_id: currentQuestion,
+ course: eBookConfig.course
+ })
+ });
-window._llmMessages = [
- {
- role: "user",
- content: `i chose answer ${selected}. my explanation was:\n\n${reflection}`
- }
-];
+ if (!resp.ok) {
+ chat.innerHTML = "
Error talking to peer.
";
+ return;
+ }
+ const spec = await resp.json();
+ let res = "";
+ for (let response in spec.responses) {
+ res += `User ${response} answered ${answerToString(spec.responses[response])}<br/>`;
+ }
+ chat.innerHTML = "";
+ chat.innerHTML += `<p>Other students said:</p>`;
+ if (res) {
+ chat.innerHTML += `<p>${res}</p>`;
+ }
+ if (spec.mess) {
+ chat.innerHTML += spec.mess;
+ }
+ const replyInput = document.getElementById("llmReplyInput");
+ const replyBtn = document.getElementById("llmReplyBtn");
+ if (replyInput) replyInput.style.display = "none";
+ if (replyBtn) replyBtn.style.display = "none";
+ nextStep.textContent =
+ "Please answer the question again, even if you do not wish to change your answer.";
+ const readyBtn = document.getElementById("readyVote2Btn");
+ if (readyBtn) {
+ readyBtn.style.display = "inline-block";
+ readyBtn.disabled = false;
+ readyBtn.title = "";
+ }
+ studentSubmittedVote2 = false;
+ return;
+ } else {
+ const mcq = document.querySelector('.mchoice');
-const resp = await fetch("/runestone/peer/get_async_llm_reflection", {
- method: "POST",
- headers: { "Content-Type": "application/json" },
- body: JSON.stringify({
- div_id: currentQuestion,
- selected_answer: selected,
- messages: window._llmMessages
- })
-});
+ const questionText = mcq ? mcq.innerText : "";
- const data = await resp.json();
- chat.innerHTML = "";
+ window._llmMessages = [
+ {
+ role: "user",
+ content: `i chose answer ${selected}. my explanation was:\n\n${reflection}`
+ }
+ ];
- if (!data.ok) {
- chat.innerHTML = "
Error talking to peer.
";
- return;
- }
- appendMsg("assistant", data.reply);
- if (typeof logPeerEvent === "function") {
- window._llmTurnIndex = (window._llmTurnIndex || 0) + 1;
- logPeerEvent({
- sid: eBookConfig.username,
- div_id: currentQuestion,
- event: "pi_llm_turn",
- act: JSON.stringify({
- pi_attempt_id: getPiAttemptId(),
- turn_index: window._llmTurnIndex,
- role: "assistant",
- content: data.reply,
- }),
- course_name: eBookConfig.course,
+ const resp = await fetch("/runestone/peer/get_async_llm_reflection", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ div_id: currentQuestion,
+ selected_answer: selected,
+ messages: window._llmMessages
+ })
});
- }
- window._llmMessages.push({
- role: "assistant",
- content: data.reply
- });
- nextStep.textContent =
- "Discuss this reasoning, then click 'Vote again' to vote again.";
+ const data = await resp.json();
+ chat.innerHTML = "";
- studentSubmittedVote2 = false;
+ if (!data.ok) {
+ chat.innerHTML = "
Error talking to peer.
";
+ return;
+ }
-
+ appendMsg("assistant", data.reply);
+ if (typeof logPeerEvent === "function") {
+ window._llmTurnIndex = (window._llmTurnIndex || 0) + 1;
+ logPeerEvent({
+ sid: eBookConfig.username,
+ div_id: currentQuestion,
+ event: "pi_llm_turn",
+ act: JSON.stringify({
+ pi_attempt_id: getPiAttemptId(),
+ turn_index: window._llmTurnIndex,
+ role: "assistant",
+ content: data.reply,
+ }),
+ course_name: eBookConfig.course,
+ });
+ }
+
+ window._llmMessages.push({
+ role: "assistant",
+ content: data.reply
+ });
+ nextStep.textContent =
+ "Discuss this reasoning, then click 'Vote again' to vote again.";
- enableChat(reflection, selected);
+ studentSubmittedVote2 = false;
+
+ enableChat(reflection, selected);
+ }
});
})();
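
Putting patch 07 together, the intended async flow is gated like this (a reading of the code above, not behavior documented elsewhere):

    vote 1 submitted
      -> reflection written -> LLM chat opens
      -> at least REQUIRED_LLM_MESSAGES sent -> "I'm ready to vote again" button appears
      -> click -> enableSecondVoteAsync() clears feedback and re-enables the component
      -> vote 2 submitted (tracked via studentSubmittedVote2 / window._vote2Enabled)
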
From c73958fde4791fe1f4518167ccb0aab31987b2dd Mon Sep 17 00:00:00 2001
From: Seth Bernstein
Date: Fri, 6 Feb 2026 09:36:44 -0500
Subject: [PATCH 18/21] improve error messaging
---
.../runestone/controllers/peer.py | 2 +-
.../runestone/views/peer/peer_async.html | 65 ++++++++++++-------
2 files changed, 44 insertions(+), 23 deletions(-)
diff --git a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
index 02b87c865..a8a30f071 100644
--- a/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
+++ b/bases/rsptx/web2py_server/applications/runestone/controllers/peer.py
@@ -1079,7 +1079,7 @@ def _call_openai(messages):
"temperature": 0.4,
"max_tokens": 300,
}
- resp = requests.post(url, headers=headers, data=json.dumps(payload), timeout=30)
+ resp = requests.post(url, headers=headers, json=payload, timeout=30)
logger.warning(f"PEER LLM CALL | provider=openai-course-token | model={model}")
resp.raise_for_status()
data = resp.json()
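
Context for the one-line change above: with data=json.dumps(payload) the Content-Type header must be set by hand, while json=payload has requests serialize the body and set the header itself. A sketch of the equivalence:

    # Both send the same request body; json= is the idiomatic form.
    requests.post(url, headers={"Content-Type": "application/json"},
                  data=json.dumps(payload), timeout=30)
    requests.post(url, json=payload, timeout=30)
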
diff --git a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
index 26d58ee5a..9f41c4443 100644
--- a/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
+++ b/bases/rsptx/web2py_server/applications/runestone/views/peer/peer_async.html
@@ -222,8 +222,6 @@ Congratulations, you have completed this assignment!
};
})();
-
-
-
-
@@ -39,7 +26,6 @@
eBookConfig.peer = true;
eBookConfig.peerMode = "async";
-
@@ -224,6 +210,25 @@ Congratulations, you have completed this assignment!