#!/usr/bin/env python3
"""
run_all_experiments.py -- Master orchestration for all paper experiments.

Runs all experiments needed for the VLDB 2026 paper:

    Phase 1: Repeated trials (3x top 6 configs) + Bootstrap CIs
    Phase 2: System prompt ablation (5 variants)
    Phase 3: Cross-model evaluation (Claude Sonnet 4)
    Phase 4: DAIL-SQL baseline comparison
    Phase 5: Cross-dataset evaluation (ClickBench + SSB)

Usage:
    python evaluation/run_all_experiments.py --all
    python evaluation/run_all_experiments.py --phase 1
    python evaluation/run_all_experiments.py --phase 2 3 4
    python evaluation/run_all_experiments.py --dry-run
"""
from __future__ import annotations

import argparse
import json
import logging
import subprocess
import sys
import time
from datetime import datetime, timezone
from pathlib import Path

project_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(project_root))
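# Make the repository root importable from any working directory; the
# generated helper script below does the same for itself when it runs as a
# subprocess.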

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
)
logger = logging.getLogger("all_experiments")

RESULTS_DIR = project_root / "evaluation" / "results"
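# Every phase writes its outputs beneath this directory, one subdirectory per
# phase (repeated_trials/, ablation/, cross_model/, dail_sql/, cross_dataset/).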

# ---------------------------------------------------------------------------
# Helper script content
# ---------------------------------------------------------------------------
# This script is written to disk and invoked as a subprocess so that we can
# run *arbitrary* (format, scope, metadata, example_strategy) configs on any
# model -- something that run_single_config.py does not support, since it
# hardcodes the "best config" values. The helper accepts every config
# dimension as a named CLI flag.
_HELPER_SCRIPT = r'''#!/usr/bin/env python3
"""
_run_config_helper.py -- Run an arbitrary config on any model.

This script is auto-generated by run_all_experiments.py. It exposes the
full (format, scope, metadata, example_strategy) surface via CLI args so
that the orchestrator can evaluate arbitrary configs without modifying
run_single_config.py.

Usage:
    python evaluation/_run_config_helper.py \
        --output results.jsonl \
        --model claude-sonnet-4-20250514 \
        --dataset custom_analytics \
        --format ddl \
        --scope full \
        --metadata none \
        --examples zero_shot
"""
from __future__ import annotations

import argparse
import json
import logging
import sys
import time
from pathlib import Path

project_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(project_root))

from evaluation.run_phase2 import (
    evaluate_single_query,
    load_all_queries,
    compute_aggregate_metrics,
    compute_category_metrics,
    query_result_to_dict,
    QueryEvalResult,
    BENCHMARK_DIR,
    API_DELAY_SEC,
)
from evaluation.framework.prompt_builder import (
    PromptBuilder,
    SchemaFormat,
    SchemaScope,
    MetadataLevel,
    ExampleStrategy,
)
from evaluation.framework.llm_caller import LLMCaller
from evaluation.framework.sql_executor import SQLExecutor
from evaluation.framework.schema_linker import SchemaLinker
from evaluation.framework.self_corrector import SelfCorrector

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
)
log = logging.getLogger("config_helper")


def main():
    parser = argparse.ArgumentParser(description="Run an arbitrary config on any model")
    parser.add_argument("--output", required=True, help="Output JSONL path")
    parser.add_argument("--model", default="claude-3-5-sonnet-20241022", help="Model ID")
    parser.add_argument("--dataset", default="custom_analytics", help="Dataset name")
    parser.add_argument("--format", required=True,
                        choices=["ddl", "markdown", "json", "natural_language"],
                        help="Schema format")
    parser.add_argument("--scope", required=True,
                        choices=["full", "relevant_subset", "progressive", "user_guided"],
                        help="Schema scope")
    parser.add_argument("--metadata", required=True,
                        choices=["none", "descriptions", "sample_values", "statistics", "all"],
                        help="Metadata level")
    parser.add_argument("--examples", required=True,
                        choices=["zero_shot", "static_few_shot", "dynamic_few_shot",
                                 "schema_matched", "dail_sql"],
                        help="Example strategy")
    args = parser.parse_args()

    schema_format = SchemaFormat(args.format)
    schema_scope = SchemaScope(args.scope)
    metadata_level = MetadataLevel(args.metadata)
    example_strategy = ExampleStrategy(args.examples)
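    # The argparse `choices` above are expected to match the framework enum
    # values; the enum constructors build members by value and raise
    # ValueError on any mismatch.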

    config_label = f"{args.format}_{args.scope}_{args.metadata}_{args.examples}"
    log.info("Config: %s | Model: %s | Dataset: %s", config_label, args.model, args.dataset)

    queries = load_all_queries(BENCHMARK_DIR, args.dataset)
    log.info("Loaded %d queries", len(queries))

    pb = PromptBuilder(BENCHMARK_DIR)
    llm = LLMCaller(model=args.model, max_tokens=2048, temperature=0.0)
    sql_exec = SQLExecutor(host="localhost", port=9000)
    sl = SchemaLinker()
    sc = SelfCorrector(llm_caller=llm, sql_executor=sql_exec, max_retries=2)
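    # Fail fast: every query below is executed against ClickHouse, so abort
    # immediately if the server assumed by SQLExecutor (localhost:9000) is
    # unreachable.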
    if not sql_exec.test_connection():
        log.error("ClickHouse connection failed.")
        sys.exit(1)

    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    results: list[QueryEvalResult] = []
    total = len(queries)
    for idx, query in enumerate(queries, 1):
        qid = query.get("id", f"q_{idx}")
        qr = evaluate_single_query(
            query=query,
            prompt_builder=pb,
            llm_caller=llm,
            sql_executor=sql_exec,
            schema_linker=sl,
            schema_format=schema_format,
            schema_scope=schema_scope,
            metadata_level=metadata_level,
            example_strategy=example_strategy,
            self_corrector=sc,
        )
        results.append(qr)
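        # Stream each result to the JSONL file as soon as it is available, so
        # a crash or interrupt keeps everything evaluated so far. Append mode
        # also means re-running with the same --output adds rows rather than
        # overwriting them.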
        with open(args.output, "a") as f:
            f.write(json.dumps(query_result_to_dict(qr)) + "\n")

        status = "CORRECT" if qr.result_match else ("EXEC" if qr.pred_executed else "FAIL")
        if idx % 10 == 0 or idx == total:
            correct_so_far = sum(1 for r in results if r.result_match)
            log.info(
                " [%d/%d] %s: %s | Running RC: %.1f%% (%d/%d)",
                idx, total, qid, status,
                100.0 * correct_so_far / len(results), correct_so_far, len(results),
            )
        else:
            log.info(" %s: %s | F1=%.2f", qid, status, qr.overall_f1)

        if API_DELAY_SEC > 0:
            time.sleep(API_DELAY_SEC)

    agg = compute_aggregate_metrics(results)
    cats = compute_category_metrics(results)

    print(f"\n{'='*70}")
    print(f" Config : {config_label}")
    print(f" Model : {args.model}")
    print(f" Dataset: {args.dataset}")
    print(f" EX: {agg['execution_accuracy']:.3f} RC: {agg['result_correctness']:.3f}")
    print(f" Correct: {agg['correct_queries']}/{agg['total_queries']}")
    print(f"{'='*70}")
    print(f"\n Category Breakdown:")
    for cat, metrics in sorted(cats.items()):
        print(f" {cat:25s}: {metrics['correct_queries']:3d}/{metrics['total_queries']:3d}"
              f" = {metrics['result_correctness']:.1%}")
    print(f"{'='*70}")

    sql_exec.close()


if __name__ == "__main__":
    main()
'''


def _ensure_helper_script() -> Path:
    """Write the helper script to disk (idempotent) and return its path."""
    helper_path = project_root / "evaluation" / "_run_config_helper.py"
    helper_path.write_text(_HELPER_SCRIPT)
    helper_path.chmod(0o755)
    return helper_path
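
# Illustrative invocation of the generated helper (the same flags that
# phase3_cross_model() and phase4_dail_sql() assemble below), shown here for
# reference only:
#
#   python evaluation/_run_config_helper.py \
#       --output evaluation/results/cross_model/sonnet4_baseline_results.jsonl \
#       --model claude-sonnet-4-20250514 \
#       --format ddl --scope full --metadata none --examples zero_shot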


# ---------------------------------------------------------------------------
# Command runner
# ---------------------------------------------------------------------------
def run_command(cmd: list[str], dry_run: bool = False) -> int:
    """Run a command, logging it first.

    Returns the process exit code (0 on dry-run).
    """
    cmd_str = " ".join(cmd)
    logger.info("Running: %s", cmd_str)
    if dry_run:
        logger.info(" [DRY RUN] Skipped")
        return 0

    result = subprocess.run(cmd, cwd=str(project_root))
    if result.returncode != 0:
        logger.error("Command failed with exit code %d", result.returncode)
    return result.returncode


# ---------------------------------------------------------------------------
# Phase implementations
# ---------------------------------------------------------------------------
def phase1_repeated_trials(dry_run: bool = False):
    """Phase 1: Run 3 repeated trials of top 6 configs + bootstrap CIs.

    This phase provides the statistical confidence intervals required for
    Table 3 in the paper. It calls ``run_repeated_trials.py`` which
    internally runs the 6 pre-defined configs, computes bootstrap 95% CIs,
    and writes per-trial JSONL plus aggregated summaries.
    """
    logger.info("=" * 70)
    logger.info("PHASE 1: REPEATED TRIALS (3x top 6 configs)")
    logger.info(" Estimated cost: ~$25 | Wall-clock: ~3 hours")
    logger.info("=" * 70)
    run_command([
        sys.executable, "evaluation/run_repeated_trials.py",
        "--trials", "3",
        "--output-dir", str(RESULTS_DIR / "repeated_trials"),
    ], dry_run=dry_run)


def phase2_prompt_ablation(dry_run: bool = False):
    """Phase 2: System prompt ablation (5 variants on best config).

    Each variant adds cumulative ClickHouse-specific guidance to the system
    prompt. This phase uses ``run_single_config.py --prompt-version`` which
    keeps the best (format, scope, metadata, examples) fixed and only varies
    the system prompt.

    Variants (cumulative):
        minimal      -- No ClickHouse guidance at all
        dialect_only -- + ClickHouse syntax/dialect hints
        joins        -- + Table relationship hints & JOIN guidance
        window       -- + Window function & aggregation guidance
        full         -- Full V6 prompt (current best)
    """
    logger.info("=" * 70)
    logger.info("PHASE 2: SYSTEM PROMPT ABLATION")
    logger.info(" Estimated cost: ~$7 | Wall-clock: ~90 min")
    logger.info("=" * 70)
    ablation_dir = RESULTS_DIR / "ablation"
    ablation_dir.mkdir(parents=True, exist_ok=True)

    versions = ["minimal", "dialect_only", "joins", "window", "full"]
    for pv in versions:
        output = str(ablation_dir / f"ablation_{pv}_results.jsonl")
        run_command([
            sys.executable, "evaluation/run_single_config.py",
            "--prompt-version", pv,
            "--output", output,
        ], dry_run=dry_run)
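    # Each variant lands in ablation/ablation_<version>_results.jsonl;
    # generate_publication_outputs.py presumably picks these files up by name.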


def phase3_cross_model(dry_run: bool = False):
    """Phase 3: Run key configs on Claude Sonnet 4.

    We evaluate three configs on the newer model to answer the question
    "does the ranking of prompt strategies generalise across models?"

    Configs:
        1. Best config -- markdown / relevant_subset / descriptions / dynamic_few_shot
        2. Baseline    -- ddl / full / none / zero_shot
        3. Scope-only  -- markdown / relevant_subset / none / zero_shot
    """
    logger.info("=" * 70)
    logger.info("PHASE 3: CROSS-MODEL EVALUATION (Claude Sonnet 4)")
    logger.info(" Estimated cost: ~$5 | Wall-clock: ~30 min")
    logger.info("=" * 70)
    cross_model_dir = RESULTS_DIR / "cross_model"
    cross_model_dir.mkdir(parents=True, exist_ok=True)

    sonnet4_model = "claude-sonnet-4-20250514"

    # Config 1: Best config -- run_single_config already uses best config,
    # so we can use it directly with --model override.
    run_command([
        sys.executable, "evaluation/run_single_config.py",
        "--model", sonnet4_model,
        "--output", str(cross_model_dir / "sonnet4_best_config_results.jsonl"),
    ], dry_run=dry_run)

    # For configs 2 and 3, we need arbitrary (format, scope, metadata,
    # examples) combinations that run_single_config.py does not support.
    # Use the helper script instead.
    helper_path = _ensure_helper_script()

    # Config 2: Baseline -- ddl / full / none / zero_shot
    run_command([
        sys.executable, str(helper_path),
        "--output", str(cross_model_dir / "sonnet4_baseline_results.jsonl"),
        "--model", sonnet4_model,
        "--format", "ddl",
        "--scope", "full",
        "--metadata", "none",
        "--examples", "zero_shot",
    ], dry_run=dry_run)

    # Config 3: Scope-only -- markdown / relevant_subset / none / zero_shot
    run_command([
        sys.executable, str(helper_path),
        "--output", str(cross_model_dir / "sonnet4_scope_only_results.jsonl"),
        "--model", sonnet4_model,
        "--format", "markdown",
        "--scope", "relevant_subset",
        "--metadata", "none",
        "--examples", "zero_shot",
    ], dry_run=dry_run)


def phase4_dail_sql(dry_run: bool = False):
    """Phase 4: DAIL-SQL prompting baseline.

    DAIL-SQL is a competitive baseline from the literature. We evaluate it
    using the best schema scope + metadata from our ablation so that the
    comparison isolates the example-selection strategy.

    Config: ddl / relevant_subset / descriptions / dail_sql
    """
    logger.info("=" * 70)
    logger.info("PHASE 4: DAIL-SQL BASELINE")
    logger.info(" Estimated cost: ~$3 | Wall-clock: ~10 min")
    logger.info("=" * 70)
    dail_dir = RESULTS_DIR / "dail_sql"
    dail_dir.mkdir(parents=True, exist_ok=True)

    helper_path = _ensure_helper_script()

    # DAIL-SQL with best scope + metadata
    run_command([
        sys.executable, str(helper_path),
        "--output", str(dail_dir / "dail_sql_relevant_subset_descriptions_results.jsonl"),
        "--format", "ddl",
        "--scope", "relevant_subset",
        "--metadata", "descriptions",
        "--examples", "dail_sql",
    ], dry_run=dry_run)


def phase5_cross_dataset(dry_run: bool = False):
    """Phase 5: Run experiments on ClickBench and SSB datasets.

    This phase evaluates the top 3 configs on two external datasets to
    measure generalisability beyond our custom analytics benchmark.

    Prerequisites:
        - ClickBench data loaded in ClickHouse (see docs/clickbench_setup.md)
        - SSB data loaded in ClickHouse (see docs/ssb_setup.md)
        - Benchmark query files present under evaluation/benchmark/

    Configs per dataset (3 each):
        best       -- markdown / relevant_subset / descriptions / dynamic_few_shot
        baseline   -- ddl / full / none / zero_shot
        scope_only -- markdown / relevant_subset / none / zero_shot
    """
    logger.info("=" * 70)
    logger.info("PHASE 5: CROSS-DATASET EVALUATION")
    logger.info(" Estimated cost: ~$8 | Wall-clock: ~1.5 hours")
    logger.info(" Prerequisite: ClickBench and SSB data loaded in ClickHouse")
    logger.info("=" * 70)
    cross_dataset_dir = RESULTS_DIR / "cross_dataset"
    cross_dataset_dir.mkdir(parents=True, exist_ok=True)

    helper_path = _ensure_helper_script()

    configs = [
        {
            "label": "best",
            "format": "markdown",
            "scope": "relevant_subset",
            "metadata": "descriptions",
            "examples": "dynamic_few_shot",
        },
        {
            "label": "baseline",
            "format": "ddl",
            "scope": "full",
            "metadata": "none",
            "examples": "zero_shot",
        },
        {
            "label": "scope_only",
            "format": "markdown",
            "scope": "relevant_subset",
            "metadata": "none",
            "examples": "zero_shot",
        },
    ]

    for dataset_name in ["clickbench", "ssb"]:
        for cfg in configs:
            output = str(
                cross_dataset_dir
                / f"{dataset_name}_{cfg['label']}_results.jsonl"
            )
            run_command([
                sys.executable, str(helper_path),
                "--output", output,
                "--dataset", dataset_name,
                "--format", cfg["format"],
                "--scope", cfg["scope"],
                "--metadata", cfg["metadata"],
                "--examples", cfg["examples"],
            ], dry_run=dry_run)
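    # Six runs in total: {clickbench, ssb} x {best, baseline, scope_only}.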


def generate_outputs(dry_run: bool = False):
    """Regenerate all publication outputs (figures + LaTeX tables)."""
    logger.info("=" * 70)
    logger.info("GENERATING PUBLICATION OUTPUTS")
    logger.info("=" * 70)
    run_command([
        sys.executable, "evaluation/generate_publication_outputs.py",
    ], dry_run=dry_run)


# ---------------------------------------------------------------------------
# CLI entrypoint
# ---------------------------------------------------------------------------
def main():
    parser = argparse.ArgumentParser(
        description="Master experiment orchestrator for VLDB 2026 paper.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Phases:
  1  Repeated trials (3x top 6 configs) + Bootstrap CIs   ~$25
  2  System prompt ablation (5 variants)                  ~$7
  3  Cross-model evaluation (Claude Sonnet 4)             ~$5
  4  DAIL-SQL baseline comparison                         ~$3
  5  Cross-dataset evaluation (ClickBench + SSB)          ~$8

Total: ~$48
""",
    )
    parser.add_argument(
        "--all", action="store_true",
        help="Run all phases (1-5) and regenerate publication outputs",
    )
    parser.add_argument(
        "--phase", type=int, nargs="+",
        choices=[1, 2, 3, 4, 5],
        help="Run specific phase(s), e.g. --phase 1 3",
    )
    parser.add_argument(
        "--generate", action="store_true",
        help="Regenerate publication outputs after experiments",
    )
    parser.add_argument(
        "--dry-run", action="store_true",
        help="Print commands without executing them",
    )
    args = parser.parse_args()

    if not args.all and not args.phase and not args.generate:
        parser.print_help()
        return

    phases_to_run: set[int] = set()
    if args.all:
        phases_to_run = {1, 2, 3, 4, 5}
    elif args.phase:
        phases_to_run = set(args.phase)

    start_time = time.time()
    logger.info("#" * 70)
    logger.info(" DATAPUP: MASTER EXPERIMENT RUNNER")
    logger.info(" Phases: %s", sorted(phases_to_run) if phases_to_run else "none")
    logger.info(" Dry run: %s", args.dry_run)
    logger.info(" Started: %s", datetime.now(timezone.utc).isoformat())
    logger.info("#" * 70)

    phase_fns = {
        1: phase1_repeated_trials,
        2: phase2_prompt_ablation,
        3: phase3_cross_model,
        4: phase4_dail_sql,
        5: phase5_cross_dataset,
    }
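
    # Phases run sequentially in ascending numeric order. Each writes to its
    # own subdirectory under RESULTS_DIR, so individual phases can be re-run
    # later with --phase without disturbing the others.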
    for phase_num in sorted(phases_to_run):
        phase_fns[phase_num](dry_run=args.dry_run)

    if args.generate or args.all:
        generate_outputs(dry_run=args.dry_run)

    elapsed = time.time() - start_time
    logger.info("#" * 70)
    logger.info(" ALL EXPERIMENTS COMPLETE")
    logger.info(" Total elapsed: %.1f minutes", elapsed / 60)
    logger.info("#" * 70)


if __name__ == "__main__":
    main()