diff --git a/.github/workflows/latency-test.yml b/.github/workflows/latency-test.yml new file mode 100644 index 0000000..4dadd15 --- /dev/null +++ b/.github/workflows/latency-test.yml @@ -0,0 +1,131 @@ +name: GP2040-CE Latency Tests + +on: + # Run on a schedule (daily at 03:00 UTC, after performance tests). + schedule: + - cron: "0 3 * * *" + # Allow manual triggering. + workflow_dispatch: + inputs: + sample_count: + description: "Number of latency samples" + required: false + default: "1000" + warmup_samples: + description: "Number of warmup samples to discard" + required: false + default: "50" + +# Restrict GITHUB_TOKEN to the minimum permissions needed. +permissions: + contents: read + actions: write # required to upload artifacts + +# Only one latency test at a time (exclusive hardware access). +concurrency: + group: latency-tests + cancel-in-progress: false + +jobs: + latency-test: + name: "GP2040-CE Input Latency Test" + runs-on: [self-hosted, performancenode] + timeout-minutes: 60 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + # ── Verify HAT and GP2040-CE device are present ────────────────────── + - name: Verify GP2040-CE device + run: | + echo "Checking for GP2040-CE USB device..." + if ! lsusb | grep -qi "2e8a"; then + echo "::error::GP2040-CE device not found. Ensure it is connected via USB." + exit 1 + fi + echo "GP2040-CE device detected." + + echo "Checking GPIO access..." + if [[ ! -e /dev/gpiomem ]] && [[ ! -e /dev/mem ]]; then + echo "::warning::GPIO device not found. HAT may not be connected." + fi + + # ── Set up Python environment ───────────────────────────────────────── + - name: Set up Python virtual environment + run: | + VENV="/opt/performancenode/venv" + if [[ ! 
-d "$VENV" ]]; then + python3 -m venv "$VENV" + fi + "$VENV/bin/pip" install --upgrade pip --quiet + "$VENV/bin/pip" install RPi.GPIO gpiozero lgpio --quiet + + # ── Override config with workflow inputs ────────────────────────────── + - name: Prepare test configuration + run: | + CONFIG_SRC="hat/config/hat-config.json" + CONFIG_TMP="/tmp/hat-config-run.json" + + SAMPLE_COUNT="${{ inputs.sample_count || '1000' }}" + WARMUP="${{ inputs.warmup_samples || '50' }}" + + jq \ + --argjson sc "$SAMPLE_COUNT" \ + --argjson ws "$WARMUP" \ + '.latency_test.sample_count = $sc | .latency_test.warmup_samples = $ws' \ + "$CONFIG_SRC" > "$CONFIG_TMP" + + echo "Running with sample_count=${SAMPLE_COUNT}, warmup=${WARMUP}" + + # ── Run latency test ────────────────────────────────────────────────── + - name: Run latency test + run: | + OUT_DIR="/opt/performancenode/results/latency" + mkdir -p "$OUT_DIR" + + /opt/performancenode/venv/bin/python3 hat/latency_test.py \ + --config /tmp/hat-config-run.json \ + --output "$OUT_DIR" + + # ── Upload raw results ──────────────────────────────────────────────── + - name: Upload latency results + uses: actions/upload-artifact@v4 + with: + name: latency-results-${{ github.run_number }} + path: /opt/performancenode/results/latency/latency-*.json + retention-days: 365 + + # ── Write step summary ──────────────────────────────────────────────── + - name: Generate summary + if: always() + run: | + LATEST=$(ls -t /opt/performancenode/results/latency/latency-*.json 2>/dev/null | head -1) + + echo "## GP2040-CE Latency Test Results" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "**Run:** ${{ github.run_number }} | **Date:** $(date -u '+%Y-%m-%d %H:%M UTC')" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + + if [[ -n "$LATEST" ]]; then + MEAN=$(jq '.latency_ms.mean' "$LATEST") + MEDIAN=$(jq '.latency_ms.median' "$LATEST") + P95=$(jq '.latency_ms.p95' "$LATEST") + P99=$(jq '.latency_ms.p99' "$LATEST") + STDEV=$(jq 
'.latency_ms.stdev' "$LATEST") + TIMEOUTS=$(jq '.timeout_count' "$LATEST") + SAMPLES=$(jq '.sample_count' "$LATEST") + + echo "| Metric | Value (ms) |" >> "$GITHUB_STEP_SUMMARY" + echo "| ------------------ | ---------- |" >> "$GITHUB_STEP_SUMMARY" + echo "| Mean | ${MEAN} |" >> "$GITHUB_STEP_SUMMARY" + echo "| Median | ${MEDIAN} |" >> "$GITHUB_STEP_SUMMARY" + echo "| Std Dev | ${STDEV} |" >> "$GITHUB_STEP_SUMMARY" + echo "| p95 | ${P95} |" >> "$GITHUB_STEP_SUMMARY" + echo "| p99 | ${P99} |" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "**Samples:** ${SAMPLES} | **Timeouts:** ${TIMEOUTS}" >> "$GITHUB_STEP_SUMMARY" + else + echo "::warning::No latency result file found." + echo "_No results available._" >> "$GITHUB_STEP_SUMMARY" + fi diff --git a/.github/workflows/performance-test.yml b/.github/workflows/performance-test.yml new file mode 100644 index 0000000..b6958c4 --- /dev/null +++ b/.github/workflows/performance-test.yml @@ -0,0 +1,175 @@ +name: Performance Tests + +on: + # Run on a schedule (daily at 02:00 UTC). + schedule: + - cron: "0 2 * * *" + # Allow manual triggering from the GitHub UI. + workflow_dispatch: + inputs: + test_profile: + description: "Test profile to run" + required: true + default: "all" + type: choice + options: + - all + - cpu + - memory + - network + - io + - http + - stress + +# Restrict GITHUB_TOKEN to the minimum permissions needed. +permissions: + contents: read + actions: write # required to upload artifacts + +# Only one performance test run at a time on the Pi. 
+concurrency: + group: performance-tests + cancel-in-progress: false + +jobs: + # ── CPU benchmark ───────────────────────────────────────────────────────── + cpu-benchmark: + name: "CPU Benchmark (sysbench)" + runs-on: [self-hosted, performancenode] + if: > + github.event_name == 'schedule' || + inputs.test_profile == 'all' || + inputs.test_profile == 'cpu' + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build performance image + run: | + docker build \ + --target sysbench \ + -t performancenode/sysbench:latest \ + docker/performance/ + + - name: Run CPU benchmark + run: | + mkdir -p /opt/performancenode/results/performance + docker run --rm \ + -e SYSBENCH_TEST=cpu \ + -e SYSBENCH_THREADS=$(nproc) \ + -e SYSBENCH_TIME=60 \ + -v /opt/performancenode/results/performance:/results \ + performancenode/sysbench:latest + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: cpu-benchmark-${{ github.run_number }} + path: /opt/performancenode/results/performance/sysbench-cpu-*.json + retention-days: 90 + + # ── Memory benchmark ────────────────────────────────────────────────────── + memory-benchmark: + name: "Memory Benchmark (sysbench)" + runs-on: [self-hosted, performancenode] + if: > + github.event_name == 'schedule' || + inputs.test_profile == 'all' || + inputs.test_profile == 'memory' + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build performance image + run: | + docker build \ + --target sysbench \ + -t performancenode/sysbench:latest \ + docker/performance/ + + - name: Run memory benchmark + run: | + mkdir -p /opt/performancenode/results/performance + docker run --rm \ + -e SYSBENCH_TEST=memory \ + -e SYSBENCH_THREADS=$(nproc) \ + -e SYSBENCH_TIME=60 \ + -v /opt/performancenode/results/performance:/results \ + performancenode/sysbench:latest + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: memory-benchmark-${{ github.run_number }} + path: 
/opt/performancenode/results/performance/sysbench-memory-*.json + retention-days: 90 + + # ── Stress test ──────────────────────────────────────────────────────────── + stress-test: + name: "Stress Test (stress-ng)" + runs-on: [self-hosted, performancenode] + if: > + github.event_name == 'schedule' || + inputs.test_profile == 'all' || + inputs.test_profile == 'stress' + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build stress-ng image + run: | + docker build \ + --target stress-ng \ + -t performancenode/stress-ng:latest \ + docker/performance/ + + - name: Run stress test + run: | + mkdir -p /opt/performancenode/results/performance + docker run --rm \ + -e STRESS_TIMEOUT=120s \ + -e STRESS_CPU=$(nproc) \ + -e STRESS_VM=1 \ + -e STRESS_VM_BYTES=512m \ + -v /opt/performancenode/results/performance:/results \ + performancenode/stress-ng:latest + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: stress-test-${{ github.run_number }} + path: /opt/performancenode/results/performance/stress-ng-*.json + retention-days: 90 + + # ── Summary ──────────────────────────────────────────────────────────────── + summarize: + name: "Summarize Results" + runs-on: [self-hosted, performancenode] + needs: [cpu-benchmark, memory-benchmark, stress-test] + if: always() + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Generate summary + run: | + echo "## Performance Test Results" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "**Run:** ${{ github.run_number }} | **Date:** $(date -u '+%Y-%m-%d %H:%M UTC')" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "| Test | Status |" >> "$GITHUB_STEP_SUMMARY" + echo "| ---- | ------ |" >> "$GITHUB_STEP_SUMMARY" + echo "| CPU Benchmark | ${{ needs.cpu-benchmark.result }} |" >> "$GITHUB_STEP_SUMMARY" + echo "| Memory Benchmark | ${{ needs.memory-benchmark.result }} |" >> "$GITHUB_STEP_SUMMARY" + echo "| Stress Test | ${{ needs.stress-test.result 
}} |" >> "$GITHUB_STEP_SUMMARY" + + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "### Recent CPU Results" >> "$GITHUB_STEP_SUMMARY" + LATEST=$(ls -t /opt/performancenode/results/performance/sysbench-cpu-*.json 2>/dev/null | head -1) + if [[ -n "$LATEST" ]]; then + echo '```json' >> "$GITHUB_STEP_SUMMARY" + cat "$LATEST" >> "$GITHUB_STEP_SUMMARY" + echo '```' >> "$GITHUB_STEP_SUMMARY" + else + echo "_No CPU results available._" >> "$GITHUB_STEP_SUMMARY" + fi diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..97e1910 --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +*.egg-info/ + +# Virtual environments +venv/ +.venv/ + +# OS artefacts +.DS_Store +Thumbs.db + +# Editor directories +.idea/ +.vscode/ +*.swp +*.swo + +# Test / benchmark result files (generated at runtime on the Pi) +/opt/performancenode/ +docker/results/ diff --git a/README.md b/README.md index 1e7fa92..d14cf15 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,196 @@ # PerformanceNode -A dedicated performance testing environment. + +A dedicated performance-testing environment built on a **Raspberry Pi 5**. +PerformanceNode runs containerised benchmark suites and hardware-assisted +latency tests for the [GP2040-CE](https://github.com/OpenStickCommunity/GP2040-CE) +gamepad firmware, reporting results directly to GitHub via a self-hosted +GitHub Actions runner. 
+ +--- + +## Overview + +| Component | Purpose | +|---|---| +| **Raspberry Pi 5** | Bare-metal host for all tests | +| **Docker** | Isolated, reproducible benchmark containers | +| **GitHub Actions (self-hosted)** | Test orchestration and result publication | +| **Custom HAT** | GPIO ↔ GP2040-CE bridge for input-latency measurement | + +--- + +## Repository Layout + +``` +.github/ + workflows/ + performance-test.yml # CPU / memory / stress benchmarks (Docker) + latency-test.yml # GP2040-CE input-latency tests (HAT) +docker/ + docker-compose.yml # Compose stack for local ad-hoc test runs + performance/ + Dockerfile # Multi-stage image (wrk / iperf3 / stress-ng / sysbench) + entrypoints/ # Per-tool entrypoint scripts +hat/ + latency_test.py # GPIO-based latency measurement script + config/ + hat-config.json # HAT GPIO pin assignments and test parameters +scripts/ + setup.sh # Main entry-point (runs all steps below) + setup-system.sh # OS hardening, packages, sysctl tuning + setup-docker.sh # Docker Engine installation + setup-github-runner.sh # Self-hosted Actions runner installation + setup-hat.sh # Custom HAT driver, udev rules, Python venv +``` + +--- + +## Hardware + +### Raspberry Pi 5 + +- Raspberry Pi OS Lite (64-bit / bookworm) +- 4 GB or 8 GB RAM recommended +- USB-A port for GP2040-CE device + +### Custom HAT – GP2040-CE Latency Tester + +The HAT bridges the Pi's GPIO to the button inputs of a GP2040-CE device, +enabling automated, microsecond-precision latency measurements. + +**GPIO pin assignments (BCM numbering):** + +| BCM Pin | Direction | Function | +|---------|-----------|----------| +| 4 | IN | Button signal from GP2040-CE | +| 17 | OUT | Trigger output to GP2040-CE button input | +| 27 | OUT | Status LED | + +**Measurement flow:** + +1. Pi asserts GPIO 17 → GP2040-CE receives button press. +2. GP2040-CE processes the input and asserts a GPIO output back to the Pi (GPIO 4). +3. Pi measures the elapsed time between steps 1 and 2. 
+
+---
+
+## Setup
+
+### Prerequisites
+
+- Fresh Raspberry Pi OS Lite (64-bit) image written to SD card / NVMe.
+- SSH access to the Pi.
+- A GitHub repository URL and a runner registration token
+  ([Settings → Actions → Runners → New self-hosted runner](https://github.com/becauseimclever/PerformanceNode/settings/actions/runners/new)).
+
+### Run the setup script
+
+```bash
+# Clone this repository onto the Pi.
+git clone https://github.com/becauseimclever/PerformanceNode.git
+cd PerformanceNode
+
+# Run the full setup (requires root). Replace <RUNNER_TOKEN> with the
+# registration token obtained from the GitHub runner settings page above.
+sudo bash scripts/setup.sh \
+  --runner-url https://github.com/becauseimclever/PerformanceNode \
+  --runner-token <RUNNER_TOKEN>
+
+# Reboot to apply all changes.
+sudo reboot
+```
+
+#### Individual steps
+
+```bash
+sudo bash scripts/setup-system.sh        # OS tuning only
+sudo bash scripts/setup-docker.sh        # Docker only
+sudo bash scripts/setup-github-runner.sh # Runner only
+sudo bash scripts/setup-hat.sh           # HAT / GPIO only
+```
+
+#### Skip individual steps
+
+```bash
+sudo bash scripts/setup.sh --skip-hat     # Skip HAT setup (no HAT attached)
+sudo bash scripts/setup.sh --skip-runner  # Skip runner setup
+```
+
+---
+
+## Running Tests
+
+### Via GitHub Actions (recommended)
+
+All workflows run automatically on a daily schedule and can be triggered
+manually from **Actions → workflow → Run workflow**.
+
+| Workflow | Schedule | Manual trigger |
+|----------|----------|---------------|
+| Performance Tests | Daily 02:00 UTC | ✅ |
+| GP2040-CE Latency Tests | Daily 03:00 UTC | ✅ |
+
+Results are uploaded as workflow artifacts and summarised in the run's
+**Summary** tab.
+ +### Locally with Docker Compose + +```bash +# CPU benchmark +docker compose --profile cpu -f docker/docker-compose.yml up sysbench + +# Memory benchmark +SYSBENCH_TEST=memory docker compose --profile memory -f docker/docker-compose.yml up sysbench + +# HTTP benchmark (requires a running server) +WRK_URL=http://myserver/ docker compose --profile http -f docker/docker-compose.yml up wrk + +# Network throughput (requires an iperf3 server) +IPERF3_SERVER=192.168.1.100 docker compose --profile network -f docker/docker-compose.yml up iperf3 +``` + +### Latency test (requires HAT) + +```bash +# Using the installed virtual environment +/opt/performancenode/venv/bin/python3 hat/latency_test.py \ + --config hat/config/hat-config.json \ + --output /opt/performancenode/results/latency +``` + +--- + +## Configuration + +### HAT config (`hat/config/hat-config.json`) + +| Key | Default | Description | +|-----|---------|-------------| +| `gpio.button_signal_pin` | 4 | BCM pin for the GP2040-CE signal input | +| `gpio.trigger_output_pin` | 17 | BCM pin that drives the button press | +| `gpio.status_led_pin` | 27 | BCM pin for the status LED | +| `latency_test.sample_count` | 1000 | Number of measurements | +| `latency_test.warmup_samples` | 50 | Discarded warmup measurements | +| `latency_test.trigger_interval_ms` | 100 | Delay between triggers | +| `latency_test.timeout_ms` | 500 | Per-sample timeout | + +--- + +## Results + +All result files are JSON and are stored on the Pi under +`/opt/performancenode/results/`. 
They are also uploaded as GitHub Actions +artifacts, retained for: + +- **Performance benchmarks** – 90 days +- **Latency results** – 365 days + +--- + +## Runner Labels + +The self-hosted runner registers with the following labels so that workflows +target it precisely: + +``` +self-hosted, Linux, ARM64, raspberry-pi-5, performancenode +``` diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 0000000..2386e2d --- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,56 @@ +services: + # ── wrk – HTTP benchmarking ──────────────────────────────────────────────── + wrk: + build: + context: ./performance + target: wrk + image: performancenode/wrk:latest + container_name: perf-wrk + network_mode: host + volumes: + - ./results:/results + environment: + - RESULT_DIR=/results + profiles: ["http"] + + # ── iperf3 – network throughput ──────────────────────────────────────────── + iperf3: + build: + context: ./performance + target: iperf3 + image: performancenode/iperf3:latest + container_name: perf-iperf3 + network_mode: host + volumes: + - ./results:/results + environment: + - RESULT_DIR=/results + profiles: ["network"] + + # ── stress-ng – CPU / memory / I/O stress ───────────────────────────────── + stress-ng: + build: + context: ./performance + target: stress-ng + image: performancenode/stress-ng:latest + container_name: perf-stress-ng + volumes: + - ./results:/results + environment: + - RESULT_DIR=/results + profiles: ["stress"] + + # ── sysbench – CPU / memory / I/O benchmarks ────────────────────────────── + sysbench: + build: + context: ./performance + target: sysbench + image: performancenode/sysbench:latest + container_name: perf-sysbench + volumes: + - ./results:/results + environment: + - RESULT_DIR=/results + profiles: ["cpu", "memory", "io"] + +volumes: {} diff --git a/docker/performance/Dockerfile b/docker/performance/Dockerfile new file mode 100644 index 0000000..a552d4a --- /dev/null +++ 
b/docker/performance/Dockerfile
@@ -0,0 +1,65 @@
+# ── Base image ───────────────────────────────────────────────────────────────
+FROM debian:bookworm-slim AS base
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends \
+        ca-certificates \
+        curl \
+        jq \
+        time \
+        procps && \
+    rm -rf /var/lib/apt/lists/*
+
+# Default results directory (bind-mounted from the host at runtime).
+ENV RESULT_DIR=/results
+RUN mkdir -p /results
+
+# ── wrk – HTTP benchmarking ──────────────────────────────────────────────────
+FROM base AS wrk
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends wrk && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY entrypoints/wrk-entrypoint.sh /usr/local/bin/entrypoint.sh
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+
+# ── iperf3 – network throughput ──────────────────────────────────────────────
+FROM base AS iperf3
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends iperf3 && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY entrypoints/iperf3-entrypoint.sh /usr/local/bin/entrypoint.sh
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+
+# ── stress-ng (python3/python3-yaml needed by its entrypoint: YAML→JSON) ─────
+FROM base AS stress-ng
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends stress-ng python3 python3-yaml && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY entrypoints/stress-ng-entrypoint.sh /usr/local/bin/entrypoint.sh
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
+
+# ── sysbench ─────────────────────────────────────────────────────────────────
+FROM base AS sysbench
+
+RUN apt-get update -y && \
+    apt-get install -y --no-install-recommends sysbench && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY entrypoints/sysbench-entrypoint.sh /usr/local/bin/entrypoint.sh
+RUN chmod +x /usr/local/bin/entrypoint.sh
+
+ENTRYPOINT
["/usr/local/bin/entrypoint.sh"] diff --git a/docker/performance/entrypoints/iperf3-entrypoint.sh b/docker/performance/entrypoints/iperf3-entrypoint.sh new file mode 100755 index 0000000..8a3c726 --- /dev/null +++ b/docker/performance/entrypoints/iperf3-entrypoint.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# iperf3-entrypoint.sh – Network throughput benchmark entrypoint. +# Environment variables: +# IPERF3_SERVER Server host/IP (required) +# IPERF3_PORT Server port (default: 5201) +# IPERF3_DURATION Test duration in seconds (default: 30) +# IPERF3_PARALLEL Parallel streams (default: 4) +# RESULT_DIR Output directory (default: /results) + +set -euo pipefail + +SERVER="${IPERF3_SERVER:-localhost}" +PORT="${IPERF3_PORT:-5201}" +DURATION="${IPERF3_DURATION:-30}" +PARALLEL="${IPERF3_PARALLEL:-4}" +OUT_DIR="${RESULT_DIR:-/results}" +TIMESTAMP=$(date -u +%Y%m%dT%H%M%SZ) +OUT_FILE="${OUT_DIR}/iperf3-${TIMESTAMP}.json" + +mkdir -p "$OUT_DIR" + +echo "Running iperf3 benchmark against ${SERVER}:${PORT}..." +echo " Duration: ${DURATION}s, Parallel streams: ${PARALLEL}" + +iperf3 \ + --client "$SERVER" \ + --port "$PORT" \ + --time "$DURATION" \ + --parallel "$PARALLEL" \ + --json \ + > "$OUT_FILE" + +# Print a human-readable summary from the JSON output. +SENT_MBPS=$(jq '.end.sum_sent.bits_per_second / 1e6' "$OUT_FILE") +RECV_MBPS=$(jq '.end.sum_received.bits_per_second / 1e6' "$OUT_FILE") +echo "Sent: ${SENT_MBPS} Mbps" +echo "Received: ${RECV_MBPS} Mbps" +echo "Results written to ${OUT_FILE}" diff --git a/docker/performance/entrypoints/stress-ng-entrypoint.sh b/docker/performance/entrypoints/stress-ng-entrypoint.sh new file mode 100755 index 0000000..8eefd04 --- /dev/null +++ b/docker/performance/entrypoints/stress-ng-entrypoint.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# stress-ng-entrypoint.sh – CPU / memory / I/O stress test entrypoint. 
+# Environment variables:
+#   STRESS_TIMEOUT    Duration (default: 60s)
+#   STRESS_CPU        Number of CPU stressors (default: number of CPUs)
+#   STRESS_VM         Number of VM stressors (default: 1)
+#   STRESS_VM_BYTES   Memory per VM stressor (default: 256m)
+#   RESULT_DIR        Output directory (default: /results)
+
+set -euo pipefail
+
+TIMEOUT="${STRESS_TIMEOUT:-60s}"
+CPU_WORKERS="${STRESS_CPU:-$(nproc)}"
+VM_WORKERS="${STRESS_VM:-1}"
+VM_BYTES="${STRESS_VM_BYTES:-256m}"
+OUT_DIR="${RESULT_DIR:-/results}"
+TIMESTAMP=$(date -u +%Y%m%dT%H%M%SZ)
+OUT_FILE="${OUT_DIR}/stress-ng-${TIMESTAMP}.json"
+
+mkdir -p "$OUT_DIR"
+
+echo "Running stress-ng for ${TIMEOUT}..."
+echo "  CPU workers: ${CPU_WORKERS}, VM workers: ${VM_WORKERS} (${VM_BYTES} each)"
+
+stress-ng \
+  --cpu "$CPU_WORKERS" \
+  --vm "$VM_WORKERS" \
+  --vm-bytes "$VM_BYTES" \
+  --timeout "$TIMEOUT" \
+  --metrics-brief \
+  --yaml "/tmp/stress-ng-metrics.yaml" 2>&1 | tee /tmp/stress-ng-raw.txt
+
+# Convert YAML metrics to JSON (stress-ng doesn't produce JSON natively). OUT_FILE must be placed in the child's environment or os.environ cannot see it and the timestamped filename is silently lost.
+OUT_FILE="$OUT_FILE" python3 - << 'PYEOF'
+import yaml, json, sys, os
+
+yaml_file = "/tmp/stress-ng-metrics.yaml"
+out_file = os.environ.get("OUT_FILE", "/results/stress-ng.json")
+
+try:
+    with open(yaml_file) as f:
+        data = yaml.safe_load(f)
+    with open(out_file, "w") as f:
+        json.dump(data, f, indent=2)
+    print(f"Results written to {out_file}")
+except Exception as e:
+    print(f"Warning: could not convert metrics: {e}", file=sys.stderr)
+PYEOF
diff --git a/docker/performance/entrypoints/sysbench-entrypoint.sh b/docker/performance/entrypoints/sysbench-entrypoint.sh
new file mode 100755
index 0000000..ad4f9ef
--- /dev/null
+++ b/docker/performance/entrypoints/sysbench-entrypoint.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+# sysbench-entrypoint.sh – CPU / memory benchmark entrypoint using sysbench.
+# Environment variables: +# SYSBENCH_TEST Test name: cpu | memory | fileio (default: cpu) +# SYSBENCH_THREADS Number of threads (default: number of CPUs) +# SYSBENCH_TIME Test duration in seconds (default: 60) +# RESULT_DIR Output directory (default: /results) + +set -euo pipefail + +TEST="${SYSBENCH_TEST:-cpu}" +THREADS="${SYSBENCH_THREADS:-$(nproc)}" +TIME_SEC="${SYSBENCH_TIME:-60}" +OUT_DIR="${RESULT_DIR:-/results}" +TIMESTAMP=$(date -u +%Y%m%dT%H%M%SZ) +OUT_FILE="${OUT_DIR}/sysbench-${TEST}-${TIMESTAMP}.json" + +mkdir -p "$OUT_DIR" + +echo "Running sysbench ${TEST} test (${THREADS} threads, ${TIME_SEC}s)..." + +RAW=$(sysbench "$TEST" \ + --threads="$THREADS" \ + --time="$TIME_SEC" \ + run 2>&1) + +echo "$RAW" + +# Extract common metrics. +EVENTS=$(echo "$RAW" | grep 'total number of events' | awk '{print $NF}') +LATAVG=$(echo "$RAW" | grep 'avg:' | awk '{print $2}') +LATMIN=$(echo "$RAW" | grep 'min:' | awk '{print $2}') +LATMAX=$(echo "$RAW" | grep 'max:' | awk '{print $2}') +EPS=$(echo "$RAW" | grep 'events per second' | awk '{print $NF}') + +jq -n \ + --arg ts "$TIMESTAMP" \ + --arg test "$TEST" \ + --arg threads "$THREADS" \ + --arg time "$TIME_SEC" \ + --arg events "${EVENTS:-0}" \ + --arg eps "${EPS:-0}" \ + --arg lat_min "${LATMIN:-0}" \ + --arg lat_avg "${LATAVG:-0}" \ + --arg lat_max "${LATMAX:-0}" \ + '{ + timestamp: $ts, + tool: "sysbench", + test: $test, + threads: ($threads | tonumber), + duration_sec: ($time | tonumber), + total_events: ($events | tonumber), + events_per_sec: ($eps | tonumber), + latency_ms: { + min: ($lat_min | tonumber), + avg: ($lat_avg | tonumber), + max: ($lat_max | tonumber) + } + }' > "$OUT_FILE" + +echo "Results written to ${OUT_FILE}" diff --git a/docker/performance/entrypoints/wrk-entrypoint.sh b/docker/performance/entrypoints/wrk-entrypoint.sh new file mode 100755 index 0000000..1abbbc5 --- /dev/null +++ b/docker/performance/entrypoints/wrk-entrypoint.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# wrk-entrypoint.sh – HTTP 
benchmark entrypoint. +# Environment variables: +# WRK_URL Target URL (required) +# WRK_THREADS Number of threads (default: 4) +# WRK_CONNECTIONS Number of connections (default: 100) +# WRK_DURATION Test duration (default: 30s) +# RESULT_DIR Output directory (default: /results) + +set -euo pipefail + +URL="${WRK_URL:-http://localhost/}" +THREADS="${WRK_THREADS:-4}" +CONNECTIONS="${WRK_CONNECTIONS:-100}" +DURATION="${WRK_DURATION:-30s}" +OUT_DIR="${RESULT_DIR:-/results}" +TIMESTAMP=$(date -u +%Y%m%dT%H%M%SZ) +OUT_FILE="${OUT_DIR}/wrk-${TIMESTAMP}.json" + +mkdir -p "$OUT_DIR" + +echo "Running wrk benchmark against ${URL}..." +echo " Threads: ${THREADS}, Connections: ${CONNECTIONS}, Duration: ${DURATION}" + +RAW=$(wrk -t"$THREADS" -c"$CONNECTIONS" -d"$DURATION" \ + --latency "$URL" 2>&1) + +echo "$RAW" + +# Parse key metrics and emit a JSON result. +REQUESTS=$(echo "$RAW" | grep 'Requests/sec' | awk '{print $2}') +LATENCY_AVG=$(echo "$RAW" | grep 'Latency' | head -1 | awk '{print $2}') +TRANSFER=$(echo "$RAW" | grep 'Transfer/sec' | awk '{print $2}') + +jq -n \ + --arg ts "$TIMESTAMP" \ + --arg url "$URL" \ + --arg threads "$THREADS" \ + --arg conns "$CONNECTIONS" \ + --arg duration "$DURATION" \ + --arg req_sec "${REQUESTS:-0}" \ + --arg lat_avg "${LATENCY_AVG:-0}" \ + --arg transfer "${TRANSFER:-0}" \ + '{ + timestamp: $ts, + tool: "wrk", + target_url: $url, + threads: ($threads | tonumber), + connections: ($conns | tonumber), + duration: $duration, + requests_per_sec: ($req_sec | tonumber), + latency_avg: $lat_avg, + transfer_per_sec: $transfer + }' > "$OUT_FILE" + +echo "Results written to ${OUT_FILE}" diff --git a/hat/config/hat-config.json b/hat/config/hat-config.json new file mode 100644 index 0000000..43a09b3 --- /dev/null +++ b/hat/config/hat-config.json @@ -0,0 +1,22 @@ +{ + "version": "1.0", + "description": "GP2040-CE Latency Test HAT configuration", + "gpio": { + "button_signal_pin": 4, + "trigger_output_pin": 17, + "status_led_pin": 27, + "active_high": 
true, + "debounce_ms": 1 + }, + "usb": { + "gp2040ce_vid": "0x2E8A", + "gp2040ce_pid": "0x0005" + }, + "latency_test": { + "sample_count": 1000, + "warmup_samples": 50, + "trigger_interval_ms": 100, + "timeout_ms": 500, + "results_dir": "/opt/performancenode/results/latency" + } +} diff --git a/hat/latency_test.py b/hat/latency_test.py new file mode 100644 index 0000000..4b47d86 --- /dev/null +++ b/hat/latency_test.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 +""" +latency_test.py – GP2040-CE input-latency measurement script. + +This script uses the custom HAT to measure the end-to-end input latency of a +GP2040-CE gamepad: + + 1. Assert TRIGGER_OUTPUT_PIN (GPIO 17) HIGH – this signal is wired to a + button input on the GP2040-CE device via the HAT. + 2. Record t0 at the moment the trigger goes high. + 3. Wait for BUTTON_SIGNAL_PIN (GPIO 4) to go HIGH – the GP2040-CE firmware + asserts this pin when it has processed and re-transmitted the button press + over USB HID. + 4. Record t1 when the signal is detected. + 5. latency = t1 – t0 + 6. Repeat for N samples, compute statistics, write JSON results. + +Usage: + python3 latency_test.py [--config /path/to/hat-config.json] [--output /path/to/results] +""" + +import argparse +import json +import os +import statistics +import sys +import time +from pathlib import Path +from typing import List + +try: + import RPi.GPIO as GPIO # type: ignore +except ImportError: + # Allow the script to be imported / unit-tested on non-Pi hosts. 
+ GPIO = None # type: ignore + + +CONFIG_DEFAULT = "/etc/performancenode/hat/hat-config.json" + + +def load_config(path: str) -> dict: + with open(path) as f: + return json.load(f) + + +def setup_gpio(cfg: dict) -> None: + if GPIO is None: + raise RuntimeError("RPi.GPIO is not available on this platform.") + GPIO.setmode(GPIO.BCM) + gpio = cfg["gpio"] + GPIO.setup(gpio["trigger_output_pin"], GPIO.OUT, initial=GPIO.LOW) + GPIO.setup(gpio["button_signal_pin"], GPIO.IN, pull_up_down=GPIO.PUD_DOWN) + GPIO.setup(gpio["status_led_pin"], GPIO.OUT, initial=GPIO.LOW) + + +def measure_single(cfg: dict, timeout_sec: float) -> float | None: + """ + Trigger one button press and return the measured latency in milliseconds, + or None if the signal was not detected within the timeout. + """ + gpio = cfg["gpio"] + trigger_pin = gpio["trigger_output_pin"] + signal_pin = gpio["button_signal_pin"] + active_high = gpio["active_high"] + + trigger_level = GPIO.HIGH if active_high else GPIO.LOW + detect_level = GPIO.HIGH if active_high else GPIO.LOW + + # Assert trigger. + GPIO.output(trigger_pin, trigger_level) + t0 = time.perf_counter() + + # Wait for signal pin. + detected = False + while (time.perf_counter() - t0) < timeout_sec: + if GPIO.input(signal_pin) == detect_level: + t1 = time.perf_counter() + detected = True + break + + # Release trigger. 
+ GPIO.output(trigger_pin, GPIO.LOW if active_high else GPIO.HIGH) + + if not detected: + return None + return (t1 - t0) * 1000.0 # ms + + +def run_latency_test(config_path: str, output_dir: str) -> dict: + cfg = load_config(config_path) + lt = cfg["latency_test"] + + sample_count = lt["sample_count"] + warmup_samples = lt["warmup_samples"] + interval_ms = lt["trigger_interval_ms"] + timeout_ms = lt["timeout_ms"] + timeout_sec = timeout_ms / 1000.0 + interval_sec = interval_ms / 1000.0 + + setup_gpio(cfg) + + if GPIO is not None: + GPIO.output(cfg["gpio"]["status_led_pin"], GPIO.HIGH) + + samples: List[float] = [] + timeouts = 0 + + print(f"Running latency test: {warmup_samples} warmup + {sample_count} samples …") + + try: + total = warmup_samples + sample_count + for i in range(total): + latency = measure_single(cfg, timeout_sec) + if latency is None: + timeouts += 1 + if i >= warmup_samples: + print(f" Sample {i - warmup_samples + 1}: TIMEOUT", flush=True) + else: + if i >= warmup_samples: + samples.append(latency) + print(f" Sample {i - warmup_samples + 1}: {latency:.3f} ms", flush=True) + time.sleep(interval_sec) + finally: + if GPIO is not None: + GPIO.output(cfg["gpio"]["status_led_pin"], GPIO.LOW) + GPIO.cleanup() + + if not samples: + raise RuntimeError("No valid latency samples collected.") + + result = { + "timestamp": time.strftime("%Y%m%dT%H%M%SZ", time.gmtime()), + "tool": "latency_test", + "sample_count": len(samples), + "timeout_count": timeouts, + "latency_ms": { + "min": round(min(samples), 4), + "max": round(max(samples), 4), + "mean": round(statistics.mean(samples), 4), + "median": round(statistics.median(samples), 4), + "stdev": round(statistics.stdev(samples), 4) if len(samples) > 1 else 0.0, + "p95": round(sorted(samples)[int(len(samples) * 0.95)], 4), + "p99": round(sorted(samples)[int(len(samples) * 0.99)], 4), + }, + "raw_samples_ms": [round(s, 4) for s in samples], + } + + Path(output_dir).mkdir(parents=True, exist_ok=True) + out_file = 
os.path.join(output_dir, f"latency-{result['timestamp']}.json") + with open(out_file, "w") as f: + json.dump(result, f, indent=2) + + print(f"\nResults written to {out_file}") + print(f" Mean latency: {result['latency_ms']['mean']} ms") + print(f" Median latency: {result['latency_ms']['median']} ms") + print(f" p95 latency: {result['latency_ms']['p95']} ms") + print(f" p99 latency: {result['latency_ms']['p99']} ms") + print(f" Timeouts: {timeouts}") + + return result + + +def main() -> None: + parser = argparse.ArgumentParser(description="GP2040-CE latency measurement") + parser.add_argument("--config", default=CONFIG_DEFAULT, + help="Path to hat-config.json") + parser.add_argument("--output", default="/opt/performancenode/results/latency", + help="Output directory for result JSON files") + args = parser.parse_args() + + if not os.path.isfile(args.config): + print(f"Config file not found: {args.config}", file=sys.stderr) + sys.exit(1) + + run_latency_test(args.config, args.output) + + +if __name__ == "__main__": + main() diff --git a/scripts/setup-docker.sh b/scripts/setup-docker.sh new file mode 100755 index 0000000..1234e35 --- /dev/null +++ b/scripts/setup-docker.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# setup-docker.sh – Install and configure Docker Engine on Raspberry Pi 5 (arm64). +# Must be run as root. + +set -euo pipefail + +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m' +info() { echo -e "${GREEN}[INFO]${NC} $*"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } +error() { echo -e "${RED}[ERROR]${NC} $*" >&2; } + +if [[ "$EUID" -ne 0 ]]; then + error "Please run as root." + exit 1 +fi + +# ── Skip if Docker is already installed ─────────────────────────────────────── +if command -v docker &>/dev/null; then + warn "Docker is already installed ($(docker --version)). Skipping installation." +else + info "Adding Docker's official GPG key and repository..." 
+ install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/debian/gpg \ + -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + # Raspberry Pi OS is Debian-based; use the Debian repository. + DPKG_ARCH="$(dpkg --print-architecture)" + VERSION_CODENAME="$(. /etc/os-release && echo "$VERSION_CODENAME")" + + echo \ + "deb [arch=${DPKG_ARCH} signed-by=/etc/apt/keyrings/docker.asc] \ +https://download.docker.com/linux/debian \ +${VERSION_CODENAME} stable" \ + > /etc/apt/sources.list.d/docker.list + + apt-get update -y + apt-get install -y --no-install-recommends \ + docker-ce \ + docker-ce-cli \ + containerd.io \ + docker-buildx-plugin \ + docker-compose-plugin + + info "Docker installed: $(docker --version)" +fi + +# ── Service configuration ───────────────────────────────────────────────────── +info "Enabling and starting Docker service..." +systemctl enable docker +systemctl start docker + +# ── Add default non-root user to the docker group ───────────────────────────── +# Determine the primary non-root user (first user with UID >= 1000). +NON_ROOT_USER=$(getent passwd | awk -F: '$3 >= 1000 && $3 < 65534 {print $1; exit}') +if [[ -n "$NON_ROOT_USER" ]]; then + info "Adding '${NON_ROOT_USER}' to the 'docker' group..." + usermod -aG docker "$NON_ROOT_USER" + warn "User '${NON_ROOT_USER}' must log out and back in for group membership to take effect." +fi + +# ── Daemon configuration ────────────────────────────────────────────────────── +info "Writing /etc/docker/daemon.json..." +mkdir -p /etc/docker +cat > /etc/docker/daemon.json << 'EOF' +{ + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "3" + }, + "storage-driver": "overlay2", + "features": { + "buildkit": true + } +} +EOF + +systemctl restart docker + +info "Docker setup complete." 
diff --git a/scripts/setup-github-runner.sh b/scripts/setup-github-runner.sh new file mode 100755 index 0000000..6b082cb --- /dev/null +++ b/scripts/setup-github-runner.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# setup-github-runner.sh – Install a GitHub Actions self-hosted runner on the Pi. +# Must be run as root. +# +# Usage: +# sudo bash setup-github-runner.sh [--url REPO_URL] [--token REG_TOKEN] +# +# The runner runs as the 'github-runner' system user and is managed by systemd. + +set -euo pipefail + +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m' +info() { echo -e "${GREEN}[INFO]${NC} $*"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } +error() { echo -e "${RED}[ERROR]${NC} $*" >&2; } + +if [[ "$EUID" -ne 0 ]]; then + error "Please run as root." + exit 1 +fi + +# ── Defaults ────────────────────────────────────────────────────────────────── +RUNNER_VERSION="2.323.0" +RUNNER_USER="github-runner" +RUNNER_HOME="/opt/github-runner" +RUNNER_URL="" +RUNNER_TOKEN="" +RUNNER_NAME="performancenode-$(hostname)" +RUNNER_LABELS="self-hosted,Linux,ARM64,raspberry-pi-5,performancenode" +RUNNER_WORK_DIR="${RUNNER_HOME}/_work" + +# ── Argument parsing ────────────────────────────────────────────────────────── +while [[ $# -gt 0 ]]; do + case "$1" in + --url) RUNNER_URL="$2"; shift ;; + --token) RUNNER_TOKEN="$2"; shift ;; + --name) RUNNER_NAME="$2"; shift ;; + *) error "Unknown option: $1"; exit 1 ;; + esac + shift +done + +# ── Create runner user ──────────────────────────────────────────────────────── +if ! id "$RUNNER_USER" &>/dev/null; then + info "Creating system user '${RUNNER_USER}'..." + useradd -r -m -d "$RUNNER_HOME" -s /bin/bash "$RUNNER_USER" +fi + +# Add runner user to the docker group so workflows can use Docker. 
+if getent group docker &>/dev/null; then
+  usermod -aG docker "$RUNNER_USER"
+fi
+
+# ── Download runner package ─────────────────────────────────────────────────
+RUNNER_ARCHIVE="actions-runner-linux-arm64-${RUNNER_VERSION}.tar.gz"
+RUNNER_URL_DOWNLOAD="https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/${RUNNER_ARCHIVE}"
+
+mkdir -p "$RUNNER_HOME"
+
+if [[ ! -f "${RUNNER_HOME}/run.sh" ]]; then
+  info "Downloading GitHub Actions runner v${RUNNER_VERSION}..."
+  TMPFILE="$(mktemp)"
+  curl -fsSL "$RUNNER_URL_DOWNLOAD" -o "$TMPFILE"
+
+  info "Extracting runner archive..."
+  tar -xzf "$TMPFILE" -C "$RUNNER_HOME"
+  rm -f "$TMPFILE"
+
+  chown -R "${RUNNER_USER}:${RUNNER_USER}" "$RUNNER_HOME"
+else
+  warn "Runner binaries already present. Skipping download."
+fi
+
+# ── Install runner dependencies ─────────────────────────────────────────────
+info "Installing runner dependencies..."
+bash "${RUNNER_HOME}/bin/installdependencies.sh"
+
+# ── Configure runner ────────────────────────────────────────────────────────
+mkdir -p "$RUNNER_WORK_DIR"
+chown -R "${RUNNER_USER}:${RUNNER_USER}" "$RUNNER_WORK_DIR"
+
+if [[ -n "$RUNNER_URL" && -n "$RUNNER_TOKEN" ]]; then
+  info "Configuring runner for repository: ${RUNNER_URL}..."
+  sudo -u "$RUNNER_USER" \
+    "${RUNNER_HOME}/config.sh" \
+    --url "$RUNNER_URL" \
+    --token "$RUNNER_TOKEN" \
+    --name "$RUNNER_NAME" \
+    --labels "$RUNNER_LABELS" \
+    --work "$RUNNER_WORK_DIR" \
+    --unattended \
+    --replace
+else
+  warn "No --url / --token provided. Runner configuration skipped."
+  warn "Configure manually later with:"
+  warn " sudo -u ${RUNNER_USER} ${RUNNER_HOME}/config.sh --url <REPO_URL> --token <TOKEN>"
+fi
+
+# ── Install as systemd service ──────────────────────────────────────────────
+info "Installing runner as a systemd service..."
+ +cat > /etc/systemd/system/github-runner.service << EOF +[Unit] +Description=GitHub Actions Self-Hosted Runner +After=network.target docker.service +Wants=docker.service + +[Service] +Type=simple +User=${RUNNER_USER} +WorkingDirectory=${RUNNER_HOME} +ExecStart=${RUNNER_HOME}/run.sh +Restart=on-failure +RestartSec=5 +KillMode=process +KillSignal=SIGTERM +TimeoutStopSec=5min + +# Environment +Environment=RUNNER_ALLOW_RUNASROOT=0 + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable github-runner + +if [[ -n "$RUNNER_URL" && -n "$RUNNER_TOKEN" ]]; then + systemctl start github-runner + info "GitHub Actions runner service started." +else + warn "Runner service installed but NOT started (no configuration yet)." + warn "Start it after configuring with: sudo systemctl start github-runner" +fi + +info "GitHub Actions runner setup complete." +info "Runner home: ${RUNNER_HOME}" +info "Runner labels: ${RUNNER_LABELS}" diff --git a/scripts/setup-hat.sh b/scripts/setup-hat.sh new file mode 100755 index 0000000..6ec787b --- /dev/null +++ b/scripts/setup-hat.sh @@ -0,0 +1,152 @@ +#!/usr/bin/env bash +# setup-hat.sh – Configure the custom GP2040-CE latency-test HAT on Raspberry Pi 5. +# +# The HAT connects a GP2040-CE device (gamepad) via USB to the Pi and uses +# GPIO pins to detect button-press signals with microsecond precision, enabling +# end-to-end input-latency measurement: +# +# [PC sends button press over USB] → [GP2040-CE device] → [GPIO signal] +# → [Pi records timestamp] → latency = GPIO timestamp – USB send timestamp +# +# GPIO pin assignments (BCM numbering): +# GPIO 4 – Button signal input (active-high, 3.3 V logic) +# GPIO 17 – Trigger output (drives test signal into GP2040-CE device) +# GPIO 27 – Status LED +# +# Must be run as root. 
+
+set -euo pipefail
+
+RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m'
+info() { echo -e "${GREEN}[INFO]${NC} $*"; }
+warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
+error() { echo -e "${RED}[ERROR]${NC} $*" >&2; }
+
+if [[ "$EUID" -ne 0 ]]; then
+  error "Please run as root."
+  exit 1
+fi
+
+HAT_CONFIG_DIR="/etc/performancenode/hat"
+UDEV_RULES_FILE="/etc/udev/rules.d/99-gp2040ce.rules"
+RUNNER_USER="github-runner"
+
+# ── Install Python dependencies ─────────────────────────────────────────────
+info "Installing Python GPIO and USB libraries..."
+apt-get install -y --no-install-recommends \
+  python3-pip \
+  python3-venv \
+  libusb-1.0-0 \
+  libusb-1.0-0-dev \
+  udev
+
+VENV_DIR="/opt/performancenode/venv"
+mkdir -p "$(dirname "$VENV_DIR")"
+python3 -m venv "$VENV_DIR"
+"${VENV_DIR}/bin/pip" install --upgrade pip
+"${VENV_DIR}/bin/pip" install \
+  RPi.GPIO==0.7.1 \
+  pyusb==1.2.1 \
+  gpiozero==2.0.1 \
+  lgpio==0.2.2.0
+
+# ── GPIO permissions ────────────────────────────────────────────────────────
+info "Configuring GPIO access permissions..."
+# Ensure the gpio group exists and the runner user belongs to it.
+if ! getent group gpio &>/dev/null; then
+  groupadd --system gpio
+fi
+usermod -aG gpio "$RUNNER_USER" 2>/dev/null || true
+
+# Allow group-writable access to GPIO device nodes.
+# NOTE: /dev/gpiomem is a character device matched by KERNEL name; "gpiomem"
+# is not a udev SUBSYSTEM, so a SUBSYSTEM=="gpiomem" rule would never match.
+cat > /etc/udev/rules.d/99-gpio.rules << 'EOF'
+SUBSYSTEM=="gpio", GROUP="gpio", MODE="0660"
+KERNEL=="gpiomem", GROUP="gpio", MODE="0660"
+EOF
+
+# ── udev rule for GP2040-CE device ──────────────────────────────────────────
+info "Installing udev rule for GP2040-CE USB device..."
+# GP2040-CE presents itself as a generic HID gamepad.
+# VID 0x2E8A (Raspberry Pi / RP2040 bootloader) or vendor-specific.
+# Add both the production and BOOTSEL VIDs for convenience.
+cat > "$UDEV_RULES_FILE" << 'EOF' +# GP2040-CE – RP2040-based gamepad (production firmware) +SUBSYSTEM=="usb", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="0005", \ + GROUP="plugdev", MODE="0660", SYMLINK+="gp2040ce" + +# RP2040 BOOTSEL mode (for firmware flashing) +SUBSYSTEM=="usb", ATTRS{idVendor}=="2e8a", ATTRS{idProduct}=="0003", \ + GROUP="plugdev", MODE="0660", SYMLINK+="rp2040-boot" + +# Generic HID gamepad fallback +SUBSYSTEM=="input", ATTRS{name}=="GP2040*", GROUP="input", MODE="0660" +EOF + +usermod -aG plugdev "$RUNNER_USER" 2>/dev/null || true + +# ── HAT configuration file ──────────────────────────────────────────────────── +info "Writing HAT configuration to ${HAT_CONFIG_DIR}/hat-config.json..." +mkdir -p "$HAT_CONFIG_DIR" +cat > "${HAT_CONFIG_DIR}/hat-config.json" << 'EOF' +{ + "version": "1.0", + "description": "GP2040-CE Latency Test HAT configuration", + "gpio": { + "button_signal_pin": 4, + "trigger_output_pin": 17, + "status_led_pin": 27, + "active_high": true, + "debounce_ms": 1 + }, + "usb": { + "gp2040ce_vid": "0x2E8A", + "gp2040ce_pid": "0x0005" + }, + "latency_test": { + "sample_count": 1000, + "warmup_samples": 50, + "trigger_interval_ms": 100, + "timeout_ms": 500, + "results_dir": "/opt/performancenode/results/latency" + } +} +EOF +chmod 644 "${HAT_CONFIG_DIR}/hat-config.json" + +# ── Reload udev ─────────────────────────────────────────────────────────────── +info "Reloading udev rules..." +udevadm control --reload-rules +udevadm trigger + +# ── config.txt overlay ──────────────────────────────────────────────────────── +# Raspberry Pi 5 uses /boot/firmware/config.txt +BOOT_CONFIG="/boot/firmware/config.txt" +if [[ ! -f "$BOOT_CONFIG" ]]; then + BOOT_CONFIG="/boot/config.txt" +fi + +if [[ -f "$BOOT_CONFIG" ]]; then + if ! grep -q "# PerformanceNode HAT" "$BOOT_CONFIG"; then + info "Adding HAT overlay settings to ${BOOT_CONFIG}..." 
+ cat >> "$BOOT_CONFIG" << 'EOF' + +# PerformanceNode HAT +dtparam=i2c_arm=on +dtparam=spi=on +# Increase USB current for HAT-powered GP2040-CE device +max_usb_current=1 +EOF + fi +else + warn "Boot config not found. Skipping dtparam configuration." +fi + +# ── Results directory ───────────────────────────────────────────────────────── +info "Creating results directories..." +mkdir -p /opt/performancenode/results/latency +mkdir -p /opt/performancenode/results/performance +if id "$RUNNER_USER" &>/dev/null; then + chown -R "${RUNNER_USER}:${RUNNER_USER}" /opt/performancenode/results +fi + +info "HAT setup complete. A reboot is required for all changes to take effect." diff --git a/scripts/setup-system.sh b/scripts/setup-system.sh new file mode 100755 index 0000000..56af83b --- /dev/null +++ b/scripts/setup-system.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash +# setup-system.sh – OS-level configuration for Raspberry Pi 5 PerformanceNode. +# Hardens the system, installs base packages, and applies performance tuning. +# Must be run as root. + +set -euo pipefail + +RED='\033[0;31m'; GREEN='\033[0;32m'; NC='\033[0m' +info() { echo -e "${GREEN}[INFO]${NC} $*"; } +error() { echo -e "${RED}[ERROR]${NC} $*" >&2; } + +if [[ "$EUID" -ne 0 ]]; then + error "Please run as root." + exit 1 +fi + +# ── Package updates ─────────────────────────────────────────────────────────── +info "Updating package lists and upgrading installed packages..." +apt-get update -y +apt-get upgrade -y --no-install-recommends + +# ── Base dependencies ───────────────────────────────────────────────────────── +info "Installing base packages..." +apt-get install -y --no-install-recommends \ + ca-certificates \ + curl \ + gnupg \ + git \ + jq \ + lsb-release \ + python3 \ + python3-pip \ + python3-venv \ + i2c-tools \ + libraspberrypi-bin \ + raspi-config \ + unzip \ + wget + +# ── Enable required kernel interfaces ───────────────────────────────────────── +info "Enabling I2C, SPI, and UART interfaces..." 
+raspi-config nonint do_i2c 0
+raspi-config nonint do_spi 0
+raspi-config nonint do_serial_hw 0 # Enable UART hardware
+raspi-config nonint do_serial_cons 1 # Disable serial console (keep hardware)
+
+# ── Performance / CPU governor ──────────────────────────────────────────────
+info "Setting CPU governor to 'performance'..."
+if [[ -f /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor ]]; then
+  for gov in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
+    echo performance > "$gov"
+  done
+fi
+
+# Persist across reboots via /etc/rc.local (guard: file may not exist on
+# modern Raspberry Pi OS, and sed on a missing file would abort under set -e)
+if [[ -f /etc/rc.local ]] && ! grep -q 'scaling_governor' /etc/rc.local; then
+  sed -i '/^exit 0/i # Set CPU governor to performance\nfor g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo performance > "$g"; done' /etc/rc.local
+fi
+
+# ── Swap configuration ──────────────────────────────────────────────────────
+info "Disabling swap to reduce latency jitter..."
+systemctl disable dphys-swapfile 2>/dev/null || true
+swapoff -a 2>/dev/null || true
+
+# ── Kernel parameters ───────────────────────────────────────────────────────
+info "Applying sysctl performance tuning..."
+cat > /etc/sysctl.d/99-performancenode.conf << 'EOF'
+# PerformanceNode – kernel tuning for low-latency / high-throughput testing
+
+# Increase file-descriptor limits
+fs.file-max = 2097152
+
+# TCP tuning
+net.core.rmem_max = 134217728
+net.core.wmem_max = 134217728
+net.ipv4.tcp_rmem = 4096 87380 134217728
+net.ipv4.tcp_wmem = 4096 65536 134217728
+net.core.netdev_max_backlog = 5000
+
+# Reduce swappiness (swap already disabled, but belt-and-suspenders)
+vm.swappiness = 0
+
+# Allow faster local port reuse
+net.ipv4.tcp_tw_reuse = 1
+net.ipv4.ip_local_port_range = 1024 65535
+EOF
+
+sysctl --system
+
+# ── Time synchronisation ────────────────────────────────────────────────────
+info "Configuring NTP (systemd-timesyncd)..."
+timedatectl set-ntp true + +# ── Hostname ────────────────────────────────────────────────────────────────── +DESIRED_HOSTNAME="performancenode" +CURRENT_HOSTNAME=$(hostname) +if [[ "$CURRENT_HOSTNAME" != "$DESIRED_HOSTNAME" ]]; then + info "Setting hostname to '${DESIRED_HOSTNAME}'..." + hostnamectl set-hostname "$DESIRED_HOSTNAME" + sed -i "s/$CURRENT_HOSTNAME/$DESIRED_HOSTNAME/g" /etc/hosts +fi + +info "System configuration complete." diff --git a/scripts/setup.sh b/scripts/setup.sh new file mode 100755 index 0000000..f4c5870 --- /dev/null +++ b/scripts/setup.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# setup.sh – Main entry-point for PerformanceNode Raspberry Pi 5 setup. +# Run as root (or with sudo) on a fresh Raspberry Pi OS Lite (64-bit) image. +# +# Usage: +# sudo bash setup.sh [OPTIONS] +# +# Options: +# --skip-docker Skip Docker installation +# --skip-runner Skip GitHub Actions runner installation +# --skip-hat Skip custom HAT setup +# --runner-url URL GitHub repository URL for the Actions runner +# --runner-token TOK Registration token for the Actions runner + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# ── Colour helpers ──────────────────────────────────────────────────────────── +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m' +info() { echo -e "${GREEN}[INFO]${NC} $*"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } +error() { echo -e "${RED}[ERROR]${NC} $*" >&2; } + +# ── Defaults ────────────────────────────────────────────────────────────────── +SKIP_DOCKER=false +SKIP_RUNNER=false +SKIP_HAT=false +RUNNER_URL="" +RUNNER_TOKEN="" + +# ── Argument parsing ────────────────────────────────────────────────────────── +while [[ $# -gt 0 ]]; do + case "$1" in + --skip-docker) SKIP_DOCKER=true ;; + --skip-runner) SKIP_RUNNER=true ;; + --skip-hat) SKIP_HAT=true ;; + --runner-url) RUNNER_URL="$2"; shift ;; + --runner-token) RUNNER_TOKEN="$2"; shift ;; + *) error "Unknown option: $1"; exit 1 ;; 
+ esac + shift +done + +# ── Root check ──────────────────────────────────────────────────────────────── +if [[ "$EUID" -ne 0 ]]; then + error "Please run as root: sudo bash $0" + exit 1 +fi + +info "=== PerformanceNode – Raspberry Pi 5 Setup ===" +info "Starting at $(date)" + +# ── Step 1: System ──────────────────────────────────────────────────────────── +info "Step 1/4 – Configuring system..." +bash "${SCRIPT_DIR}/setup-system.sh" + +# ── Step 2: Docker ──────────────────────────────────────────────────────────── +if [[ "$SKIP_DOCKER" == "false" ]]; then + info "Step 2/4 – Installing Docker..." + bash "${SCRIPT_DIR}/setup-docker.sh" +else + warn "Step 2/4 – Docker setup skipped." +fi + +# ── Step 3: GitHub Actions runner ───────────────────────────────────────────── +if [[ "$SKIP_RUNNER" == "false" ]]; then + info "Step 3/4 – Installing GitHub Actions self-hosted runner..." + RUNNER_ARGS=() + [[ -n "$RUNNER_URL" ]] && RUNNER_ARGS+=(--url "$RUNNER_URL") + [[ -n "$RUNNER_TOKEN" ]] && RUNNER_ARGS+=(--token "$RUNNER_TOKEN") + bash "${SCRIPT_DIR}/setup-github-runner.sh" "${RUNNER_ARGS[@]+"${RUNNER_ARGS[@]}"}" +else + warn "Step 3/4 – GitHub Actions runner setup skipped." +fi + +# ── Step 4: Custom HAT ──────────────────────────────────────────────────────── +if [[ "$SKIP_HAT" == "false" ]]; then + info "Step 4/4 – Configuring GP2040-CE latency-test HAT..." + bash "${SCRIPT_DIR}/setup-hat.sh" +else + warn "Step 4/4 – HAT setup skipped." +fi + +info "=== Setup complete. Please reboot the device. ===" +info " sudo reboot"