-
Notifications
You must be signed in to change notification settings - Fork 0
190 lines (167 loc) · 6.48 KB
/
test_evaluation_function.yml
File metadata and controls
190 lines (167 loc) · 6.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
---
# Reusable workflow: runs an evaluation function against database records
# and validates the responses from the test API endpoint.
name: Endpoint Validation Test

on:
  workflow_call:
    inputs:
      eval_function:
        description: 'Evaluation Function Name'
        required: true
        type: string
      sql_limit:
        description: 'Max number of records to fetch'
        required: false
        type: string
        # Quoted: this input is declared `type: string`, so the default must
        # be a YAML string — an unquoted 1000 parses as an integer and
        # mismatches the declared input type.
        default: '1000'
    secrets:
      TEST_API_ENDPOINT:
        description: 'API Endpoint URL to test'
        required: false
      DB_USER:
        required: false
      DB_PASSWORD:
        required: false
      DB_HOST:
        required: false
      DB_PORT:
        required: false
      DB_NAME:
        required: false
      GCP_SERVICE_ACCOUNT_KEY:
        required: false
      GCP_PROJECT_ID:
        required: false
jobs:
  run_test:
    runs-on: ubuntu-latest
    steps:
      # Check out the testing harness repository (not the calling repo).
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
          repository: 'lambda-feedback/Database-Testing'
          ref: main
          token: ${{ github.token }}
          path: Database-Testing

      - name: Set up Python Environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Dependencies
        working-directory: ./Database-Testing
        run: |
          set -euo pipefail
          pip install -r requirements.txt

      - name: Authenticate to GCP
        # v2 runs on Node 20; v1 uses the deprecated Node 16 runtime.
        uses: google-github-actions/auth@v2
        with:
          credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}

      - name: Run Evaluation Function Against Database
        id: run_script
        working-directory: ./Database-Testing
        env:
          DB_USER: ${{ secrets.DB_USER }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
          DB_HOST: ${{ secrets.DB_HOST }}
          DB_PORT: ${{ secrets.DB_PORT }}
          DB_NAME: ${{ secrets.DB_NAME }}
          GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
          GOOGLE_CREDENTIALS_JSON: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}
          # Caller-controlled values are passed through env rather than
          # interpolated with ${{ }} inside the script body, so a malicious
          # value cannot inject shell commands.
          TEST_API_ENDPOINT: ${{ secrets.TEST_API_ENDPOINT }}
          EVAL_FUNCTION: ${{ inputs.eval_function }}
          SQL_LIMIT: ${{ inputs.sql_limit }}
        run: |
          set -euo pipefail
          # Run the argparse-enabled script and capture exit code
          if ! python3 test_evaluation_function.py \
              --endpoint "$TEST_API_ENDPOINT" \
              --eval_function_name "$EVAL_FUNCTION" \
              --sql_limit "$SQL_LIMIT" \
              --grade_params_json ""; then
            echo "::error::Python script failed with non-zero exit code"
            exit 1
          fi
          # Verify report_data.json exists (needed for summary)
          if [ ! -f report_data.json ]; then
            echo "::error::report_data.json not found!"
            exit 1
          fi
          REPORT_DATA="$(cat report_data.json)"
          # Check for error status in the JSON response
          STATUS="$(echo "$REPORT_DATA" | jq -r '.status // "unknown"')"
          if [ "$STATUS" = "failed" ]; then
            ERROR_MSG="$(echo "$REPORT_DATA" | jq -r '.error // "Unknown error"')"
            echo "::error::Script returned failed status: ${ERROR_MSG}"
            exit 1
          fi
          # Check for specific validation errors to log to console
          ERROR_COUNT="$(echo "$REPORT_DATA" | jq -r '.number_of_errors // 0')"
          if [ "${ERROR_COUNT}" -gt 0 ]; then
            echo "::error file=test_evaluation_function.py::Test completed with ${ERROR_COUNT} errors."
            # Fail the job; the summary step below still runs because it
            # is guarded with `if: always()`.
            exit 1
          fi

      - name: 📊 Create Job Summary Report
        if: always() # Run even if the previous step failed/found errors
        working-directory: ./Database-Testing
        env:
          # Same injection-safe pattern as above: shell reads env vars,
          # never inline ${{ }} expressions.
          TEST_API_ENDPOINT: ${{ secrets.TEST_API_ENDPOINT }}
          EVAL_FUNCTION: ${{ inputs.eval_function }}
          PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
        run: |
          set -euo pipefail
          if [ ! -f report_data.json ]; then
            echo "## ❌ Evaluation Function Report" >> "$GITHUB_STEP_SUMMARY"
            echo "**Status:** Failed - No report data generated" >> "$GITHUB_STEP_SUMMARY"
            exit 0
          fi
          REPORT_DATA="$(cat report_data.json)"
          # Check if this is an error response
          STATUS="$(echo "$REPORT_DATA" | jq -r '.status // "unknown"')"
          if [ "$STATUS" = "failed" ]; then
            ERROR_MSG="$(echo "$REPORT_DATA" | jq -r '.error // "Unknown error"')"
            {
              echo "## ❌ Endpoint Validation Report"
              echo "---"
              echo "**Status:** Failed"
              echo "**Error:** ${ERROR_MSG}"
              echo "---"
            } >> "$GITHUB_STEP_SUMMARY"
            exit 0
          fi
          PASSES="$(echo "$REPORT_DATA" | jq -r '.pass_count // 0')"
          TOTAL="$(echo "$REPORT_DATA" | jq -r '.total_count // 0')"
          ERRORS="$(echo "$REPORT_DATA" | jq -r '.number_of_errors // 0')"
          FIRESTORE_LINK="$(echo "$REPORT_DATA" | jq -r '.firestore_link // ""')"
          FIRESTORE_DOC_ID="$(echo "$REPORT_DATA" | jq -r '.firestore_doc_id // ""')"
          if [ "$TOTAL" -gt 0 ]; then
            # Multiply before dividing: with scale=2, dividing first
            # truncates the quotient to 2 decimals and loses precision
            # (e.g. 999/1000 would report 99.00 instead of 99.90).
            PASS_RATE="$(echo "scale=2; $PASSES * 100 / $TOTAL" | bc -l)"
          else
            PASS_RATE="0"
          fi
          STATUS_EMOJI="✅"
          if [ "$ERRORS" -gt 0 ]; then
            STATUS_EMOJI="❌"
          fi
          {
            echo "## ${STATUS_EMOJI} Endpoint Validation Report"
            echo "---"
            echo "**Endpoint:** ${TEST_API_ENDPOINT}"
            echo "**Evaluation Function:** ${EVAL_FUNCTION}"
            echo ""
            echo "| Metric | Value |"
            echo "| :--- | :--- |"
            echo "| **Total Tests** | ${TOTAL} |"
            echo "| **Passed** | ${PASSES} |"
            echo "| **Failed** | **${ERRORS}** |"
            echo "| **Pass Rate** | ${PASS_RATE}% |"
            echo "---"
            # Add Firestore link if available
            if [ -n "$FIRESTORE_LINK" ]; then
              echo ""
              echo "### 📋 Detailed Results"
              echo ""
              echo "🔗 **[View full results in Firestore](${FIRESTORE_LINK})**"
              echo ""
              echo "<details>"
              echo "<summary>Firestore Document Details</summary>"
              echo ""
              echo "- **Document ID:** \`${FIRESTORE_DOC_ID}\`"
              echo "- **Collection:** \`test-results\`"
              echo "- **Project:** ${PROJECT_ID}"
              echo ""
              echo "</details>"
            fi
            echo "---"
          } >> "$GITHUB_STEP_SUMMARY"