2 changes: 1 addition & 1 deletion .github/workflows/master.yml
@@ -4660,7 +4660,7 @@ jobs:
env:
COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
-VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }}
steps:
- name: Check out repository code
uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
100 changes: 2 additions & 98 deletions .github/workflows/pull_request.yml
@@ -850,100 +850,6 @@ jobs:
python3 -m praktika run 'Quick functional tests' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi

-stateless_tests_amd_asan_flaky_check:
-runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
-needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
-if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }}
-name: "Stateless tests (amd_asan, flaky check)"
-outputs:
-data: ${{ steps.run.outputs.DATA }}
-pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-steps:
-- name: Checkout code
-uses: actions/checkout@v4
-with:
-ref: ${{ env.CHECKOUT_REF }}
-
-- name: Setup
-uses: ./.github/actions/runner_setup
-- name: Docker setup
-uses: ./.github/actions/docker_setup
-with:
-test_name: "Stateless tests (amd_asan, flaky check)"
-
-- name: Prepare env script
-run: |
-rm -rf ./ci/tmp
-mkdir -p ./ci/tmp
-cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-export PYTHONPATH=./ci:.:
-
-cat > ./ci/tmp/workflow_job.json << 'EOF'
-${{ toJson(job) }}
-EOF
-cat > ./ci/tmp/workflow_status.json << 'EOF'
-${{ toJson(needs) }}
-EOF
-ENV_SETUP_SCRIPT_EOF
-
-- name: Run
-id: run
-run: |
-. ./ci/tmp/praktika_setup_env.sh
-set -o pipefail
-if command -v ts &> /dev/null; then
-python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-else
-python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-fi
-
-integration_tests_amd_asan_flaky:
-runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
-needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_asan]
-if: ${{ !cancelled() && !contains(needs.*.outputs.pipeline_status, 'failure') && !contains(needs.*.outputs.pipeline_status, 'undefined') && !contains(fromJson(needs.config_workflow.outputs.data).workflow_config.cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSk=') }}
-name: "Integration tests (amd_asan, flaky)"
-outputs:
-data: ${{ steps.run.outputs.DATA }}
-pipeline_status: ${{ steps.run.outputs.pipeline_status || 'undefined' }}
-steps:
-- name: Checkout code
-uses: actions/checkout@v4
-with:
-ref: ${{ env.CHECKOUT_REF }}
-
-- name: Setup
-uses: ./.github/actions/runner_setup
-- name: Docker setup
-uses: ./.github/actions/docker_setup
-with:
-test_name: "Integration tests (amd_asan, flaky)"
-
-- name: Prepare env script
-run: |
-rm -rf ./ci/tmp
-mkdir -p ./ci/tmp
-cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
-export PYTHONPATH=./ci:.:
-
-cat > ./ci/tmp/workflow_job.json << 'EOF'
-${{ toJson(job) }}
-EOF
-cat > ./ci/tmp/workflow_status.json << 'EOF'
-${{ toJson(needs) }}
-EOF
-ENV_SETUP_SCRIPT_EOF
-
-- name: Run
-id: run
-run: |
-. ./ci/tmp/praktika_setup_env.sh
-set -o pipefail
-if command -v ts &> /dev/null; then
-python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
-else
-python3 -m praktika run 'Integration tests (amd_asan, flaky)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
-fi
-
bugfix_validation_functional_tests:
runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
@@ -4612,7 +4518,7 @@ jobs:

finish_workflow:
runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
-needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_arm_tsan, build_amd_release, build_arm_release, quick_functional_tests, stateless_tests_amd_asan_flaky_check, integration_tests_amd_asan_flaky, bugfix_validation_functional_tests, bugfix_validation_integration_tests, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan]
+needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_asan, build_arm_binary, build_arm_tsan, build_amd_release, build_arm_release, quick_functional_tests, bugfix_validation_functional_tests, bugfix_validation_integration_tests, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_db_disk_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel_1_2, stateless_tests_amd_tsan_s3_storage_parallel_2_2, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_db_disk_old_analyzer_1_6, integration_tests_amd_asan_db_disk_old_analyzer_2_6, integration_tests_amd_asan_db_disk_old_analyzer_3_6, integration_tests_amd_asan_db_disk_old_analyzer_4_6, integration_tests_amd_asan_db_disk_old_analyzer_5_6, integration_tests_amd_asan_db_disk_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_amd_release, compatibility_check_arm_release, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_arm_asan_s3, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan]
if: ${{ always() }}
name: "Finish Workflow"
outputs:
@@ -4728,8 +4634,6 @@ jobs:
- build_amd_release
- build_arm_release
- quick_functional_tests
-- stateless_tests_amd_asan_flaky_check
-- integration_tests_amd_asan_flaky
- bugfix_validation_functional_tests
- bugfix_validation_integration_tests
- stateless_tests_amd_asan_distributed_plan_parallel_1_2
@@ -4833,7 +4737,7 @@
env:
COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
-VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }}
steps:
- name: Check out repository code
uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
2 changes: 1 addition & 1 deletion .github/workflows/release_builds.yml
@@ -1288,7 +1288,7 @@ jobs:
env:
COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
-VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+VERSION: ${{ fromJson(needs.config_workflow.outputs.data).JOB_KV_DATA.version.string }}
steps:
- name: Check out repository code
uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
2 changes: 1 addition & 1 deletion ci/defs/job_configs.py
@@ -1045,7 +1045,7 @@ class JobConfigs:
docker_keeper = Job.Config(
name=JobNames.DOCKER_KEEPER,
runs_on=RunnerLabels.STYLE_CHECK_AMD,
command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse",
command="python3 ./ci/jobs/docker_server.py --tag-type head --allow-build-reuse --push",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/docker_server.py",
6 changes: 3 additions & 3 deletions ci/jobs/docker_server.py
@@ -58,10 +58,10 @@ def docker_login(relogin: bool = True) -> None:
"docker system info | grep --quiet -E 'Username|Registry'"
):
Shell.check(
"docker login --username 'robotclickhouse' --password-stdin",
"docker login --username 'altinityinfra' --password-stdin",
strict=True,
stdin_str=Secret.Config(
"dockerhub_robot_password", type=Secret.Type.AWS_SSM_PARAMETER
"DOCKER_PASSWORD", type=Secret.Type.GH_SECRET
).get_value(),
encoding="utf-8",
)
@@ -348,7 +348,7 @@ def main():
push = True

image = DockerImageData(image_repo, image_path)
-tags = [f'{info.pr_number}-{version_dict["string"]}']
+tags = [f'{info.pr_number}-{version_dict["describe"]}']
repo_urls = {}
direct_urls: Dict[str, List[str]] = {}

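The docker_server.py hunks above change the Docker Hub account (robotclickhouse to altinityinfra), the secret source (an AWS SSM parameter to the DOCKER_PASSWORD GitHub Actions secret), and which version field ends up in the image tag ("string" to "describe"). For reference, here is a minimal sketch of the same --password-stdin login pattern without the praktika Shell/Secret helpers; the DOCKER_PASSWORD environment-variable plumbing is an assumption for illustration, not something this diff sets up:

import os
import subprocess

def docker_login(username: str = "altinityinfra") -> None:
    # Assumption: the CI job exports the GitHub secret as DOCKER_PASSWORD.
    password = os.environ["DOCKER_PASSWORD"]
    # Feeding the password on stdin keeps it out of argv, which other
    # processes on the runner (and some log collectors) can read.
    subprocess.run(
        ["docker", "login", "--username", username, "--password-stdin"],
        input=password.encode("utf-8"),
        check=True,
    )

This is also why both the old and new code pass the secret via stdin_str to Shell.check rather than interpolating it into the command string.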
125 changes: 125 additions & 0 deletions ci/jobs/integration_test_job.py
@@ -5,6 +5,9 @@
from pathlib import Path
from typing import List, Tuple

+import yaml # NOTE (strtgbb): Used for loading broken tests rules
+import re
+
from ci.jobs.scripts.find_tests import Targeting
from ci.jobs.scripts.integration_tests_configs import IMAGES_ENV, get_optimal_test_batch
from ci.praktika.info import Info
@@ -22,6 +25,106 @@
MAX_MEM_PER_WORKER = 11


+def get_broken_tests_rules(broken_tests_file_path: str) -> dict:
+if (
+not os.path.isfile(broken_tests_file_path)
+or os.path.getsize(broken_tests_file_path) == 0
+):
+raise ValueError(
+"There is something wrong with getting broken tests rules: "
+f"file '{broken_tests_file_path}' is empty or does not exist."
+)
+
+with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
+broken_tests = yaml.safe_load(broken_tests_file)
+
+compiled_rules = {"exact": {}, "pattern": {}}
+
+for test in broken_tests:
+regex = test.get("regex") is True
+rule = {
+"reason": test["reason"],
+}
+
+if test.get("message"):
+rule["message"] = re.compile(test["message"]) if regex else test["message"]
+
+if test.get("not_message"):
+rule["not_message"] = (
+re.compile(test["not_message"]) if regex else test["not_message"]
+)
+if test.get("check_types"):
+rule["check_types"] = test["check_types"]
+
+if regex:
+rule["regex"] = True
+compiled_rules["pattern"][re.compile(test["name"])] = rule
+else:
+compiled_rules["exact"][test["name"]] = rule
+
+return compiled_rules
+
+
+def test_is_known_fail(broken_tests_rules, test_name, test_logs, job_flags):
+matching_rules = []
+
+def matches_substring(substring, log, is_regex):
+if log is None:
+return False
+if is_regex:
+return bool(substring.search(log))
+return substring in log
+
+broken_tests_log = f"{temp_path}/broken_tests_handler.log"
+
+with open(broken_tests_log, "a") as log_file:
+
+log_file.write(f"Checking known broken tests for failed test: {test_name}\n")
+log_file.write("Potential matching rules:\n")
+exact_rule = broken_tests_rules["exact"].get(test_name)
+if exact_rule:
+log_file.write(f"{test_name} - {exact_rule}\n")
+matching_rules.append(exact_rule)
+
+for name_re, data in broken_tests_rules["pattern"].items():
+if name_re.fullmatch(test_name):
+log_file.write(f"{name_re} - {data}\n")
+matching_rules.append(data)
+
+if not matching_rules:
+return False
+
+log_file.write(f"First line of test logs: {test_logs.splitlines()[0]}\n")
+
+for rule_data in matching_rules:
+if rule_data.get("check_types") and not any(
+ct in job_flags for ct in rule_data["check_types"]
+):
+log_file.write(
+f"Skip rule: Check types didn't match: '{rule_data['check_types']}' not in '{job_flags}'\n"
+)
+continue # check_types didn't match → skip rule
+
+is_regex = rule_data.get("regex", False)
+not_message = rule_data.get("not_message")
+if not_message and matches_substring(not_message, test_logs, is_regex):
+log_file.write(
+f"Skip rule: Not message matched: '{rule_data['not_message']}'\n"
+)
+continue # not_message matched → skip rule
+message = rule_data.get("message")
+if message and not matches_substring(message, test_logs, is_regex):
+log_file.write(
+f"Skip rule: Message didn't match: '{rule_data['message']}'\n"
+)
+continue
+
+log_file.write(f"Matched rule: {rule_data}\n")
+return rule_data["reason"]
+
+return False
+
+
def _start_docker_in_docker():
with open("./ci/tmp/docker-in-docker.log", "w") as log_file:
dockerd_proc = subprocess.Popen(
@@ -533,6 +636,28 @@ def main():
)
attached_files.append("./ci/tmp/dmesg.log")

+broken_tests_rules = get_broken_tests_rules("tests/broken_tests.yaml")
+for result in test_results:
+if result.status == Result.StatusExtended.FAIL:
+try:
+known_fail_reason = test_is_known_fail(
+broken_tests_rules,
+result.name,
+result.info,
+job_params,
+)
+except Exception as e:
+print(f"Error getting known fail reason for result {result.name}: {e}")
+continue
+else:
+if not known_fail_reason:
+continue
+result.status = Result.StatusExtended.BROKEN
+result.info += f"\nMarked as broken: {known_fail_reason}"
+
+if os.path.exists(f"{temp_path}/broken_tests_handler.log"):
+attached_files.append(f"{temp_path}/broken_tests_handler.log")
+
R = Result.create_from(results=test_results, stopwatch=sw, files=attached_files)

if has_error:
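Taken together, the two helpers added above let the integration-test job demote known-bad failures: get_broken_tests_rules() compiles tests/broken_tests.yaml into exact-name and regex rules, and test_is_known_fail() returns a rule's reason when a failed test matches it. The following self-contained sketch shows the rule shape and the matching flow; the sample entries and test names are hypothetical, but the keys (name, reason, regex, message, not_message, check_types) are the ones the parser actually reads:

import re
import yaml

# Hypothetical rule entries; the real ones live in tests/broken_tests.yaml.
SAMPLE_RULES = """
- name: test_storage_s3/test.py::test_upload_after_check
  reason: flaky under asan
  message: Connection reset by peer   # only this failure mode is "known"
  check_types: [asan]                 # only when the job flags include asan
- name: test_replicated_.+::test_restart
  regex: true                         # name/message/not_message are regexes
  reason: known restart race
  not_message: assertion failed       # genuine assertion failures still FAIL
"""

def known_fail_reason(test_name, test_logs, job_flags):
    for rule in yaml.safe_load(SAMPLE_RULES):
        is_regex = rule.get("regex") is True

        def matches(pattern, text):
            return bool(re.search(pattern, text)) if is_regex else pattern in text

        name_ok = (
            bool(re.fullmatch(rule["name"], test_name))
            if is_regex
            else rule["name"] == test_name
        )
        if not name_ok:
            continue
        if rule.get("check_types") and not any(
            ct in job_flags for ct in rule["check_types"]
        ):
            continue  # rule is scoped to other build types
        if rule.get("not_message") and matches(rule["not_message"], test_logs):
            continue  # excluded failure mode: stays a real FAIL
        if rule.get("message") and not matches(rule["message"], test_logs):
            continue  # logs don't show the known failure mode
        return rule["reason"]
    return None

print(known_fail_reason(
    "test_replicated_merge_tree/test.py::test_restart",
    "server did not come back after restart",
    ["amd_asan"],
))  # -> known restart race

In main() above, any FAIL whose logs match a rule is re-marked Result.StatusExtended.BROKEN, and the one-line result.py change below keeps such results from failing the aggregated job.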
1 change: 1 addition & 0 deletions ci/praktika/result.py
@@ -129,6 +129,7 @@ def create_from(
Result.Status.SKIPPED,
Result.StatusExtended.OK,
Result.StatusExtended.SKIPPED,
+Result.StatusExtended.BROKEN,
):
continue
elif result.status in (
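The effect of this added status on aggregation, sketched with simplified stand-in statuses (the real enums and the failing-status branch live elsewhere in result.py and are not shown in this hunk):

# Simplified model of Result.create_from(): child results whose status is in
# the acceptable set are skipped; anything else fails the parent result.
ACCEPTABLE = {"success", "skipped", "ok", "broken"}  # "broken" is the new entry

def parent_status(child_statuses):
    return "failure" if any(s not in ACCEPTABLE for s in child_statuses) else "success"

assert parent_status(["ok", "broken"]) == "success"  # known-broken tests no longer fail the job
assert parent_status(["ok", "fail"]) == "failure"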