diff --git a/.github/workflows/README.md b/.github/workflows/README.md
index effb4a3..e04b6b5 100644
--- a/.github/workflows/README.md
+++ b/.github/workflows/README.md
@@ -8,18 +8,18 @@ The monorepo uses a combination of engine-specific workflows, path-aware compone
### Workflow Overview
-| Workflow | Purpose | Triggers |
-| --- | --- | --- |
-| `ci.yml` | Engine test and build validation | Push, PR, manual |
-| `lint.yml` | Engine lint and type checks | Push, PR |
-| `security.yml` | Engine dependency and filesystem security checks | Push, PR, schedule |
-| `monorepo-ci.yml` | Path-aware builds for engine, desktop, website, orchestration utils, and VS Code extension | Push, PR, manual |
-| `release.yml` | Engine package release | Tags, manual |
-| `release-desktop.yml` | Desktop release build | Tags, manual |
-| `release-website.yml` | Website release build | Tags, manual |
-| `release-vscode-extension.yml` | VS Code extension release packaging | Tags, manual |
-| `release-orchestration-utils.yml` | Shared utility package release build | Tags, manual |
-| `deploy-autopr-engine.yml` | Engine container build and Azure deployment | Push to `master`, PR, manual |
+| Workflow | Purpose | Triggers |
+| --------------------------------- | ------------------------------------------------------------------------------------------ | ---------------------------- |
+| `ci.yml` | Engine test and build validation | Push, PR, manual |
+| `lint.yml` | Engine lint and type checks | Push, PR |
+| `security.yml` | Engine dependency and filesystem security checks | Push, PR, schedule |
+| `monorepo-ci.yml` | Path-aware builds for engine, desktop, website, orchestration utils, and VS Code extension | Push, PR, manual |
+| `release.yml` | Engine package release | Tags, manual |
+| `release-desktop.yml` | Desktop release build | Tags, manual |
+| `release-website.yml` | Website release build | Tags, manual |
+| `release-vscode-extension.yml` | VS Code extension release packaging | Tags, manual |
+| `release-orchestration-utils.yml` | Shared utility package release build | Tags, manual |
+| `deploy-autopr-engine.yml` | Engine container build and Azure deployment | Push to `master`, PR, manual |
## Workflow Details
@@ -143,17 +143,17 @@ env:
Set these in GitHub repository settings:
-| Variable | Description | Default |
-| ----------------------- | ------------------------ | ------- |
+| Variable | Description | Default |
+| ------------------------- | ------------------------ | ------- |
| `CODEFLOW_VOLUME_PR` | Volume for pull requests | 100 |
| `CODEFLOW_VOLUME_CHECKIN` | Volume for pushes | 50 |
| `CODEFLOW_VOLUME_DEV` | Volume for development | 200 |
### Environment Variables
-| Variable | Description | Default |
-| ------------------------- | ------------------------- | ------- |
-| `PYTHON_VERSION` | Python version to use | 3.13 |
+| Variable | Description | Default |
+| --------------------------- | ------------------------- | ------- |
+| `PYTHON_VERSION` | Python version to use | 3.13 |
| `CODEFLOW_PRECOMMIT_VOLUME` | Pre-commit volume | 100 |
| `CODEFLOW_BG_BATCH` | Background fix batch size | 30 |
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a5954f6..71e24e6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,15 +3,15 @@ name: CI
on:
workflow_dispatch:
push:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/ci.yml'
+ - "engine/**"
+ - ".github/workflows/ci.yml"
pull_request:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/ci.yml'
+ - "engine/**"
+ - ".github/workflows/ci.yml"
jobs:
test:
@@ -21,59 +21,59 @@ jobs:
working-directory: engine
strategy:
matrix:
- python-version: ['3.12', '3.13']
-
+ python-version: ["3.12", "3.13"]
+
steps:
- - uses: actions/checkout@v4
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
- with:
- python-version: ${{ matrix.python-version }}
- cache: 'pip'
-
- - name: Install Poetry
- uses: snok/install-poetry@v1
- with:
- version: latest
- virtualenvs-create: true
- virtualenvs-in-project: true
-
- - name: Install dependencies
- run: |
- poetry install --with dev --no-root
-
- - name: Install package
- run: poetry install --no-dev
-
- - name: Run tests with coverage
- run: |
- poetry run pytest --cov=codeflow_engine --cov-report=xml --cov-report=term --cov-report=html
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v4
- with:
- file: ./engine/coverage.xml
- flags: unittests
- name: codecov-umbrella
- fail_ci_if_error: false
- token: ${{ secrets.CODECOV_TOKEN }}
-
- - name: Check coverage threshold
- run: |
- poetry run coverage report --fail-under=70 || echo "Coverage below 70% - this is a warning, not a failure"
-
- - name: Upload coverage HTML report
- uses: actions/upload-artifact@v4
- if: always()
- with:
- name: coverage-report-${{ matrix.python-version }}
- path: engine/htmlcov/
-
- - name: Run linting
- run: |
- poetry run ruff check .
- poetry run mypy codeflow_engine
+ - uses: actions/checkout@v4
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: "pip"
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ with:
+ version: latest
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+
+ - name: Install dependencies
+ run: |
+ poetry install --with dev --no-root
+
+ - name: Install package
+        run: poetry install --without dev
+
+ - name: Run tests with coverage
+ run: |
+ poetry run pytest --cov=codeflow_engine --cov-report=xml --cov-report=term --cov-report=html
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+          files: ./engine/coverage.xml
+ flags: unittests
+ name: codecov-umbrella
+ fail_ci_if_error: false
+ token: ${{ secrets.CODECOV_TOKEN }}
+
+ - name: Check coverage threshold
+ run: |
+ poetry run coverage report --fail-under=70 || echo "Coverage below 70% - this is a warning, not a failure"
+
+ - name: Upload coverage HTML report
+ uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: coverage-report-${{ matrix.python-version }}
+ path: engine/htmlcov/
+
+ - name: Run linting
+ run: |
+ poetry run ruff check .
+ poetry run mypy codeflow_engine
build:
runs-on: ubuntu-latest
@@ -81,32 +81,31 @@ jobs:
defaults:
run:
working-directory: engine
-
+
steps:
- - uses: actions/checkout@v4
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: '3.12'
- cache: 'pip'
-
- - name: Install Poetry
- uses: snok/install-poetry@v1
- with:
- version: latest
- virtualenvs-create: true
- virtualenvs-in-project: true
-
- - name: Build package
- run: poetry build
-
- - name: Check package
- run: poetry check
-
- - name: Upload artifacts
- uses: actions/upload-artifact@v4
- with:
- name: engine-dist
- path: engine/dist/*
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ cache: "pip"
+
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ with:
+ version: latest
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+
+ - name: Build package
+ run: poetry build
+
+ - name: Check package
+ run: poetry check
+ - name: Upload artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: engine-dist
+ path: engine/dist/*
diff --git a/.github/workflows/deploy-autopr-engine.yml b/.github/workflows/deploy-autopr-engine.yml
index b51f36c..c28df14 100644
--- a/.github/workflows/deploy-autopr-engine.yml
+++ b/.github/workflows/deploy-autopr-engine.yml
@@ -5,15 +5,15 @@ on:
branches:
- master
paths:
- - 'engine/**'
- - '.github/workflows/deploy-autopr-engine.yml'
- - '.github/app-manifest.yml'
+ - "engine/**"
+ - ".github/workflows/deploy-autopr-engine.yml"
+ - ".github/app-manifest.yml"
pull_request:
paths:
- - 'engine/**'
- - '.github/workflows/deploy-autopr-engine.yml'
- - '.github/app-manifest.yml'
- - '.codeflow.yml'
+ - "engine/**"
+ - ".github/workflows/deploy-autopr-engine.yml"
+ - ".github/app-manifest.yml"
+ - ".codeflow.yml"
workflow_dispatch:
env:
@@ -189,9 +189,9 @@ jobs:
RESOURCE_GROUP="prod-rg-san-codeflow"
ENV_NAME="prod-codeflow-san-env"
CUSTOM_DOMAIN="app.codeflow.io"
-
+
echo "[*] Checking for existing managed certificates for domain: $CUSTOM_DOMAIN"
-
+
# Check if environment exists
if az containerapp env show -n $ENV_NAME -g $RESOURCE_GROUP &>/dev/null; then
echo "Environment exists, checking for duplicate certificates..."
@@ -259,7 +259,7 @@ jobs:
--name codeflow-engine \
--query properties.outputs \
--output json)
-
+
echo "container_app_url=$(echo $OUTPUTS | jq -r '.containerAppUrl.value')" >> $GITHUB_OUTPUT
echo "custom_domain=$(echo $OUTPUTS | jq -r '.customDomain.value')" >> $GITHUB_OUTPUT
echo "postgres_fqdn=$(echo $OUTPUTS | jq -r '.postgresFqdn.value')" >> $GITHUB_OUTPUT
@@ -281,4 +281,3 @@ jobs:
echo "3. Azure will automatically provision and bind the SSL certificate (5-15 minutes)"
echo ""
echo "[*] For troubleshooting, see: infrastructure/bicep/FAQ.md"
-
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index ba2904b..83497b3 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -2,15 +2,15 @@ name: Lint
on:
push:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/lint.yml'
+ - "engine/**"
+ - ".github/workflows/lint.yml"
pull_request:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/lint.yml'
+ - "engine/**"
+ - ".github/workflows/lint.yml"
jobs:
ruff:
@@ -21,26 +21,26 @@ jobs:
working-directory: engine
steps:
- uses: actions/checkout@v4
-
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
- cache: 'pip'
-
+ python-version: "3.12"
+ cache: "pip"
+
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: latest
virtualenvs-create: true
virtualenvs-in-project: true
-
+
- name: Install dependencies
run: poetry install --with dev --no-root
-
+
- name: Run Ruff
run: poetry run ruff check .
-
+
- name: Run Ruff format check
run: poetry run ruff format --check .
@@ -52,23 +52,22 @@ jobs:
working-directory: engine
steps:
- uses: actions/checkout@v4
-
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
- cache: 'pip'
-
+ python-version: "3.12"
+ cache: "pip"
+
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: latest
virtualenvs-create: true
virtualenvs-in-project: true
-
+
- name: Install dependencies
run: poetry install --with dev --no-root
-
+
- name: Run MyPy
run: poetry run mypy codeflow_engine --ignore-missing-imports
-
diff --git a/.github/workflows/monorepo-ci.yml b/.github/workflows/monorepo-ci.yml
index 5e15f29..0b162bb 100644
--- a/.github/workflows/monorepo-ci.yml
+++ b/.github/workflows/monorepo-ci.yml
@@ -1,156 +1,156 @@
name: Monorepo CI
on:
- workflow_dispatch:
- push:
- branches: [ master ]
- paths:
- - 'engine/**'
- - 'desktop/**'
- - 'website/**'
- - 'orchestration/**'
- - 'vscode-extension/**'
- - '.github/workflows/monorepo-ci.yml'
- pull_request:
- branches: [ master ]
- paths:
- - 'engine/**'
- - 'desktop/**'
- - 'website/**'
- - 'orchestration/**'
- - 'vscode-extension/**'
- - '.github/workflows/monorepo-ci.yml'
+ workflow_dispatch:
+ push:
+ branches: [master]
+ paths:
+ - "engine/**"
+ - "desktop/**"
+ - "website/**"
+ - "orchestration/**"
+ - "vscode-extension/**"
+ - ".github/workflows/monorepo-ci.yml"
+ pull_request:
+ branches: [master]
+ paths:
+ - "engine/**"
+ - "desktop/**"
+ - "website/**"
+ - "orchestration/**"
+ - "vscode-extension/**"
+ - ".github/workflows/monorepo-ci.yml"
jobs:
- changes:
- runs-on: ubuntu-latest
- outputs:
- engine: ${{ steps.filter.outputs.engine }}
- desktop: ${{ steps.filter.outputs.desktop }}
- website: ${{ steps.filter.outputs.website }}
- orchestration_utils: ${{ steps.filter.outputs.orchestration_utils }}
- vscode_extension: ${{ steps.filter.outputs.vscode_extension }}
- steps:
- - uses: actions/checkout@v4
- - name: Detect changed components
- id: filter
- uses: dorny/paths-filter@v3
- with:
- filters: |
- engine:
- - 'engine/**'
- - '.github/workflows/monorepo-ci.yml'
- desktop:
- - 'desktop/**'
- - '.github/workflows/monorepo-ci.yml'
- website:
- - 'website/**'
- - '.github/workflows/monorepo-ci.yml'
- orchestration_utils:
- - 'orchestration/packages/@codeflow/utils/**'
- - '.github/workflows/monorepo-ci.yml'
- vscode_extension:
- - 'vscode-extension/**'
- - '.github/workflows/monorepo-ci.yml'
+ changes:
+ runs-on: ubuntu-latest
+ outputs:
+ engine: ${{ steps.filter.outputs.engine }}
+ desktop: ${{ steps.filter.outputs.desktop }}
+ website: ${{ steps.filter.outputs.website }}
+ orchestration_utils: ${{ steps.filter.outputs.orchestration_utils }}
+ vscode_extension: ${{ steps.filter.outputs.vscode_extension }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Detect changed components
+ id: filter
+ uses: dorny/paths-filter@v3
+ with:
+ filters: |
+ engine:
+ - 'engine/**'
+ - '.github/workflows/monorepo-ci.yml'
+ desktop:
+ - 'desktop/**'
+ - '.github/workflows/monorepo-ci.yml'
+ website:
+ - 'website/**'
+ - '.github/workflows/monorepo-ci.yml'
+ orchestration_utils:
+ - 'orchestration/packages/@codeflow/utils/**'
+ - '.github/workflows/monorepo-ci.yml'
+ vscode_extension:
+ - 'vscode-extension/**'
+ - '.github/workflows/monorepo-ci.yml'
- engine:
- runs-on: ubuntu-latest
- needs: changes
- if: needs.changes.outputs.engine == 'true' || github.event_name == 'workflow_dispatch'
- defaults:
- run:
- working-directory: engine
- steps:
- - uses: actions/checkout@v4
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: '3.12'
- - name: Install Poetry
- run: python -m pip install poetry
- - name: Install engine dependencies
- run: poetry install --with dev --no-interaction
- - name: Validate engine package
- run: poetry run python -m compileall codeflow_engine
+ engine:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: needs.changes.outputs.engine == 'true' || github.event_name == 'workflow_dispatch'
+ defaults:
+ run:
+ working-directory: engine
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ - name: Install Poetry
+ run: python -m pip install poetry
+ - name: Install engine dependencies
+ run: poetry install --with dev --no-interaction
+ - name: Validate engine package
+ run: poetry run python -m compileall codeflow_engine
- desktop:
- runs-on: ubuntu-latest
- needs: changes
- if: needs.changes.outputs.desktop == 'true' || github.event_name == 'workflow_dispatch'
- defaults:
- run:
- working-directory: desktop
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: desktop/package-lock.json
- - name: Install desktop dependencies
- run: npm ci
- - name: Build desktop frontend
- run: npm run build
+ desktop:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: needs.changes.outputs.desktop == 'true' || github.event_name == 'workflow_dispatch'
+ defaults:
+ run:
+ working-directory: desktop
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: desktop/package-lock.json
+ - name: Install desktop dependencies
+ run: npm ci
+ - name: Build desktop frontend
+ run: npm run build
- website:
- runs-on: ubuntu-latest
- needs: changes
- if: needs.changes.outputs.website == 'true' || github.event_name == 'workflow_dispatch'
- defaults:
- run:
- working-directory: website
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: website/package-lock.json
- - name: Install website dependencies
- run: npm ci
- - name: Lint website
- run: npm run lint
- - name: Test website
- run: npm test
- - name: Build website
- run: npm run build
+ website:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: needs.changes.outputs.website == 'true' || github.event_name == 'workflow_dispatch'
+ defaults:
+ run:
+ working-directory: website
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: website/package-lock.json
+ - name: Install website dependencies
+ run: npm ci
+ - name: Lint website
+ run: npm run lint
+ - name: Test website
+ run: npm test
+ - name: Build website
+ run: npm run build
- orchestration-utils:
- runs-on: ubuntu-latest
- needs: changes
- if: needs.changes.outputs.orchestration_utils == 'true' || github.event_name == 'workflow_dispatch'
- defaults:
- run:
- working-directory: orchestration/packages/@codeflow/utils
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- - name: Install orchestration utility dependencies
- run: npm install
- - name: Build orchestration utilities
- run: npm run build
+ orchestration-utils:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: needs.changes.outputs.orchestration_utils == 'true' || github.event_name == 'workflow_dispatch'
+ defaults:
+ run:
+ working-directory: orchestration/packages/@codeflow/utils
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ - name: Install orchestration utility dependencies
+ run: npm install
+ - name: Build orchestration utilities
+ run: npm run build
- vscode-extension:
- runs-on: ubuntu-latest
- needs: changes
- if: needs.changes.outputs.vscode_extension == 'true' || github.event_name == 'workflow_dispatch'
- defaults:
- run:
- working-directory: vscode-extension
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: vscode-extension/package-lock.json
- - name: Install extension dependencies
- run: npm ci
- - name: Build extension
- run: npm run build
+ vscode-extension:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: needs.changes.outputs.vscode_extension == 'true' || github.event_name == 'workflow_dispatch'
+ defaults:
+ run:
+ working-directory: vscode-extension
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: vscode-extension/package-lock.json
+ - name: Install extension dependencies
+ run: npm ci
+ - name: Build extension
+ run: npm run build
diff --git a/.github/workflows/release-desktop.yml b/.github/workflows/release-desktop.yml
index 0a1d50a..cab28ea 100644
--- a/.github/workflows/release-desktop.yml
+++ b/.github/workflows/release-desktop.yml
@@ -1,34 +1,34 @@
name: Release Desktop
on:
- push:
- tags:
- - 'desktop-v*'
+ push:
+ tags:
+ - "desktop-v*"
jobs:
- release:
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: desktop
- permissions:
- contents: write
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: desktop/package-lock.json
- - name: Install dependencies
- run: npm ci
- - name: Build desktop app
- run: npm run build
- - name: Archive desktop build
- run: tar -czf ../desktop-build.tar.gz dist
- - name: Create GitHub release
- uses: softprops/action-gh-release@v2
- with:
- name: Desktop Release ${{ github.ref_name }}
- files: desktop-build.tar.gz
+ release:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: desktop
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: desktop/package-lock.json
+ - name: Install dependencies
+ run: npm ci
+ - name: Build desktop app
+ run: npm run build
+ - name: Archive desktop build
+ run: tar -czf ../desktop-build.tar.gz dist
+ - name: Create GitHub release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: Desktop Release ${{ github.ref_name }}
+ files: desktop-build.tar.gz
diff --git a/.github/workflows/release-orchestration-utils.yml b/.github/workflows/release-orchestration-utils.yml
index 1b570fe..1a1a5f8 100644
--- a/.github/workflows/release-orchestration-utils.yml
+++ b/.github/workflows/release-orchestration-utils.yml
@@ -1,32 +1,32 @@
name: Release Orchestration Utils
on:
- push:
- tags:
- - 'orchestration-utils-v*'
+ push:
+ tags:
+ - "orchestration-utils-v*"
jobs:
- release:
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: orchestration/packages/@codeflow/utils
- permissions:
- contents: write
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- - name: Install dependencies
- run: npm install
- - name: Build utilities package
- run: npm run build
- - name: Archive package build
- run: tar -czf ../../../../orchestration-utils-build.tar.gz dist package.json README.md LICENSE
- - name: Create GitHub release
- uses: softprops/action-gh-release@v2
- with:
- name: Orchestration Utils Release ${{ github.ref_name }}
- files: orchestration-utils-build.tar.gz
+ release:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: orchestration/packages/@codeflow/utils
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ - name: Install dependencies
+ run: npm install
+ - name: Build utilities package
+ run: npm run build
+ - name: Archive package build
+ run: tar -czf ../../../../orchestration-utils-build.tar.gz dist package.json README.md LICENSE
+ - name: Create GitHub release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: Orchestration Utils Release ${{ github.ref_name }}
+ files: orchestration-utils-build.tar.gz
diff --git a/.github/workflows/release-vscode-extension.yml b/.github/workflows/release-vscode-extension.yml
index caab254..d3aecc3 100644
--- a/.github/workflows/release-vscode-extension.yml
+++ b/.github/workflows/release-vscode-extension.yml
@@ -1,34 +1,34 @@
name: Release VS Code Extension
on:
- push:
- tags:
- - 'vscode-extension-v*'
+ push:
+ tags:
+ - "vscode-extension-v*"
jobs:
- release:
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: vscode-extension
- permissions:
- contents: write
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: vscode-extension/package-lock.json
- - name: Install dependencies
- run: npm ci
- - name: Build extension
- run: npm run build
- - name: Package extension
- run: npm run package
- - name: Create GitHub release
- uses: softprops/action-gh-release@v2
- with:
- name: VS Code Extension Release ${{ github.ref_name }}
- files: vscode-extension/*.vsix
+ release:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: vscode-extension
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: vscode-extension/package-lock.json
+ - name: Install dependencies
+ run: npm ci
+ - name: Build extension
+ run: npm run build
+ - name: Package extension
+ run: npm run package
+ - name: Create GitHub release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: VS Code Extension Release ${{ github.ref_name }}
+ files: vscode-extension/*.vsix
diff --git a/.github/workflows/release-website.yml b/.github/workflows/release-website.yml
index f05523e..98dd4a2 100644
--- a/.github/workflows/release-website.yml
+++ b/.github/workflows/release-website.yml
@@ -1,35 +1,35 @@
name: Release Website
on:
- push:
- tags:
- - 'website-v*'
+ push:
+ tags:
+ - "website-v*"
jobs:
- release:
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: website
- permissions:
- contents: write
- steps:
- - uses: actions/checkout@v4
- - name: Set up Node.js
- uses: actions/setup-node@v4
- with:
- node-version: '20'
- cache: 'npm'
- cache-dependency-path: website/package-lock.json
- - name: Install dependencies
- run: npm ci
- - name: Build website
- run: npm run build
- - name: Archive website build
- run: |
- if [ -d out ]; then tar -czf ../website-build.tar.gz out; else tar -czf ../website-build.tar.gz .next public package.json; fi
- - name: Create GitHub release
- uses: softprops/action-gh-release@v2
- with:
- name: Website Release ${{ github.ref_name }}
- files: website-build.tar.gz
+ release:
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: website
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+ cache: "npm"
+ cache-dependency-path: website/package-lock.json
+ - name: Install dependencies
+ run: npm ci
+ - name: Build website
+ run: npm run build
+ - name: Archive website build
+ run: |
+ if [ -d out ]; then tar -czf ../website-build.tar.gz out; else tar -czf ../website-build.tar.gz .next public package.json; fi
+ - name: Create GitHub release
+ uses: softprops/action-gh-release@v2
+ with:
+ name: Website Release ${{ github.ref_name }}
+ files: website-build.tar.gz
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1831652..b299611 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -3,7 +3,7 @@ name: Release
on:
push:
tags:
- - 'engine-v*'
+ - "engine-v*"
jobs:
release:
@@ -11,7 +11,7 @@ jobs:
permissions:
contents: write
pull-requests: write
-
+
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -21,7 +21,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version: "3.12"
- name: Extract version from tag
id: version
@@ -44,7 +44,7 @@ jobs:
run: |
VERSION="${{ steps.version.outputs.version }}"
PYPROJECT_VERSION=$(python -c "import tomllib; f=open('engine/pyproject.toml','rb'); d=tomllib.load(f); print(d['project']['version'])")
-
+
if [ "$VERSION" != "$PYPROJECT_VERSION" ]; then
echo "❌ Version mismatch:"
echo " Tag version: $VERSION"
@@ -90,19 +90,19 @@ jobs:
name: Release v${{ steps.version.outputs.version }}
body: |
## Release v${{ steps.version.outputs.version }}
-
+
**Release Date:** $(date +'%Y-%m-%d')
-
+
${{ steps.changelog.outputs.changelog }}
-
+
## Installation
-
+
```bash
pip install codeflow-engine==${{ steps.version.outputs.version }}
```
-
+
## Full Changelog
-
+
See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/master/engine/CHANGELOG.md) for full details.
files: |
engine/dist/*
@@ -115,4 +115,3 @@ jobs:
name: engine-release-artifacts
path: engine/dist/*
retention-days: 90
-
diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml
index de807ac..577ed3a 100644
--- a/.github/workflows/security.yml
+++ b/.github/workflows/security.yml
@@ -2,17 +2,17 @@ name: Security Scan
on:
push:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/security.yml'
+ - "engine/**"
+ - ".github/workflows/security.yml"
pull_request:
- branches: [ master, develop ]
+ branches: [master, develop]
paths:
- - 'engine/**'
- - '.github/workflows/security.yml'
+ - "engine/**"
+ - ".github/workflows/security.yml"
schedule:
- - cron: '0 0 * * 1' # Weekly on Monday
+ - cron: "0 0 * * 1" # Weekly on Monday
jobs:
bandit:
@@ -23,28 +23,28 @@ jobs:
working-directory: engine
steps:
- uses: actions/checkout@v4
-
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
- cache: 'pip'
-
+ python-version: "3.12"
+ cache: "pip"
+
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: latest
virtualenvs-create: true
virtualenvs-in-project: true
-
+
- name: Install dependencies
run: poetry install --with dev --no-root
-
+
- name: Run Bandit
run: |
poetry run bandit -r codeflow_engine -f json -o bandit-report.json || true
poetry run bandit -r codeflow_engine
-
+
- name: Upload Bandit report
if: always()
uses: actions/upload-artifact@v4
@@ -60,26 +60,26 @@ jobs:
working-directory: engine
steps:
- uses: actions/checkout@v4
-
+
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
- cache: 'pip'
-
+ python-version: "3.12"
+ cache: "pip"
+
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: latest
virtualenvs-create: true
virtualenvs-in-project: true
-
+
- name: Install dependencies
run: poetry install --with dev --no-root
-
+
- name: Export dependencies
run: poetry export -f requirements.txt --output requirements.txt --without-hashes
-
+
- name: Run Safety check
run: |
pip install safety
@@ -90,19 +90,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
-
+
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
- scan-type: 'fs'
- scan-ref: './engine'
- format: 'sarif'
- output: 'trivy-results.sarif'
- severity: 'CRITICAL,HIGH'
-
+ scan-type: "fs"
+ scan-ref: "./engine"
+ format: "sarif"
+ output: "trivy-results.sarif"
+ severity: "CRITICAL,HIGH"
+
- name: Upload Trivy results to GitHub Security
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
- sarif_file: 'trivy-results.sarif'
-
+ sarif_file: "trivy-results.sarif"
diff --git a/.github/workflows/validate-templates.yml b/.github/workflows/validate-templates.yml
index adcb410..17d3d47 100644
--- a/.github/workflows/validate-templates.yml
+++ b/.github/workflows/validate-templates.yml
@@ -6,18 +6,18 @@ name: Validate Templates
on:
push:
paths:
- - 'engine/templates/**/*.yml'
- - 'engine/templates/**/*.yaml'
- - 'engine/install.sh'
- - 'engine/install.ps1'
- - '.github/workflows/*.yml'
+ - "engine/templates/**/*.yml"
+ - "engine/templates/**/*.yaml"
+ - "engine/install.sh"
+ - "engine/install.ps1"
+ - ".github/workflows/*.yml"
pull_request:
paths:
- - 'engine/templates/**/*.yml'
- - 'engine/templates/**/*.yaml'
- - 'engine/install.sh'
- - 'engine/install.ps1'
- - '.github/workflows/*.yml'
+ - "engine/templates/**/*.yml"
+ - "engine/templates/**/*.yaml"
+ - "engine/install.sh"
+ - "engine/install.ps1"
+ - ".github/workflows/*.yml"
jobs:
validate-yaml:
@@ -30,7 +30,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
- python-version: '3.12'
+ python-version: "3.12"
- name: Install yamllint
run: pip install yamllint
diff --git a/.github/workflows/validate-version.yml b/.github/workflows/validate-version.yml
index b529170..83c1458 100644
--- a/.github/workflows/validate-version.yml
+++ b/.github/workflows/validate-version.yml
@@ -1,141 +1,140 @@
name: Validate Version
on:
- pull_request:
- paths:
- - 'engine/pyproject.toml'
- - 'desktop/package.json'
- - 'website/package.json'
- - 'vscode-extension/package.json'
- - 'orchestration/packages/@codeflow/utils/package.json'
- - '.github/workflows/validate-version.yml'
- push:
- branches:
- - master
- - develop
- paths:
- - 'engine/pyproject.toml'
- - 'desktop/package.json'
- - 'website/package.json'
- - 'vscode-extension/package.json'
- - 'orchestration/packages/@codeflow/utils/package.json'
+ pull_request:
+ paths:
+ - "engine/pyproject.toml"
+ - "desktop/package.json"
+ - "website/package.json"
+ - "vscode-extension/package.json"
+ - "orchestration/packages/@codeflow/utils/package.json"
+ - ".github/workflows/validate-version.yml"
+ push:
+ branches:
+ - master
+ - develop
+ paths:
+ - "engine/pyproject.toml"
+ - "desktop/package.json"
+ - "website/package.json"
+ - "vscode-extension/package.json"
+ - "orchestration/packages/@codeflow/utils/package.json"
jobs:
- validate-version:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: '3.12'
-
- - name: Validate version format
- run: |
- python <<'PY'
- import re
- import sys
-
- with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
- content = f.read()
-
- match = re.search(r'version\s*=\s*"([^"]+)"', content)
- if not match:
- print('❌ Could not find version in engine/pyproject.toml')
- sys.exit(1)
-
- version = match.group(1)
- print(f'Found version: {version}')
-
- if not re.match(r'^\d+\.\d+\.\d+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$', version):
- print(f'❌ Invalid version format: {version}')
- print('Expected format: MAJOR.MINOR.PATCH[-PRERELEASE][+BUILD]')
- sys.exit(1)
-
- print(f'✅ Version format is valid: {version}')
- PY
-
- - name: Check version increment (on PR)
- if: github.event_name == 'pull_request'
- run: |
- python <<'PY'
- import re
- import subprocess
- import sys
-
- result = subprocess.run(
- ['git', 'show', 'origin/master:engine/pyproject.toml'],
- capture_output=True,
- text=True,
- check=False,
- )
-
- if result.returncode != 0:
- print('⚠️ Could not compare with master branch (may be first commit)')
- sys.exit(0)
-
- base_content = result.stdout
- base_match = re.search(r'version\s*=\s*"([^"]+)"', base_content)
- if not base_match:
- print('⚠️ Could not find version in base branch')
- sys.exit(0)
-
- with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
- content = f.read()
-
- current_match = re.search(r'version\s*=\s*"([^"]+)"', content)
- if not current_match:
- print('❌ Could not find version in current engine/pyproject.toml')
- sys.exit(1)
-
- base_version = base_match.group(1)
- current_version = current_match.group(1)
-
- print(f'Base version: {base_version}')
- print(f'Current version: {current_version}')
-
- if base_version == current_version:
- print('⚠️ Version has not been incremented')
- print('Please bump the version before merging')
- else:
- print(f'✅ Version incremented: {base_version} → {current_version}')
- PY
-
- - name: Validate version consistency
- run: |
- python -c "
- import json
- import re
- import sys
-
- files = [
- 'desktop/package.json',
- 'website/package.json',
- 'vscode-extension/package.json',
- 'orchestration/packages/@codeflow/utils/package.json',
- ]
-
- with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
- pyproject_content = f.read()
-
- pyproject_match = re.search(r'version\s*=\s*\"([^\"]+)\"', pyproject_content)
- pyproject_version = pyproject_match.group(1) if pyproject_match else None
-
- for path in files:
- try:
- with open(path, 'r', encoding='utf-8') as f:
- package_json = json.load(f)
- package_version = package_json.get('version')
- if package_version:
- print(f'ℹ️ {path}: {package_version}')
- except FileNotFoundError:
- continue
-
- if pyproject_version:
- print(f'ℹ️ engine/pyproject.toml: {pyproject_version}')
- "
-
+ validate-version:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+
+ - name: Validate version format
+ run: |
+ python <<'PY'
+ import re
+ import sys
+
+ with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ match = re.search(r'version\s*=\s*"([^"]+)"', content)
+ if not match:
+ print('❌ Could not find version in engine/pyproject.toml')
+ sys.exit(1)
+
+ version = match.group(1)
+ print(f'Found version: {version}')
+
+ if not re.match(r'^\d+\.\d+\.\d+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$', version):
+ print(f'❌ Invalid version format: {version}')
+ print('Expected format: MAJOR.MINOR.PATCH[-PRERELEASE][+BUILD]')
+ sys.exit(1)
+
+ print(f'✅ Version format is valid: {version}')
+ PY
+
+ - name: Check version increment (on PR)
+ if: github.event_name == 'pull_request'
+ run: |
+ python <<'PY'
+ import re
+ import subprocess
+ import sys
+
+ result = subprocess.run(
+ ['git', 'show', 'origin/master:engine/pyproject.toml'],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ if result.returncode != 0:
+ print('⚠️ Could not compare with master branch (may be first commit)')
+ sys.exit(0)
+
+ base_content = result.stdout
+ base_match = re.search(r'version\s*=\s*"([^"]+)"', base_content)
+ if not base_match:
+ print('⚠️ Could not find version in base branch')
+ sys.exit(0)
+
+ with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
+ content = f.read()
+
+ current_match = re.search(r'version\s*=\s*"([^"]+)"', content)
+ if not current_match:
+ print('❌ Could not find version in current engine/pyproject.toml')
+ sys.exit(1)
+
+ base_version = base_match.group(1)
+ current_version = current_match.group(1)
+
+ print(f'Base version: {base_version}')
+ print(f'Current version: {current_version}')
+
+ if base_version == current_version:
+ print('⚠️ Version has not been incremented')
+ print('Please bump the version before merging')
+ else:
+ print(f'✅ Version incremented: {base_version} → {current_version}')
+ PY
+
+ - name: Validate version consistency
+ run: |
+ python -c "
+ import json
+ import re
+ import sys
+
+ files = [
+ 'desktop/package.json',
+ 'website/package.json',
+ 'vscode-extension/package.json',
+ 'orchestration/packages/@codeflow/utils/package.json',
+ ]
+
+ with open('engine/pyproject.toml', 'r', encoding='utf-8') as f:
+ pyproject_content = f.read()
+
+ pyproject_match = re.search(r'version\s*=\s*\"([^\"]+)\"', pyproject_content)
+ pyproject_version = pyproject_match.group(1) if pyproject_match else None
+
+ for path in files:
+ try:
+ with open(path, 'r', encoding='utf-8') as f:
+ package_json = json.load(f)
+ package_version = package_json.get('version')
+ if package_version:
+ print(f'ℹ️ {path}: {package_version}')
+ except FileNotFoundError:
+ continue
+
+ if pyproject_version:
+ print(f'ℹ️ engine/pyproject.toml: {pyproject_version}')
+ "
diff --git a/desktop/package-lock.json b/desktop/package-lock.json
index dbbfb31..12cac8c 100644
--- a/desktop/package-lock.json
+++ b/desktop/package-lock.json
@@ -4326,4 +4326,4 @@
}
}
}
-}
+}
\ No newline at end of file
diff --git a/desktop/src-tauri/tauri.conf.json b/desktop/src-tauri/tauri.conf.json
index 5ae71c4..bc4d1b2 100644
--- a/desktop/src-tauri/tauri.conf.json
+++ b/desktop/src-tauri/tauri.conf.json
@@ -37,4 +37,4 @@
"icons/icon.ico"
]
}
-}
+}
\ No newline at end of file
diff --git a/desktop/src/App.tsx b/desktop/src/App.tsx
index 739d208..d003678 100644
--- a/desktop/src/App.tsx
+++ b/desktop/src/App.tsx
@@ -1,6 +1,6 @@
-import React, { Suspense, lazy, useEffect, useState } from 'react';
-import { HashRouter as Router, Routes, Route, Link, useLocation } from 'react-router-dom';
-import { Home, Settings, FileText, Moon, Sun, BarChart3 } from 'lucide-react';
+import { BarChart3, FileText, Home, Moon, Settings, Sun } from 'lucide-react';
+import React, { Suspense, lazy, useEffect, useState } from 'react';
+import { Link, Route, HashRouter as Router, Routes, useLocation } from 'react-router-dom';
import './App.css';
const Dashboard = lazy(() => import('./pages/Dashboard'));
@@ -57,11 +57,10 @@ function NavigationLink({ to, icon: Icon, label }: { to: string; icon: any; labe
return (
diff --git a/desktop/vite.config.ts b/desktop/vite.config.ts
index d9bcf6c..05fa76e 100644
--- a/desktop/vite.config.ts
+++ b/desktop/vite.config.ts
@@ -1,5 +1,5 @@
-import { defineConfig } from "vite";
import react from "@vitejs/plugin-react";
+import { defineConfig } from "vite";
import tsconfigPaths from "vite-tsconfig-paths";
const env = (globalThis as { process?: { env?: Record } }).process?.env ?? {};
@@ -22,10 +22,10 @@ export default defineConfig(() => ({
host: host || false,
hmr: host
? {
- protocol: "ws",
- host,
- port: 1421,
- }
+ protocol: "ws",
+ host,
+ port: 1421,
+ }
: undefined,
watch: {
// 3. tell Vite to ignore watching `src-tauri`
diff --git a/docs/PROGRAM_NAME_SUGGESTIONS.md b/docs/PROGRAM_NAME_SUGGESTIONS.md
index 635df22..7e70864 100644
--- a/docs/PROGRAM_NAME_SUGGESTIONS.md
+++ b/docs/PROGRAM_NAME_SUGGESTIONS.md
@@ -10,16 +10,16 @@ This document provides a comprehensive analysis of naming options for the CodeFl
Our evaluation framework uses 8 key factors to assess each name candidate:
-| Factor | Weight | Description |
-|--------|--------|-------------|
-| **Clarity** | 8 pts | How clearly the name communicates the product's purpose |
-| **Memorability** | 8 pts | How easy the name is to remember and recall |
-| **Brandability** | 7 pts | Potential for logo design, visual identity, and marketing |
-| **Domain Availability** | 6 pts | .com/.io domain availability and social media handles |
-| **Target Fit** | 6 pts | Alignment with developer/enterprise audience |
-| **Scalability** | 5 pts | Ability to support product expansion beyond PRs |
-| **Uniqueness** | 5 pts | Distinctiveness in the market and SEO advantages |
-| **Professional Appeal** | 5 pts | Enterprise credibility and trust signals |
+| Factor | Weight | Description |
+| ----------------------- | ------ | --------------------------------------------------------- |
+| **Clarity** | 8 pts | How clearly the name communicates the product's purpose |
+| **Memorability** | 8 pts | How easy the name is to remember and recall |
+| **Brandability** | 7 pts | Potential for logo design, visual identity, and marketing |
+| **Domain Availability** | 6 pts | .com/.io domain availability and social media handles |
+| **Target Fit** | 6 pts | Alignment with developer/enterprise audience |
+| **Scalability** | 5 pts | Ability to support product expansion beyond PRs |
+| **Uniqueness** | 5 pts | Distinctiveness in the market and SEO advantages |
+| **Professional Appeal** | 5 pts | Enterprise credibility and trust signals |
---
diff --git a/engine/codeflow_engine/actions/__init__.py b/engine/codeflow_engine/actions/__init__.py
index 4d2faff..109e0e2 100644
--- a/engine/codeflow_engine/actions/__init__.py
+++ b/engine/codeflow_engine/actions/__init__.py
@@ -13,70 +13,31 @@
- maintenance: Maintenance tasks
"""
+import importlib
+from types import ModuleType
from typing import Any
from codeflow_engine.actions.registry import ActionRegistry
-# Import category modules with error handling for optional dependencies
-ai_actions = None
-try:
- from codeflow_engine.actions import ai_actions
-except (ImportError, OSError):
- pass
-
-analysis = None
-try:
- from codeflow_engine.actions import analysis
-except (ImportError, OSError):
- pass
-
-base = None
-try:
- from codeflow_engine.actions import base
-except (ImportError, OSError):
- pass
-
-generation = None
-try:
- from codeflow_engine.actions import generation
-except (ImportError, OSError):
- pass
-git = None
-try:
- from codeflow_engine.actions import git
-except (ImportError, OSError):
- pass
+def _optional_module(module_name: str) -> ModuleType | None:
+ try:
+ return importlib.import_module(module_name)
+ except (ImportError, OSError):
+ return None
-issues = None
-try:
- from codeflow_engine.actions import issues
-except (ImportError, OSError):
- pass
-maintenance = None
-try:
- from codeflow_engine.actions import maintenance
-except (ImportError, OSError):
- pass
-
-platform = None
-try:
- from codeflow_engine.actions import platform
-except (ImportError, OSError):
- pass
-
-quality = None
-try:
- from codeflow_engine.actions import quality
-except (ImportError, OSError):
- pass
-
-scripts = None
-try:
- from codeflow_engine.actions import scripts
-except (ImportError, OSError):
- pass
+# Import category modules with error handling for optional dependencies
+ai_actions: ModuleType | None = _optional_module("codeflow_engine.actions.ai_actions")
+analysis: ModuleType | None = _optional_module("codeflow_engine.actions.analysis")
+base: ModuleType | None = _optional_module("codeflow_engine.actions.base")
+generation: ModuleType | None = _optional_module("codeflow_engine.actions.generation")
+git: ModuleType | None = _optional_module("codeflow_engine.actions.git")
+issues: ModuleType | None = _optional_module("codeflow_engine.actions.issues")
+maintenance: ModuleType | None = _optional_module("codeflow_engine.actions.maintenance")
+platform: ModuleType | None = _optional_module("codeflow_engine.actions.platform")
+quality: ModuleType | None = _optional_module("codeflow_engine.actions.quality")
+scripts: ModuleType | None = _optional_module("codeflow_engine.actions.scripts")
# Re-export commonly used actions for backward compatibility
# Analysis
@@ -187,11 +148,7 @@
pass
# Create llm alias for backward compatibility (codeflow_engine.actions.llm)
-llm = None
-try:
- from codeflow_engine.actions.ai_actions import llm
-except ImportError:
- pass
+llm: ModuleType | None = _optional_module("codeflow_engine.actions.ai_actions.llm")
# Platform
PlatformDetector: type[Any] | None = None
diff --git a/engine/codeflow_engine/actions/ai_actions/__init__.py b/engine/codeflow_engine/actions/ai_actions/__init__.py
new file mode 100644
index 0000000..b5a34e1
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/__init__.py
@@ -0,0 +1,25 @@
+"""CodeFlow Engine - AI Actions."""
+
+from codeflow_engine.actions.autogen_implementation import AutoGenImplementation
+from codeflow_engine.actions.autogen_multi_agent import AutoGenAgentSystem
+from codeflow_engine.actions.configurable_llm_provider import (
+ LLMProviderManager as ConfigurableLLMProvider,
+)
+from codeflow_engine.actions.learning_memory_system import LearningMemorySystem
+from codeflow_engine.actions.mem0_memory_integration import Mem0MemoryManager
+from codeflow_engine.actions.summarize_pr_with_ai import (
+ SummarizePrWithAI as SummarizePRWithAI,
+)
+
+from . import autogen, llm
+
+__all__ = [
+ "AutoGenAgentSystem",
+ "AutoGenImplementation",
+ "ConfigurableLLMProvider",
+ "LearningMemorySystem",
+ "Mem0MemoryManager",
+ "SummarizePRWithAI",
+ "autogen",
+ "llm",
+]
diff --git a/engine/codeflow_engine/actions/ai_actions/autogen/__init__.py b/engine/codeflow_engine/actions/ai_actions/autogen/__init__.py
new file mode 100644
index 0000000..8a5e82b
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/autogen/__init__.py
@@ -0,0 +1,7 @@
+"""Compatibility wrapper for grouped AutoGen imports."""
+
+from codeflow_engine.actions.autogen.agents import AutoGenAgentFactory
+from codeflow_engine.actions.autogen.models import AutoGenInputs, AutoGenOutputs
+from codeflow_engine.actions.autogen.system import AutoGenAgentSystem
+
+__all__ = ["AutoGenAgentFactory", "AutoGenAgentSystem", "AutoGenInputs", "AutoGenOutputs"]
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/ai_actions/llm/__init__.py b/engine/codeflow_engine/actions/ai_actions/llm/__init__.py
new file mode 100644
index 0000000..74250c4
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/llm/__init__.py
@@ -0,0 +1,57 @@
+"""CODEFLOW LLM Package - compatibility wrapper under grouped actions."""
+
+from codeflow_engine.actions.ai_actions.llm.manager import ActionLLMProviderManager, LLMProviderManager
+from codeflow_engine.actions.ai_actions.llm.providers import (
+ AnthropicProvider,
+ AzureOpenAIProvider,
+ GroqProvider,
+ MISTRAL_AVAILABLE,
+ MistralProvider,
+ OpenAIProvider,
+ PerplexityProvider,
+ TogetherAIProvider,
+)
+from codeflow_engine.actions.ai_actions.llm.types import (
+ LLMConfig,
+ LLMProviderType,
+ LLMResponse,
+ Message,
+ MessageRole,
+)
+from codeflow_engine.core.llm import BaseLLMProvider, LLMProviderRegistry, OpenAICompatibleProvider
+
+
+def get_llm_provider_manager() -> ActionLLMProviderManager:
+ from codeflow_engine.actions.llm import get_llm_provider_manager as get_manager
+
+ return get_manager()
+
+
+def complete_chat(*args, **kwargs):
+ from codeflow_engine.actions.llm import complete_chat as complete
+
+ return complete(*args, **kwargs)
+
+
+__all__ = [
+ "ActionLLMProviderManager",
+ "AnthropicProvider",
+ "AzureOpenAIProvider",
+ "BaseLLMProvider",
+ "GroqProvider",
+ "LLMConfig",
+ "LLMProviderManager",
+ "LLMProviderRegistry",
+ "LLMProviderType",
+ "LLMResponse",
+ "MISTRAL_AVAILABLE",
+ "Message",
+ "MessageRole",
+ "MistralProvider",
+ "OpenAICompatibleProvider",
+ "OpenAIProvider",
+ "PerplexityProvider",
+ "TogetherAIProvider",
+ "complete_chat",
+ "get_llm_provider_manager",
+]
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/ai_actions/llm/manager.py b/engine/codeflow_engine/actions/ai_actions/llm/manager.py
new file mode 100644
index 0000000..96636c2
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/llm/manager.py
@@ -0,0 +1,5 @@
+"""Compatibility wrapper for grouped LLM manager imports."""
+
+from codeflow_engine.actions.llm.manager import ActionLLMProviderManager, LLMProviderManager
+
+__all__ = ["ActionLLMProviderManager", "LLMProviderManager"]
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/ai_actions/llm/providers/__init__.py b/engine/codeflow_engine/actions/ai_actions/llm/providers/__init__.py
new file mode 100644
index 0000000..b6343ec
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/llm/providers/__init__.py
@@ -0,0 +1,23 @@
+"""Compatibility wrapper for grouped LLM provider imports."""
+
+from codeflow_engine.actions.llm.providers import (
+ AnthropicProvider,
+ GroqProvider,
+ MISTRAL_AVAILABLE,
+ MistralProvider,
+ OpenAIProvider,
+ PerplexityProvider,
+ TogetherAIProvider,
+)
+from codeflow_engine.actions.llm.providers.azure_openai import AzureOpenAIProvider
+
+__all__ = [
+ "AnthropicProvider",
+ "AzureOpenAIProvider",
+ "GroqProvider",
+ "MISTRAL_AVAILABLE",
+ "MistralProvider",
+ "OpenAIProvider",
+ "PerplexityProvider",
+ "TogetherAIProvider",
+]
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/ai_actions/llm/types.py b/engine/codeflow_engine/actions/ai_actions/llm/types.py
new file mode 100644
index 0000000..c8dde6e
--- /dev/null
+++ b/engine/codeflow_engine/actions/ai_actions/llm/types.py
@@ -0,0 +1,5 @@
+"""Compatibility wrapper for grouped LLM types imports."""
+
+from codeflow_engine.actions.llm.types import LLMConfig, LLMProviderType, LLMResponse, Message, MessageRole
+
+__all__ = ["LLMConfig", "LLMProviderType", "LLMResponse", "Message", "MessageRole"]
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/ai_fix_applier.py b/engine/codeflow_engine/actions/ai_linting_fixer/ai_fix_applier.py
index cc69247..97c4428 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/ai_fix_applier.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/ai_fix_applier.py
@@ -8,15 +8,17 @@
from typing import Any
from codeflow_engine.actions.ai_linting_fixer.backup_manager import BackupManager
-from codeflow_engine.actions.ai_linting_fixer.file_persistence import \
- FilePersistenceManager
+from codeflow_engine.actions.ai_linting_fixer.file_persistence import (
+ FilePersistenceManager,
+)
from codeflow_engine.actions.ai_linting_fixer.fix_strategy import StrategySelector
from codeflow_engine.actions.ai_linting_fixer.llm_client import LLMClient
-from codeflow_engine.actions.ai_linting_fixer.models import (LintingIssue)
+from codeflow_engine.actions.ai_linting_fixer.models import LintingIssue
from codeflow_engine.actions.ai_linting_fixer.response_parser import ResponseParser
from codeflow_engine.actions.ai_linting_fixer.validation_manager import (
- ValidationConfig, ValidationManager)
-from codeflow_engine.ai.core.providers.manager import LLMProviderManager
+ ValidationConfig,
+ ValidationManager,
+)
logger = logging.getLogger(__name__)
@@ -26,7 +28,7 @@ class AIFixApplier:
def __init__(
self,
- llm_manager: LLMProviderManager,
+ llm_manager: Any,
backup_manager: BackupManager | None = None,
validation_config: ValidationConfig | None = None,
):
@@ -51,7 +53,9 @@ def __init__(
self.llm_client = LLMClient(llm_manager)
self.response_parser = ResponseParser()
self.persistence_manager = FilePersistenceManager(backup_manager)
- self.validation_manager = ValidationManager(validation_config or ValidationConfig())
+ self.validation_manager = ValidationManager(
+ validation_config or ValidationConfig()
+ )
# Initialize strategy selector
self.strategy_selector = StrategySelector(
@@ -61,7 +65,7 @@ def __init__(
self.validation_manager,
)
- self.session_id = None
+ self.session_id: str | None = None
async def apply_specialist_fix_with_validation(
self,
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/code_analyzer.py b/engine/codeflow_engine/actions/ai_linting_fixer/code_analyzer.py
index 263038e..5a1d0a5 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/code_analyzer.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/code_analyzer.py
@@ -6,6 +6,7 @@
import ast
import logging
+from typing import Any
try:
@@ -156,7 +157,7 @@ def count_lines_of_code(self, content: str) -> dict[str, int]:
docstring_lines = 0
in_docstring = False
- docstring_quotes = None
+ docstring_quotes: str | None = None
for line in lines:
stripped = line.strip()
@@ -175,7 +176,7 @@ def count_lines_of_code(self, content: str) -> dict[str, int]:
docstring_lines += 1
else:
# End of docstring
- if docstring_quotes in stripped:
+ if docstring_quotes and docstring_quotes in stripped:
in_docstring = False
docstring_quotes = None
docstring_lines += 1
@@ -223,7 +224,7 @@ def get_cpu_usage(self) -> float:
logger.debug(f"Error getting CPU usage: {e}")
return 0.0
- def analyze_function_complexity(self, content: str) -> list[dict[str, any]]:
+ def analyze_function_complexity(self, content: str) -> list[dict[str, Any]]:
"""Analyze complexity of individual functions."""
try:
tree = ast.parse(content)
@@ -245,7 +246,9 @@ def analyze_function_complexity(self, content: str) -> list[dict[str, any]]:
logger.debug(f"Error analyzing function complexity: {e}")
return []
- def _calculate_function_complexity(self, node: ast.FunctionDef) -> int:
+ def _calculate_function_complexity(
+ self, node: ast.FunctionDef | ast.AsyncFunctionDef
+ ) -> int:
"""Calculate cyclomatic complexity of a function."""
complexity = 1 # Base complexity
@@ -266,7 +269,7 @@ def _calculate_function_complexity(self, node: ast.FunctionDef) -> int:
return complexity
- def detect_code_smells(self, content: str) -> list[dict[str, any]]:
+ def detect_code_smells(self, content: str) -> list[dict[str, Any]]:
"""Detect common code smells and anti-patterns."""
smells = []
@@ -313,7 +316,7 @@ def detect_code_smells(self, content: str) -> list[dict[str, any]]:
return smells
- def get_code_metrics(self, content: str) -> dict[str, any]:
+ def get_code_metrics(self, content: str) -> dict[str, Any]:
"""Get comprehensive code metrics."""
return {
"complexity": self.calculate_file_complexity(content),
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/core.py b/engine/codeflow_engine/actions/ai_linting_fixer/core.py
index 41e2ed7..1570a74 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/core.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/core.py
@@ -12,9 +12,10 @@
from codeflow_engine.actions.ai_linting_fixer.database import AIInteractionDB
from codeflow_engine.actions.ai_linting_fixer.metrics import MetricsCollector
from codeflow_engine.actions.ai_linting_fixer.queue_manager import IssueQueueManager
-from codeflow_engine.actions.ai_linting_fixer.workflow import (WorkflowContext,
- WorkflowIntegrationMixin)
-from codeflow_engine.ai.core.providers.manager import LLMProviderManager
+from codeflow_engine.actions.ai_linting_fixer.workflow import (
+ WorkflowContext,
+ WorkflowIntegrationMixin,
+)
logger = logging.getLogger(__name__)
@@ -38,7 +39,7 @@ class AILintingFixer(WorkflowIntegrationMixin):
def __init__(
self,
- llm_manager: LLMProviderManager | None = None,
+ llm_manager: Any | None = None,
max_workers: int = DEFAULT_MAX_WORKERS,
workflow_context: WorkflowContext | None = None,
):
@@ -82,8 +83,11 @@ def _generate_session_id(self) -> str:
"""Generate a unique session identifier."""
import random
import string
+
timestamp = self.metrics.session_metrics.start_time.strftime("%Y%m%d_%H%M%S")
- random_suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
+ random_suffix = "".join(
+ random.choices(string.ascii_lowercase + string.digits, k=6)
+ )
return f"ai_lint_{timestamp}_{random_suffix}"
def queue_detected_issues(self, issues: list, quiet: bool = False) -> int:
@@ -92,7 +96,7 @@ def queue_detected_issues(self, issues: list, quiet: bool = False) -> int:
return 0
# Ensure session_id exists
- if not hasattr(self, 'session_id') or not self.session_id:
+ if not hasattr(self, "session_id") or not self.session_id:
msg = "Session ID is required but not available"
raise ValueError(msg)
@@ -126,9 +130,7 @@ async def process_queued_issues(
# Get next batch of issues
batch_size = min(max_fixes - processed_count, max_fixes)
issues = self.queue_manager.get_next_issues(
- limit=batch_size,
- worker_id=self.session_id,
- filter_types=filter_types
+ limit=batch_size, worker_id=self.session_id, filter_types=filter_types
)
if not issues:
@@ -157,14 +159,16 @@ async def process_queued_issues(
self.queue_manager.update_issue_status(
issue_id=int(issue_id),
status=status,
- fix_result=fix_result.get("details", {})
+ fix_result=fix_result.get("details", {}),
)
results["processed"] += 1
processed_count += 1
except Exception as e:
if not quiet:
- logger.exception("Error processing issue %s", issue.get('id', 'unknown'))
+ logger.exception(
+ "Error processing issue %s", issue.get("id", "unknown")
+ )
results["failed"] += 1
results["processed"] += 1
processed_count += 1
@@ -175,7 +179,7 @@ async def process_queued_issues(
self.queue_manager.update_issue_status(
issue_id=int(issue_id),
status="failed",
- fix_result={"error": str(e)}
+ fix_result={"error": str(e)},
)
# Update stats
@@ -204,15 +208,12 @@ async def _attempt_issue_fix(self, issue: dict[str, Any]) -> dict[str, Any]:
"line_number": line_number,
"error_code": error_code,
"message": message,
- "fix_applied": f"Applied fix for {error_code}"
- }
+ "fix_applied": f"Applied fix for {error_code}",
+ },
}
except Exception as e:
logger.exception("Error in _attempt_issue_fix")
- return {
- "success": False,
- "details": {"error": str(e)}
- }
+ return {"success": False, "details": {"error": str(e)}}
else:
return fix_result
@@ -223,7 +224,9 @@ def get_session_results(self) -> dict[str, Any]:
# Calculate success rate
total_issues = self.stats["issues_processed"]
- success_rate = 0.0 if total_issues == 0 else self.stats["issues_fixed"] / total_issues
+ success_rate = (
+ 0.0 if total_issues == 0 else self.stats["issues_fixed"] / total_issues
+ )
return {
"session_id": self.session_id,
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/llm_client.py b/engine/codeflow_engine/actions/ai_linting_fixer/llm_client.py
index 469f745..b661433 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/llm_client.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/llm_client.py
@@ -6,9 +6,7 @@
import asyncio
import logging
-from typing import Any
-
-from codeflow_engine.ai.core.providers.manager import LLMProviderManager
+from typing import Any, cast
logger = logging.getLogger(__name__)
@@ -16,9 +14,9 @@
class LLMClient:
"""Unified client for both async and sync LLM manager interfaces."""
- def __init__(self, llm_manager: LLMProviderManager):
+ def __init__(self, llm_manager: Any):
"""Initialize the LLM client.
-
+
Args:
llm_manager: The LLM provider manager instance
"""
@@ -26,18 +24,18 @@ def __init__(self, llm_manager: LLMProviderManager):
async def complete(self, request: dict[str, Any]) -> Any:
"""Make a completion request using the appropriate interface.
-
+
Args:
request: Request parameters including messages, provider, temperature, etc.
-
+
Returns:
LLM response or None if failed
"""
try:
# Check if the manager has async methods
- if hasattr(self.llm_manager, "generate_completion") and asyncio.iscoroutinefunction(
- self.llm_manager.generate_completion
- ):
+ if hasattr(
+ self.llm_manager, "generate_completion"
+ ) and asyncio.iscoroutinefunction(self.llm_manager.generate_completion):
# Use async interface with generate_completion
# Build kwargs excluding known keys to forward all other parameters
known_keys = {"messages", "provider", "temperature", "max_tokens"}
@@ -55,7 +53,7 @@ async def complete(self, request: dict[str, Any]) -> Any:
self.llm_manager.complete
):
# Use async interface with complete
- response = await self.llm_manager.complete(request)
+ response = await cast(Any, self.llm_manager).complete(request)
else:
# Use sync interface - check if result is awaitable
if hasattr(self.llm_manager, "generate_completion"):
@@ -72,7 +70,7 @@ async def complete(self, request: dict[str, Any]) -> Any:
**kwargs
)
else:
- response = self.llm_manager.complete(request)
+ response = cast(Any, self.llm_manager).complete(request)
# Check if the returned value is awaitable and await if needed
if asyncio.iscoroutine(response):
@@ -94,7 +92,7 @@ def create_request(
max_tokens: int = 2000,
) -> dict[str, Any]:
"""Create a standardized LLM request.
-
+
Args:
system_prompt: System message content
user_prompt: User message content
@@ -102,7 +100,7 @@ def create_request(
model: Model name (optional)
temperature: Sampling temperature
max_tokens: Maximum tokens to generate
-
+
Returns:
Standardized request dictionary
"""
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/model_competency.py b/engine/codeflow_engine/actions/ai_linting_fixer/model_competency.py
index 21f88cd..142f687 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/model_competency.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/model_competency.py
@@ -24,7 +24,7 @@ def __init__(self):
"""Initialize the competency manager with predefined ratings."""
self.model_competency = self._initialize_competency_ratings()
self.fallback_strategies = self._initialize_fallback_strategies()
- self.available_models = {}
+ self.available_models: dict[str, bool] = {}
self._update_model_availabilities()
def _initialize_competency_ratings(self) -> dict[str, dict[str, float]]:
@@ -116,7 +116,7 @@ def _initialize_competency_ratings(self) -> dict[str, dict[str, float]]:
# Add ratings from model configurations
for model_config in ALL_MODEL_CONFIGS:
if model_config.competency_ratings:
- ratings[model_config.name] = model_config.competency_ratings
+ ratings[model_config.name] = dict(model_config.competency_ratings)
return ratings
@@ -176,7 +176,7 @@ def get_model_competency(self, model_name: str, error_code: str) -> float:
return self.model_competency.get(model_name, {}).get(error_code, 0.5)
def get_fallback_sequence(
- self, error_code: str, strategy_override: str = None
+ self, error_code: str, strategy_override: str | None = None
) -> list[tuple[str, str]]:
"""Get the optimal fallback sequence for an error code."""
if strategy_override:
@@ -230,7 +230,7 @@ def calculate_confidence(
return max(0.1, base_competency - 0.2)
def get_best_model_for_issue(
- self, error_code: str, available_models: list[str] = None
+ self, error_code: str, available_models: list[str] | None = None
) -> str:
"""Get the best available model for a specific issue type."""
if available_models is None:
@@ -307,7 +307,7 @@ def get_model_info(self, model_name: str) -> dict[str, Any]:
}
# Return info for legacy cloud models
- legacy_models = {
+ legacy_models: dict[str, dict[str, Any]] = {
"gpt-35-turbo": {"provider": "azure_openai", "performance_tier": "Fast"},
"gpt-4": {"provider": "azure_openai", "performance_tier": "High"},
"gpt-4o": {"provider": "azure_openai", "performance_tier": "Excellent"},
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/model_configs/__init__.py b/engine/codeflow_engine/actions/ai_linting_fixer/model_configs/__init__.py
index d41934c..f633122 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/model_configs/__init__.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/model_configs/__init__.py
@@ -7,6 +7,7 @@
import logging
from functools import partial
+from typing import Any, Callable
from codeflow_engine.actions.ai_linting_fixer.model_configs.deepseek_r1_7b import \
DEEPSEEK_R1_7B_CONFIG
@@ -40,7 +41,7 @@
logger = logging.getLogger(__name__)
# All model configurations
-ALL_MODEL_CONFIGS = [
+ALL_MODEL_CONFIGS: list[Any] = [
GPT_5_CHAT_CONFIG,
MISTRAL_7B_CONFIG,
DEEPSEEK_R1_7B_CONFIG,
@@ -51,7 +52,7 @@
]
# Availability update functions
-AVAILABILITY_UPDATERS = {
+AVAILABILITY_UPDATERS: dict[str, Callable[[], bool]] = {
# Do not overwrite release flag; update endpoint only
"gpt-5-chat": partial(update_gpt5_availability, GPT_5_CHAT_CONFIG, update_endpoint_only=True),
"mistral-7b": update_mistral_availability,
@@ -63,7 +64,7 @@
}
-def update_all_availabilities():
+def update_all_availabilities() -> dict[str, bool]:
"""Update availability status for all models."""
results = {}
for model_name, updater in AVAILABILITY_UPDATERS.items():
@@ -75,12 +76,12 @@ def update_all_availabilities():
return results
-def get_available_models():
+def get_available_models() -> list[Any]:
"""Get list of currently available models."""
return [config for config in ALL_MODEL_CONFIGS if config.availability]
-def get_model_by_name(name: str):
+def get_model_by_name(name: str) -> Any | None:
"""Get model configuration by name."""
for config in ALL_MODEL_CONFIGS:
if config.name == name:
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/queue_manager.py b/engine/codeflow_engine/actions/ai_linting_fixer/queue_manager.py
index d97263c..e084331 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/queue_manager.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/queue_manager.py
@@ -107,7 +107,7 @@ def get_next_issues(
try:
where_conditions = ["status = 'pending'"]
- params = []
+ params: list[Any] = []
if filter_types:
placeholders = ",".join("?" for _ in filter_types)
@@ -169,7 +169,7 @@ def update_issue_status(
(status, json.dumps(fix_result) if fix_result else None, issue_id),
)
- def get_queue_stats(self) -> dict[str, int]:
+ def get_queue_stats(self) -> dict[str, int | float]:
"""Get statistics about the issue queue."""
with sqlite3.connect(self.db_path) as conn:
cursor = conn.execute(
@@ -193,3 +193,44 @@ def get_queue_stats(self) -> dict[str, int]:
"failed": row[4] or 0,
"success_rate": (row[3] / row[0] * 100) if row[0] > 0 else 0.0,
}
+
+ def get_queue_statistics(self) -> dict[str, Any]:
+ """Compatibility wrapper returning nested queue statistics."""
+ stats = self.get_queue_stats()
+ return {
+ "overall": {
+ "total_issues": int(stats.get("total", 0)),
+ "pending": int(stats.get("pending", 0)),
+ "in_progress": int(stats.get("processing", 0)),
+ "completed": int(stats.get("completed", 0)),
+ "failed": int(stats.get("failed", 0)),
+ "success_rate": float(stats.get("success_rate", 0.0)),
+ }
+ }
+
+ def cleanup_old_queue_items(self, days_to_keep: int = 7) -> int:
+ """Delete completed and failed queue items older than the retention window."""
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.execute(
+ """
+ DELETE FROM issue_queue
+ WHERE status IN ('completed', 'failed')
+ AND updated_at < datetime('now', ?)
+ """,
+ (f"-{days_to_keep} days",),
+ )
+ return cursor.rowcount
+
+ def reset_stale_issues(self, timeout_minutes: int = 30) -> int:
+ """Reset long-running processing issues back to pending."""
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.execute(
+ """
+ UPDATE issue_queue
+ SET status = 'pending', worker_id = NULL, updated_at = CURRENT_TIMESTAMP
+ WHERE status = 'processing'
+ AND updated_at < datetime('now', ?)
+ """,
+ (f"-{timeout_minutes} minutes",),
+ )
+ return cursor.rowcount
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/redis_queue.py b/engine/codeflow_engine/actions/ai_linting_fixer/redis_queue.py
index 83a06fa..2550ca8 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/redis_queue.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/redis_queue.py
@@ -76,7 +76,7 @@ class QueuedIssue:
class_name: str | None = None
estimated_confidence: float = 0.7
- def __post_init__(self):
+ def __post_init__(self) -> None:
if self.created_at is None:
self.created_at = datetime.now(UTC)
if not self.id:
@@ -114,7 +114,7 @@ class ProcessingResult:
worker_id: str | None = None
processed_at: datetime | None = None
- def __post_init__(self):
+ def __post_init__(self) -> None:
if self.processed_at is None:
self.processed_at = datetime.now(UTC)
@@ -161,6 +161,8 @@ def __init__(
self.results_queue = f"{queue_prefix}:results"
self.failed_queue = f"{queue_prefix}:failed"
self.worker_heartbeat = f"{queue_prefix}:workers:heartbeat"
+ self.issue_queue_key = self.pending_queue
+ self.processing_count_key = f"{queue_prefix}:processing_count"
# Statistics
self.processed_count = 0
@@ -196,38 +198,28 @@ def enqueue_issue(self, issue: QueuedIssue) -> bool:
"""Add an issue to the pending queue."""
try:
self._validate_redis_client()
- issue_data = {
- "id": issue.id,
- "file_path": str(issue.file_path),
- "issue_type": issue.issue_type,
- "message": issue.message,
- "line": issue.line,
- "column": issue.column,
- "severity": issue.severity,
- "timestamp": datetime.utcnow().isoformat(),
- }
+ issue_data = issue.to_dict()
+ issue_data["timestamp"] = datetime.now(UTC).isoformat()
+ assert self.redis_client is not None
self.redis_client.lpush(self.issue_queue_key, json.dumps(issue_data))
return True
except Exception as e:
logger.exception(f"Failed to enqueue issue: {e}")
return False
- def dequeue_issue(self) -> QueuedIssue | None:
+ def dequeue_issue(self, timeout: int | None = None) -> QueuedIssue | None:
"""Remove and return the next issue from the queue."""
try:
self._validate_redis_client()
- result = self.redis_client.rpop(self.issue_queue_key)
+ assert self.redis_client is not None
+ if timeout is not None:
+ popped = self.redis_client.brpop(self.issue_queue_key, timeout=timeout)
+ result = popped[1] if popped else None
+ else:
+ result = self.redis_client.rpop(self.issue_queue_key)
if result:
data = json.loads(result)
- return QueuedIssue(
- id=data["id"],
- file_path=Path(data["file_path"]),
- issue_type=data["issue_type"],
- message=data["message"],
- line=data["line"],
- column=data["column"],
- severity=data["severity"],
- )
+ return QueuedIssue.from_dict(data)
return None
except Exception as e:
logger.exception(f"Failed to dequeue issue: {e}")
@@ -237,6 +229,7 @@ def get_queue_length(self) -> int:
"""Get the current number of issues in the queue."""
try:
self._validate_redis_client()
+ assert self.redis_client is not None
return self.redis_client.llen(self.issue_queue_key)
except Exception as e:
logger.exception(f"Failed to get queue length: {e}")
@@ -246,16 +239,18 @@ def clear_queue(self) -> bool:
"""Clear all issues from the queue."""
try:
self._validate_redis_client()
+ assert self.redis_client is not None
self.redis_client.delete(self.issue_queue_key)
return True
except Exception as e:
logger.exception(f"Failed to clear queue: {e}")
return False
- def get_queue_stats(self) -> dict:
+ def get_queue_stats(self) -> dict[str, Any]:
"""Get statistics about the queue."""
try:
self._validate_redis_client()
+ assert self.redis_client is not None
length = self.redis_client.llen(self.issue_queue_key)
return {
"queue_length": length,
@@ -274,21 +269,12 @@ def peek_queue(self, count: int = 5) -> list[QueuedIssue]:
"""Peek at the top issues in the queue without removing them."""
try:
self._validate_redis_client()
+ assert self.redis_client is not None
results = self.redis_client.lrange(self.issue_queue_key, 0, count - 1)
- issues = []
+ issues: list[QueuedIssue] = []
for result in results:
data = json.loads(result)
- issues.append(
- QueuedIssue(
- id=data["id"],
- file_path=Path(data["file_path"]),
- issue_type=data["issue_type"],
- message=data["message"],
- line=data["line"],
- column=data["column"],
- severity=data["severity"],
- )
- )
+ issues.append(QueuedIssue.from_dict(data))
return issues
except Exception as e:
logger.exception(f"Failed to peek queue: {e}")
@@ -308,10 +294,11 @@ def remove_issue(self, issue_id: str) -> bool:
logger.exception(f"Failed to remove issue: {e}")
return False
- def get_processing_status(self) -> dict:
+ def get_processing_status(self) -> dict[str, Any]:
"""Get the current processing status."""
try:
self._validate_redis_client()
+ assert self.redis_client is not None
queue_length = self.redis_client.llen(self.issue_queue_key)
processing_count = self.redis_client.get(self.processing_count_key) or 0
diff --git a/engine/codeflow_engine/actions/ai_linting_fixer/workflow_orchestrator.py b/engine/codeflow_engine/actions/ai_linting_fixer/workflow_orchestrator.py
index 88f8e79..4b65ef1 100644
--- a/engine/codeflow_engine/actions/ai_linting_fixer/workflow_orchestrator.py
+++ b/engine/codeflow_engine/actions/ai_linting_fixer/workflow_orchestrator.py
@@ -7,18 +7,24 @@
import logging
import os
-from typing import Any
+from typing import Any, cast
from codeflow_engine.actions.ai_linting_fixer.core import AILintingFixer
from codeflow_engine.actions.ai_linting_fixer.detection import IssueDetector
-from codeflow_engine.actions.ai_linting_fixer.display import (DisplayConfig, OutputMode,
- get_display)
-from codeflow_engine.actions.ai_linting_fixer.models import (AILintingFixerInputs,
- AILintingFixerOutputs,
- LintingIssue,
- create_empty_outputs)
-from codeflow_engine.actions.llm.manager import \
- ActionLLMProviderManager as LLMProviderManager
+from codeflow_engine.actions.ai_linting_fixer.display import (
+ DisplayConfig,
+ OutputMode,
+ get_display,
+)
+from codeflow_engine.actions.ai_linting_fixer.models import (
+ AILintingFixerInputs,
+ AILintingFixerOutputs,
+ LintingIssue,
+ create_empty_outputs,
+)
+from codeflow_engine.actions.llm.manager import (
+ ActionLLMProviderManager as LLMProviderManager,
+)
logger = logging.getLogger(__name__)
@@ -42,22 +48,24 @@ def __init__(self, display_config: DisplayConfig):
self.display = get_display(display_config)
self.issue_detector = IssueDetector()
- def create_llm_manager(self, inputs: AILintingFixerInputs) -> LLMProviderManager | None:
+ def create_llm_manager(self, inputs: AILintingFixerInputs) -> Any | None:
"""Create and configure the LLM manager."""
# Get Azure OpenAI configuration
- azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "https:///")
+ azure_endpoint = os.getenv(
+ "AZURE_OPENAI_ENDPOINT", "https:///"
+ )
azure_api_key = os.getenv("AZURE_OPENAI_API_KEY")
# Soft validation: check if Azure OpenAI is properly configured
azure_configured = (
- azure_api_key and
- azure_endpoint and
- "<" not in azure_endpoint and
- "your-azure-openai-endpoint" not in azure_endpoint
+ azure_api_key
+ and azure_endpoint
+ and "<" not in azure_endpoint
+ and "your-azure-openai-endpoint" not in azure_endpoint
)
# Build LLM configuration with fallback providers
- llm_config = {
+ llm_config: dict[str, Any] = {
"default_provider": None, # Will be set based on available providers
"fallback_order": [], # Will be populated based on available providers
"providers": {},
@@ -69,7 +77,9 @@ def create_llm_manager(self, inputs: AILintingFixerInputs) -> LLMProviderManager
"azure_endpoint": azure_endpoint,
"api_key": azure_api_key,
"api_version": os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-01"),
- "deployment_name": os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-35-turbo"),
+ "deployment_name": os.getenv(
+ "AZURE_OPENAI_DEPLOYMENT_NAME", "gpt-35-turbo"
+ ),
}
# Add other providers for fallback
@@ -130,7 +140,9 @@ def convert_to_legacy_format(self, issues: list[Any]) -> list[LintingIssue]:
legacy_issues.append(legacy_issue)
return legacy_issues
- def queue_issues(self, fixer: AILintingFixer, issues: list[LintingIssue], quiet: bool) -> int:
+ def queue_issues(
+ self, fixer: AILintingFixer, issues: list[LintingIssue], quiet: bool
+ ) -> int:
"""Queue detected issues for processing."""
self.display.operation.show_queueing_progress(len(issues))
@@ -149,7 +161,9 @@ async def process_issues(
"""Process queued issues."""
# Note: processing mode could be used for future enhancements
_processing_mode = (
- "redis" if hasattr(fixer, 'redis_manager') and fixer.redis_manager else "local"
+ "redis"
+ if hasattr(fixer, "redis_manager") and fixer.redis_manager
+ else "local"
)
self.display.operation.show_processing_start(len(issues))
@@ -207,9 +221,13 @@ def generate_final_results(
suggestions = []
if final_results.issues_failed > 0:
suggestions.append("Review failed fixes and consider manual intervention")
- success_rate = final_results.issues_fixed / max(final_results.total_issues_found, 1)
+ success_rate = final_results.issues_fixed / max(
+ final_results.total_issues_found, 1
+ )
if success_rate < 0.8:
- suggestions.append("Consider adjusting fix parameters or reviewing code patterns")
+ suggestions.append(
+ "Consider adjusting fix parameters or reviewing code patterns"
+ )
self.display.results.show_suggestions(suggestions)
return final_results
@@ -222,7 +240,9 @@ def create_error_output(self, error_msg: str) -> AILintingFixerOutputs:
return error_results
-async def orchestrate_ai_linting_workflow(inputs: AILintingFixerInputs) -> AILintingFixerOutputs:
+async def orchestrate_ai_linting_workflow(
+ inputs: AILintingFixerInputs,
+) -> AILintingFixerOutputs:
"""
Main workflow orchestration function.
@@ -234,11 +254,7 @@ async def orchestrate_ai_linting_workflow(inputs: AILintingFixerInputs) -> AILin
mode=(
OutputMode.QUIET
if inputs.quiet
- else (
- OutputMode.VERBOSE
- if inputs.verbose_metrics
- else OutputMode.NORMAL
- )
+ else (OutputMode.VERBOSE if inputs.verbose_metrics else OutputMode.NORMAL)
)
)
@@ -263,15 +279,15 @@ async def orchestrate_ai_linting_workflow(inputs: AILintingFixerInputs) -> AILin
except Exception as e:
logger.warning("Could not check provider status: %s", e)
else:
- display.system.show_warning(
+ display.error.show_warning(
"No LLM providers configured. AI features will be disabled."
)
- display.system.show_info("To enable AI features, configure at least one of:")
- display.system.show_info(
+ display.error.show_info("To enable AI features, configure at least one of:")
+ display.error.show_info(
" - AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT"
)
- display.system.show_info(" - OPENAI_API_KEY")
- display.system.show_info(" - ANTHROPIC_API_KEY")
+ display.error.show_info(" - OPENAI_API_KEY")
+ display.error.show_info(" - ANTHROPIC_API_KEY")
# Step 1: Detect issues
issues = orchestrator.detect_issues(inputs.target_path)
@@ -299,7 +315,9 @@ async def orchestrate_ai_linting_workflow(inputs: AILintingFixerInputs) -> AILin
return results
# Step 3: Process issues
- process_results = await orchestrator.process_issues(fixer, legacy_issues, inputs)
+ process_results = await orchestrator.process_issues(
+ fixer, legacy_issues, inputs
+ )
# Show dry run notice if applicable
if inputs.dry_run:
diff --git a/engine/codeflow_engine/actions/analysis/__init__.py b/engine/codeflow_engine/actions/analysis/__init__.py
new file mode 100644
index 0000000..adbc42b
--- /dev/null
+++ b/engine/codeflow_engine/actions/analysis/__init__.py
@@ -0,0 +1,6 @@
+"""CodeFlow Engine - Analysis Actions."""
+
+from codeflow_engine.actions.ai_comment_analyzer import AICommentAnalyzer
+from codeflow_engine.actions.pr_review_analyzer import PRReviewAnalyzer
+
+__all__ = ["AICommentAnalyzer", "PRReviewAnalyzer"]
diff --git a/engine/codeflow_engine/actions/autogen_implementation.py b/engine/codeflow_engine/actions/autogen_implementation.py
index d0c2756..a0b7f35 100644
--- a/engine/codeflow_engine/actions/autogen_implementation.py
+++ b/engine/codeflow_engine/actions/autogen_implementation.py
@@ -17,6 +17,7 @@
try:
from autogen import ConversableAgent # type: ignore[import-not-found]
from autogen import GroupChat, GroupChatManager
+
AUTOGEN_AVAILABLE = True
except ImportError:
# Create dummy classes for type annotations when AutoGen is not available
@@ -24,9 +25,7 @@ class _ConversableAgent:
def __init__(self, **kwargs: Any) -> None:
pass
- def initiate_chat(
- self, *_args: Any, **_kwargs: Any
- ) -> list[dict[str, Any]]:
+ def initiate_chat(self, *_args: Any, **_kwargs: Any) -> list[dict[str, Any]]:
return []
class _GroupChat:
@@ -40,9 +39,7 @@ def __init__(
self.messages: list[dict[str, Any]] = messages or []
class _GroupChatManager:
- def __init__(
- self, groupchat: _GroupChat, _llm_config: dict[str, Any]
- ) -> None:
+ def __init__(self, groupchat: _GroupChat, _llm_config: dict[str, Any]) -> None:
self.groupchat: _GroupChat = groupchat
# Define the constant outside the block to avoid redefinition
@@ -114,9 +111,7 @@ def execute_multi_agent_task(self, inputs: AutoGenInputs) -> AutoGenOutputs:
)
# Create group chat manager
- manager = GroupChatManager(
- groupchat=group_chat, llm_config=self.llm_config
- )
+ manager = GroupChatManager(groupchat=group_chat, llm_config=self.llm_config)
# Execute the task
conversation_result = self._execute_conversation(agents, manager, inputs)
@@ -290,29 +285,29 @@ def _execute_conversation(
try:
# Initiate the conversation with the architect
- architect = cast(ConversableAgentType, agents[0]) # First agent is always the architect
+ architect = cast(
+ ConversableAgentType, agents[0]
+ ) # First agent is always the architect
result = architect.initiate_chat(
- manager,
- message=task_message,
- max_turns=20
+ manager, message=task_message, max_turns=20
)
# Extract conversation history - use safer approach to access attributes
try:
- if manager and hasattr(manager, 'groupchat'):
+ if manager and hasattr(manager, "groupchat"):
groupchat = manager.groupchat
- if hasattr(groupchat, 'messages'):
+ if hasattr(groupchat, "messages"):
conversation_history = groupchat.messages
except Exception:
# Fallback if we can't access conversation history
pass
- else:
- return {
- "success": True,
- "conversation_history": conversation_history,
- "final_result": result,
- }
+
+ return {
+ "success": True,
+ "conversation_history": conversation_history,
+ "final_result": result,
+ }
except Exception as e:
return {
@@ -418,7 +413,9 @@ def _process_results(
error_message=None,
)
- def _extract_implementation_plan(self, conversation_history: list[dict[str, Any]]) -> str:
+ def _extract_implementation_plan(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> str:
"""Extract implementation plan from conversation"""
plan_content: list[str] = []
@@ -435,7 +432,9 @@ def _extract_implementation_plan(self, conversation_history: list[dict[str, Any]
else "No implementation plan found"
)
- def _extract_code_changes(self, conversation_history: list[dict[str, Any]]) -> dict[str, str]:
+ def _extract_code_changes(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> dict[str, str]:
"""Extract code changes from conversation"""
code_changes: dict[str, str] = {}
@@ -463,7 +462,9 @@ def _extract_code_changes(self, conversation_history: list[dict[str, Any]]) -> d
return code_changes
- def _extract_test_files(self, conversation_history: list[dict[str, Any]]) -> dict[str, str]:
+ def _extract_test_files(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> dict[str, str]:
"""Extract test files from conversation"""
test_files: dict[str, str] = {}
@@ -531,7 +532,9 @@ def _extract_filename_from_context(
return None
- def _extract_recommendations(self, conversation_history: list[dict[str, Any]]) -> list[str]:
+ def _extract_recommendations(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> list[str]:
"""Extract recommendations from conversation"""
recommendations: list[str] = []
@@ -542,14 +545,19 @@ def _extract_recommendations(self, conversation_history: list[dict[str, Any]]) -
if "recommend" in content.lower() or "suggestion" in content.lower():
# Extract recommendation sentences
- sentences = content.split('.')
+ sentences = content.split(".")
for sentence in sentences:
- if "recommend" in sentence.lower() or "suggestion" in sentence.lower():
+ if (
+ "recommend" in sentence.lower()
+ or "suggestion" in sentence.lower()
+ ):
recommendations.append(sentence.strip())
return recommendations[:5] # Limit to 5 recommendations
- def _extract_fix_code(self, conversation_history: list[dict[str, Any]]) -> str | None:
+ def _extract_fix_code(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> str | None:
"""Extract fix code from conversation"""
for message in conversation_history:
content = message.get("content", "")
@@ -568,7 +576,9 @@ def _extract_fix_code(self, conversation_history: list[dict[str, Any]]) -> str |
return None
- def _extract_consensus(self, conversation_history: list[dict[str, Any]]) -> str | None:
+ def _extract_consensus(
+ self, conversation_history: list[dict[str, Any]]
+ ) -> str | None:
"""Extract consensus from conversation"""
consensus_messages: list[str] = []
@@ -581,7 +591,9 @@ def _extract_consensus(self, conversation_history: list[dict[str, Any]]) -> str
word in content.lower()
for word in ["agree", "consensus", "unanimous", "approved"]
):
- consensus_messages.append(f"**{message.get('name', 'Agent')}**: {content}")
+ consensus_messages.append(
+ f"**{message.get('name', 'Agent')}**: {content}"
+ )
if consensus_messages:
return "\n\n".join(consensus_messages[-3:]) # Last 3 consensus messages
@@ -647,11 +659,13 @@ def _format_conversations(
for message in conversation_history:
content = message.get("content", "")
if isinstance(content, str):
- formatted_conversations.append({
- "agent": str(message.get("name", "Unknown")),
- "content": content,
- "timestamp": datetime.now(UTC).isoformat(),
- })
+ formatted_conversations.append(
+ {
+ "agent": str(message.get("name", "Unknown")),
+ "content": content,
+ "timestamp": datetime.now(UTC).isoformat(),
+ }
+ )
return formatted_conversations
@@ -675,7 +689,7 @@ def run(inputs_dict: dict[str, Any]) -> dict[str, Any]:
)
return (
fallback_outputs.model_dump()
- if hasattr(fallback_outputs, 'model_dump')
+ if hasattr(fallback_outputs, "model_dump")
else fallback_outputs.dict()
)
@@ -684,9 +698,7 @@ def run(inputs_dict: dict[str, Any]) -> dict[str, Any]:
outputs = implementation.execute_multi_agent_task(inputs)
# Use model_dump() which is the newer pydantic v2 way to convert to dict
return (
- outputs.model_dump()
- if hasattr(outputs, 'model_dump')
- else outputs.dict()
+ outputs.model_dump() if hasattr(outputs, "model_dump") else outputs.dict()
) # type: ignore[attr-defined]
@@ -700,12 +712,12 @@ def run(inputs_dict: dict[str, Any]) -> dict[str, Any]:
"pr_context": {
"repository": "my-org/my-repo",
"branch": "feature/role-based-access",
- "title": "Add role-based access control"
+ "title": "Add role-based access control",
},
"agents_config": {
"complexity_level": "medium",
"max_agents": 4,
- "specialized_agents": ["security_auditor", "qa_engineer"]
+ "specialized_agents": ["security_auditor", "qa_engineer"],
},
}
diff --git a/engine/codeflow_engine/actions/generation/__init__.py b/engine/codeflow_engine/actions/generation/__init__.py
new file mode 100644
index 0000000..45cf0a9
--- /dev/null
+++ b/engine/codeflow_engine/actions/generation/__init__.py
@@ -0,0 +1,21 @@
+"""CodeFlow Engine - Generation Actions."""
+
+from codeflow_engine.actions.generate_barrel_file import GenerateBarrelFile
+from codeflow_engine.actions.generate_prop_table import GeneratePropTable
+from codeflow_engine.actions.generate_release_notes import GenerateReleaseNotes
+from codeflow_engine.actions.scaffold_api_route import (
+ ScaffoldApiRoute as ScaffoldAPIRoute,
+)
+from codeflow_engine.actions.scaffold_component import ScaffoldComponent
+from codeflow_engine.actions.scaffold_shared_hook import ScaffoldSharedHook
+from codeflow_engine.actions.svg_to_component import SvgToComponent as SVGToComponent
+
+__all__ = [
+ "GenerateBarrelFile",
+ "GeneratePropTable",
+ "GenerateReleaseNotes",
+ "ScaffoldAPIRoute",
+ "ScaffoldComponent",
+ "ScaffoldSharedHook",
+ "SVGToComponent",
+]
diff --git a/engine/codeflow_engine/actions/git/__init__.py b/engine/codeflow_engine/actions/git/__init__.py
new file mode 100644
index 0000000..5cb770f
--- /dev/null
+++ b/engine/codeflow_engine/actions/git/__init__.py
@@ -0,0 +1,10 @@
+"""CodeFlow Engine - Git Actions."""
+
+from codeflow_engine.actions.apply_git_patch import ApplyGitPatch
+from codeflow_engine.actions.create_github_release import (
+ CreateGithubRelease as CreateGitHubRelease,
+)
+from codeflow_engine.actions.delete_branch import DeleteBranch
+from codeflow_engine.actions.find_merged_branches import FindMergedBranches
+
+__all__ = ["ApplyGitPatch", "CreateGitHubRelease", "DeleteBranch", "FindMergedBranches"]
diff --git a/engine/codeflow_engine/actions/issues/__init__.py b/engine/codeflow_engine/actions/issues/__init__.py
new file mode 100644
index 0000000..ba02f85
--- /dev/null
+++ b/engine/codeflow_engine/actions/issues/__init__.py
@@ -0,0 +1,19 @@
+"""CodeFlow Engine - Issue/PR Actions."""
+
+from codeflow_engine.actions.create_or_update_issue import CreateOrUpdateIssue
+from codeflow_engine.actions.find_stale_issues_or_prs import FindStaleIssuesOrPRs
+from codeflow_engine.actions.handle_pr_comment import PRCommentHandler
+from codeflow_engine.actions.issue_creator import IssueCreator
+from codeflow_engine.actions.label_pr import LabelPR
+from codeflow_engine.actions.label_pr_by_size import LabelPRBySize
+from codeflow_engine.actions.post_comment import PostComment
+
+__all__ = [
+ "CreateOrUpdateIssue",
+ "FindStaleIssuesOrPRs",
+ "IssueCreator",
+ "LabelPR",
+ "LabelPRBySize",
+ "PRCommentHandler",
+ "PostComment",
+]
diff --git a/engine/codeflow_engine/actions/llm/__init__.py b/engine/codeflow_engine/actions/llm/__init__.py
index cc2614d..351cbd3 100644
--- a/engine/codeflow_engine/actions/llm/__init__.py
+++ b/engine/codeflow_engine/actions/llm/__init__.py
@@ -59,7 +59,7 @@
)
# Conditionally import MistralProvider
-MistralProvider = None
+MistralProvider: Any = None
if MISTRAL_AVAILABLE:
from codeflow_engine.actions.ai_actions.llm.providers import MistralProvider
diff --git a/engine/codeflow_engine/actions/llm/providers/__init__.py b/engine/codeflow_engine/actions/llm/providers/__init__.py
index 916e2a8..1650424 100644
--- a/engine/codeflow_engine/actions/llm/providers/__init__.py
+++ b/engine/codeflow_engine/actions/llm/providers/__init__.py
@@ -17,7 +17,7 @@
MISTRAL_AVAILABLE = True
except ImportError:
- MistralProvider = None
+ MistralProvider: Any = None
MISTRAL_AVAILABLE = False
from codeflow_engine.actions.llm.providers.openai import OpenAIProvider
diff --git a/engine/codeflow_engine/actions/llm/providers/azure_openai.py b/engine/codeflow_engine/actions/llm/providers/azure_openai.py
index b649ddb..1ab4772 100644
--- a/engine/codeflow_engine/actions/llm/providers/azure_openai.py
+++ b/engine/codeflow_engine/actions/llm/providers/azure_openai.py
@@ -71,9 +71,12 @@ def complete(self, request: dict[str, Any]) -> LLMResponse:
"""Complete a chat conversation using Azure OpenAI."""
client = self._get_client()
if not client:
+ fallback_model = (
+ request.get("model") or self.default_model or "azure-openai"
+ )
return LLMResponse.from_error(
"Azure OpenAI client not available",
- request.get("model", self.default_model),
+ str(fallback_model),
)
try:
@@ -122,6 +125,7 @@ def complete(self, request: dict[str, Any]) -> LLMResponse:
except Exception as e:
error_msg = f"Azure OpenAI API error: {e!s}"
logger.exception(error_msg)
- return LLMResponse.from_error(
- error_msg, request.get("model", self.default_model)
+ fallback_model = (
+ request.get("model") or self.default_model or "azure-openai"
)
+ return LLMResponse.from_error(error_msg, str(fallback_model))
diff --git a/engine/codeflow_engine/actions/maintenance/__init__.py b/engine/codeflow_engine/actions/maintenance/__init__.py
new file mode 100644
index 0000000..0d3152d
--- /dev/null
+++ b/engine/codeflow_engine/actions/maintenance/__init__.py
@@ -0,0 +1,17 @@
+"""CodeFlow Engine - Maintenance Actions."""
+
+from codeflow_engine.actions.enforce_import_order import EnforceImportOrder
+from codeflow_engine.actions.find_large_assets import FindLargeAssets
+from codeflow_engine.actions.generate_todo_report import GenerateTodoReport
+from codeflow_engine.actions.update_dependency import UpdateDependency
+from codeflow_engine.actions.update_docs_file import UpdateDocsFile
+from codeflow_engine.actions.update_migration_plan import UpdateMigrationPlan
+
+__all__ = [
+ "EnforceImportOrder",
+ "FindLargeAssets",
+ "GenerateTodoReport",
+ "UpdateDependency",
+ "UpdateDocsFile",
+ "UpdateMigrationPlan",
+]
diff --git a/engine/codeflow_engine/actions/platform/__init__.py b/engine/codeflow_engine/actions/platform/__init__.py
new file mode 100644
index 0000000..883952f
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/__init__.py
@@ -0,0 +1,25 @@
+"""CodeFlow Engine - Platform Actions."""
+
+from .config import PlatformConfigManager
+from .detector import PlatformDetector
+from .file_analyzer import FileAnalyzer
+from .models import PlatformDetectorInputs, PlatformDetectorOutputs
+from .multi_platform_integrator import MultiPlatformIntegrator
+from .patterns import PlatformPatterns
+from .prototype_enhancer import PrototypeEnhancer
+from .scoring import PlatformScoringEngine
+from .utils import calculate_confidence_score, get_confidence_level
+
+__all__ = [
+ "FileAnalyzer",
+ "MultiPlatformIntegrator",
+ "PlatformConfigManager",
+ "PlatformDetector",
+ "PlatformDetectorInputs",
+ "PlatformDetectorOutputs",
+ "PlatformPatterns",
+ "PlatformScoringEngine",
+ "PrototypeEnhancer",
+ "calculate_confidence_score",
+ "get_confidence_level",
+]
diff --git a/engine/codeflow_engine/actions/platform/analysis/__init__.py b/engine/codeflow_engine/actions/platform/analysis/__init__.py
new file mode 100644
index 0000000..7cc5006
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/analysis/__init__.py
@@ -0,0 +1,3 @@
+"""Compatibility wrapper for grouped platform analysis imports."""
+
+from codeflow_engine.actions.platform_detection.analysis import * # noqa: F403
diff --git a/engine/codeflow_engine/actions/platform/config.py b/engine/codeflow_engine/actions/platform/config.py
new file mode 100644
index 0000000..dabf67b
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/config.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.config import * # noqa: F403
diff --git a/engine/codeflow_engine/actions/platform/detector.py b/engine/codeflow_engine/actions/platform/detector.py
new file mode 100644
index 0000000..8ebe555
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/detector.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.detector import * # noqa: F403
diff --git a/engine/codeflow_engine/actions/platform/file_analyzer.py b/engine/codeflow_engine/actions/platform/file_analyzer.py
new file mode 100644
index 0000000..aac75c2
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/file_analyzer.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.file_analyzer import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/models.py b/engine/codeflow_engine/actions/platform/models.py
new file mode 100644
index 0000000..4a0652f
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/models.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.models import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/multi_platform_integrator.py b/engine/codeflow_engine/actions/platform/multi_platform_integrator.py
new file mode 100644
index 0000000..a85512b
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/multi_platform_integrator.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.multi_platform_integrator import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/patterns.py b/engine/codeflow_engine/actions/platform/patterns.py
new file mode 100644
index 0000000..0beda01
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/patterns.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.patterns import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/platform_detector.py b/engine/codeflow_engine/actions/platform/platform_detector.py
new file mode 100644
index 0000000..c06bbfc
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/platform_detector.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detector import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/prototype_enhancement/__init__.py b/engine/codeflow_engine/actions/platform/prototype_enhancement/__init__.py
new file mode 100644
index 0000000..9c48254
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/prototype_enhancement/__init__.py
@@ -0,0 +1,3 @@
+"""Compatibility wrapper for grouped prototype enhancement imports."""
+
+from codeflow_engine.actions.prototype_enhancement import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/prototype_enhancer.py b/engine/codeflow_engine/actions/platform/prototype_enhancer.py
new file mode 100644
index 0000000..25f5372
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/prototype_enhancer.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.prototype_enhancer import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/schema.py b/engine/codeflow_engine/actions/platform/schema.py
new file mode 100644
index 0000000..266442c
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/schema.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.schema import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/scoring.py b/engine/codeflow_engine/actions/platform/scoring.py
new file mode 100644
index 0000000..3cf9eb4
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/scoring.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.scoring import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform/utils.py b/engine/codeflow_engine/actions/platform/utils.py
new file mode 100644
index 0000000..02ae52f
--- /dev/null
+++ b/engine/codeflow_engine/actions/platform/utils.py
@@ -0,0 +1 @@
+from codeflow_engine.actions.platform_detection.utils import * # noqa: F403
\ No newline at end of file
diff --git a/engine/codeflow_engine/actions/platform_detection/analysis/base.py b/engine/codeflow_engine/actions/platform_detection/analysis/base.py
index a92fec8..55e5c28 100644
--- a/engine/codeflow_engine/actions/platform_detection/analysis/base.py
+++ b/engine/codeflow_engine/actions/platform_detection/analysis/base.py
@@ -90,7 +90,9 @@ def register_handler(
) -> None:
"""Register a handler for files matching the given pattern."""
# Import here to avoid circular import
- from codeflow_engine.actions.platform_detection.analysis.handlers import FileHandler
+ from codeflow_engine.actions.platform_detection.analysis.handlers import (
+ FileHandler,
+ )
if not issubclass(handler_cls, FileHandler):
msg = f"Handler must be a subclass of FileHandler, got {handler_cls}"
@@ -106,6 +108,10 @@ def register_handler(
def get_handler_for_file(self, file_path: Path) -> FileHandler | None:
"""Get the appropriate handler for the given file."""
+ from codeflow_engine.actions.platform_detection.analysis.handlers import (
+ DefaultFileHandler,
+ )
+
filename = file_path.name
# Check for exact matches first
@@ -119,7 +125,8 @@ def get_handler_for_file(self, file_path: Path) -> FileHandler | None:
return handler_cls()
# Default to the catch-all handler if available
- return self.handlers.get("*", DefaultFileHandler)()
+ default_handler_cls = self.handlers.get("*", DefaultFileHandler)
+ return default_handler_cls()
def analyze_file(self, file_path: Path) -> FileAnalysisResult:
"""
diff --git a/engine/codeflow_engine/actions/platform_detection/analysis/handlers.py b/engine/codeflow_engine/actions/platform_detection/analysis/handlers.py
index 9912eb7..4954ae8 100644
--- a/engine/codeflow_engine/actions/platform_detection/analysis/handlers.py
+++ b/engine/codeflow_engine/actions/platform_detection/analysis/handlers.py
@@ -60,16 +60,19 @@ def analyze(self, file_path: Path, analyzer: "FileAnalyzer") -> FileAnalysisResu
try:
# First check file patterns (name, path, etc.)
- for pattern in self.file_patterns:
- if pattern.matches(file_path):
- result.add_match(pattern.platform_id, pattern.confidence)
+ for file_pattern in self.file_patterns:
+ if file_pattern.matches(file_path):
+ result.add_match(file_pattern.platform_id, file_pattern.confidence)
# Then check content patterns
content = self._read_file(file_path)
if content is not None:
- for pattern in self.patterns:
- if pattern.matches(content):
- result.add_match(pattern.platform_id, pattern.confidence)
+ for content_pattern in self.patterns:
+ if content_pattern.matches(content):
+ result.add_match(
+ content_pattern.platform_id,
+ content_pattern.confidence,
+ )
# Run any custom analysis
custom_result = self._analyze_content(file_path, content, analyzer)
diff --git a/engine/codeflow_engine/actions/platform_detection/config.py b/engine/codeflow_engine/actions/platform_detection/config.py
index bb1f85e..7f063bd 100644
--- a/engine/codeflow_engine/actions/platform_detection/config.py
+++ b/engine/codeflow_engine/actions/platform_detection/config.py
@@ -9,7 +9,10 @@
from pathlib import Path
from typing import Any, ClassVar, Optional, Self, TypeVar
-from codeflow_engine.actions.platform_detection.schema import PlatformConfig, PlatformType
+from codeflow_engine.actions.platform_detection.schema import (
+ PlatformConfig,
+ PlatformType,
+)
# Set up logging
@@ -250,7 +253,9 @@ def get_ai_platforms(self) -> dict[str, dict[str, Any]]:
ai_platforms[platform_id] = platform_dict
return ai_platforms
- def get_platforms_by_type(self, platform_type: str) -> list[dict[str, Any]]:
+ def get_platforms_by_type(
+ self, platform_type: str | PlatformType
+ ) -> list[dict[str, Any]]:
"""Get all platforms of a specific type.
Args:
@@ -259,7 +264,19 @@ def get_platforms_by_type(self, platform_type: str) -> list[dict[str, Any]]:
Returns:
A list of platform configurations of the specified type
"""
- platform_ids: list[str] = self._platforms_by_type.get(platform_type, [])
+ normalized_type: PlatformType | None
+ if isinstance(platform_type, PlatformType):
+ normalized_type = platform_type
+ else:
+ try:
+ normalized_type = PlatformType(platform_type)
+ except ValueError:
+ normalized_type = None
+
+ if normalized_type is None:
+ return []
+
+ platform_ids: list[str] = self._platforms_by_type.get(normalized_type, [])
return [self._platforms[pid].to_dict() for pid in platform_ids]
def get_all_platforms(self) -> dict[str, dict[str, Any]]:
diff --git a/engine/codeflow_engine/actions/platform_detection/file_analyzer.py b/engine/codeflow_engine/actions/platform_detection/file_analyzer.py
index 4655bdf..0851fd9 100644
--- a/engine/codeflow_engine/actions/platform_detection/file_analyzer.py
+++ b/engine/codeflow_engine/actions/platform_detection/file_analyzer.py
@@ -48,11 +48,11 @@ def scan_for_platform_files(
self, platform_configs: dict[str, dict[str, Any]]
) -> dict[str, list[str]]:
"""Scan workspace for platform-specific files."""
- results = {}
+ results: dict[str, list[str]] = {}
# Convert platform configs to the new format
for platform, config in platform_configs.items():
- file_matches = []
+ file_matches: list[str] = []
for file_pattern in config.get("files", []):
# Convert glob patterns to the new format
pattern = FilePattern(platform, file_pattern, confidence=0.7)
@@ -71,10 +71,10 @@ def scan_for_folder_patterns(
self, platform_configs: dict[str, dict[str, Any]]
) -> dict[str, list[str]]:
"""Scan workspace for platform-specific folder patterns."""
- results = {}
+ results: dict[str, list[str]] = {}
for platform, config in platform_configs.items():
- folder_matches = []
+ folder_matches: list[str] = []
for folder_pattern in config.get("folder_patterns", []):
# Look for directories matching the pattern
folder_matches.extend(
@@ -88,22 +88,6 @@ def scan_for_folder_patterns(
return results
- def _find_files_by_pattern(self, pattern: str) -> list[str]:
- """Find files matching the given glob pattern."""
- return [
- str(file_path.relative_to(self.workspace_path))
- for file_path in self.workspace_path.glob("**/" + pattern)
- if file_path.is_file()
- ]
-
- def _find_folders_by_pattern(self, pattern: str) -> list[str]:
- """Find folders matching the given glob pattern."""
- return [
- str(dir_path.relative_to(self.workspace_path))
- for dir_path in self.workspace_path.glob("**/" + pattern)
- if dir_path.is_dir()
- ]
-
def analyze_file_content(
self, file_path: str, platform_configs: dict[str, dict[str, Any]]
) -> dict[str, float]:
@@ -146,7 +130,7 @@ def scan_for_platform_indicators(
Dict mapping platform names to their detection results
"""
# Convert platform configs to the new format
- results = {}
+ results: dict[str, dict[str, Any]] = {}
# Get file and folder matches using the new analyzer
file_matches = self.scan_for_platform_files(platform_configs)
@@ -154,21 +138,18 @@ def scan_for_platform_indicators(
# Combine results in the legacy format
for platform in set(file_matches.keys()) | set(folder_matches.keys()):
- results[platform] = {
- "files": file_matches.get(platform, []),
- "folders": folder_matches.get(platform, []),
- "confidence": 0.0,
- }
+ platform_files = file_matches.get(platform, [])
+ platform_folders = folder_matches.get(platform, [])
# Calculate confidence based on number of matches
- file_count = len(results[platform]["files"])
- folder_count = len(results[platform]["folders"])
+ file_count = len(platform_files)
+ folder_count = len(platform_folders)
# More matches = higher confidence, but cap at 0.7
confidence = min(0.7, 0.1 * (file_count + folder_count))
# Analyze file contents for additional confidence
- for file_path in results[platform]["files"]:
+ for file_path in platform_files:
content_results = self.analyze_file_content(
str(self.workspace_path / file_path),
{platform: platform_configs[platform]},
@@ -176,7 +157,11 @@ def scan_for_platform_indicators(
if platform in content_results:
confidence = min(1.0, confidence + content_results[platform])
- results[platform]["confidence"] = confidence
+ results[platform] = {
+ "files": platform_files,
+ "folders": platform_folders,
+ "confidence": confidence,
+ }
return results
diff --git a/engine/codeflow_engine/actions/platform_detection/schema.py b/engine/codeflow_engine/actions/platform_detection/schema.py
index 665e6ed..c69de58 100644
--- a/engine/codeflow_engine/actions/platform_detection/schema.py
+++ b/engine/codeflow_engine/actions/platform_detection/schema.py
@@ -76,6 +76,11 @@ class UIConfig(TypedDict, total=False):
theme_color: str
+def _default_ui_config() -> UIConfig:
+ """Create an empty UI config with the expected typed shape."""
+ return {}
+
+
class PlatformStatus(StrEnum):
"""Status of the platform support."""
@@ -176,7 +181,7 @@ class PlatformConfig:
integrations: list[str] = field(default_factory=list)
integration_type: IntegrationType = IntegrationType.API
integration_instructions: str = ""
- ui_config: UIConfig = field(default_factory=dict)
+ ui_config: UIConfig = field(default_factory=_default_ui_config)
# Documentation
documentation_url: str = ""
@@ -298,11 +303,9 @@ def from_dict(cls, platform_id: str, data: dict) -> PlatformConfig:
supported_languages=data.get("supported_languages", []),
supported_frameworks=data.get("supported_frameworks", []),
integrations=data.get("integrations", []),
- integration_type=IntegrationType(
- data.get("integration_type", "api")
- ),
+ integration_type=IntegrationType(data.get("integration_type", "api")),
integration_instructions=data.get("integration_instructions", ""),
- ui_config=data.get("ui_config", {}),
+ ui_config=cast(UIConfig, data.get("ui_config", {})),
# Documentation
documentation_url=data.get("documentation_url", ""),
setup_guide=data.get("setup_guide", ""),
diff --git a/engine/codeflow_engine/actions/prototype_enhancement/enhancer.py b/engine/codeflow_engine/actions/prototype_enhancement/enhancer.py
index 2f15960..d62e1af 100644
--- a/engine/codeflow_engine/actions/prototype_enhancement/enhancer.py
+++ b/engine/codeflow_engine/actions/prototype_enhancement/enhancer.py
@@ -11,15 +11,10 @@
from typing import Any
-try:
- from codeflow_engine.models.artifacts import ( # type: ignore[import-untyped]
- PrototypeEnhancerInputs,
- PrototypeEnhancerOutputs,
- )
-except ImportError:
- # Fallback for when models are not available during development
- from typing import Any as PrototypeEnhancerInputs
- from typing import Any as PrototypeEnhancerOutputs
+from codeflow_engine.models.artifacts import (
+ PrototypeEnhancerInputs,
+ PrototypeEnhancerOutputs,
+)
from codeflow_engine.actions.prototype_enhancement.enhancement_strategies import (
EnhancementStrategy,
@@ -32,6 +27,39 @@
logger = logging.getLogger(__name__)
+def _build_output(
+ *,
+ success: bool,
+ message: str,
+ enhanced_files: dict[str, Any],
+ package_json_updates: dict[str, Any],
+ deployment_configs: dict[str, Any],
+ checklist: list[str],
+ next_steps: list[str],
+ enhancement_summary: str,
+ platform_specific_notes: list[str],
+) -> PrototypeEnhancerOutputs:
+ metadata = {
+ "enhanced_files": enhanced_files,
+ "package_json_updates": package_json_updates,
+ "deployment_configs": deployment_configs,
+ "checklist": checklist,
+ "enhancement_summary": enhancement_summary,
+ "platform_specific_notes": platform_specific_notes,
+ }
+
+ generated_files = sorted(enhanced_files.keys())
+
+ return PrototypeEnhancerOutputs(
+ success=success,
+ message=message,
+ generated_files=generated_files,
+ modified_files=[],
+ next_steps=next_steps,
+ metadata=metadata,
+ )
+
+
class PrototypeEnhancer:
"""
Modular prototype enhancer that provides platform-specific enhancements
@@ -104,6 +132,7 @@ def _enhance_for_production(
self, inputs: PrototypeEnhancerInputs, config: PlatformConfig
) -> PrototypeEnhancerOutputs:
"""Enhance project for production readiness."""
+ _ = config
strategy = self.enhancement_strategies[inputs.platform]
project_path = Path(inputs.project_path) if inputs.project_path else Path.cwd()
@@ -128,22 +157,23 @@ def _enhance_for_production(
"production_ready"
]
- return PrototypeEnhancerOutputs(
+ return _build_output(
+ success=True,
+ message="Production enhancement completed successfully",
enhanced_files=enhancement_result.get("files", {}),
package_json_updates=package_json_updates,
deployment_configs=self._get_deployment_configs(inputs.platform),
- production_checklist=production_checklist,
+ checklist=production_checklist,
next_steps=next_steps,
enhancement_summary=self._create_enhancement_summary(enhancement_result),
- platform_specific_notes=self._get_platform_notes(
- inputs.platform, "production_ready"
- ),
+ platform_specific_notes=self._get_platform_notes(inputs.platform, "production_ready"),
)
def _enhance_for_testing(
self, inputs: PrototypeEnhancerInputs, config: PlatformConfig
) -> PrototypeEnhancerOutputs:
"""Enhance project for testing."""
+ _ = config
strategy = self.enhancement_strategies[inputs.platform]
project_path = Path(inputs.project_path) if inputs.project_path else Path.cwd()
@@ -175,22 +205,23 @@ def _enhance_for_testing(
# Get next steps
next_steps = self.platform_registry.get_next_steps()[inputs.platform]["testing"]
- return PrototypeEnhancerOutputs(
+ return _build_output(
+ success=True,
+ message="Testing enhancement completed successfully",
enhanced_files=enhancement_result.get("files", {}),
package_json_updates=package_json_updates,
deployment_configs={},
- production_checklist=testing_checklist,
+ checklist=testing_checklist,
next_steps=next_steps,
enhancement_summary=self._create_enhancement_summary(enhancement_result),
- platform_specific_notes=self._get_platform_notes(
- inputs.platform, "testing"
- ),
+ platform_specific_notes=self._get_platform_notes(inputs.platform, "testing"),
)
def _enhance_for_security(
self, inputs: PrototypeEnhancerInputs, config: PlatformConfig
) -> PrototypeEnhancerOutputs:
"""Enhance project for security."""
+ _ = config
strategy = self.enhancement_strategies[inputs.platform]
project_path = Path(inputs.project_path) if inputs.project_path else Path.cwd()
@@ -224,16 +255,16 @@ def _enhance_for_security(
"security"
]
- return PrototypeEnhancerOutputs(
+ return _build_output(
+ success=True,
+ message="Security enhancement completed successfully",
enhanced_files=enhancement_result.get("files", {}),
package_json_updates=package_json_updates,
deployment_configs={},
- production_checklist=security_checklist,
+ checklist=security_checklist,
next_steps=next_steps,
enhancement_summary=self._create_enhancement_summary(enhancement_result),
- platform_specific_notes=self._get_platform_notes(
- inputs.platform, "security"
- ),
+ platform_specific_notes=self._get_platform_notes(inputs.platform, "security"),
)
def _generate_package_json_updates(
@@ -357,11 +388,13 @@ def _get_platform_notes(self, platform: str, enhancement_type: str) -> list[str]
def _create_error_output(self, error_message: str) -> PrototypeEnhancerOutputs:
"""Create an error output."""
- return PrototypeEnhancerOutputs(
+ return _build_output(
+ success=False,
+ message=error_message,
enhanced_files={},
package_json_updates={},
deployment_configs={},
- production_checklist=[],
+ checklist=[],
next_steps=[f"Error: {error_message}"],
enhancement_summary=f"Enhancement failed: {error_message}",
platform_specific_notes=[],
diff --git a/engine/codeflow_engine/actions/prototype_enhancement/generators/base_generator.py b/engine/codeflow_engine/actions/prototype_enhancement/generators/base_generator.py
index f2a4808..7d16632 100644
--- a/engine/codeflow_engine/actions/prototype_enhancement/generators/base_generator.py
+++ b/engine/codeflow_engine/actions/prototype_enhancement/generators/base_generator.py
@@ -4,6 +4,7 @@
Provides the BaseGenerator class that all specialized generators inherit from.
"""
+from dataclasses import asdict
from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, TypeVar
@@ -90,6 +91,6 @@ def _get_platform_variables(self) -> dict[str, Any]:
return {
"platform": self.platform_config.name,
- "platform_config": self.platform_config.dict(),
- "platform_vars": self.platform_config.variables or {},
+ "platform_config": asdict(self.platform_config),
+ "platform_vars": {},
}
diff --git a/engine/codeflow_engine/actions/prototype_enhancement/generators/template_utils.py b/engine/codeflow_engine/actions/prototype_enhancement/generators/template_utils.py
index 745b8e6..046c4a4 100644
--- a/engine/codeflow_engine/actions/prototype_enhancement/generators/template_utils.py
+++ b/engine/codeflow_engine/actions/prototype_enhancement/generators/template_utils.py
@@ -65,31 +65,24 @@ def render(
Returns:
Rendered template content, or None if template not found
"""
- if variables is None:
- variables = {}
- # Get template metadata
- template_meta = self.template_registry.get_template(template_key)
- if not template_meta:
- return None
-
- # Apply variants if specified
- if variants:
- template_meta = self._apply_variants(template_meta, variants)
-
- # Get the template content
- template_path = self.templates_dir / template_meta.path
- if not template_path.exists():
+ rendered = self.template_registry.generate_template(
+ template_key,
+ variables=variables,
+ variants=variants,
+ )
+ if rendered is None:
return None
- template_content = template_path.read_text(encoding="utf-8")
+ metadata = self.template_registry.get_metadata(template_key)
+ if metadata is None:
+ return rendered
- # If it's a Jinja2 template (has .j2 extension), render it
+ template_path = metadata.template_file_path
if template_path.suffix == ".j2":
- template = self.jinja_env.from_string(template_content)
- return template.render(**variables)
+ template = self.jinja_env.from_string(rendered)
+ return template.render(**(variables or {}))
- # Otherwise, return the raw content
- return template_content
+ return rendered
def _apply_variants(
self, template_meta: TemplateMetadata, variants: list[str]
@@ -103,16 +96,5 @@ def _apply_variants(
Returns:
New template metadata with variants applied
"""
- # Start with a copy of the original metadata
- result = template_meta.copy()
- # Apply each variant in order
- for variant in variants:
- if variant in template_meta.variants:
- variant_meta = template_meta.variants[variant]
- # Merge the variant's variables with the current ones
- if variant_meta.variables:
- result.variables = {**result.variables, **variant_meta.variables}
- # Apply any template overrides
- if variant_meta.template:
- result.template = variant_meta.template
- return result
+ _ = variants
+ return template_meta
diff --git a/engine/codeflow_engine/actions/quality/__init__.py b/engine/codeflow_engine/actions/quality/__init__.py
new file mode 100644
index 0000000..9dc338d
--- /dev/null
+++ b/engine/codeflow_engine/actions/quality/__init__.py
@@ -0,0 +1,19 @@
+"""CodeFlow Engine - Quality Actions."""
+
+from codeflow_engine.actions.check_dependency_licenses import CheckDependencyLicenses
+from codeflow_engine.actions.check_lockfile_drift import CheckLockfileDrift
+from codeflow_engine.actions.check_performance_budget import CheckPerformanceBudget
+from codeflow_engine.actions.quality_gates import QualityGateValidator as QualityGates
+from codeflow_engine.actions.run_accessibility_audit import RunAccessibilityAudit
+from codeflow_engine.actions.run_security_audit import RunSecurityAudit
+from codeflow_engine.actions.visual_regression_test import VisualRegressionTest
+
+__all__ = [
+ "CheckDependencyLicenses",
+ "CheckLockfileDrift",
+ "CheckPerformanceBudget",
+ "QualityGates",
+ "RunAccessibilityAudit",
+ "RunSecurityAudit",
+ "VisualRegressionTest",
+]
diff --git a/engine/codeflow_engine/actions/quality/gates/__init__.py b/engine/codeflow_engine/actions/quality/gates/__init__.py
new file mode 100644
index 0000000..90d4778
--- /dev/null
+++ b/engine/codeflow_engine/actions/quality/gates/__init__.py
@@ -0,0 +1,11 @@
+"""Compatibility wrapper for grouped quality gates imports."""
+
+from codeflow_engine.actions.quality_gates.evaluator import (
+ QualityGateValidator as QualityGateEvaluator,
+)
+from codeflow_engine.actions.quality_gates.models import (
+ QualityGateInputs as QualityGate,
+ QualityGateOutputs as QualityGateResult,
+)
+
+__all__ = ["QualityGate", "QualityGateEvaluator", "QualityGateResult"]
diff --git a/engine/codeflow_engine/actions/quality_engine/platform_detector.py b/engine/codeflow_engine/actions/quality_engine/platform_detector.py
index d1421e1..01ddf5d 100644
--- a/engine/codeflow_engine/actions/quality_engine/platform_detector.py
+++ b/engine/codeflow_engine/actions/quality_engine/platform_detector.py
@@ -4,6 +4,7 @@
import platform
import sys
+from typing import Any
import structlog
@@ -20,7 +21,7 @@ def __init__(self) -> None:
self.is_linux = self.platform == "linux"
self.is_macos = self.platform == "darwin"
- def detect_platform(self) -> dict[str, any]:
+ def detect_platform(self) -> dict[str, Any]:
"""Detect the current platform and its capabilities."""
return {
"platform": self.platform,
@@ -154,8 +155,9 @@ def get_cross_platform_tools(self) -> list[str]:
]
-def create_platform_aware_tool_registry(tool_registry: any) -> any:
+def create_platform_aware_tool_registry(tool_registry: Any) -> PlatformDetector:
"""Create a platform-aware tool registry that adapts tools for the current platform."""
+ _ = tool_registry
detector = PlatformDetector()
if detector.should_show_windows_warning():
diff --git a/engine/codeflow_engine/actions/scripts/__init__.py b/engine/codeflow_engine/actions/scripts/__init__.py
new file mode 100644
index 0000000..6c5de2d
--- /dev/null
+++ b/engine/codeflow_engine/actions/scripts/__init__.py
@@ -0,0 +1,19 @@
+"""CodeFlow Engine - Script Actions."""
+
+from codeflow_engine.actions.publish_package import PublishPackage
+from codeflow_engine.actions.run_changed_tests import RunChangedTests
+from codeflow_engine.actions.run_db_migrations import RunDBMigrations
+from codeflow_engine.actions.run_script import RunScript
+from codeflow_engine.actions.seed_database import SeedDatabase
+from codeflow_engine.actions.take_screenshots import TakeScreenshots
+from codeflow_engine.actions.trigger_deployment import TriggerDeployment
+
+__all__ = [
+ "PublishPackage",
+ "RunChangedTests",
+ "RunDBMigrations",
+ "RunScript",
+ "SeedDatabase",
+ "TakeScreenshots",
+ "TriggerDeployment",
+]
\ No newline at end of file
diff --git a/engine/codeflow_engine/ai/implementation_roadmap/phase_manager.py b/engine/codeflow_engine/ai/implementation_roadmap/phase_manager.py
index 81f0d01..c50a653 100644
--- a/engine/codeflow_engine/ai/implementation_roadmap/phase_manager.py
+++ b/engine/codeflow_engine/ai/implementation_roadmap/phase_manager.py
@@ -164,7 +164,7 @@ async def execute_all_phases(
async def _check_phase_dependencies(self, phase: Phase) -> bool:
"""Check if phase dependencies are satisfied"""
- for dep_phase_name in phase.dependencies:
+ for dep_phase_name in phase.depends_on:
dep_execution = self.phase_executions.get(dep_phase_name)
if not dep_execution or dep_execution.status != "completed":
return False
@@ -296,7 +296,7 @@ def get_overall_progress(self) -> dict[str, Any]:
def get_next_steps(self) -> list[dict[str, Any]]:
"""Get recommended next steps based on current progress"""
- next_steps = []
+ next_steps: list[dict[str, Any]] = []
# Check immediate phase
immediate_progress = self.get_phase_progress("immediate")
@@ -361,12 +361,8 @@ def get_next_steps(self) -> list[dict[str, Any]]:
def get_phase_summary(self) -> dict[str, Any]:
"""Get comprehensive summary of all phases"""
- summary = {
- "execution_summary": self.get_overall_progress(),
- "phase_details": {},
- "next_steps": self.get_next_steps(),
- "recommendations": [],
- }
+ phase_details: dict[str, dict[str, Any]] = {}
+ recommendations: list[str] = []
# Add detailed phase information
for phase_name in ["immediate", "medium", "strategic"]:
@@ -375,7 +371,7 @@ def get_phase_summary(self) -> dict[str, Any]:
phase_detail = {
"name": phase_name,
- "description": phase.description if phase else "",
+ "description": phase.display_name if phase else "",
"total_tasks": len(phase.tasks) if phase else 0,
"status": execution.status if execution else "not_started",
"progress": self.get_phase_progress(phase_name),
@@ -396,21 +392,27 @@ def get_phase_summary(self) -> dict[str, Any]:
},
}
)
+ phase_details[phase_name] = phase_detail
- summary["phase_details"][phase_name] = phase_detail
+ summary: dict[str, Any] = {
+ "execution_summary": self.get_overall_progress(),
+ "phase_details": phase_details,
+ "next_steps": self.get_next_steps(),
+ "recommendations": recommendations,
+ }
# Add recommendations based on current state
overall_progress = summary["execution_summary"]["overall_progress_percentage"]
if overall_progress < 25:
- summary["recommendations"].append(
+ recommendations.append(
"Focus on completing immediate priority tasks first for quick wins"
)
elif overall_progress < 75:
- summary["recommendations"].append(
+ recommendations.append(
"Consider running medium priority tasks in parallel where possible"
)
else:
- summary["recommendations"].append(
+ recommendations.append(
"Excellent progress! Consider strategic enhancements for long-term benefits"
)
diff --git a/engine/codeflow_engine/ai/implementation_roadmap/task_executor.py b/engine/codeflow_engine/ai/implementation_roadmap/task_executor.py
index 536abb5..1081417 100644
--- a/engine/codeflow_engine/ai/implementation_roadmap/task_executor.py
+++ b/engine/codeflow_engine/ai/implementation_roadmap/task_executor.py
@@ -3,7 +3,7 @@
Handles the execution of individual implementation tasks
"""
-from dataclasses import dataclass
+from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any
@@ -21,14 +21,8 @@ class TaskExecution:
start_time: datetime
end_time: datetime | None = None
error_message: str | None = None
- files_created: list[str] | None = None
- logs: list[str] | None = None
-
- def __post_init__(self) -> None:
- if self.files_created is None:
- self.files_created = []
- if self.logs is None:
- self.logs = []
+ files_created: list[str] = field(default_factory=list)
+ logs: list[str] = field(default_factory=list)
@property
def duration(self) -> float | None:
diff --git a/engine/codeflow_engine/cli/git_hooks.py b/engine/codeflow_engine/cli/git_hooks.py
index ddfc639..4ebe367 100644
--- a/engine/codeflow_engine/cli/git_hooks.py
+++ b/engine/codeflow_engine/cli/git_hooks.py
@@ -130,6 +130,9 @@ def uninstall_hooks(self) -> bool:
def _install_pre_commit_hook(self, config: dict[str, Any] | None = None):
"""Install pre-commit hook for quality checks."""
+ if self.hooks_dir is None:
+ msg = "Hooks directory is not available"
+ raise RuntimeError(msg)
hook_content = self._generate_pre_commit_hook(config)
hook_path = self.hooks_dir / "pre-commit"
@@ -141,6 +144,9 @@ def _install_pre_commit_hook(self, config: dict[str, Any] | None = None):
def _install_post_commit_hook(self, config: dict[str, Any] | None = None):
"""Install post-commit hook for metrics collection."""
+ if self.hooks_dir is None:
+ msg = "Hooks directory is not available"
+ raise RuntimeError(msg)
hook_content = self._generate_post_commit_hook(config)
hook_path = self.hooks_dir / "post-commit"
@@ -152,6 +158,9 @@ def _install_post_commit_hook(self, config: dict[str, Any] | None = None):
def _install_commit_msg_hook(self, config: dict[str, Any] | None = None):
"""Install commit-msg hook for commit message validation."""
+ if self.hooks_dir is None:
+ msg = "Hooks directory is not available"
+ raise RuntimeError(msg)
hook_content = self._generate_commit_msg_hook(config)
hook_path = self.hooks_dir / "commit-msg"
diff --git a/engine/codeflow_engine/clients/github_client.py b/engine/codeflow_engine/clients/github_client.py
index cf4294f..7321d63 100644
--- a/engine/codeflow_engine/clients/github_client.py
+++ b/engine/codeflow_engine/clients/github_client.py
@@ -178,7 +178,7 @@ async def _request(
ClientError: For network or other client errors
"""
url = f"{self.config.base_url}{endpoint}"
- last_error = None
+ last_error: Exception | None = None
for attempt in range(self.config.max_retries + 1):
try:
@@ -189,39 +189,37 @@ async def _request(
f"{method} {url} (attempt {attempt + 1}/{self.config.max_retries + 1})"
)
- async with self._get_session() as session:
- async with session.request(method, url, **kwargs) as response:
- # Update rate limit info from response
- await self._handle_rate_limit(response)
-
- if (
- response.status == 403
- and "X-RateLimit-Remaining" in response.headers
- ) and int(response.headers["X-RateLimit-Remaining"]) == 0:
- reset_time = int(
- response.headers.get(
- "X-RateLimit-Reset", time.time() + 60
- )
- )
- sleep_time = max(
- 1, reset_time - time.time() + 1
- ) # Add 1s buffer
- self.logger.warning(
- "Rate limited. Waiting %.1fs until reset",
- sleep_time,
- )
- await asyncio.sleep(sleep_time)
- continue # Retry the request after waiting
-
- response.raise_for_status()
-
- # Handle different response types
- content_type = response.headers.get("Content-Type", "")
- if "application/json" in content_type:
- return await response.json()
- if content_type.startswith("text/"):
- return {"text": await response.text()}
- return {}
+ session = await self._get_session()
+ async with session.request(method, url, **kwargs) as response:
+ # Update rate limit info from response
+ await self._handle_rate_limit(response)
+
+ if (
+ response.status == 403
+ and "X-RateLimit-Remaining" in response.headers
+ ) and int(response.headers["X-RateLimit-Remaining"]) == 0:
+ reset_time = int(
+ response.headers.get("X-RateLimit-Reset", time.time() + 60)
+ )
+ sleep_time = max(
+ 1, reset_time - time.time() + 1
+ ) # Add 1s buffer
+ self.logger.warning(
+ "Rate limited. Waiting %.1fs until reset",
+ sleep_time,
+ )
+ await asyncio.sleep(sleep_time)
+ continue # Retry the request after waiting
+
+ response.raise_for_status()
+
+ # Handle different response types
+ content_type = response.headers.get("Content-Type", "")
+ if "application/json" in content_type:
+ return await response.json()
+ if content_type.startswith("text/"):
+ return {"text": await response.text()}
+ return {}
except ClientResponseError as e:
if e.status == 403 and "rate limit" in (e.message or "").lower():
diff --git a/engine/codeflow_engine/config/__init__.py b/engine/codeflow_engine/config/__init__.py
index 72c967b..3a16c68 100644
--- a/engine/codeflow_engine/config/__init__.py
+++ b/engine/codeflow_engine/config/__init__.py
@@ -11,6 +11,7 @@
from dataclasses import dataclass, field
import os
+from os import PathLike
import pathlib
from typing import Any
import warnings
@@ -96,16 +97,16 @@ def _load_from_environment(self) -> None:
if env_value is not None:
# Handle type conversion
int_fields = {
- "max_concurrent_workflows", "workflow_timeout",
- "workflow_retry_attempts", "workflow_retry_delay"
+ "max_concurrent_workflows",
+ "workflow_timeout",
+ "workflow_retry_attempts",
+ "workflow_retry_delay",
}
if attr_name in int_fields:
setattr(self, attr_name, int(env_value))
elif attr_name == "enable_debug_logging":
setattr(
- self,
- attr_name,
- env_value.lower() in {"true", "1", "yes", "on"}
+ self, attr_name, env_value.lower() in {"true", "1", "yes", "on"}
)
else:
setattr(self, attr_name, env_value)
@@ -114,7 +115,7 @@ def _load_from_file(self, config_path: str | None = None) -> None:
"""Load configuration from YAML file."""
if config_path is None:
# Look for config file in common locations
- possible_paths = [
+ possible_paths: list[str | PathLike[str]] = [
"codeflow.yaml",
"codeflow.yml",
".codeflow.yaml",
@@ -124,16 +125,18 @@ def _load_from_file(self, config_path: str | None = None) -> None:
]
for path in possible_paths:
- if pathlib.Path(path).exists():
- config_path = path
+ normalized_path = pathlib.Path(path)
+ if normalized_path.exists():
+ config_path = str(normalized_path)
break
if config_path and pathlib.Path(config_path).exists():
try:
with open(config_path, encoding="utf-8") as f:
- config_data = yaml.safe_load(f)
+ loaded_config = yaml.safe_load(f)
- if config_data:
+ if isinstance(loaded_config, dict):
+ config_data = dict(loaded_config)
for key, value in config_data.items():
if hasattr(self, key):
setattr(self, key, value)
diff --git a/engine/codeflow_engine/config/validation.py b/engine/codeflow_engine/config/validation.py
index 6c5fa87..8459ad7 100644
--- a/engine/codeflow_engine/config/validation.py
+++ b/engine/codeflow_engine/config/validation.py
@@ -74,14 +74,6 @@ def _validate_github_config(self) -> None:
"GitHub timeout is very low, may cause request failures"
)
- # Retries validation
- if github.max_retries < 0:
- self.errors.append("GitHub max_retries cannot be negative")
- elif github.max_retries > 10:
- self.warnings.append(
- "GitHub max_retries is very high, may cause long delays"
- )
-
def _validate_llm_config(self) -> None:
"""Validate LLM configuration."""
llm = self.settings.llm
diff --git a/engine/codeflow_engine/core/__init__.py b/engine/codeflow_engine/core/__init__.py
new file mode 100644
index 0000000..8f7d32d
--- /dev/null
+++ b/engine/codeflow_engine/core/__init__.py
@@ -0,0 +1,81 @@
+"""
+CodeFlow Core Module - Shared base classes, utilities, and patterns.
+
+This module provides common infrastructure used across the codeflow_engine package:
+- Base classes for managers, validators, and handlers
+- Common patterns (Registry, Factory, etc.)
+- Configuration utilities
+- Shared utilities
+"""
+
+from codeflow_engine.core.config import (
+ AppSettings,
+ BaseConfig,
+ ConfigLoader,
+ DatabaseSettings,
+ LLMSettings,
+ LoggingSettings,
+ env_bool,
+ env_float,
+ env_int,
+ env_list,
+ env_var,
+)
+from codeflow_engine.core.files import (
+ BackupService,
+ ContentValidationResult,
+ ContentValidator,
+ FileBackup,
+ FileIO,
+)
+from codeflow_engine.core.llm import (
+ BaseLLMProvider,
+ LLMProviderRegistry,
+ LLMResponse,
+ OpenAICompatibleProvider,
+)
+from codeflow_engine.core.managers import (
+ BaseManager,
+ ManagerConfig,
+ SessionMixin,
+ StatsMixin,
+)
+from codeflow_engine.core.validation import (
+ BaseTypeValidator,
+ CompositeValidator,
+ SecurityPatterns,
+ ValidationResult,
+ ValidationSeverity,
+)
+
+__all__ = [
+ "AppSettings",
+ "BackupService",
+ "BaseConfig",
+ "BaseLLMProvider",
+ "BaseManager",
+ "BaseTypeValidator",
+ "CompositeValidator",
+ "ConfigLoader",
+ "ContentValidationResult",
+ "ContentValidator",
+ "DatabaseSettings",
+ "FileBackup",
+ "FileIO",
+ "LLMProviderRegistry",
+ "LLMResponse",
+ "LLMSettings",
+ "LoggingSettings",
+ "ManagerConfig",
+ "OpenAICompatibleProvider",
+ "SecurityPatterns",
+ "SessionMixin",
+ "StatsMixin",
+ "ValidationResult",
+ "ValidationSeverity",
+ "env_bool",
+ "env_float",
+ "env_int",
+ "env_list",
+ "env_var",
+]
diff --git a/engine/codeflow_engine/core/config/__init__.py b/engine/codeflow_engine/core/config/__init__.py
new file mode 100644
index 0000000..dff423a
--- /dev/null
+++ b/engine/codeflow_engine/core/config/__init__.py
@@ -0,0 +1,38 @@
+"""
+Core Configuration Module.
+
+Provides centralized configuration management with:
+- Environment-based configuration loading
+- Type-safe configuration models
+- Environment variable helpers
+"""
+
+from codeflow_engine.core.config.base import (
+ BaseConfig,
+ ConfigLoader,
+ env_bool,
+ env_float,
+ env_int,
+ env_list,
+ env_var,
+)
+from codeflow_engine.core.config.models import (
+ AppSettings,
+ DatabaseSettings,
+ LLMSettings,
+ LoggingSettings,
+)
+
+__all__ = [
+ "AppSettings",
+ "BaseConfig",
+ "ConfigLoader",
+ "DatabaseSettings",
+ "LLMSettings",
+ "LoggingSettings",
+ "env_bool",
+ "env_float",
+ "env_int",
+ "env_list",
+ "env_var",
+]
diff --git a/engine/codeflow_engine/core/config/base.py b/engine/codeflow_engine/core/config/base.py
new file mode 100644
index 0000000..faeedd6
--- /dev/null
+++ b/engine/codeflow_engine/core/config/base.py
@@ -0,0 +1,164 @@
+"""
+Base Configuration Utilities.
+
+Provides environment variable helpers and base configuration patterns.
+"""
+
+import os
+from abc import ABC
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, TypeVar
+
+import structlog
+
+
+logger = structlog.get_logger(__name__)
+
+T = TypeVar("T")
+
+
+def env_var(name: str, default: str = "") -> str:
+ return os.getenv(name, default)
+
+
+def env_bool(name: str, default: bool = False) -> bool:
+ value = os.getenv(name, "").lower()
+ if not value:
+ return default
+ return value in ("1", "true", "yes", "on")
+
+
+def env_int(name: str, default: int = 0) -> int:
+ value = os.getenv(name, "")
+ if not value:
+ return default
+ try:
+ return int(value)
+ except ValueError:
+ logger.warning("invalid_env_int", name=name, value=value, default=default)
+ return default
+
+
+def env_float(name: str, default: float = 0.0) -> float:
+ value = os.getenv(name, "")
+ if not value:
+ return default
+ try:
+ return float(value)
+ except ValueError:
+ logger.warning("invalid_env_float", name=name, value=value, default=default)
+ return default
+
+
+def env_list(name: str, default: list[str] | None = None, separator: str = ",") -> list[str]:
+ value = os.getenv(name, "")
+ if not value:
+ return default or []
+ return [item.strip() for item in value.split(separator) if item.strip()]
+
+
+@dataclass
+class BaseConfig(ABC):
+ @classmethod
+ def from_env(cls, prefix: str = "") -> "BaseConfig":
+ raise NotImplementedError("Subclasses must implement from_env")
+
+ def to_dict(self) -> dict[str, Any]:
+ return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
+
+ def merge(self, overrides: dict[str, Any]) -> "BaseConfig":
+ current = self.to_dict()
+ current.update(overrides)
+ return type(self)(**current)
+
+
+@dataclass
+class ConfigLoader:
+ config_paths: list[str] = field(default_factory=lambda: ["pyproject.toml", "config.yaml"])
+
+ def load_toml(self, path: str, section: str | None = None) -> dict[str, Any]:
+ if not Path(path).exists():
+ return {}
+
+ try:
+ import tomllib
+ except ImportError:
+ try:
+ import tomli as tomllib # type: ignore[import-not-found]
+ except ImportError:
+ logger.debug("toml_not_available", path=path)
+ return {}
+
+ try:
+ with open(path, "rb") as f:
+ data = tomllib.load(f)
+
+ if section:
+ for key in section.split("."):
+ data = data.get(key, {})
+ if not isinstance(data, dict):
+ return {}
+
+ return data
+ except Exception as e:
+ logger.warning("toml_load_failed", path=path, error=str(e))
+ return {}
+
+ def load_yaml(self, path: str) -> dict[str, Any]:
+ if not Path(path).exists():
+ return {}
+
+ try:
+ import yaml
+ except ImportError:
+ logger.debug("yaml_not_available", path=path)
+ return {}
+
+ try:
+ with open(path, encoding="utf-8") as f:
+ return yaml.safe_load(f) or {}
+ except Exception as e:
+ logger.warning("yaml_load_failed", path=path, error=str(e))
+ return {}
+
+ def load_json(self, path: str) -> dict[str, Any]:
+ if not Path(path).exists():
+ return {}
+
+ import json
+
+ try:
+ with open(path, encoding="utf-8") as f:
+ return json.load(f)
+ except Exception as e:
+ logger.warning("json_load_failed", path=path, error=str(e))
+ return {}
+
+ def load(self, path: str, section: str | None = None) -> dict[str, Any]:
+ path_lower = path.lower()
+ if path_lower.endswith(".toml"):
+ return self.load_toml(path, section)
+ if path_lower.endswith((".yaml", ".yml")):
+ return self.load_yaml(path)
+ if path_lower.endswith(".json"):
+ return self.load_json(path)
+ logger.warning("unknown_config_format", path=path)
+ return {}
+
+ def load_merged(self, paths: list[str] | None = None, section: str | None = None) -> dict[str, Any]:
+ paths = paths or self.config_paths
+ merged: dict[str, Any] = {}
+ for path in paths:
+ config = self.load(path, section)
+ merged = self._deep_merge(merged, config)
+ return merged
+
+ def _deep_merge(self, base: dict[str, Any], override: dict[str, Any]) -> dict[str, Any]:
+ result = dict(base)
+ for key, value in override.items():
+ if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+ result[key] = self._deep_merge(result[key], value)
+ else:
+ result[key] = value
+        return result
diff --git a/engine/codeflow_engine/core/config/models.py b/engine/codeflow_engine/core/config/models.py
new file mode 100644
index 0000000..138235d
--- /dev/null
+++ b/engine/codeflow_engine/core/config/models.py
@@ -0,0 +1,147 @@
+"""
+Configuration Models.
+"""
+
+from dataclasses import dataclass, field
+from enum import StrEnum
+from typing import Any
+
+from codeflow_engine.core.config.base import (
+ BaseConfig,
+ env_bool,
+ env_float,
+ env_int,
+ env_var,
+)
+
+
+class Environment(StrEnum):
+ DEVELOPMENT = "development"
+ STAGING = "staging"
+ PRODUCTION = "production"
+ TESTING = "testing"
+
+
+class LogLevel(StrEnum):
+ DEBUG = "DEBUG"
+ INFO = "INFO"
+ WARNING = "WARNING"
+ ERROR = "ERROR"
+ CRITICAL = "CRITICAL"
+
+
+@dataclass
+class LoggingSettings(BaseConfig):
+ level: str = "INFO"
+ format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ json_format: bool = False
+
+ @classmethod
+ def from_env(cls, prefix: str = "") -> "LoggingSettings":
+ p = f"{prefix}_" if prefix else ""
+ return cls(
+ level=env_var(f"{p}LOG_LEVEL", "INFO").upper(),
+ format=env_var(f"{p}LOG_FORMAT", cls.format),
+ json_format=env_bool(f"{p}LOG_JSON"),
+ )
+
+
+@dataclass
+class DatabaseSettings(BaseConfig):
+ url: str = "sqlite:///:memory:"
+ pool_size: int = 5
+ max_overflow: int = 10
+ pool_timeout: int = 30
+ pool_recycle: int = 3600
+ pool_pre_ping: bool = True
+ echo: bool = False
+ ssl_required: bool = False
+
+ @classmethod
+ def from_env(cls, prefix: str = "") -> "DatabaseSettings":
+ p = f"{prefix}_" if prefix else ""
+ return cls(
+ url=env_var(f"{p}DATABASE_URL", "sqlite:///:memory:"),
+ pool_size=env_int(f"{p}DB_POOL_SIZE", 5),
+ max_overflow=env_int(f"{p}DB_MAX_OVERFLOW", 10),
+ pool_timeout=env_int(f"{p}DB_POOL_TIMEOUT", 30),
+ pool_recycle=env_int(f"{p}DB_POOL_RECYCLE", 3600),
+ pool_pre_ping=env_bool(f"{p}DB_POOL_PRE_PING", True),
+ echo=env_bool(f"{p}DB_ECHO"),
+ ssl_required=env_bool(f"{p}DB_SSL_REQUIRED"),
+ )
+
+
+@dataclass
+class LLMSettings(BaseConfig):
+ provider: str = "openai"
+ api_key: str = ""
+ api_key_env: str = ""
+ model: str = "gpt-4"
+ temperature: float = 0.7
+ max_tokens: int = 4096
+ base_url: str | None = None
+
+ @classmethod
+ def from_env(cls, prefix: str = "") -> "LLMSettings":
+ p = f"{prefix}_" if prefix else ""
+ provider = env_var(f"{p}LLM_PROVIDER", "openai")
+ api_key_env_map = {
+ "openai": "OPENAI_API_KEY",
+ "anthropic": "ANTHROPIC_API_KEY",
+ "groq": "GROQ_API_KEY",
+ "mistral": "MISTRAL_API_KEY",
+ "azure": "AZURE_OPENAI_API_KEY",
+ }
+ api_key_env = api_key_env_map.get(provider, f"{provider.upper()}_API_KEY")
+ return cls(
+ provider=provider,
+ api_key=env_var(api_key_env, ""),
+ api_key_env=api_key_env,
+ model=env_var(f"{p}LLM_MODEL", "gpt-4"),
+ temperature=env_float(f"{p}LLM_TEMPERATURE", 0.7),
+ max_tokens=env_int(f"{p}LLM_MAX_TOKENS", 4096),
+ base_url=env_var(f"{p}LLM_BASE_URL") or None,
+ )
+
+
+@dataclass
+class AppSettings(BaseConfig):
+ environment: Environment = Environment.DEVELOPMENT
+ debug: bool = False
+ app_name: str = "codeflow_engine"
+ version: str = "0.1.0"
+ logging: LoggingSettings = field(default_factory=LoggingSettings)
+ database: DatabaseSettings = field(default_factory=DatabaseSettings)
+ llm: LLMSettings = field(default_factory=LLMSettings)
+ custom: dict[str, Any] = field(default_factory=dict)
+
+ @classmethod
+ def from_env(cls, prefix: str = "") -> "AppSettings":
+ p = f"{prefix}_" if prefix else ""
+ env_str = env_var(f"{p}ENVIRONMENT", "development").lower()
+ try:
+ environment = Environment(env_str)
+ except ValueError:
+ environment = Environment.DEVELOPMENT
+ return cls(
+ environment=environment,
+ debug=env_bool(f"{p}DEBUG"),
+ app_name=env_var(f"{p}APP_NAME", "codeflow_engine"),
+ version=env_var(f"{p}VERSION", "0.1.0"),
+ logging=LoggingSettings.from_env(prefix),
+ database=DatabaseSettings.from_env(prefix),
+ llm=LLMSettings.from_env(prefix),
+ )
+
+ @property
+ def is_production(self) -> bool:
+ return self.environment == Environment.PRODUCTION
+
+ @property
+ def is_development(self) -> bool:
+ return self.environment == Environment.DEVELOPMENT
+
+ @property
+ def is_testing(self) -> bool:
+        return self.environment == Environment.TESTING
diff --git a/engine/codeflow_engine/core/files/__init__.py b/engine/codeflow_engine/core/files/__init__.py
new file mode 100644
index 0000000..44fdb1d
--- /dev/null
+++ b/engine/codeflow_engine/core/files/__init__.py
@@ -0,0 +1,13 @@
+"""Core File Operations Module."""
+
+from codeflow_engine.core.files.backup import BackupService, FileBackup
+from codeflow_engine.core.files.io import FileIO
+from codeflow_engine.core.files.validator import ContentValidationResult, ContentValidator
+
+__all__ = [
+ "BackupService",
+ "ContentValidationResult",
+ "ContentValidator",
+ "FileBackup",
+ "FileIO",
+]
diff --git a/engine/codeflow_engine/core/files/backup.py b/engine/codeflow_engine/core/files/backup.py
new file mode 100644
index 0000000..ac9e318
--- /dev/null
+++ b/engine/codeflow_engine/core/files/backup.py
@@ -0,0 +1,162 @@
+"""Backup Service."""
+
+from dataclasses import dataclass, field
+from datetime import UTC, datetime
+import operator
+from pathlib import Path
+import shutil
+from typing import Any
+
+import structlog
+
+from codeflow_engine.core.files.io import FileIO
+
+
+logger = structlog.get_logger(__name__)
+
+
+@dataclass
+class FileBackup:
+ file_path: str
+ backup_path: str
+ backup_time: datetime
+ original_size: int
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+
+class BackupService:
+ def __init__(self, backup_directory: str = "./backups") -> None:
+ self.backup_directory = Path(backup_directory)
+ self._ensure_backup_directory()
+
+ def _ensure_backup_directory(self) -> None:
+ try:
+ self.backup_directory.mkdir(parents=True, exist_ok=True)
+ except Exception as e:
+ logger.warning("backup_dir_create_failed", error=str(e))
+
+ def create_backup(self, file_path: str, prefix: str = "") -> FileBackup | None:
+ path = Path(file_path)
+ if not path.exists():
+ logger.warning("backup_source_not_found", file_path=file_path)
+ return None
+ try:
+ timestamp = datetime.now(UTC).strftime("%Y%m%d_%H%M%S")
+ prefix_part = f"{prefix}_" if prefix else ""
+ backup_filename = (
+ f"{path.stem}.{prefix_part}backup_{timestamp}{path.suffix}"
+ )
+ backup_path = self.backup_directory / backup_filename
+ shutil.copy2(file_path, backup_path)
+ backup = FileBackup(
+ file_path=str(path.resolve()),
+ backup_path=str(backup_path),
+ backup_time=datetime.now(UTC),
+ original_size=FileIO.get_size(file_path),
+ )
+ logger.info(
+ "backup_created", file_path=file_path, backup_path=str(backup_path)
+ )
+ return backup
+ except Exception as e:
+ logger.error("backup_failed", file_path=file_path, error=str(e))
+ return None
+
+ def create_backups(self, file_paths: list[str], prefix: str = "") -> int:
+ successful = 0
+ for file_path in file_paths:
+ if self.create_backup(file_path, prefix):
+ successful += 1
+ return successful
+
+ def restore(self, file_path: str, backup_path: str) -> bool:
+ if not Path(backup_path).exists():
+ logger.error("backup_not_found", backup_path=backup_path)
+ return False
+ try:
+ shutil.copy2(backup_path, file_path)
+ logger.info("file_restored", file_path=file_path, backup_path=backup_path)
+ return True
+ except Exception as e:
+ logger.error(
+ "restore_failed",
+ file_path=file_path,
+ backup_path=backup_path,
+ error=str(e),
+ )
+ return False
+
+ def list_backups(self, file_path: str | None = None) -> list[dict[str, Any]]:
+ try:
+ if not self.backup_directory.exists():
+ return []
+ backups: list[dict[str, Any]] = []
+ for backup_file in self.backup_directory.glob("*.backup_*"):
+ try:
+ stat = backup_file.stat()
+ backup_info: dict[str, Any] = {
+ "backup_path": str(backup_file),
+ "backup_name": backup_file.name,
+ "size_bytes": stat.st_size,
+ "modified_time": datetime.fromtimestamp(
+ stat.st_mtime, tz=UTC
+ ).isoformat(),
+ }
+ name = backup_file.name
+ if ".backup_" in name:
+ original_stem = name.split(".backup_")[0]
+ parts = original_stem.rsplit(".", 1)
+ original_stem_value = parts[0] if parts else original_stem
+ backup_info["original_stem"] = original_stem_value
+ if file_path:
+ file_stem = Path(file_path).stem
+ if not str(original_stem_value).endswith(file_stem):
+ continue
+ backups.append(backup_info)
+ except Exception:
+ continue
+ backups.sort(key=operator.itemgetter("modified_time"), reverse=True)
+ return backups
+ except Exception as e:
+ logger.error("list_backups_failed", error=str(e))
+ return []
+
+ def get_latest_backup(self, file_path: str) -> str | None:
+ backups = self.list_backups(file_path)
+ return backups[0]["backup_path"] if backups else None
+
+ def cleanup_old_backups(
+ self, max_backups: int = 10, older_than_days: int | None = None
+ ) -> int:
+ try:
+ backups = self.list_backups()
+ if len(backups) <= max_backups:
+ return 0
+ backups_to_remove = backups[max_backups:]
+ if older_than_days:
+ cutoff_time = datetime.now(UTC).timestamp() - (
+ older_than_days * 24 * 60 * 60
+ )
+ backups_to_remove = [
+ backup
+ for backup in backups_to_remove
+ if datetime.fromisoformat(backup["modified_time"]).timestamp()
+ < cutoff_time
+ ]
+ removed = 0
+ for backup in backups_to_remove:
+ try:
+ Path(backup["backup_path"]).unlink()
+ logger.debug("backup_removed", backup_path=backup["backup_path"])
+ removed += 1
+ except Exception as e:
+ logger.warning(
+ "backup_remove_failed",
+ backup_path=backup["backup_path"],
+ error=str(e),
+ )
+ logger.info("backups_cleaned", removed=removed)
+ return removed
+ except Exception as e:
+ logger.error("cleanup_failed", error=str(e))
+ return 0
diff --git a/engine/codeflow_engine/core/files/io.py b/engine/codeflow_engine/core/files/io.py
new file mode 100644
index 0000000..b81c4a7
--- /dev/null
+++ b/engine/codeflow_engine/core/files/io.py
@@ -0,0 +1,127 @@
+"""File I/O Operations."""
+
+from datetime import UTC, datetime
+from pathlib import Path
+import shutil
+from typing import Any
+
+import structlog
+
+
+logger = structlog.get_logger(__name__)
+
+
+class FileIO:
+ @staticmethod
+ def read(file_path: str, encoding: str = "utf-8") -> tuple[bool, str]:
+ try:
+ with Path(file_path).open(encoding=encoding) as f:
+ content = f.read()
+ return True, content
+ except Exception as e:
+ logger.warning("file_read_failed", file_path=file_path, error=str(e))
+ return False, ""
+
+ @staticmethod
+ def read_or_none(file_path: str, encoding: str = "utf-8") -> str | None:
+ success, content = FileIO.read(file_path, encoding)
+ return content if success else None
+
+ @staticmethod
+ def write(file_path: str, content: str, encoding: str = "utf-8", create_dirs: bool = False) -> bool:
+ try:
+ path = Path(file_path)
+ if create_dirs:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ with path.open("w", encoding=encoding) as f:
+ f.write(content)
+ logger.debug("file_written", file_path=file_path, size=len(content))
+ return True
+ except Exception as e:
+ logger.error("file_write_failed", file_path=file_path, error=str(e))
+ return False
+
+ @staticmethod
+ def exists(file_path: str) -> bool:
+ return Path(file_path).exists()
+
+ @staticmethod
+ def is_file(file_path: str) -> bool:
+ return Path(file_path).is_file()
+
+ @staticmethod
+ def is_dir(file_path: str) -> bool:
+ return Path(file_path).is_dir()
+
+ @staticmethod
+ def get_size(file_path: str) -> int:
+ try:
+ return Path(file_path).stat().st_size
+ except Exception:
+ return 0
+
+ @staticmethod
+ def get_info(file_path: str) -> dict[str, Any]:
+ try:
+ path = Path(file_path)
+ if not path.exists():
+ return {"exists": False}
+ stat = path.stat()
+ return {
+ "exists": True,
+ "size_bytes": stat.st_size,
+ "size_mb": stat.st_size / (1024 * 1024),
+ "modified_time": datetime.fromtimestamp(stat.st_mtime, tz=UTC).isoformat(),
+ "created_time": datetime.fromtimestamp(stat.st_ctime, tz=UTC).isoformat(),
+ "is_file": path.is_file(),
+ "is_directory": path.is_dir(),
+ "extension": path.suffix,
+ "name": path.name,
+ "stem": path.stem,
+ "parent": str(path.parent),
+ }
+ except Exception as e:
+ logger.debug("file_info_failed", file_path=file_path, error=str(e))
+ return {"exists": False, "error": str(e)}
+
+ @staticmethod
+ def copy(source_path: str, destination_path: str) -> bool:
+ try:
+ shutil.copy2(source_path, destination_path)
+ logger.debug("file_copied", source=source_path, destination=destination_path)
+ return True
+ except Exception as e:
+ logger.error("file_copy_failed", source=source_path, destination=destination_path, error=str(e))
+ return False
+
+ @staticmethod
+ def move(source_path: str, destination_path: str) -> bool:
+ try:
+ shutil.move(source_path, destination_path)
+ logger.debug("file_moved", source=source_path, destination=destination_path)
+ return True
+ except Exception as e:
+ logger.error("file_move_failed", source=source_path, destination=destination_path, error=str(e))
+ return False
+
+ @staticmethod
+ def delete(file_path: str) -> bool:
+ try:
+ path = Path(file_path)
+ if not path.exists():
+ return True
+ path.unlink()
+ logger.debug("file_deleted", file_path=file_path)
+ return True
+ except Exception as e:
+ logger.error("file_delete_failed", file_path=file_path, error=str(e))
+ return False
+
+ @staticmethod
+ def mkdir(directory_path: str, parents: bool = True) -> bool:
+ try:
+ Path(directory_path).mkdir(parents=parents, exist_ok=True)
+ return True
+ except Exception as e:
+ logger.error("mkdir_failed", directory_path=directory_path, error=str(e))
+            return False
diff --git a/engine/codeflow_engine/core/files/validator.py b/engine/codeflow_engine/core/files/validator.py
new file mode 100644
index 0000000..2f78536
--- /dev/null
+++ b/engine/codeflow_engine/core/files/validator.py
@@ -0,0 +1,107 @@
+"""Content Validator."""
+
+from dataclasses import dataclass, field
+from typing import Any
+
+import structlog
+
+
+logger = structlog.get_logger(__name__)
+
+
+@dataclass
+class ContentValidationResult:
+ valid: bool = True
+ issues: list[str] = field(default_factory=list)
+ warnings: list[str] = field(default_factory=list)
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+
+class ContentValidator:
+ MAX_LINE_LENGTH = 1000
+ WARN_LINE_LENGTH = 500
+
+ def __init__(self, max_line_length: int = MAX_LINE_LENGTH, warn_line_length: int = WARN_LINE_LENGTH, check_trailing_whitespace: bool = True, check_mixed_line_endings: bool = True) -> None:
+ self.max_line_length = max_line_length
+ self.warn_line_length = warn_line_length
+ self.check_trailing_whitespace = check_trailing_whitespace
+ self.check_mixed_line_endings = check_mixed_line_endings
+
+ def validate(self, content: str) -> ContentValidationResult:
+ result = ContentValidationResult()
+ if not content.strip():
+ result.warnings.append("Content is empty")
+ result.metadata["is_empty"] = True
+ return result
+ self._check_encoding(content, result)
+ if not result.valid:
+ return result
+ lines = content.split("\n")
+ result.metadata["line_count"] = len(lines)
+ self._check_line_lengths(lines, result)
+ if self.check_mixed_line_endings:
+ self._check_line_endings(content, result)
+ if self.check_trailing_whitespace:
+ self._check_trailing_whitespace(lines, result)
+ return result
+
+ def _check_encoding(self, content: str, result: ContentValidationResult) -> None:
+ try:
+ content.encode("utf-8")
+ except UnicodeEncodeError:
+ result.issues.append("Content contains invalid UTF-8 characters")
+ result.valid = False
+
+ def _check_line_lengths(self, lines: list[str], result: ContentValidationResult) -> None:
+ long_lines = []
+ very_long_lines = []
+ for i, line in enumerate(lines, 1):
+ line_len = len(line)
+ if line_len > self.max_line_length:
+ very_long_lines.append(i)
+ elif line_len > self.warn_line_length:
+ long_lines.append(i)
+ if very_long_lines:
+ result.warnings.append(
+ f"Lines exceeding {self.max_line_length} chars: {very_long_lines[:5]}"
+ + (f" (+{len(very_long_lines) - 5} more)" if len(very_long_lines) > 5 else "")
+ )
+ if long_lines:
+ result.metadata["long_lines"] = long_lines[:10]
+
+ def _check_line_endings(self, content: str, result: ContentValidationResult) -> None:
+ has_crlf = "\r\n" in content
+ content_without_crlf = content.replace("\r\n", "")
+ has_lf = "\n" in content_without_crlf
+ has_cr = "\r" in content_without_crlf
+ line_ending_types = sum([has_crlf, has_lf, has_cr])
+ if line_ending_types > 1:
+ result.warnings.append("Mixed line endings detected (CRLF/LF/CR)")
+ result.metadata["mixed_line_endings"] = True
+ if has_crlf and not has_lf and not has_cr:
+ result.metadata["line_ending"] = "CRLF"
+ elif has_lf and not has_crlf and not has_cr:
+ result.metadata["line_ending"] = "LF"
+ elif has_cr and not has_crlf and not has_lf:
+ result.metadata["line_ending"] = "CR"
+ else:
+ result.metadata["line_ending"] = "mixed"
+
+ def _check_trailing_whitespace(self, lines: list[str], result: ContentValidationResult) -> None:
+ lines_with_trailing = []
+ for i, line in enumerate(lines, 1):
+ if line and line != line.rstrip():
+ lines_with_trailing.append(i)
+ if lines_with_trailing:
+ count = len(lines_with_trailing)
+ result.metadata["trailing_whitespace_lines"] = count
+ if count > 10:
+ result.warnings.append(f"{count} lines have trailing whitespace")
+
+ def validate_for_write(self, content: str, strict: bool = False) -> tuple[bool, str]:
+ result = self.validate(content)
+ if not result.valid:
+ return False, "; ".join(result.issues)
+ if strict and result.warnings:
+ return False, "; ".join(result.warnings)
+        return True, "Content is valid"
diff --git a/engine/codeflow_engine/core/llm/__init__.py b/engine/codeflow_engine/core/llm/__init__.py
new file mode 100644
index 0000000..19c7d6f
--- /dev/null
+++ b/engine/codeflow_engine/core/llm/__init__.py
@@ -0,0 +1,14 @@
+"""Core LLM Module - Base classes and utilities for LLM providers."""
+
+from codeflow_engine.core.llm.base import BaseLLMProvider
+from codeflow_engine.core.llm.openai_compatible import OpenAICompatibleProvider
+from codeflow_engine.core.llm.registry import LLMProviderRegistry
+from codeflow_engine.core.llm.response import LLMResponse, ResponseExtractor
+
+__all__ = [
+ "BaseLLMProvider",
+ "LLMProviderRegistry",
+ "LLMResponse",
+ "OpenAICompatibleProvider",
+ "ResponseExtractor",
+]
diff --git a/engine/codeflow_engine/core/llm/base.py b/engine/codeflow_engine/core/llm/base.py
new file mode 100644
index 0000000..e0dd7c1
--- /dev/null
+++ b/engine/codeflow_engine/core/llm/base.py
@@ -0,0 +1,35 @@
+"""Abstract base class for LLM providers."""
+
+import logging
+import os
+from abc import ABC, abstractmethod
+from typing import Any
+
+from codeflow_engine.core.llm.response import LLMResponse
+
+logger = logging.getLogger(__name__)
+
+
+class BaseLLMProvider(ABC):
+ def __init__(self, config: dict[str, Any]) -> None:
+ self.config = config
+ self.api_key = config.get("api_key") or os.getenv(config.get("api_key_env", ""))
+ self.base_url = config.get("base_url")
+ self.default_model = config.get("default_model")
+ self.name = config.get("name", self.__class__.__name__.lower().replace("provider", ""))
+ self.available = False
+
+ @abstractmethod
+ def complete(self, request: dict[str, Any]) -> LLMResponse:
+ pass
+
+ def is_available(self) -> bool:
+ return self.available and bool(self.api_key)
+
+ def get_model(self, request: dict[str, Any], fallback: str = "unknown") -> str:
+ return request.get("model") or self.default_model or fallback
+
+ def _create_error_response(self, error: Exception | str, request: dict[str, Any], fallback_model: str = "unknown") -> LLMResponse:
+ error_msg = str(error) if isinstance(error, Exception) else error
+ model = self.get_model(request, fallback_model)
+        return LLMResponse.from_error(f"Error calling {self.name} API: {error_msg}", model)
diff --git a/engine/codeflow_engine/core/llm/openai_compatible.py b/engine/codeflow_engine/core/llm/openai_compatible.py
new file mode 100644
index 0000000..9035177
--- /dev/null
+++ b/engine/codeflow_engine/core/llm/openai_compatible.py
@@ -0,0 +1,86 @@
+"""OpenAI-Compatible Provider Template."""
+
+import logging
+from typing import Any
+
+from codeflow_engine.core.llm.base import BaseLLMProvider
+from codeflow_engine.core.llm.response import LLMResponse, ResponseExtractor
+
+logger = logging.getLogger(__name__)
+
+
class OpenAICompatibleProvider(BaseLLMProvider):
    """Template provider for services exposing an OpenAI-compatible API.

    Subclasses customize behavior through the class attributes:
    ``DEFAULT_MODEL`` (fallback model name), ``LIBRARY_NAME`` (package name
    used in log messages), and ``CLIENT_CLASS_PATH`` (dotted path of the
    client class to instantiate, e.g. ``"openai.OpenAI"``).
    """

    DEFAULT_MODEL: str = "gpt-4"
    LIBRARY_NAME: str = "openai"
    CLIENT_CLASS_PATH: str = "openai.OpenAI"

    def __init__(self, config: dict[str, Any]) -> None:
        """Initialize the provider and eagerly build the API client."""
        super().__init__(config)
        self.client: Any = None
        self._initialize_client()

    def _initialize_client(self) -> None:
        """Instantiate the client class named by ``CLIENT_CLASS_PATH``.

        Fix: the original hard-coded ``openai.OpenAI`` here, which silently
        ignored ``CLIENT_CLASS_PATH`` overrides in subclasses. The dotted
        path is now resolved dynamically so subclasses work as documented.
        """
        try:
            import importlib

            module_path, _, class_name = self.CLIENT_CLASS_PATH.rpartition(".")
            client_cls = getattr(importlib.import_module(module_path), class_name)
            self.client = client_cls(api_key=self.api_key, base_url=self.base_url)
            self.available = True
        except (ImportError, AttributeError):
            # Optional dependency: mark unavailable instead of failing hard.
            logger.debug(f"{self.LIBRARY_NAME} package not installed")
            self.available = False

    def _get_default_model(self) -> str:
        """Return the configured default model, falling back to DEFAULT_MODEL."""
        return self.default_model or self.DEFAULT_MODEL

    def _prepare_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, str]]:
        """Normalize messages to role/content pairs, dropping empty content."""
        return ResponseExtractor.filter_messages(messages)

    def _make_api_call(self, messages: list[dict[str, str]], model: str, temperature: float, max_tokens: int | None, **kwargs: Any) -> Any:
        """Invoke the chat-completions endpoint with only the supplied params.

        Optional sampling parameters are forwarded only when not None, so
        the backend's own defaults apply otherwise.
        """
        call_params: dict[str, Any] = {
            "model": str(model),
            "messages": messages,
            "temperature": temperature,
        }
        if max_tokens is not None:
            call_params["max_tokens"] = max_tokens
        for key in ["top_p", "frequency_penalty", "presence_penalty", "stop"]:
            if key in kwargs and kwargs[key] is not None:
                call_params[key] = kwargs[key]
        return self.client.chat.completions.create(**call_params)

    def _extract_response(self, response: Any, model: str) -> LLMResponse:
        """Convert a raw SDK response object into an LLMResponse."""
        content, finish_reason, usage = ResponseExtractor.extract_openai_response(response)
        return LLMResponse(
            content=str(content),
            # Prefer the model the backend reports; fall back to the request's.
            model=str(getattr(response, "model", model)),
            finish_reason=str(finish_reason),
            usage=usage,
        )

    def complete(self, request: dict[str, Any]) -> LLMResponse:
        """Execute a chat completion request.

        Returns an error LLMResponse (never raises) when the client is not
        initialized, no valid messages remain after filtering, or the API
        call itself fails.
        """
        if not self.client:
            return LLMResponse.from_error(
                f"{self.name} client not initialized",
                self.get_model(request, self._get_default_model()),
            )
        try:
            messages = request.get("messages", [])
            model = self.get_model(request, self._get_default_model())
            temperature = request.get("temperature", 0.7)
            max_tokens = request.get("max_tokens")
            prepared_messages = self._prepare_messages(messages)
            if not prepared_messages:
                return LLMResponse.from_error("No valid messages provided", model)
            response = self._make_api_call(
                messages=prepared_messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=request.get("top_p"),
                frequency_penalty=request.get("frequency_penalty"),
                presence_penalty=request.get("presence_penalty"),
                stop=request.get("stop"),
            )
            return self._extract_response(response, model)
        except Exception as e:
            return self._create_error_response(e, request, self._get_default_model())
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/llm/registry.py b/engine/codeflow_engine/core/llm/registry.py
new file mode 100644
index 0000000..9530231
--- /dev/null
+++ b/engine/codeflow_engine/core/llm/registry.py
@@ -0,0 +1,71 @@
+"""LLM Provider Registry."""
+
+import logging
+from typing import Any, TypeVar
+
+from codeflow_engine.core.llm.base import BaseLLMProvider
+
+logger = logging.getLogger(__name__)
+
+T = TypeVar("T", bound=BaseLLMProvider)
+
+
class LLMProviderRegistry:
    """Class-level registry mapping provider names to provider classes.

    Names are matched case-insensitively; each provider may carry an
    optional default configuration that per-call config overrides.
    """

    _providers: dict[str, type[BaseLLMProvider]] = {}
    _default_configs: dict[str, dict[str, Any]] = {}

    @classmethod
    def register(cls, name: str, provider_class: type[BaseLLMProvider], default_config: dict[str, Any] | None = None) -> None:
        """Register *provider_class* under *name*, optionally with defaults."""
        key = name.lower()
        cls._providers[key] = provider_class
        if default_config:
            cls._default_configs[key] = default_config
        logger.debug(f"Registered LLM provider: {name}")

    @classmethod
    def unregister(cls, name: str) -> bool:
        """Remove a provider and its defaults; True when it was registered."""
        key = name.lower()
        if key not in cls._providers:
            return False
        del cls._providers[key]
        cls._default_configs.pop(key, None)
        return True

    @classmethod
    def create(cls, name: str, config: dict[str, Any] | None = None) -> BaseLLMProvider | None:
        """Instantiate a registered provider, or return None on any failure."""
        key = name.lower()
        if key not in cls._providers:
            logger.warning(f"Provider '{name}' not found in registry")
            return None
        provider_class = cls._providers[key]
        # Per-call config wins over the registered defaults.
        merged = dict(cls._default_configs.get(key, {}))
        merged.update(config or {})
        try:
            return provider_class(merged)
        except Exception as e:
            logger.exception(f"Failed to create provider '{name}': {e}")
            return None

    @classmethod
    def get_provider_class(cls, name: str) -> type[BaseLLMProvider] | None:
        """Return the registered class for *name*, or None."""
        return cls._providers.get(name.lower())

    @classmethod
    def get_all(cls) -> dict[str, type[BaseLLMProvider]]:
        """Return a snapshot of every registered provider class."""
        return dict(cls._providers)

    @classmethod
    def get_default_config(cls, name: str) -> dict[str, Any]:
        """Return a copy of the default config for *name* ({} when absent)."""
        return dict(cls._default_configs.get(name.lower(), {}))

    @classmethod
    def is_registered(cls, name: str) -> bool:
        """True when a provider is registered under *name* (case-insensitive)."""
        return name.lower() in cls._providers

    @classmethod
    def list_providers(cls) -> list[str]:
        """Return the (lowercased) names of all registered providers."""
        return [*cls._providers]

    @classmethod
    def clear(cls) -> None:
        """Remove every provider and default config (mainly for tests)."""
        cls._providers.clear()
        cls._default_configs.clear()
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/llm/response.py b/engine/codeflow_engine/core/llm/response.py
new file mode 100644
index 0000000..c54b7a7
--- /dev/null
+++ b/engine/codeflow_engine/core/llm/response.py
@@ -0,0 +1,66 @@
+"""LLM Response types and extraction utilities."""
+
+from dataclasses import dataclass
+from typing import Any
+
+
@dataclass
class LLMResponse:
    """Normalized result of an LLM completion call.

    Attributes:
        content: Generated text ("" when the call failed).
        model: Model that produced (or was asked for) the output.
        finish_reason: Why generation stopped ("error" for failures).
        usage: Optional token accounting reported by the backend.
        error: Human-readable failure description, or None on success.
    """

    content: str
    model: str
    finish_reason: str
    usage: dict[str, int] | None = None
    error: str | None = None

    @classmethod
    def from_error(cls, error: str, model: str = "unknown") -> "LLMResponse":
        """Build a failed response: empty content, finish_reason "error"."""
        return cls(
            content="",
            model=model,
            finish_reason="error",
            usage=None,
            error=error,
        )
+
+
class ResponseExtractor:
    """Static helpers that normalize raw SDK responses into plain values."""

    @staticmethod
    def extract_openai_response(response: Any, default_model: str = "unknown") -> tuple[str, str, dict[str, int] | None]:
        """Pull (content, finish_reason, usage) from an OpenAI-style response.

        Missing pieces degrade gracefully: empty content, "stop" finish
        reason, and None usage.
        """
        content = ""
        finish_reason = "stop"
        usage: dict[str, int] | None = None
        choices = getattr(response, "choices", None)
        if choices:
            choice = choices[0]
            message = getattr(choice, "message", None)
            if message is not None and hasattr(message, "content"):
                content = message.content or ""
            finish_reason = getattr(choice, "finish_reason", "stop") or "stop"
        response_usage = getattr(response, "usage", None)
        if response_usage:
            if hasattr(response_usage, "dict"):
                # Models exposing .dict() (pydantic-v1 style) serialize themselves.
                usage = response_usage.dict()
            else:
                usage = {
                    "prompt_tokens": getattr(response_usage, "prompt_tokens", 0),
                    "completion_tokens": getattr(response_usage, "completion_tokens", 0),
                    "total_tokens": getattr(response_usage, "total_tokens", 0),
                }
        return content, finish_reason, usage

    @staticmethod
    def extract_anthropic_response(response: Any) -> tuple[str, str, dict[str, int] | None]:
        """Pull (content, finish_reason, usage) from an Anthropic-style response.

        Text blocks are joined with newlines; usage tokens are remapped to
        the OpenAI-style prompt/completion/total naming.
        """
        content = ""
        if getattr(response, "content", None):
            content = "\n".join(
                block.text for block in response.content if hasattr(block, "text")
            )
        finish_reason = getattr(response, "stop_reason", "stop")
        usage = None
        if hasattr(response, "usage"):
            response_usage = response.usage
            # Usage may be an object with attributes or a plain dict.
            if isinstance(response_usage, dict):
                input_tokens = response_usage.get("input_tokens", 0)
                output_tokens = response_usage.get("output_tokens", 0)
            else:
                input_tokens = getattr(response_usage, "input_tokens", 0)
                output_tokens = getattr(response_usage, "output_tokens", 0)
            usage = {
                "prompt_tokens": input_tokens,
                "completion_tokens": output_tokens,
                "total_tokens": input_tokens + output_tokens,
            }
        return content, finish_reason, usage

    @staticmethod
    def filter_messages(messages: list[dict[str, Any]]) -> list[dict[str, str]]:
        """Normalize chat messages to role/content pairs, dropping blanks.

        Fix: tolerate messages whose "content" is None (e.g. assistant
        tool-call messages) instead of raising AttributeError on .strip().
        Missing roles default to "user".
        """
        filtered: list[dict[str, str]] = []
        for msg in messages:
            content = msg.get("content") or ""
            if content.strip():
                filtered.append({"role": msg.get("role", "user"), "content": content})
        return filtered
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/managers/__init__.py b/engine/codeflow_engine/core/managers/__init__.py
new file mode 100644
index 0000000..b649362
--- /dev/null
+++ b/engine/codeflow_engine/core/managers/__init__.py
@@ -0,0 +1,10 @@
+"""Core Manager Framework."""
+
+from codeflow_engine.core.managers.base import (
+ BaseManager,
+ ManagerConfig,
+ SessionMixin,
+ StatsMixin,
+)
+
+__all__ = ["BaseManager", "ManagerConfig", "SessionMixin", "StatsMixin"]
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/managers/base.py b/engine/codeflow_engine/core/managers/base.py
new file mode 100644
index 0000000..1952d59
--- /dev/null
+++ b/engine/codeflow_engine/core/managers/base.py
@@ -0,0 +1,153 @@
+"""Base Manager Framework."""
+
+from abc import ABC
+from dataclasses import dataclass, field
+from datetime import UTC, datetime
+from typing import Any, TypeVar
+
+import structlog
+
+
+T = TypeVar("T", bound="ManagerConfig")
+
+
@dataclass
class ManagerConfig:
    """Declarative settings shared by all managers."""

    name: str = "manager"  # identification / logger name
    enabled: bool = True  # whether the manager should run
    log_level: str = "INFO"  # textual logging level
    metadata: dict[str, Any] = field(default_factory=dict)  # free-form extras

    def merge(self: T, overrides: dict[str, Any]) -> T:
        """Return a new config of the same type with *overrides* applied."""
        merged = {**vars(self), **overrides}
        return type(self)(**merged)
+
+
+class BaseManager(ABC):
+ def __init__(self, config: ManagerConfig | None = None) -> None:
+ self._config = config or ManagerConfig()
+ self._logger = structlog.get_logger(self._config.name)
+ self._started = False
+ self._start_time: datetime | None = None
+
+ @property
+ def config(self) -> ManagerConfig:
+ return self._config
+
+ @property
+ def logger(self) -> structlog.stdlib.BoundLogger:
+ return self._logger
+
+ @property
+ def is_started(self) -> bool:
+ return self._started
+
+ @property
+ def uptime_seconds(self) -> float:
+ if not self._start_time:
+ return 0.0
+ return (datetime.now(UTC) - self._start_time).total_seconds()
+
+ def startup(self) -> None:
+ if self._started:
+ self._logger.warning("manager_already_started", name=self._config.name)
+ return
+ self._logger.info("manager_starting", name=self._config.name)
+ self._start_time = datetime.now(UTC)
+ self._on_startup()
+ self._started = True
+ self._logger.info("manager_started", name=self._config.name)
+
+ def shutdown(self) -> None:
+ if not self._started:
+ return
+ self._logger.info("manager_shutting_down", name=self._config.name)
+ self._on_shutdown()
+ self._started = False
+ self._logger.info("manager_shutdown", name=self._config.name)
+
+ def _on_startup(self) -> None:
+ pass
+
+ def _on_shutdown(self) -> None:
+ pass
+
+
+class SessionMixin:
+ def __init__(self) -> None:
+ self._sessions: dict[str, dict[str, Any]] = {}
+ self._current_session: str | None = None
+
+ @property
+ def current_session_id(self) -> str | None:
+ return self._current_session
+
+ @property
+ def active_sessions(self) -> list[str]:
+ return [sid for sid, data in self._sessions.items() if data.get("is_active", False)]
+
+ def start_session(self, session_id: str, metadata: dict[str, Any] | None = None) -> None:
+ self._sessions[session_id] = {
+ "start_time": datetime.now(UTC),
+ "is_active": True,
+ "metadata": metadata or {},
+ "data": {},
+ }
+ self._current_session = session_id
+
+ def end_session(self, session_id: str | None = None) -> None:
+ sid = session_id or self._current_session
+ if sid and sid in self._sessions:
+ self._sessions[sid]["is_active"] = False
+ self._sessions[sid]["end_time"] = datetime.now(UTC)
+ if self._current_session == sid:
+ self._current_session = None
+
+ def get_session_data(self, session_id: str | None = None) -> dict[str, Any]:
+ sid = session_id or self._current_session
+ if sid and sid in self._sessions:
+ return self._sessions[sid].get("data", {})
+ return {}
+
+ def set_session_data(self, key: str, value: Any, session_id: str | None = None) -> None:
+ sid = session_id or self._current_session
+ if sid and sid in self._sessions:
+ self._sessions[sid].setdefault("data", {})[key] = value
+
+
+class StatsMixin:
+ def __init__(self) -> None:
+ self._stats: dict[str, int | float] = {}
+ self._stats_history: list[dict[str, Any]] = []
+
+ def increment_stat(self, name: str, amount: int = 1) -> None:
+ self._stats[name] = self._stats.get(name, 0) + amount
+
+ def set_stat(self, name: str, value: int | float) -> None:
+ self._stats[name] = value
+
+ def get_stat(self, name: str, default: int | float = 0) -> int | float:
+ return self._stats.get(name, default)
+
+ def get_all_stats(self) -> dict[str, int | float]:
+ return self._stats.copy()
+
+ def record_event(self, event_type: str, data: dict[str, Any] | None = None) -> None:
+ self._stats_history.append({
+ "timestamp": datetime.now(UTC).isoformat(),
+ "event_type": event_type,
+ "data": data or {},
+ })
+
+ def get_stats_history(self, event_type: str | None = None, limit: int | None = None) -> list[dict[str, Any]]:
+ history = self._stats_history
+ if event_type:
+ history = [e for e in history if e["event_type"] == event_type]
+ if limit:
+ history = history[-limit:]
+ return history
+
+ def clear_stats(self) -> None:
+ self._stats.clear()
+ self._stats_history.clear()
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/validation/__init__.py b/engine/codeflow_engine/core/validation/__init__.py
new file mode 100644
index 0000000..6b9c861
--- /dev/null
+++ b/engine/codeflow_engine/core/validation/__init__.py
@@ -0,0 +1,21 @@
+"""Core Validation Module."""
+
+from codeflow_engine.core.validation.base import BaseTypeValidator
+from codeflow_engine.core.validation.composite import CompositeValidator
+from codeflow_engine.core.validation.patterns import SecurityPatterns
+from codeflow_engine.core.validation.result import (
+ ValidationResult,
+ ValidationSeverity,
+ merge_validation_results,
+ update_severity,
+)
+
+__all__ = [
+ "BaseTypeValidator",
+ "CompositeValidator",
+ "SecurityPatterns",
+ "ValidationResult",
+ "ValidationSeverity",
+ "merge_validation_results",
+ "update_severity",
+]
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/validation/base.py b/engine/codeflow_engine/core/validation/base.py
new file mode 100644
index 0000000..e5df0ee
--- /dev/null
+++ b/engine/codeflow_engine/core/validation/base.py
@@ -0,0 +1,29 @@
+"""Base Type Validator."""
+
+from abc import ABC, abstractmethod
+from typing import Any
+
+from codeflow_engine.core.validation.patterns import DEFAULT_SECURITY_PATTERNS, SecurityPatterns
+from codeflow_engine.core.validation.result import ValidationResult, ValidationSeverity
+
+
class BaseTypeValidator(ABC):
    """Base class for validators that each handle one category of value.

    Concrete subclasses declare which values they accept via
    :meth:`can_validate` and implement :meth:`validate`; the shared
    security-threat scan lives here.
    """

    def __init__(self, security_patterns: SecurityPatterns | None = None) -> None:
        """Use *security_patterns* when given, otherwise the shared defaults."""
        self.security_patterns = security_patterns or DEFAULT_SECURITY_PATTERNS

    @abstractmethod
    def can_validate(self, value: Any) -> bool:
        """Return True when this validator knows how to handle *value*."""

    @abstractmethod
    def validate(self, key: str, value: Any) -> ValidationResult:
        """Validate *value* stored under *key* and report the outcome."""

    def _check_security_threats(self, key: str, value: str) -> ValidationResult:
        """Scan *value* for known threat patterns; CRITICAL failure on a hit."""
        has_threat, threat_type = self.security_patterns.check_all_threats(value)
        if not has_threat:
            return ValidationResult.success()
        return ValidationResult.failure(
            f"Potential {threat_type} detected in '{key}'",
            ValidationSeverity.CRITICAL,
        )
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/validation/composite.py b/engine/codeflow_engine/core/validation/composite.py
new file mode 100644
index 0000000..6ae3ec6
--- /dev/null
+++ b/engine/codeflow_engine/core/validation/composite.py
@@ -0,0 +1,94 @@
+"""Composite Validator."""
+
+import re
+from typing import Any
+
+import structlog
+
+from codeflow_engine.core.validation.base import BaseTypeValidator
+from codeflow_engine.core.validation.patterns import DEFAULT_SECURITY_PATTERNS, SecurityPatterns
+from codeflow_engine.core.validation.result import ValidationResult, ValidationSeverity, update_severity
+
+logger = structlog.get_logger(__name__)
+
+MAX_KEY_LENGTH = 100
+SAFE_KEY_PATTERN = re.compile(r"^[a-zA-Z0-9_\-\.]+$")
+
+
class CompositeValidator:
    """Run a chain of type validators over a dict of untrusted input.

    Each key/value pair is routed to the first registered validator whose
    ``can_validate`` accepts the value; per-value results are merged into a
    single ``ValidationResult`` carrying sanitized data, which may finally
    be checked against a schema class.
    """

    def __init__(self, security_patterns: SecurityPatterns | None = None, validators: list[BaseTypeValidator] | None = None) -> None:
        """Store shared security patterns and the initial validator chain."""
        self.security_patterns = security_patterns or DEFAULT_SECURITY_PATTERNS
        self._validators: list[BaseTypeValidator] = validators or []

    def register(self, validator: BaseTypeValidator) -> "CompositeValidator":
        """Append a validator to the chain; returns self for chaining."""
        self._validators.append(validator)
        return self

    def unregister(self, validator_type: type[BaseTypeValidator]) -> bool:
        """Remove all validators of *validator_type*; True if any were removed."""
        original_count = len(self._validators)
        self._validators = [v for v in self._validators if not isinstance(v, validator_type)]
        return len(self._validators) < original_count

    def validate_input(self, data: dict[str, Any], schema: type | None = None) -> ValidationResult:
        """Validate and sanitize *data*, returning an aggregate result.

        Keys failing the safe-name check are reported (HIGH) and skipped.
        When *schema* is given and per-value validation passed, the
        sanitized data is additionally run through ``schema(**...)``.
        Any unexpected internal error yields a CRITICAL failure.
        """
        result = ValidationResult(is_valid=True)
        sanitized_data: dict[str, Any] = {}
        try:
            for key, value in data.items():
                if not self._is_safe_key(key):
                    result.add_error(f"Invalid key name: {key}", ValidationSeverity.HIGH)
                    continue
                value_result = self._validate_value(key, value)
                self._merge_result(result, value_result)
                # Only keep sanitized values from validations that succeeded.
                if value_result.is_valid and value_result.sanitized_data is not None:
                    sanitized_data[key] = self._unwrap_sanitized(value_result.sanitized_data)
            if schema and result.is_valid:
                result = self._apply_schema(schema, sanitized_data, result)
            else:
                result.sanitized_data = sanitized_data
            self._log_validation_result(result, data)
            return result
        except Exception:
            logger.exception("Input validation error")
            return ValidationResult.failure("Validation system error", ValidationSeverity.CRITICAL)

    def _validate_value(self, key: str, value: Any) -> ValidationResult:
        """Dispatch *value* to the first validator that accepts it.

        Values no validator claims pass through unchanged, wrapped as
        {"value": ...} so the unwrap step can recover them.
        """
        for validator in self._validators:
            if validator.can_validate(value):
                return validator.validate(key, value)
        return ValidationResult.success({"value": value})

    def _is_safe_key(self, key: str) -> bool:
        """True when *key* matches the safe-name pattern and length cap."""
        return bool(SAFE_KEY_PATTERN.match(key)) and len(key) <= MAX_KEY_LENGTH

    def _merge_result(self, target: ValidationResult, source: ValidationResult) -> None:
        """Fold *source* into *target*, escalating severity as needed.

        NOTE(review): errors/warnings are copied only when *source* is
        invalid — warnings attached to a still-valid result are dropped;
        confirm this is intentional.
        """
        if not source.is_valid:
            target.is_valid = False
            target.errors.extend(source.errors)
            target.warnings.extend(source.warnings)
            target.severity = update_severity(target.severity, source.severity)

    def _unwrap_sanitized(self, sanitized_data: dict[str, Any]) -> Any:
        """Undo the {"value": ...} / {"items": ...} wrapping used by validators."""
        if isinstance(sanitized_data, dict) and len(sanitized_data) == 1 and "value" in sanitized_data:
            return sanitized_data["value"]
        if isinstance(sanitized_data, dict) and "items" in sanitized_data:
            return sanitized_data["items"]
        return sanitized_data

    def _apply_schema(self, schema: type, sanitized_data: dict[str, Any], current_result: ValidationResult) -> ValidationResult:
        """Instantiate *schema* with the sanitized data and capture its dump.

        Handles models exposing ``.dict()`` or ``.model_dump()``
        (pydantic-style); any construction error adds a HIGH-severity
        error to the result instead of raising.
        """
        try:
            validated = schema(**sanitized_data)
            if hasattr(validated, "dict"):
                current_result.sanitized_data = validated.dict()
            elif hasattr(validated, "model_dump"):
                current_result.sanitized_data = validated.model_dump()
            else:
                current_result.sanitized_data = sanitized_data
        except Exception as e:
            current_result.add_error(f"Schema validation failed: {e!s}", ValidationSeverity.HIGH)
        return current_result

    def _log_validation_result(self, result: ValidationResult, data: dict[str, Any]) -> None:
        """Log failure details at warning level, success at debug level."""
        if not result.is_valid:
            logger.warning("Input validation failed", errors=result.errors, severity=result.severity.value, data_keys=list(data.keys()))
        else:
            logger.debug("Input validation passed", data_keys=list(data.keys()))
\ No newline at end of file
diff --git a/engine/codeflow_engine/core/validation/patterns.py b/engine/codeflow_engine/core/validation/patterns.py
new file mode 100644
index 0000000..cbebc01
--- /dev/null
+++ b/engine/codeflow_engine/core/validation/patterns.py
@@ -0,0 +1,82 @@
+"""Centralized Security Patterns."""
+
+import re
+from dataclasses import dataclass, field
+from typing import Pattern
+
+
+@dataclass
+class SecurityPatterns:
+ sql_injection: list[str] = field(default_factory=lambda: [
+ r"(\b(SELECT|INSERT|UPDATE|DELETE|DROP|CREATE|ALTER|EXEC|EXECUTE|UNION|SCRIPT)\b)",
+ r"(\b(OR|AND)\b\s+\d+\s*=\s*\d+)",
+ r"(\b(OR|AND)\b\s+['\"]\w+['\"]\s*=\s*['\"]\w+['\"])",
+ r"(--|\b(COMMENT|REM)\b)",
+ r"(\b(WAITFOR|DELAY)\b)",
+ r"(\b(BENCHMARK|SLEEP)\b)",
+ r"(\bUNION\s+SELECT\b)",
+ ])
+ xss: list[str] = field(default_factory=lambda: [
+ r"",
+ r"javascript:",
+ r"on\w+\s*=",
+ r"