Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

## [1.23.1] - 2026-03-20
### Fixed

- Fixed volume mount fields (`volume_id`, `secret_name`, `file_names`, `size_in_mb`) being silently dropped during deserialization, causing deployment updates to fail with `volume_mounts.*.volume_id should not be null or undefined`

## [1.23.0] - 2026-03-20

### Added

Expand Down
75 changes: 75 additions & 0 deletions tests/unit_tests/job_deployments/test_job_deployments.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,16 @@
import json
from dataclasses import replace

import pytest
import responses # https://github.com/getsentry/responses

from verda.containers import ComputeResource, Container, ContainerRegistrySettings
from verda.containers._containers import (
GeneralStorageMount,
MemoryMount,
SecretMount,
SharedFileSystemMount,
)
from verda.exceptions import APIException
from verda.job_deployments import (
JobDeployment,
Expand Down Expand Up @@ -207,3 +214,71 @@ def test_purge_job_deployment_queue(self, service, endpoint):
service.purge_queue(JOB_NAME)

assert responses.assert_call_count(url, 1) is True

@responses.activate
def test_update_preserves_volume_mounts_round_trip(self, service, endpoint):
    """Regression test for the get → update round trip.

    Subclass-specific volume mount fields (``volume_id``, ``secret_name``,
    ``file_names``, ``size_in_mb``) must survive deserialization and be
    present again in the PATCH body sent by ``update``.
    """
    shared_volume_id = '550e8400-e29b-41d4-a716-446655440000'
    deployment_json = {
        'name': JOB_NAME,
        'containers': [
            {
                'name': CONTAINER_NAME,
                'image': 'busybox:latest',
                'exposed_port': 8080,
                'env': [],
                'volume_mounts': [
                    {'type': 'scratch', 'mount_path': '/data'},
                    {'type': 'shared', 'mount_path': '/sfs', 'volume_id': shared_volume_id},
                    {
                        'type': 'secret',
                        'mount_path': '/secrets',
                        'secret_name': 'my-secret',
                        'file_names': ['key.pem'],
                    },
                    {'type': 'memory', 'mount_path': '/dev/shm', 'size_in_mb': 512},
                ],
            }
        ],
        'endpoint_base_url': 'https://test-job.datacrunch.io',
        'created_at': '2024-01-01T00:00:00Z',
        'compute': {'name': 'H100', 'size': 1},
        'container_registry_settings': {'is_private': False, 'credentials': None},
    }

    url = f'{endpoint}/{JOB_NAME}'
    responses.add(responses.GET, url, json=deployment_json, status=200)
    responses.add(responses.PATCH, url, json=deployment_json, status=200)

    # User flow under test: fetch the deployment, tweak one field, push it back.
    fetched = service.get_by_name(JOB_NAME)

    # Deserialization must have produced the concrete subclass per mount type.
    scratch, shared, secret, memory = fetched.containers[0].volume_mounts
    assert isinstance(scratch, GeneralStorageMount)
    assert isinstance(shared, SharedFileSystemMount)
    assert isinstance(secret, SecretMount)
    assert isinstance(memory, MemoryMount)
    assert shared.volume_id == shared_volume_id
    assert secret.secret_name == 'my-secret'
    assert secret.file_names == ['key.pem']
    assert memory.size_in_mb == 512

    # Change only the image — exactly what the reported user script does.
    new_containers = list(fetched.containers)
    new_containers[0] = replace(new_containers[0], image='busybox:v2')
    service.update(JOB_NAME, replace(fetched, containers=new_containers))

    # The PATCH request (second recorded call) must still carry every field.
    patch_body = json.loads(responses.calls[1].request.body.decode('utf-8'))
    sent = patch_body['containers'][0]['volume_mounts']
    assert sent[0]['type'] == 'scratch'
    assert sent[1]['type'] == 'shared'
    assert sent[1]['volume_id'] == shared_volume_id
    assert sent[2]['type'] == 'secret'
    assert sent[2]['secret_name'] == 'my-secret'
    assert sent[3]['type'] == 'memory'
    assert sent[3]['size_in_mb'] == 512
29 changes: 27 additions & 2 deletions verda/containers/_containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from enum import Enum
from typing import Any

from dataclasses_json import Undefined, dataclass_json # type: ignore
from dataclasses_json import Undefined, config, dataclass_json # type: ignore

from verda.http_client import HTTPClient
from verda.inference_client import InferenceClient, InferenceResponse
Expand Down Expand Up @@ -203,6 +203,29 @@ def __init__(self, mount_path: str, volume_id: str):
self.volume_id = volume_id


def _decode_volume_mount(data: dict) -> VolumeMount:
    """Build the concrete VolumeMount subclass matching the ``type`` field.

    Accepts either a ``VolumeMountType`` member or its raw string value;
    any unrecognized type falls back to a plain ``GeneralStorageMount``.
    """
    kind = data.get('type')
    if kind in (VolumeMountType.SHARED, 'shared'):
        return SharedFileSystemMount(
            mount_path=data['mount_path'], volume_id=data['volume_id']
        )
    if kind in (VolumeMountType.SECRET, 'secret'):
        return SecretMount(
            mount_path=data['mount_path'],
            secret_name=data['secret_name'],
            file_names=data.get('file_names'),
        )
    if kind in (VolumeMountType.MEMORY, 'memory'):
        # NOTE(review): the payload's mount_path is not forwarded here —
        # presumably MemoryMount fixes its own path; confirm against its __init__.
        return MemoryMount(size_in_mb=data['size_in_mb'])
    return GeneralStorageMount(mount_path=data['mount_path'])


def _decode_volume_mounts(data: list[dict] | None) -> list[VolumeMount] | None:
"""Decode a list of volume mount dicts into the correct VolumeMount subclasses."""
if not data:
return None
return [_decode_volume_mount(v) for v in data]


@dataclass_json
@dataclass
class Container:
Expand All @@ -224,7 +247,9 @@ class Container:
healthcheck: HealthcheckSettings | None = None
entrypoint_overrides: EntrypointOverridesSettings | None = None
env: list[EnvVar] | None = None
volume_mounts: list[VolumeMount] | None = None
volume_mounts: list[VolumeMount] | None = field(
default=None, metadata=config(decoder=_decode_volume_mounts)
)


@dataclass_json
Expand Down
Loading