diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py
index 837d1675ba05..92d243d46764 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/_agent_framework.py
@@ -18,7 +18,7 @@
     Response as OpenAIResponse,
     ResponseStreamEvent,
 )
-from azure.ai.agentserver.core.models.projects import ResponseErrorEvent, ResponseFailedEvent
+from azure.ai.agentserver.core.models._projects import ResponseErrorEvent, ResponseFailedEvent
 from azure.ai.agentserver.core.tools import OAuthConsentRequiredError  # pylint: disable=import-error
 
 from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py
index aac9b24c445c..86094a617991 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py
@@ -12,7 +12,7 @@
 from azure.ai.agentserver.core import AgentRunContext
 from azure.ai.agentserver.core.logger import get_logger
 from azure.ai.agentserver.core.models import Response as OpenAIResponse
-from azure.ai.agentserver.core.models.projects import (
+from azure.ai.agentserver.core.models._projects import (
     ItemContentOutputText,
     ResponsesAssistantMessageItemResource,
 )
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py
index 22f144a4b7a4..02d11958cf24 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py
@@ -16,7 +16,7 @@
     Response as OpenAIResponse,
     ResponseStreamEvent,
 )
-from azure.ai.agentserver.core.models.projects import (
+from azure.ai.agentserver.core.models._projects import (
     FunctionToolCallItemResource,
     FunctionToolCallOutputItemResource,
     ItemContentOutputText,
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py
index abd2dd2c02ef..ca429683a1be 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/agent_id_generator.py
@@ -8,7 +8,7 @@
 from typing import Optional
 
 from azure.ai.agentserver.core import AgentRunContext
-from azure.ai.agentserver.core.models import projects
+from azure.ai.agentserver.core.models import _projects as projects
 
 
 def generate_agent_id(context: AgentRunContext) -> Optional[projects.AgentId]:
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py
index 89bac4ca76c5..0b054dfe9e08 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/models/human_in_the_loop_helper.py
@@ -8,7 +8,7 @@
 from agent_framework import Content, Message, WorkflowCheckpoint, WorkflowEvent
 
 from azure.ai.agentserver.core.logger import get_logger
-from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME
+from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME
 
 logger = get_logger()
 
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py
index acd89f1baef0..3e84763f4e68 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_repository.py
@@ -8,7 +8,7 @@
 
 from agent_framework import CheckpointStorage
 
-from azure.ai.agentserver.core.checkpoints.client import (
+from azure.ai.agentserver.core.checkpoints import (
     CheckpointSession,
     FoundryCheckpointClient,
 )
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py
index 833c3647149a..63ba16dcd1ed 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/azure/ai/agentserver/agentframework/persistence/_foundry_checkpoint_storage.py
@@ -9,7 +9,7 @@
 
 from agent_framework import WorkflowCheckpoint
 
-from azure.ai.agentserver.core.checkpoints.client import (
+from azure.ai.agentserver.core.checkpoints import (
     CheckpointItem,
     CheckpointItemId,
     FoundryCheckpointClient,
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py
index ffc1e2fcc4c1..50a4458856ec 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/mocks/mock_checkpoint_client.py
@@ -5,7 +5,7 @@
 
 from typing import Any, Dict, List, Optional
 
-from azure.ai.agentserver.core.checkpoints.client import (
+from azure.ai.agentserver.core.checkpoints import (
     CheckpointItem,
     CheckpointItemId,
     CheckpointSession,
diff --git a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py
index 4b59922cce9a..c26e716ed4dc 100644
--- a/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py
+++ b/sdk/agentserver/azure-ai-agentserver-agentframework/tests/unit_tests/test_human_in_the_loop_helper.py
@@ -2,7 +2,7 @@
 
 import pytest
 from agent_framework import Content, Message, WorkflowEvent
-from azure.ai.agentserver.core.server.common.constants import (
+from azure.ai.agentserver.core.server.common._constants import (
     HUMAN_IN_THE_LOOP_FUNCTION_NAME,
 )
 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/README.md b/sdk/agentserver/azure-ai-agentserver-core/README.md
index ff60cf460196..cc420579e5fe 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/README.md
+++ b/sdk/agentserver/azure-ai-agentserver-core/README.md
@@ -26,7 +26,7 @@ from azure.ai.agentserver.core.models import (
     CreateResponse,
     Response as OpenAIResponse,
 )
-from azure.ai.agentserver.core.models.projects import (
+from azure.ai.agentserver.core.models._projects import (
     ItemContentOutputText,
     ResponsesAssistantMessageItemResource,
     ResponseTextDeltaEvent,
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
index 88a13741bbac..39de11cefe55 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
@@ -5,8 +5,8 @@
 from ._version import VERSION
 from .logger import configure as config_logging
-from .server.base import FoundryCBAgent
-from .server.common.agent_run_context import AgentRunContext
+from .server._base import FoundryCBAgent
+from .server.common._agent_run_context import AgentRunContext
 from .server._context import AgentServerContext
 
 config_logging()
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py
index f9d6ed3d8aa8..0ca387146579 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/__init__.py
@@ -3,7 +3,7 @@
 # ---------------------------------------------------------
 """Checkpoint storage module for Azure AI Agent Server."""
 
-from .client import FoundryCheckpointClient
+from .client._client import FoundryCheckpointClient
 from .client._models import (
     CheckpointItem,
     CheckpointItemId,
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py
index 34f30f16c5d9..901cbb3d70a8 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/__init__.py
@@ -3,16 +3,4 @@
 # ---------------------------------------------------------
 """Checkpoint client module for Azure AI Agent Server."""
 
-from ._client import FoundryCheckpointClient
-from ._models import (
-    CheckpointItem,
-    CheckpointItemId,
-    CheckpointSession,
-)
-
-__all__ = [
-    "CheckpointItem",
-    "CheckpointItemId",
-    "CheckpointSession",
-    "FoundryCheckpointClient",
-]
+__path__ = __import__("pkgutil").extend_path(__path__, __name__)
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py
index f7e178d758b4..fc2f45321968 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/checkpoints/client/_client.py
@@ -2,6 +2,7 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
 # pylint: disable=client-method-missing-kwargs,client-accepts-api-version-keyword,missing-client-constructor-parameter-kwargs
+# ^^^ azure-sdk pylint rules: internal client not intended as a public Azure SDK client
 """Asynchronous client for Azure AI Foundry checkpoint storage API."""
 
 from typing import Any, AsyncContextManager, List, Optional
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py
index f15e98986470..2b5f39e964b4 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py
@@ -1,4 +1,3 @@
-# pylint: disable=broad-exception-caught,dangerous-default-value
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
@@ -12,13 +11,12 @@ from .constants import Constants
 
 
 def _get_default_log_config() -> dict[str, Any]:
-    """
-    Build default log config with level from environment.
-
+    """Build default log config with level from environment.
+
     :return: A dictionary containing logging configuration.
-    :rtype: dict
+    :rtype: dict[str, Any]
     """
-    log_level = get_log_level()
+    log_level = _get_log_level()
     return {
         "version": 1,
         "disable_existing_loggers": False,
@@ -40,7 +38,14 @@
 }
 
 
-def get_log_level():
+def _get_log_level() -> str:
+    """Read log level from the ``AGENT_LOG_LEVEL`` environment variable.
+
+    Falls back to ``"INFO"`` if the variable is unset or contains an invalid value.
+
+    :return: A valid Python logging level name.
+    :rtype: str
+    """
     log_level = os.getenv(Constants.AGENT_LOG_LEVEL, "INFO").upper()
     valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
     if log_level not in valid_levels:
@@ -54,7 +59,12 @@
 APPINSIGHT_CONNSTR_ENV_NAME = "APPLICATIONINSIGHTS_CONNECTION_STRING"
 
 
-def get_dimensions():
+def _get_dimensions() -> dict[str, str]:
+    """Collect environment-based dimensions for structured logging.
+
+    :return: A mapping of dimension keys to their runtime values.
+    :rtype: dict[str, str]
+    """
     env_values = {name: value for name, value in vars(Constants).items() if not name.startswith("_")}
     res = {"azure.ai.agentserver.version": VERSION}
     for name, env_name in env_values.items():
@@ -65,11 +75,25 @@
     return res
 
 
-def get_project_endpoint(logger=None):
+def get_project_endpoint(logger: Optional[logging.Logger] = None) -> Optional[str]:
+    """Resolve the project endpoint from environment variables.
+
+    Checks ``AZURE_AI_PROJECT_ENDPOINT`` first, then falls back to deriving
+    an endpoint from ``AGENT_PROJECT_RESOURCE_ID``.
+
+    :param logger: Optional logger for diagnostic messages.
+    :type logger: Optional[logging.Logger]
+    :return: The resolved project endpoint URL, or ``None`` if unavailable.
+    :rtype: Optional[str]
+    """
     project_endpoint = os.environ.get(Constants.AZURE_AI_PROJECT_ENDPOINT)
     if project_endpoint:
         if logger:
-            logger.info(f"Using project endpoint from {Constants.AZURE_AI_PROJECT_ENDPOINT}: {project_endpoint}")
+            logger.info(
+                "Using project endpoint from %s: %s",
+                Constants.AZURE_AI_PROJECT_ENDPOINT,
+                project_endpoint,
+            )
         return project_endpoint
     project_resource_id = os.environ.get(Constants.AGENT_PROJECT_RESOURCE_ID)
    if project_resource_id:
@@ -78,18 +102,32 @@
         parts = last_part.split("@")
         if len(parts) < 2:
             if logger:
-                logger.warning(f"Invalid project resource id format: {project_resource_id}")
+                logger.warning("Invalid project resource id format: %s", project_resource_id)
             return None
         account = parts[0]
         project = parts[1]
         endpoint = f"https://{account}.services.ai.azure.com/api/projects/{project}"
         if logger:
-            logger.info(f"Using project endpoint derived from {Constants.AGENT_PROJECT_RESOURCE_ID}: {endpoint}")
+            logger.info(
+                "Using project endpoint derived from %s: %s",
+                Constants.AGENT_PROJECT_RESOURCE_ID,
+                endpoint,
+            )
         return endpoint
     return None
 
 
-def get_application_insights_connstr(logger=None):
+def _get_application_insights_connstr(logger: Optional[logging.Logger] = None) -> Optional[str]:
+    """Retrieve or derive the Application Insights connection string.
+
+    Looks in the ``APPLICATIONINSIGHTS_CONNECTION_STRING`` environment variable first,
+    then attempts to fetch it from the project endpoint.
+
+    :param logger: Optional logger for diagnostic messages.
+    :type logger: Optional[logging.Logger]
+    :return: The connection string, or ``None`` if unavailable.
+    :rtype: Optional[str]
+    """
     try:
         conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
         if not conn_str:
@@ -101,22 +139,36 @@
             project_client = AIProjectClient(credential=DefaultAzureCredential(), endpoint=project_endpoint)
             conn_str = project_client.telemetry.get_application_insights_connection_string()
             if not conn_str and logger:
-                logger.info(f"No Application Insights connection found for project: {project_endpoint}")
+                logger.info(
+                    "No Application Insights connection found for project: %s",
+                    project_endpoint,
+                )
         elif conn_str:
             os.environ[APPINSIGHT_CONNSTR_ENV_NAME] = conn_str
         elif logger:
             logger.info("Application Insights not configured, telemetry export disabled.")
         return conn_str
-    except Exception as e:
+    except Exception as e:  # pylint: disable=broad-exception-caught  # bootstrap: many failure modes possible
         if logger:
-            logger.warning(f"Failed to get Application Insights connection string, telemetry export disabled: {e}")
+            logger.warning(
+                "Failed to get Application Insights connection string, telemetry export disabled: %s",
+                e,
+            )
         return None
 
 
 class CustomDimensionsFilter(logging.Filter):
-    def filter(self, record):
-        # Add custom dimensions to every log record
-        dimensions = get_dimensions()
+    """Logging filter that attaches environment dimensions and request context to log records."""
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        """Inject custom dimensions into *record* and allow it through.
+
+        :param record: The log record to enrich.
+        :type record: logging.LogRecord
+        :return: Always ``True`` so the record is never discarded.
+        :rtype: bool
+        """
+        dimensions = _get_dimensions()
         for key, value in dimensions.items():
             setattr(record, key, value)
         cur_request_context = request_context.get()
@@ -140,7 +192,7 @@ def configure(log_config: Optional[dict[str, Any]] = None):
     config.dictConfig(log_config)
 
     app_logger = logging.getLogger("azure.ai.agentserver")
-    application_insights_connection_string = get_application_insights_connstr(logger=app_logger)
+    application_insights_connection_string = _get_application_insights_connstr(logger=app_logger)
     enable_application_insights_logger = (
         os.environ.get(Constants.ENABLE_APPLICATION_INSIGHTS_LOGGER, "true").lower() == "true"
     )
@@ -169,10 +221,10 @@
             handler.addFilter(custom_filter)
 
             # Only add to azure.ai.agentserver namespace to avoid infrastructure logs
-            app_logger.setLevel(get_log_level())
+            app_logger.setLevel(_get_log_level())
             app_logger.addHandler(handler)
 
-    except Exception as e:
+    except Exception as e:  # pylint: disable=broad-exception-caught
         print(f"Failed to configure logging: {e}")
 
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py
index d5622ebe7732..b6a1895a3868 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/__init__.py
@@ -1,7 +1,8 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
+# TypedDict module; __all__ cannot be statically typed because the list is built at runtime.
 from ._create_response import CreateResponse  # type: ignore
-from .projects import Response, ResponseStreamEvent
+from ._projects import Response, ResponseStreamEvent
 
 __all__ = ["CreateResponse", "Response", "ResponseStreamEvent"]  # type: ignore[var-annotated]
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py
index 820d54c6cea0..5ec72115734a 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_create_response.py
@@ -1,11 +1,12 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-# pylint: disable=no-name-in-module
+# pylint: disable=no-name-in-module  # openai re-exports are dynamically generated
 from typing import Optional
 
-from .openai import response_create_params  # type: ignore
-from . import projects as _azure_ai_projects_models
+# ResponseCreateParamsBase is a TypedDict — mypy cannot verify total=False on mixed bases.
+from ._openai import response_create_params  # type: ignore
+from . import _projects as _azure_ai_projects_models
 
 class CreateResponse(response_create_params.ResponseCreateParamsBase, total=False):  # type: ignore
     agent: Optional[_azure_ai_projects_models.AgentReference]
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/openai/__init__.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_openai/__init__.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/__init__.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/__init__.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_enums.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_models.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_models.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_patch_evaluations.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_patch_evaluations.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/__init__.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/__init__.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/model_base.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/model_base.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_utils/serialization.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/models/_projects/_utils/serialization.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py
similarity index 88%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py
index 7a9f488227a7..e1ce45188c34 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/base.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_base.py
@@ -1,9 +1,7 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-# pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements
-# mypy: ignore-errors
-import asyncio  # pylint: disable=C4763
+import asyncio  # pylint: disable=C4763  # azure-sdk: async-client-bad-name (false positive on module)
 import contextlib
 import inspect
 import json
@@ -13,6 +11,7 @@
 from typing import Any, AsyncGenerator, Generator, Optional, Union
 
 import uvicorn
+from openai import AsyncOpenAI
 from opentelemetry import context as otel_context, trace
 from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
 from starlette.applications import Starlette
@@ -37,10 +36,14 @@
     build_foundry_agents_metadata_headers,
     try_attach_foundry_metadata_to_event,
 )
-from .common.agent_run_context import AgentRunContext
+from .common._agent_run_context import AgentRunContext
 from ..constants import Constants
 from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, get_project_endpoint, request_context
-from ..models import Response as OpenAIResponse, ResponseStreamEvent, projects as project_models
+from ..models import (
+    Response as OpenAIResponse,
+    ResponseStreamEvent,
+    _projects as project_models
+)
 from ..tools import UserInfoContextMiddleware, create_tool_runtime
 from ..utils._credential import AsyncTokenCredentialAdapter
 
@@ -53,19 +56,19 @@ def __init__(self, app: ASGIApp, agent: Optional['FoundryCBAgent'] = None):
         super().__init__(app)
         self.agent = agent
 
-    async def dispatch(self, request: Request, call_next):
+    async def dispatch(self, request: Request, call_next):  # type: ignore[override]
         if request.url.path in ("/runs", "/responses"):
             try:
                 self.set_request_id_to_context_var(request)
                 payload = await request.json()
-            except Exception as e:
-                logger.error(f"Invalid JSON payload: {e}")
+            except Exception as e:  # pylint: disable=broad-exception-caught  # middleware catch-all for bad payload
+                logger.error("Invalid JSON payload: %s", e)
                 return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400)
             try:
                 request.state.agent_run_context = AgentRunContext(payload)
                 self.set_run_context_to_context_var(request.state.agent_run_context)
-            except Exception as e:
-                logger.error(f"Context build failed: {e}.", exc_info=True)
+            except Exception as e:  # pylint: disable=broad-exception-caught  # middleware catch-all for context build
+                logger.error("Context build failed: %s.", e, exc_info=True)
                 return JSONResponse({"error": f"Context build failed: {e}"}, status_code=500)
 
         return await call_next(request)
@@ -99,7 +102,8 @@ class FoundryCBAgent:
-    def __init__(self,
+    def __init__(  # pylint: disable=too-many-statements  # Starlette app setup requires sequential route/middleware wiring
+        self,
         credentials: Optional[Union[AsyncTokenCredential, TokenCredential]] = None,
         project_endpoint: Optional[str] = None) -> None:
         self.credentials = AsyncTokenCredentialAdapter(credentials) if credentials else AsyncDefaultTokenCredential()
@@ -129,9 +133,9 @@ async def runs_endpoint(request):
             ex = None
             try:
                 resp = await self.agent_run(context)
-            except Exception as e:
+            except Exception as e:  # pylint: disable=broad-exception-caught  # top-level agent_run catch-all
                 # TODO: extract status code from exception
-                logger.error(f"Error processing CreateResponse request: {e}", exc_info=True)
+                logger.error("Error processing CreateResponse request: %s", e, exc_info=True)
                 ex = e
 
             if not context.stream:
@@ -172,7 +176,7 @@ async def gen_async(ex):
                     if self._should_store(context):
                         logger.debug("Storing output to conversation.")
                         await self._save_output_events_to_conversation(context, output_events)
-                except Exception as e:  # noqa: BLE001
+                except Exception as e:  # noqa: BLE001  # pylint: disable=broad-exception-caught
                     logger.error("Error in async generator: %s", e, exc_info=True)
                     ex = e
                 finally:
@@ -207,12 +211,12 @@ async def readiness_endpoint(request):
         ]
 
         @contextlib.asynccontextmanager
-        async def _lifespan(app):
+        async def _lifespan(app):  # pylint: disable=unused-argument
             import logging
 
             # Log server started successfully
             port = getattr(self, '_port', 'unknown')
-            logger.info(f"FoundryCBAgent server started successfully on port {port}")
+            logger.info("FoundryCBAgent server started successfully on port %s", port)
 
             # Attach App Insights handler to uvicorn loggers
             for handler in logger.handlers:
@@ -234,9 +238,9 @@
             allow_methods=["*"],
             allow_headers=["*"],
         )
-        self.app.add_middleware(AgentRunContextMiddleware, agent=self)
+        self.app.add_middleware(AgentRunContextMiddleware, agent=self)  # type: ignore[arg-type]
 
-        self.tracer = None
+        self.tracer: trace.Tracer = trace.get_tracer(__name__)
 
     def _should_store(self, context: AgentRunContext) -> bool:
         """Determine whether conversation artifacts should be persisted.
@@ -246,7 +250,7 @@ def _should_store(self, context: AgentRunContext) -> bool:
         :param context: The active agent run context.
         :return: ``True`` when storage is requested and the conversation is scoped to a project.
         :rtype: bool
         """
-        return context.request.get("store", False) and context.conversation_id and self._project_endpoint
+        return bool(context.request.get("store", False) and context.conversation_id and self._project_endpoint)
 
     def _items_are_equal(self, item1: dict, item2: dict) -> bool:
         """Compare two conversation items for equality based on type and content.
@@ -274,7 +278,7 @@ def _items_are_equal(self, item1: dict, item2: dict) -> bool:
             return text1 == text2
         return content1 == content2
 
-    async def _create_openai_client(self) -> "AsyncOpenAI":
+    async def _create_openai_client(self) -> AsyncOpenAI:
         """Create an AsyncOpenAI client for conversation operations.
 
         :return: Configured AsyncOpenAI client scoped to the Foundry project endpoint.
@@ -303,7 +307,7 @@ async def _save_input_to_conversation(self, context: AgentRunContext) -> None:
         try:
             conversation_id = context.conversation_id
             input_items = context.request.get("input", [])
-            if not input_items:
+            if not input_items or not conversation_id:
                 return
 
             # Handle string input as a single item
@@ -349,19 +353,22 @@
                                 all_match = False
                                 break
                         if all_match:
-                            logger.debug(f"All {n} input items already exist in " +
-                                         f"conversation {conversation_id}, skipping save")
+                            logger.debug(
+                                "All %d input items already exist in conversation %s, skipping save",
+                                n,
+                                conversation_id,
+                            )
                             return
-                except Exception as e:
-                    logger.debug(f"Could not check for duplicates: {e}")
+                except Exception as e:  # pylint: disable=broad-exception-caught  # best-effort duplicate check
+                    logger.debug("Could not check for duplicates: %s", e)
 
             await openai_client.conversations.items.create(
                 conversation_id=conversation_id,
                 items=items_to_save,
             )
-            logger.debug(f"Saved {len(items_to_save)} input items to conversation {conversation_id}")
-        except Exception as e:
-            logger.warning(f"Failed to save input items to conversation: {e}", exc_info=True)
+            logger.debug("Saved %d input items to conversation %s", len(items_to_save), conversation_id)
+        except Exception as e:  # pylint: disable=broad-exception-caught  # best-effort conversation persistence
+            logger.warning("Failed to save input items to conversation: %s", e, exc_info=True)
 
     async def _save_output_to_conversation(
             self, context: AgentRunContext, response: project_models.Response) -> None:
@@ -396,9 +403,9 @@ async def _save_output_to_conversation(
                 conversation_id=conversation_id,
                 items=items_to_save,
             )
-            logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}")
-        except Exception as e:
-            logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True)
+            logger.debug("Saved %d output items to conversation %s", len(items_to_save), conversation_id)
+        except Exception as e:  # pylint: disable=broad-exception-caught  # best-effort conversation persistence
+            logger.warning("Failed to save output items to conversation: %s", e, exc_info=True)
 
     async def _save_output_events_to_conversation(self, context: AgentRunContext, events: list) -> None:
         """Persist streaming output events for later retrieval.
@@ -433,9 +440,9 @@ async def _save_output_events_to_conversation(self, context: AgentRunContext, ev
                 conversation_id=conversation_id,
                 items=items_to_save,
             )
-            logger.debug(f"Saved {len(items_to_save)} output items to conversation {conversation_id}")
-        except Exception as e:
-            logger.warning(f"Failed to save output items to conversation: {e}", exc_info=True)
+            logger.debug("Saved %d output items to conversation %s", len(items_to_save), conversation_id)
+        except Exception as e:  # pylint: disable=broad-exception-caught  # best-effort conversation persistence
+            logger.warning("Failed to save output items to conversation: %s", e, exc_info=True)
 
     @abstractmethod
     async def agent_run(
@@ -557,10 +564,10 @@ async def respond_with_oauth_consent_astream(self, context, error) -> AsyncGener
         })
         yield project_models.ResponseCompletedEvent(sequence_number=sequence_number, response=response)
 
-    async def agent_liveness(self, request) -> Union[Response, dict]:
+    async def agent_liveness(self, request) -> Union[Response, dict]:  # pylint: disable=unused-argument
         return Response(status_code=200)
 
-    async def agent_readiness(self, request) -> Union[Response, dict]:
+    async def agent_readiness(self, request) -> Union[Response, dict]:  # pylint: disable=unused-argument
         return {"status": "ready"}
 
     async def run_async(
@@ -577,7 +584,7 @@
         config = uvicorn.Config(self.app, host="0.0.0.0", port=port, loop="asyncio")
         server = uvicorn.Server(config)
         self._port = port
-        logger.info(f"Starting FoundryCBAgent server async on port {port}")
+        logger.info("Starting FoundryCBAgent server async on port %s", port)
         await server.serve()
 
     def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None:
@@ -593,7 +600,7 @@ def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None
         """
         self.init_tracing()
         self._port = port
-        logger.info(f"Starting FoundryCBAgent server on port {port}")
+        logger.info("Starting FoundryCBAgent server on port %s", port)
         uvicorn.run(self.app, host="0.0.0.0", port=port)
 
     def init_tracing(self):
@@ -618,7 +625,9 @@ def get_trace_attributes(self):
             "service.name": "azure.ai.agentserver",
         }
 
-    def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None):
+    def init_tracing_internal(  # pylint: disable=unused-argument  # base class hook, params used by subclasses
+        self, exporter_endpoint=None, app_insights_conn_str=None
+    ):
         pass
 
     def setup_application_insights_exporter(self, connection_string, provider):
@@ -638,7 +647,7 @@ def setup_otlp_exporter(self, endpoint, provider):
         exporter_instance = OTLPSpanExporter(endpoint=endpoint)
         processor = BatchSpanProcessor(exporter_instance)
         provider.add_span_processor(processor)
-        logger.info(f"Tracing setup with OTLP exporter: {endpoint}")
+        logger.info("Tracing setup with OTLP exporter: %s", endpoint)
 
     def create_response_headers(self) -> dict[str, str]:
         headers = {}
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py
index 352dfdc9d27b..9b13cfedd636 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/_response_metadata.py
@@ -9,7 +9,7 @@
 
 from ..application._metadata import get_current_app
 from ..models import Response as OpenAIResponse, ResponseStreamEvent
-from ..models.projects import (
+from ..models._projects import (
     ResponseCompletedEvent,
     ResponseCreatedEvent,
     ResponseInProgressEvent,
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py
similarity index 77%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py
index 87c32926bde4..750e4209d9e5 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/agent_run_context.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_agent_run_context.py
@@ -3,11 +3,11 @@
 # ---------------------------------------------------------
 from typing import Optional
 
-from .id_generator.foundry_id_generator import FoundryIdGenerator
-from .id_generator.id_generator import IdGenerator
+from .id_generator._foundry_id_generator import FoundryIdGenerator
+from .id_generator._id_generator import IdGenerator
 from ...logger import get_logger
 from ...models import CreateResponse
-from ...models.projects import AgentId, AgentReference, ResponseConversation1
+from ...models._projects import AgentId, AgentReference, ResponseConversation1
 
 logger = get_logger()
 
@@ -48,10 +48,10 @@ def conversation_id(self) -> Optional[str]:
     def stream(self) -> bool:
         return self._stream
 
-    def get_agent_id_object(self) -> AgentId:
+    def get_agent_id_object(self) -> Optional[AgentId]:
         agent = self.request.get("agent")
         if not agent:
-            return None  # type: ignore
+            return None
         return AgentId(
             {
                 "type": agent.type,
@@ -60,9 +60,9 @@ def get_agent_id_object(self) -> AgentId:
             }
         )
 
-    def get_conversation_object(self) -> ResponseConversation1:
+    def get_conversation_object(self) -> Optional[ResponseConversation1]:
         if not self._conversation_id:
-            return None  # type: ignore
+            return None
         return ResponseConversation1(id=self._conversation_id)
 
 
@@ -75,11 +75,11 @@ def _deserialize_create_response(payload: dict) -> CreateResponse:
 
     tools = payload.get("tools")
     if tools:
-        _deserialized["tools"] = [tool for tool in tools]  # pylint: disable=unnecessary-comprehension
+        _deserialized["tools"] = list(tools)
 
     return _deserialized
 
 
-def _deserialize_agent_reference(payload: dict) -> AgentReference:
+def _deserialize_agent_reference(payload: dict) -> Optional[AgentReference]:
     if not payload:
-        return None  # type: ignore
+        return None
     return AgentReference(**payload)
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/constants.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/_constants.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py
similarity index 59%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py
index 01ac72289e4e..0c0f91cbb36d 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_foundry_id_generator.py
@@ -1,4 +1,3 @@
-# pylint: disable=docstring-missing-return,docstring-missing-param,docstring-missing-rtype
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
@@ -9,7 +8,7 @@
 import re
 from typing import Optional
 
-from .id_generator import IdGenerator
+from ._id_generator import IdGenerator
 
 _WATERMARK_RE = re.compile(r"^[A-Za-z0-9]*$")
 
@@ -26,6 +25,13 @@ class FoundryIdGenerator(IdGenerator):
     """
 
     def __init__(self, response_id: Optional[str], conversation_id: Optional[str]):
+        """Initialize the ID generator.
+
+        :param response_id: An existing response ID, or ``None`` to generate one.
+        :type response_id: Optional[str]
+        :param conversation_id: An existing conversation ID, or ``None``.
+        :type conversation_id: Optional[str]
+        """
         self.response_id = response_id or self._new_id("resp")
         self.conversation_id = conversation_id
         partition_source = self.conversation_id or self.response_id
@@ -36,6 +42,13 @@ def __init__(self, response_id: Optional[str], conversation_id: Optional[str]):
 
     @classmethod
     def from_request(cls, payload: dict) -> "FoundryIdGenerator":
+        """Create a generator from an incoming request payload.
+
+        :param payload: The raw request payload dictionary.
+        :type payload: dict
+        :return: A configured :class:`FoundryIdGenerator` instance.
+        :rtype: FoundryIdGenerator
+        """
         response_id = payload.get("metadata", {}).get("response_id", None)
         conv_id_raw = payload.get("conversation", None)
         if isinstance(conv_id_raw, str):
@@ -47,6 +60,13 @@ def from_request(cls, payload: dict) -> "FoundryIdGenerator":
         return cls(response_id, conv_id)
 
     def generate(self, category: Optional[str] = None) -> str:
+        """Generate a new unique ID for the given category.
+
+        :param category: Optional prefix category (e.g. ``"msg"``, ``"func"``). Defaults to ``"id"``.
+        :type category: Optional[str]
+        :return: The generated unique identifier string.
+        :rtype: str
+        """
         prefix = "id" if not category else category
         return self._new_id(prefix, partition_key=self._partition_id)
 
@@ -63,12 +83,29 @@
         partition_key: Optional[str] = None,
         partition_key_hint: str = "",
     ) -> str:
-        """
-        Generates a new ID.
-
-        Format matches the C# logic:
-            f"{prefix}{delimiter}{infix}{partitionKey}{entropy}"
-        (i.e., exactly one delimiter after prefix; no delimiter between entropy and partition key)
+        """Generate a new ID matching the C# FoundryIdGenerator format.
+
+        Format: ``"{prefix}{delimiter}{infix}{partitionKey}{entropy}"``
+
+        :param prefix: The ID prefix (e.g. ``"resp"``, ``"msg"``).
+        :type prefix: str
+        :param string_length: Length of the random entropy portion.
+        :type string_length: int
+        :param partition_key_length: Length of the partition key.
+        :type partition_key_length: int
+        :param infix: Optional infix inserted between delimiter and partition key.
+        :type infix: Optional[str]
+        :param watermark: Optional alphanumeric watermark inserted mid-entropy.
+        :type watermark: str
+        :param delimiter: Delimiter between prefix and the rest of the ID.
+        :type delimiter: str
+        :param partition_key: Explicit partition key; if ``None``, derived or generated.
+        :type partition_key: Optional[str]
+        :param partition_key_hint: ID string to extract a partition key from.
+        :type partition_key_hint: str
+        :return: The generated ID string.
+        :rtype: str
+        :raises ValueError: If the watermark contains non-alphanumeric characters.
         """
 
         entropy = FoundryIdGenerator._secure_entropy(string_length)
@@ -96,10 +133,16 @@
 
     @staticmethod
     def _secure_entropy(string_length: int) -> str:
-        """
-        Generates a secure random alphanumeric string of exactly `string_length`.
-        Re-tries whole generation until the filtered base64 string is exactly the desired length,
-        matching the C# behavior.
+        """Generate a cryptographically secure alphanumeric string.
+
+        Uses :func:`os.urandom` and base64 encoding, filtering to alphanumeric
+        characters and retrying until the exact length is reached.
+
+        :param string_length: Desired length of the output string.
+        :type string_length: int
+        :return: A random alphanumeric string of exactly *string_length* characters.
+        :rtype: str
+        :raises ValueError: If *string_length* is less than 1.
         """
         if string_length < 1:
             raise ValueError("Must be greater than or equal to 1")
@@ -120,11 +163,22 @@
         partition_key_length: int = 18,
         delimiter: str = "_",
     ) -> str:
-        """
-        Extracts partition key from an existing ID.
-
-        Expected shape (per C# logic): "<prefix>_<segment>"
-        We take the last `partition_key_length` characters from the *second* segment.
+        """Extract the partition key from an existing ID.
+
+        Expected shape: ``"<prefix>_<segment>"``.
+        Returns the first *partition_key_length* characters of the second segment.
+
+        :param id_str: The ID string to extract from.
+        :type id_str: str
+        :param string_length: Expected entropy length used for validation.
+        :type string_length: int
+        :param partition_key_length: Number of characters to extract as partition key.
+        :type partition_key_length: int
+        :param delimiter: The delimiter separating ID segments.
+        :type delimiter: str
+        :return: The extracted partition key.
+        :rtype: str
+        :raises ValueError: If the ID format is invalid.
         """
         if not id_str:
             raise ValueError("Id cannot be null or empty")
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py
similarity index 100%
rename from sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/id_generator.py
rename to sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/server/common/id_generator/_id_generator.py
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py
index 12b647d7adc7..0efcf1c6f20b 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/client/_client.py
@@ -1,7 +1,7 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import itertools from collections import defaultdict from typing import ( @@ -37,7 +37,7 @@ from .._exceptions import ToolInvocationError -class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 +class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: disable=C4748 # azure-sdk: client-paging-methods-use-list """Asynchronous client for aggregating tools from Azure AI MCP and Tools APIs. This client provides access to tools from both MCP (Model Context Protocol) servers @@ -55,7 +55,7 @@ class FoundryToolClient(AsyncContextManager["FoundryToolClient"]): # pylint: di :type api_version: str or None """ - def __init__( # pylint: disable=C4718 + def __init__( # pylint: disable=C4718 # azure-sdk: client-method-name-no-double-underscore self, endpoint: str, credential: "AsyncTokenCredential", diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py index 2d50089fef8f..c75532f0d3e4 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_catalog.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name from abc import ABC, abstractmethod from typing import Any, Awaitable, Collection, List, Mapping, MutableMapping, Optional, Union diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py index 80b25d78b20e..9604124cde9b 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/tools/runtime/_starlette.py @@ -40,8 +40,9 @@ def install(cls, :type user_resolver: Optional[Callable[[Request], Awaitable[Optional[UserInfo]]]] """ + user_info_var : _UserContextType = user_context or ContextVarUserProvider.default_user_info_context app.add_middleware(UserInfoContextMiddleware, # type: ignore[arg-type] - user_info_var=user_context or ContextVarUserProvider.default_user_info_context, + user_info_var=user_info_var, user_resolver=user_resolver or cls._default_user_resolver) @staticmethod diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py index 398a8c46fd5d..0b6600de7d6a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py +++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/utils/_credential.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- from __future__ import annotations -import asyncio # pylint: disable=C4763 +import asyncio # pylint: disable=C4763 # azure-sdk: async-client-bad-name import inspect from types import TracebackType from typing 
import Any, Type, cast @@ -12,7 +12,7 @@ from azure.core.credentials_async import AsyncTokenCredential -async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 +async def _to_thread(func, *args, **kwargs): # pylint: disable=C4743 # azure-sdk: client-method-should-not-use-static-method """Compatibility wrapper for asyncio.to_thread (Python 3.8+). :param func: The function to run in a thread. diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst new file mode 100644 index 000000000000..3076ff010e1b --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.operations.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.core.checkpoints.client.operations package +=============================================================== + +.. automodule:: azure.ai.agentserver.core.checkpoints.client.operations + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst new file mode 100644 index 000000000000..cd6763335948 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.client.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints.client package +==================================================== + +.. automodule:: azure.ai.agentserver.core.checkpoints.client + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client.operations diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst new file mode 100644 index 000000000000..99b9dfa2ef50 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.checkpoints.rst @@ -0,0 +1,15 @@ +azure.ai.agentserver.core.checkpoints package +============================================= + +.. automodule:: azure.ai.agentserver.core.checkpoints + :inherited-members: + :members: + :undoc-members: + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + azure.ai.agentserver.core.checkpoints.client diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst deleted file mode 100644 index dd1cce6eecca..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.openai.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.openai package -=============================================== - -.. 
automodule:: azure.ai.agentserver.core.models.openai - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst deleted file mode 100644 index 38e0be4f331b..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.projects.rst +++ /dev/null @@ -1,8 +0,0 @@ -azure.ai.agentserver.core.models.projects package -================================================= - -.. automodule:: azure.ai.agentserver.core.models.projects - :inherited-members: - :members: - :undoc-members: - :ignore-module-all: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst index 008b280c64de..120b01cccc5a 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.models.rst @@ -6,12 +6,3 @@ azure.ai.agentserver.core.models package :members: :undoc-members: :ignore-module-all: - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - azure.ai.agentserver.core.models.openai - azure.ai.agentserver.core.models.projects diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst index b8f1dadf3a73..60005f2b04cc 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.rst @@ -13,6 +13,7 @@ Subpackages :maxdepth: 4 azure.ai.agentserver.core.application + azure.ai.agentserver.core.checkpoints azure.ai.agentserver.core.models azure.ai.agentserver.core.server azure.ai.agentserver.core.tools diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst index cf935aa1d1ed..68f155131f5c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.id_generator.rst @@ -9,18 +9,18 @@ azure.ai.agentserver.core.server.common.id\_generator package Submodules ---------- -azure.ai.agentserver.core.server.common.id\_generator.foundry\_id\_generator module ------------------------------------------------------------------------------------ +azure.ai.agentserver.core.server.common.id\_generator.\_foundry\_id\_generator module +------------------------------------------------------------------------------------ -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator +.. automodule:: azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.id\_generator.id\_generator module --------------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.id\_generator.\_id\_generator module +--------------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.id_generator.id_generator +.. 
automodule:: azure.ai.agentserver.core.server.common.id_generator._id_generator :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst index 8fb5b52e4465..fd02e856642c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.common.rst @@ -17,18 +17,18 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.common.agent\_run\_context module ------------------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_agent\_run\_context module +------------------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.agent_run_context +.. automodule:: azure.ai.agentserver.core.server.common._agent_run_context :inherited-members: :members: :undoc-members: -azure.ai.agentserver.core.server.common.constants module --------------------------------------------------------- +azure.ai.agentserver.core.server.common.\_constants module +---------------------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.common.constants +.. automodule:: azure.ai.agentserver.core.server.common._constants :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst index b82fa765b839..8363ec9e32d8 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.server.rst @@ -17,10 +17,10 @@ Subpackages Submodules ---------- -azure.ai.agentserver.core.server.base module --------------------------------------------- +azure.ai.agentserver.core.server.\_base module +---------------------------------------------- -.. automodule:: azure.ai.agentserver.core.server.base +.. automodule:: azure.ai.agentserver.core.server._base :inherited-members: :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst index 8182914f69f9..14304731f5e7 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.client.rst @@ -2,6 +2,6 @@ azure.ai.agentserver.core.tools.client package ============================================== .. automodule:: azure.ai.agentserver.core.tools.client - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst index c112ec2beabd..6b798851fed2 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst +++ b/sdk/agentserver/azure-ai-agentserver-core/doc/azure.ai.agentserver.core.tools.rst @@ -2,10 +2,9 @@ azure.ai.agentserver.core.tools package ======================================= .. 
automodule:: azure.ai.agentserver.core.tools - :inherited-members: + :inherited-members: BaseModel :members: :undoc-members: - :exclude-members: BaseModel,model_json_schema Subpackages ----------- diff --git a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml index 3829a7356919..a0bca5c434fa 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-core/pyproject.toml @@ -75,5 +75,3 @@ combine-as-imports = true breaking = false # incompatible python version pyright = false verifytypes = false -latestdependency = false -dependencies = false \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py index 099d8dc45181..2cf533eb33fb 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/bilingual_weekend_planner/main.py @@ -33,7 +33,7 @@ CreateResponse, Response as OpenAIResponse, ) -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py index af9812826941..3831f702564d 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/mcp_simple/mcp_simple.py @@ -29,7 +29,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, MCPListToolsItemResource, MCPListToolsTool, diff --git a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py index f6d2c08bb0b9..f4298d21d39c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py +++ b/sdk/agentserver/azure-ai-agentserver-core/samples/simple_mock_agent/custom_mock_agent_test.py @@ -3,7 +3,7 @@ from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ( +from azure.ai.agentserver.core.models._projects import ( ItemContentOutputText, ResponseCompletedEvent, ResponseCreatedEvent, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py index a46f45f7c739..fb6dc8858c86 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/common/test_foundry_id_generator.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
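
For readers unfamiliar with the `_to_thread` compatibility shim patched at the start of this section: `asyncio.to_thread` only ships with Python 3.9+, so on older interpreters an equivalent can be built from `run_in_executor`. A minimal sketch of that idea, not necessarily the SDK's exact implementation:

```python
import asyncio
import functools


async def to_thread_compat(func, *args, **kwargs):
    """Run a sync callable in a worker thread, like asyncio.to_thread (3.9+)."""
    loop = asyncio.get_running_loop()
    # functools.partial binds kwargs, since run_in_executor only forwards positional args.
    return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
```
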
# --------------------------------------------------------- -from azure.ai.agentserver.core.server.common.id_generator.foundry_id_generator import FoundryIdGenerator +from azure.ai.agentserver.core.server.common.id_generator._foundry_id_generator import FoundryIdGenerator def test_conversation_id_none_uses_response_partition(): diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py index a38871197cba..00137abecf15 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_conversation_persistence.py @@ -42,7 +42,7 @@ async def __anext__(self): def create_mock_agent(): """Create a mock FoundryCBAgent without calling __init__.""" - from azure.ai.agentserver.core.server.base import FoundryCBAgent + from azure.ai.agentserver.core.server._base import FoundryCBAgent # Create instance without calling __init__ agent = object.__new__(FoundryCBAgent) diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py index c2e3bea53287..f01c4977cfb0 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/server/test_response_metadata.py @@ -10,7 +10,7 @@ set_current_app, ) from azure.ai.agentserver.core.models import Response as OpenAIResponse -from azure.ai.agentserver.core.models.projects import ResponseCreatedEvent, ResponseErrorEvent +from azure.ai.agentserver.core.models._projects import ResponseCreatedEvent, ResponseErrorEvent from azure.ai.agentserver.core.server._response_metadata import ( METADATA_KEY, attach_foundry_metadata_to_response, diff --git a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py index 771ca0a0eb0c..35639ea8ae2c 100644 --- a/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py +++ b/sdk/agentserver/azure-ai-agentserver-core/tests/unit_tests/test_logger.py @@ -78,31 +78,31 @@ def test_logs_warning_for_invalid_resource_id(self): @pytest.mark.unit class TestGetApplicationInsightsConnstr: - """Tests for get_application_insights_connstr function.""" + """Tests for _get_application_insights_connstr function.""" def test_returns_connstr_from_env_var(self): """Test that connection string is returned from environment variable.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, {"APPLICATIONINSIGHTS_CONNECTION_STRING": "InstrumentationKey=test123"}, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result == "InstrumentationKey=test123" def test_returns_none_when_no_connstr_and_no_project(self): """Test that None is returned when no connection string and no project endpoint.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr with patch.dict(os.environ, { "APPLICATIONINSIGHTS_CONNECTION_STRING": "", "AZURE_AI_PROJECT_ENDPOINT": 
"", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr() + result = _get_application_insights_connstr() assert result is None or result == "" def test_logs_debug_when_not_configured(self): """Test that debug message is logged when not configured.""" - from azure.ai.agentserver.core.logger import get_application_insights_connstr + from azure.ai.agentserver.core.logger import _get_application_insights_connstr mock_logger = MagicMock() @@ -111,7 +111,7 @@ def test_logs_debug_when_not_configured(self): "AZURE_AI_PROJECT_ENDPOINT": "", "AGENT_PROJECT_RESOURCE_ID": "", }, clear=False): - result = get_application_insights_connstr(logger=mock_logger) + result = _get_application_insights_connstr(logger=mock_logger) # Debug should be called when not configured, or result should be None assert mock_logger.debug.called or result is None or result == "" diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py index 18e4d6bfbdc2..959432ada6af 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/__init__.py @@ -1,7 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=docstring-should-be-keyword +"""Public entry points for the Azure AI Agent Server LangGraph adapter.""" + __path__ = __import__("pkgutil").extend_path(__path__, __name__) from typing import Optional, Union, TYPE_CHECKING @@ -10,16 +11,16 @@ from ._context import LanggraphRunContext from ._version import VERSION -from .langgraph import LangGraphAdapter +from ._langgraph import LangGraphAdapter if TYPE_CHECKING: # pragma: no cover from langgraph.graph.state import CompiledStateGraph - from .models.response_api_converter import ResponseAPIConverter + from .models._response_api_converter import ResponseAPIConverter from azure.core.credentials_async import AsyncTokenCredential from azure.core.credentials import TokenCredential -def from_langgraph( +def from_langgraph( # pylint: disable=docstring-should-be-keyword agent: "CompiledStateGraph", /, credentials: Optional[Union["AsyncTokenCredential", "TokenCredential"]] = None, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py index d037088b18a5..354cf42fa06d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_context.py @@ -1,6 +1,8 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- +"""Execution context helpers for the LangGraph adapter.""" + import sys from dataclasses import dataclass from typing import Optional, Union @@ -15,11 +17,24 @@ @dataclass class LanggraphRunContext: + """Holds per-run state shared across LangGraph adapter components. + + :param agent_run: The current agent run context. + :type agent_run: AgentRunContext + :param tools: The resolved Foundry tool context for the run. 
+ :type tools: FoundryToolContext + """ + agent_run: AgentRunContext tools: FoundryToolContext def attach_to_config(self, config: RunnableConfig): + """Attach this run context to a LangChain runnable config. + + :param config: The runnable config to enrich. + :type config: RunnableConfig + """ config["configurable"]["__foundry_hosted_agent_langgraph_run_context__"] = self @classmethod @@ -46,6 +61,14 @@ def resolve(cls, @staticmethod def _resolve_runtime( runtime: Optional[Union[Runtime, ToolRuntime]] = None) -> Optional[Union[Runtime, ToolRuntime]]: + """Resolve the active runtime from the explicit runtime or thread-local state. + + :param runtime: An explicitly supplied runtime, if available. + :type runtime: Optional[Union[Runtime, ToolRuntime]] + + :return: The resolved runtime, if one is available. + :rtype: Optional[Union[Runtime, ToolRuntime]] + """ if runtime: return runtime if sys.version_info >= (3, 11): @@ -54,6 +77,14 @@ def _resolve_runtime( @staticmethod def from_config(config: RunnableConfig) -> Optional["LanggraphRunContext"]: + """Extract the run context from a runnable config. + + :param config: The runnable config carrying the context. + :type config: RunnableConfig + + :return: The extracted run context, if present. + :rtype: Optional[LanggraphRunContext] + """ context = config["configurable"].get("__foundry_hosted_agent_langgraph_run_context__") if isinstance(context, LanggraphRunContext): return context @@ -61,6 +92,14 @@ def from_config(config: RunnableConfig) -> Optional["LanggraphRunContext"]: @staticmethod def from_runtime(runtime: Union[Runtime, ToolRuntime]) -> Optional["LanggraphRunContext"]: + """Extract the run context from a LangGraph runtime wrapper. + + :param runtime: The runtime to inspect. + :type runtime: Union[Runtime, ToolRuntime] + + :return: The extracted run context, if present. + :rtype: Optional[LanggraphRunContext] + """ context = runtime.context if isinstance(context, LanggraphRunContext): return context diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py similarity index 59% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py index 37f7080ba81f..092c5bccc071 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/langgraph.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/_langgraph.py @@ -1,27 +1,26 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
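
The context plumbing above follows a simple pattern: stash the run context in the RunnableConfig's `configurable` dict under a reserved key, and recover it later with an `isinstance` check. A standalone sketch of that round trip (the `DemoContext` class and key are illustrative, not the SDK's types):

```python
from dataclasses import dataclass
from typing import Optional

_KEY = "__demo_run_context__"  # stand-in for the adapter's reserved private key


@dataclass
class DemoContext:
    response_id: str

    def attach_to_config(self, config: dict) -> None:
        # RunnableConfig is a TypedDict; "configurable" carries user-defined entries.
        config.setdefault("configurable", {})[_KEY] = self

    @staticmethod
    def from_config(config: dict) -> Optional["DemoContext"]:
        ctx = config.get("configurable", {}).get(_KEY)
        return ctx if isinstance(ctx, DemoContext) else None


config: dict = {"configurable": {"thread_id": "thread-1"}}
DemoContext(response_id="resp-123").attach_to_config(config)
assert DemoContext.from_config(config).response_id == "resp-123"
```
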
# --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation,broad-exception-caught,no-member -# mypy: disable-error-code="assignment,arg-type" import os import re -from typing import Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, List, Optional, cast from langgraph.graph.state import CompiledStateGraph from azure.ai.agentserver.core.constants import Constants from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.server.base import FoundryCBAgent +from azure.ai.agentserver.core.server._base import FoundryCBAgent from azure.ai.agentserver.core import AgentRunContext from azure.ai.agentserver.core.tools import OAuthConsentRequiredError # pylint:disable=import-error,no-name-in-module from ._context import LanggraphRunContext -from .models.response_api_converter import GraphInputArguments, ResponseAPIConverter -from .models.response_api_default_converter import ResponseAPIDefaultConverter -from .models.utils import is_state_schema_valid +from .models._response_api_converter import GraphInputArguments, ResponseAPIConverter +from .models._response_api_default_converter import ResponseAPIDefaultConverter +from .models._utils import is_state_schema_valid from .tools._context import FoundryToolContext from .tools._resolver import FoundryLangChainToolResolver if TYPE_CHECKING: + from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential logger = get_logger() @@ -35,7 +34,7 @@ class LangGraphAdapter(FoundryCBAgent): def __init__( self, graph: CompiledStateGraph, - credentials: "Optional[AsyncTokenCredential]" = None, + credentials: "Optional[AsyncTokenCredential | TokenCredential]" = None, converter: "Optional[ResponseAPIConverter]" = None, ) -> None: """ @@ -45,14 +44,15 @@ def __init__( and returns CompiledStateGraph (sync or async). :type graph: Union[CompiledStateGraph, GraphFactory] :param credentials: Azure credentials for authentication. - :type credentials: Optional[AsyncTokenCredential] + :type credentials: Optional[AsyncTokenCredential | TokenCredential] :param converter: custom response converter. :type converter: Optional[ResponseAPIConverter] """ - super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg + super().__init__(credentials=credentials) # pylint: disable=unexpected-keyword-arg self._graph = graph self._tool_resolver = FoundryLangChainToolResolver() self.azure_ai_tracer = None + self.converter: ResponseAPIConverter if not converter: if is_state_schema_valid(self._graph.builder.state_schema): @@ -63,6 +63,14 @@ def __init__( self.converter = converter async def agent_run(self, context: AgentRunContext): + """Execute a LangGraph-backed agent run. + + :param context: The agent run context supplied by Agent Server. + :type context: AgentRunContext + + :return: A response object or an async response stream. + :rtype: Any + """ # Resolve graph - always resolve if it's a factory function to get fresh graph each time # For factories, get a new graph instance per request to avoid concurrency issues try: @@ -82,12 +90,27 @@ async def agent_run(self, context: AgentRunContext): return self.respond_with_oauth_consent_astream(context, e) async def setup_lg_run_context(self, agent_run_context: AgentRunContext) -> LanggraphRunContext: + """Build the LangGraph run context for the current request. + + :param agent_run_context: The agent run context from the server layer. 
+ :type agent_run_context: AgentRunContext + + :return: The run context used by the adapter and tools. + :rtype: LanggraphRunContext + """ resolved = await self._tool_resolver.resolve_from_registry() return LanggraphRunContext( agent_run_context, FoundryToolContext(resolved)) def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None): + """Initialize LangSmith and Azure AI tracing hooks for the adapter. + + :param exporter_endpoint: Optional OTLP exporter endpoint. + :type exporter_endpoint: Optional[str] + :param app_insights_conn_str: Optional Application Insights connection string. + :type app_insights_conn_str: Optional[str] + """ # set env vars for langsmith os.environ["LANGSMITH_OTEL_ENABLED"] = "true" os.environ["LANGSMITH_TRACING"] = "true" @@ -103,14 +126,29 @@ def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=No name=self.get_agent_identifier(), ) logger.info("AzureAIOpenTelemetryTracer initialized successfully.") - except Exception as e: - logger.error(f"Failed to import AzureAIOpenTelemetryTracer, ignore: {e}") + except Exception as error: # pylint: disable=broad-except + logger.error("Failed to initialize AzureAIOpenTelemetryTracer, ignore: %s", error) def setup_otlp_exporter(self, endpoint, provider): + """Normalize the OTLP endpoint before delegating exporter setup. + + :param endpoint: The configured exporter endpoint. + :type endpoint: str + :param provider: The tracer provider receiving the exporter. + :type provider: Any + + :return: The configured exporter registration result. + :rtype: Any + """ endpoint = self.format_otlp_endpoint(endpoint) return super().setup_otlp_exporter(endpoint, provider) def get_trace_attributes(self): + """Return base tracing attributes for LangGraph spans. + + :return: The trace attributes for this adapter. + :rtype: dict + """ attrs = super().get_trace_attributes() attrs["service.namespace"] = "azure.ai.agentserver.langgraph" return attrs @@ -126,11 +164,11 @@ async def agent_run_non_stream(self, input_arguments: GraphInputArguments): :rtype: dict """ try: - result = await self._graph.ainvoke(**input_arguments) + result = await self._ainvoke_graph(input_arguments) output = await self.converter.convert_response_non_stream(result, input_arguments["context"]) return output - except Exception as e: - logger.error(f"Error during agent run: {e}", exc_info=True) + except Exception as e: # pylint: disable=broad-except + logger.error("Error during agent run: %s", e, exc_info=True) raise e async def agent_run_astream(self, @@ -145,16 +183,51 @@ async def agent_run_astream(self, :rtype: AsyncGenerator[dict] """ try: - logger.info(f"Starting streaming agent run {input_arguments['context'].agent_run.response_id}") - stream = self._graph.astream(**input_arguments) + logger.info("Starting streaming agent run %s", input_arguments["context"].agent_run.response_id) + stream = self._astream_graph(input_arguments) async for output_event in self.converter.convert_response_stream( - stream, - input_arguments["context"]): + stream, + input_arguments["context"], + ): yield output_event - except Exception as e: - logger.error(f"Error during streaming agent run: {e}", exc_info=True) + except Exception as e: # pylint: disable=broad-except + logger.error("Error during streaming agent run: %s", e, exc_info=True) raise e + async def _ainvoke_graph(self, input_arguments: GraphInputArguments) -> Any: + """Invoke the compiled graph with the LangGraph-supported arguments only. 
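
A recurring change in the surrounding hunks replaces f-string log calls with lazy %-style arguments, so formatting only happens when a record is actually emitted (and pylint's `logging-fstring-interpolation` check passes). Illustrative before/after:

```python
import logging

logger = logging.getLogger(__name__)
response_id = "resp-123"

# Eager: the f-string is built even when INFO is disabled.
logger.info(f"Starting streaming agent run {response_id}")

# Lazy: arguments are formatted only if a handler accepts the record.
logger.info("Starting streaming agent run %s", response_id)

# Exceptions follow the same pattern; exc_info=True attaches the traceback.
try:
    raise ValueError("boom")
except ValueError as error:
    logger.error("Error during agent run: %s", error, exc_info=True)
```
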
+ + :param input_arguments: The adapter-level graph invocation arguments. + :type input_arguments: GraphInputArguments + + :return: The graph execution result. + :rtype: Any + """ + invoke_kwargs = input_arguments.get("invoke_kwargs", {}) + return await self._graph.ainvoke( + input_arguments["input"], + config=input_arguments["config"], + stream_mode=input_arguments["stream_mode"], + **invoke_kwargs, + ) + + def _astream_graph(self, input_arguments: GraphInputArguments): + """Stream the compiled graph with the LangGraph-supported arguments only. + + :param input_arguments: The adapter-level graph invocation arguments. + :type input_arguments: GraphInputArguments + + :return: The async graph event iterator. + :rtype: AsyncIterator[Any] + """ + invoke_kwargs = input_arguments.get("invoke_kwargs", {}) + return self._graph.astream( + input_arguments["input"], + config=input_arguments["config"], + stream_mode=input_arguments["stream_mode"], + **invoke_kwargs, + ) + def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: LanggraphRunContext): """ Ensure the RunnableConfig is set in the input arguments. @@ -171,23 +244,36 @@ def ensure_runnable_config(self, input_arguments: GraphInputArguments, context: configurable["thread_id"] = thread_id else: configurable["thread_id"] = f"langgraph-{input_arguments['context'].agent_run.response_id}" - logger.debug(f"Conversation ID not provided, generate one: thread_id={configurable['thread_id']}") + logger.debug("Conversation ID not provided, generate one: thread_id=%s", configurable["thread_id"]) config["configurable"] = configurable context.attach_to_config(config) - callbacks = config.get("callbacks", []) # mypy: ignore-errors + callbacks = cast(List[object], config.get("callbacks") or []) if self.azure_ai_tracer and self.azure_ai_tracer not in callbacks: callbacks.append(self.azure_ai_tracer) config["callbacks"] = callbacks input_arguments["config"] = config def format_otlp_endpoint(self, endpoint: str) -> str: + """Ensure the OTLP endpoint includes the traces ingestion path. + + :param endpoint: The configured exporter endpoint. + :type endpoint: str + + :return: The normalized traces endpoint. + :rtype: str + """ m = re.match(r"^(https?://[^/]+)", endpoint) if m: return f"{m.group(1)}/v1/traces" return endpoint def get_agent_identifier(self) -> str: + """Resolve the agent identifier used by tracing integrations. + + :return: The configured agent name or identifier. 
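
The `format_otlp_endpoint` helper above keeps only the scheme and authority of whatever endpoint is configured, then appends the standard OTLP/HTTP traces path. A quick check of that behavior:

```python
import re


def format_otlp_endpoint(endpoint: str) -> str:
    # Keep only scheme://host[:port], then append the OTLP/HTTP traces path.
    m = re.match(r"^(https?://[^/]+)", endpoint)
    if m:
        return f"{m.group(1)}/v1/traces"
    return endpoint


assert format_otlp_endpoint("http://localhost:4318") == "http://localhost:4318/v1/traces"
assert format_otlp_endpoint("https://otel.example.com/ingest?x=1") == "https://otel.example.com/v1/traces"
# Non-HTTP endpoints pass through unchanged.
assert format_otlp_endpoint("grpc://collector:4317") == "grpc://collector:4317"
```
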
+ :rtype: str + """ agent_name = os.getenv(Constants.AGENT_NAME) if agent_name: return agent_name diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py index 999b87dc8fe8..e2fcbe8bf5f0 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/checkpointer/_foundry_checkpoint_saver.py @@ -21,7 +21,7 @@ from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, @@ -186,7 +186,7 @@ async def _load_pending_writes( ): item = await self._client.read_item(item_id) if item: - task_id, channel, value, _ = self.serde.loads_typed(item.data) + task_id, channel, value, _ = self.serde.loads(item.data) writes.append((task_id, channel, value)) except (ValueError, TypeError): continue @@ -218,7 +218,7 @@ async def _load_blobs( item_id = CheckpointItemId(session_id=thread_id, item_id=blob_item_id) item = await self._client.read_item(item_id) if item: - type_tag, data = self.serde.loads_typed(item.data) + type_tag, data = self.serde.loads(item.data) if type_tag != "empty": channel_values[channel] = data @@ -256,7 +256,7 @@ async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: return None # Deserialize checkpoint data - checkpoint_data = self.serde.loads_typed(item.data) + checkpoint_data = self.serde.loads(item.data) checkpoint: Checkpoint = checkpoint_data["checkpoint"] metadata: CheckpointMetadata = checkpoint_data["metadata"] @@ -335,7 +335,7 @@ async def aput( checkpoint_copy = checkpoint.copy() channel_values: Dict[str, Any] = checkpoint_copy.pop("channel_values", {}) # type: ignore[misc] - checkpoint_data = self.serde.dumps_typed({ + checkpoint_data = self.serde.dumps({ "checkpoint": checkpoint_copy, "metadata": metadata, }) @@ -354,9 +354,9 @@ async def aput( # Create blob items for channel values with new versions for channel, version in new_versions.items(): if channel in channel_values: - blob_data = self.serde.dumps_typed(channel_values[channel]) + blob_data = self.serde.dumps(channel_values[channel]) else: - blob_data = self.serde.dumps_typed(("empty", b"")) + blob_data = self.serde.dumps(("empty", b"")) blob_item_id = make_item_id( checkpoint_ns, checkpoint_id, "blob", f"{channel}:{version}" @@ -412,7 +412,7 @@ async def aput_writes( items: List[CheckpointItem] = [] for idx, (channel, value) in enumerate(writes): - write_data = self.serde.dumps_typed((task_id, channel, value, task_path)) + write_data = self.serde.dumps((task_id, channel, value, task_path)) write_item_id = make_item_id( checkpoint_ns, checkpoint_id, "writes", f"{task_id}:{idx}" ) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py index d540fd20468c..c4a276af508a 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/__init__.py @@ -1,3 
+1,20 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_default_converter import ResponseAPIDefaultConverter +from ._response_api_request_converter import ( + ResponseAPIMessageRequestConverter, + ResponseAPIRequestConverter, + convert_item_resource_to_message, +) + +__all__ = [ + "ResponseAPIConverter", + "GraphInputArguments", + "ResponseAPIDefaultConverter", + "ResponseAPIRequestConverter", + "ResponseAPIMessageRequestConverter", + "convert_item_resource_to_message", +] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py similarity index 81% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py index 9f3c693800a1..0abf0f4eda12 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_helper.py @@ -11,8 +11,8 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import (ResponseInputItemParam, ResponseInputParam) +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import (ResponseInputItemParam, ResponseInputParam) from .._context import LanggraphRunContext INTERRUPT_NODE_NAME = "__interrupt__" @@ -21,7 +21,13 @@ class HumanInTheLoopHelper: """Helper class for managing human-in-the-loop interactions in LangGraph.""" + def __init__(self, context: LanggraphRunContext): + """Initialize the helper with the current LangGraph run context. + + :param context: The current run context. + :type context: LanggraphRunContext + """ self.context = context def has_interrupt(self, state: Optional[StateSnapshot]) -> bool: @@ -68,6 +74,17 @@ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_model """ raise NotImplementedError("Subclasses must implement convert_interrupt method.") + def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str], Optional[str], Optional[str]]: + """Convert an interrupt into function-call fields. + + :param interrupt: The interrupt to convert. + :type interrupt: Interrupt + + :return: A tuple of function name, call id, and serialized arguments. + :rtype: tuple[Optional[str], Optional[str], Optional[str]] + """ + raise NotImplementedError("Subclasses must implement interrupt_to_function_call method.") + def validate_and_convert_human_feedback( self, state: Optional[StateSnapshot], input_data: Union[str, ResponseInputParam] ) -> Optional[Command]: @@ -104,6 +121,16 @@ def validate_and_convert_human_feedback( def _validate_input_format( self, input_data: Union[str, ResponseInputParam], interrupt_obj: Interrupt ) -> Optional[ResponseInputItemParam]: + """Validate the interrupt feedback payload format. 
+ + :param input_data: The request input payload to validate. + :type input_data: Union[str, ResponseInputParam] + :param interrupt_obj: The interrupt that the feedback must match. + :type interrupt_obj: Interrupt + + :return: The validated function call output item, if valid. + :rtype: Optional[ResponseInputItemParam] + """ if isinstance(input_data, str): logger.warning("Expecting function call output item, got string: %s", input_data) return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py similarity index 64% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py index e1396ba90577..7da0e64dfecb 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/human_in_the_loop_json_helper.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_human_in_the_loop_json_helper.py @@ -3,7 +3,7 @@ # --------------------------------------------------------- import json -from typing import Optional, Union +from typing import Optional from langgraph.types import ( Command, @@ -11,13 +11,13 @@ ) from azure.ai.agentserver.core.logger import get_logger -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.models.openai import ( +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.models._openai import ( ResponseInputItemParam, ) -from azure.ai.agentserver.core.server.common.constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME +from azure.ai.agentserver.core.server.common._constants import HUMAN_IN_THE_LOOP_FUNCTION_NAME -from .human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_helper import HumanInTheLoopHelper logger = get_logger() @@ -31,10 +31,21 @@ class HumanInTheLoopJsonHelper(HumanInTheLoopHelper): """ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_models.ItemResource]: + """Convert an interrupt into an in-progress function-call item resource. + + :param interrupt_info: The interrupt emitted by LangGraph. + :type interrupt_info: Interrupt + + :return: The corresponding function-call item resource, if conversion succeeds. + :rtype: Optional[project_models.ItemResource] + """ if not isinstance(interrupt_info, Interrupt): logger.warning("Interrupt is not of type Interrupt: %s", interrupt_info) return None name, call_id, arguments = self.interrupt_to_function_call(interrupt_info) + if name is None or call_id is None or arguments is None: + logger.warning("Interrupt could not be converted to a function call: %s", interrupt_info) + return None return project_models.FunctionToolCallItemResource( call_id=call_id, name=name, @@ -43,7 +54,7 @@ def convert_interrupt(self, interrupt_info: Interrupt) -> Optional[project_model status="in_progress", ) - def interrupt_to_function_call(self, interrupt: Interrupt) : + def interrupt_to_function_call(self, interrupt: Interrupt) -> tuple[Optional[str], Optional[str], Optional[str]]: """ Convert an Interrupt to a function call tuple. 
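
To make the human-in-the-loop contract above concrete: an interrupt is surfaced to the client as an in-progress function call, and the client resumes the graph by sending back a matching function-call-output item whose `output` carries the decision. A schematic round trip under assumed payload shapes (the names and dict shapes here are illustrative, not the SDK's wire format):

```python
import json
from typing import Optional

HITL_FUNCTION_NAME = "human_in_the_loop"  # assumption; the real constant lives in core


def interrupt_to_function_call(interrupt_id: str, value: object) -> dict:
    # Outbound: expose the interrupt as an in-progress function call item.
    try:
        arguments = value if isinstance(value, str) else json.dumps(value)
    except (TypeError, ValueError):
        arguments = str(value)
    return {
        "type": "function_call",
        "name": HITL_FUNCTION_NAME,
        "call_id": interrupt_id,
        "arguments": arguments,
        "status": "in_progress",
    }


def feedback_to_resume(input_item: dict) -> Optional[dict]:
    # Inbound: a function_call_output whose output resumes the paused graph.
    output = input_item.get("output")
    if not isinstance(output, str):
        return None
    return {"resume": json.loads(output)}  # stands in for langgraph.types.Command


call = interrupt_to_function_call("intr_1", {"question": "approve?"})
resume = feedback_to_resume({"type": "function_call_output",
                             "call_id": call["call_id"],
                             "output": json.dumps({"approved": True})})
assert resume == {"resume": {"approved": True}}
```
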
@@ -51,19 +62,27 @@ def interrupt_to_function_call(self, interrupt: Interrupt) : :type interrupt: Interrupt :return: A tuple of (name, call_id, argument). - :rtype: tuple[str | None, str | None, str | None] + :rtype: tuple[Optional[str], Optional[str], Optional[str]] """ if isinstance(interrupt.value, str): arguments = interrupt.value else: try: arguments = json.dumps(interrupt.value) - except Exception as e: # pragma: no cover - fallback # pylint: disable=broad-exception-caught - logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, e) + except (TypeError, ValueError) as error: # pragma: no cover - fallback + logger.error("Failed to serialize interrupt value to JSON: %s, error: %s", interrupt.value, error) arguments = str(interrupt.value) return HUMAN_IN_THE_LOOP_FUNCTION_NAME, interrupt.id, arguments - def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Union[Command, None]: + def convert_input_item_to_command(self, input_item: ResponseInputItemParam) -> Optional[Command]: + """Convert a function-call-output item into a LangGraph resume command. + + :param input_item: The function call output item supplied by the client. + :type input_item: ResponseInputItemParam + + :return: The parsed LangGraph command, if valid. + :rtype: Optional[Command] + """ output_str = input_item.get("output") if not isinstance(output_str, str): logger.error("Invalid output type in function call output: %s", input_item) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py similarity index 87% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py index 32cbf93a4bfb..bfcd835b8902 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# mypy: disable-error-code="call-overload,override" """Base interface for converting between LangGraph internal state and OpenAI-style responses. A ResponseAPIConverter implementation bridges: @@ -30,7 +29,17 @@ from .._context import LanggraphRunContext -class GraphInputArguments(TypedDict): +class _GraphOptionalInvokeArguments(TypedDict, total=False): + """Optional adapter-level graph invocation arguments. + + :ivar invoke_kwargs: Additional keyword arguments forwarded to LangGraph invocation methods. 
+ :vartype invoke_kwargs: Dict[str, Any] + """ + + invoke_kwargs: Dict[str, Any] + + +class GraphInputArguments(_GraphOptionalInvokeArguments): """TypedDict for LangGraph input arguments.""" input: Union[Dict[str, Any], Command, None] config: RunnableConfig @@ -79,11 +88,11 @@ async def convert_response_non_stream( """ @abstractmethod - async def convert_response_stream( - self, - output: AsyncIterator[Union[Dict[str, Any], Any]], - context: LanggraphRunContext, - ) -> AsyncIterable[ResponseStreamEvent]: + def convert_response_stream( + self, + output: AsyncIterator[Union[Dict[str, Any], Any]], + context: LanggraphRunContext, + ) -> AsyncIterable[ResponseStreamEvent]: """Convert an async iterator of LangGraph stream events into stream events. This is a convenience wrapper around state_to_response_stream that retrieves diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py similarity index 71% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py index b64afc900f9d..1def3b0a2d86 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_default_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_default_converter.py @@ -1,7 +1,6 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=logging-fstring-interpolation from __future__ import annotations import time @@ -15,17 +14,17 @@ from azure.ai.agentserver.core.logger import get_logger, get_project_endpoint from azure.ai.agentserver.core.models import Response, ResponseStreamEvent -from .human_in_the_loop_helper import HumanInTheLoopHelper -from .human_in_the_loop_json_helper import HumanInTheLoopJsonHelper -from .response_api_converter import GraphInputArguments, ResponseAPIConverter -from .response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, - ResponseAPINonStreamResponseConverter) -from .response_api_request_converter import ( +from ._human_in_the_loop_helper import HumanInTheLoopHelper +from ._human_in_the_loop_json_helper import HumanInTheLoopJsonHelper +from ._response_api_converter import GraphInputArguments, ResponseAPIConverter +from ._response_api_non_stream_response_converter import (ResponseAPIMessagesNonStreamResponseConverter, + ResponseAPINonStreamResponseConverter) +from ._response_api_request_converter import ( ResponseAPIMessageRequestConverter, ResponseAPIRequestConverter, convert_item_resource_to_message, ) -from .response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter +from ._response_api_stream_response_converter import ResponseAPIMessagesStreamResponseConverter from .._context import LanggraphRunContext logger = get_logger() @@ -48,6 +47,21 @@ def __init__(self, ResponseAPINonStreamResponseConverter ] | None = None, create_human_in_the_loop_helper: Callable[[LanggraphRunContext], HumanInTheLoopHelper] | None = None): + """Initialize the default LangGraph response converter. 
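
The two-class TypedDict split introduced above is the standard way to mix required and optional keys: optional keys live on a `total=False` base, required keys on the subclass. A self-contained illustration of the same pattern:

```python
from typing import Any, Dict, TypedDict


class _OptionalArgs(TypedDict, total=False):
    invoke_kwargs: Dict[str, Any]  # may be omitted entirely


class InputArguments(_OptionalArgs):
    input: Dict[str, Any]  # required
    stream_mode: str       # required


args: InputArguments = {"input": {"messages": []}, "stream_mode": "updates"}
# .get() with a default covers the omitted optional key, as the adapter does:
kwargs = args.get("invoke_kwargs", {})
assert kwargs == {}
```
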
+ + :param graph: The compiled LangGraph state graph. + :type graph: CompiledStateGraph + :param create_request_converter: Optional factory for request converters. + :type create_request_converter: Optional[Callable[[LanggraphRunContext], ResponseAPIRequestConverter]] + :param create_stream_response_converter: Optional factory for streaming converters. + :type create_stream_response_converter: + Optional[Callable[[LanggraphRunContext], ResponseAPIMessagesStreamResponseConverter]] + :param create_non_stream_response_converter: Optional factory for non-stream converters. + :type create_non_stream_response_converter: + Optional[Callable[[LanggraphRunContext], ResponseAPINonStreamResponseConverter]] + :param create_human_in_the_loop_helper: Optional factory for HITL helpers. + :type create_human_in_the_loop_helper: Optional[Callable[[LanggraphRunContext], HumanInTheLoopHelper]] + """ self._graph = graph self._custom_request_converter_factory = create_request_converter self._custom_stream_response_converter_factory = create_stream_response_converter @@ -55,6 +69,14 @@ def __init__(self, self._custom_human_in_the_loop_helper_factory = create_human_in_the_loop_helper async def convert_request(self, context: LanggraphRunContext) -> GraphInputArguments: + """Convert the incoming request into graph input arguments. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The graph invocation arguments. + :rtype: GraphInputArguments + """ prev_state = await self._aget_state(context) input_data = await self._convert_request_input_with_history(context, prev_state) stream_mode = self.get_stream_mode(context) @@ -63,10 +85,21 @@ async def convert_request(self, context: LanggraphRunContext) -> GraphInputArgum stream_mode=stream_mode, config={}, context=context, + invoke_kwargs={}, ) async def convert_response_non_stream( self, output: Union[dict[str, Any], Any], context: LanggraphRunContext) -> Response: + """Convert non-stream graph output into a final response object. + + :param output: The graph output to convert. + :type output: Union[dict[str, Any], Any] + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The final response object. 
+ :rtype: Response + """ agent_run_context = context.agent_run converter = self._create_non_stream_response_converter(context) converted_output = converter.convert(output) @@ -84,28 +117,47 @@ async def convert_response_non_stream( ) return response - async def convert_response_stream( # type: ignore[override] + def convert_response_stream( self, output: AsyncIterator[Union[Dict[str, Any], Any]], context: LanggraphRunContext, ) -> AsyncIterable[ResponseStreamEvent]: - converter = self._create_stream_response_converter(context) - async for event in output: - converted_output = converter.convert(event) - for e in converted_output: - yield e + async def _stream() -> AsyncIterator[ResponseStreamEvent]: + converter = self._create_stream_response_converter(context) + async for event in output: + converted_output = converter.convert(event) + for converted_event in converted_output: + yield converted_event - state = await self._aget_state(context) - finalized_output = converter.finalize(state) # finalize the response with graph state after stream - for event in finalized_output: - yield event + state = await self._aget_state(context) + finalized_output = converter.finalize(state) # finalize the response with graph state after stream + for event in finalized_output: + yield event + + return _stream() def get_stream_mode(self, context: LanggraphRunContext) -> StreamMode: + """Select the graph stream mode for the current request. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The stream mode to use for execution. + :rtype: StreamMode + """ if context.agent_run.stream: return "messages" return "updates" def _create_request_converter(self, context: LanggraphRunContext) -> ResponseAPIRequestConverter: + """Create the request converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The request converter. + :rtype: ResponseAPIRequestConverter + """ if self._custom_request_converter_factory: return self._custom_request_converter_factory(context) data = context.agent_run.request @@ -114,6 +166,14 @@ def _create_request_converter(self, context: LanggraphRunContext) -> ResponseAPI def _create_stream_response_converter( self, context: LanggraphRunContext ) -> ResponseAPIMessagesStreamResponseConverter: + """Create the stream response converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The stream response converter. + :rtype: ResponseAPIMessagesStreamResponseConverter + """ if self._custom_stream_response_converter_factory: return self._custom_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) @@ -122,12 +182,28 @@ def _create_stream_response_converter( def _create_non_stream_response_converter( self, context: LanggraphRunContext ) -> ResponseAPINonStreamResponseConverter: + """Create the non-stream response converter for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The non-stream response converter. 
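
`convert_response_stream` above changes from an `async def` generator into a plain method that builds and returns an inner async generator. The payoff is that the method's type now matches an abstract signature returning `AsyncIterable` directly (rather than a coroutine producing one), while callers still just `async for` over the result. A minimal sketch of the pattern:

```python
import asyncio
from typing import AsyncIterator


class Converter:
    def convert_stream(self, source: AsyncIterator[int]) -> AsyncIterator[str]:
        # Plain method: returns the iterator immediately, nothing to await here.
        async def _stream() -> AsyncIterator[str]:
            async for event in source:
                yield f"event-{event}"
            yield "done"  # finalization step after the source is exhausted

        return _stream()


async def main() -> None:
    async def numbers() -> AsyncIterator[int]:
        for n in range(3):
            yield n

    async for item in Converter().convert_stream(numbers()):
        print(item)


asyncio.run(main())
```
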
+ :rtype: ResponseAPINonStreamResponseConverter + """ if self._custom_non_stream_response_converter_factory: return self._custom_non_stream_response_converter_factory(context) hitl_helper = self._create_human_in_the_loop_helper(context) return ResponseAPIMessagesNonStreamResponseConverter(context, hitl_helper) def _create_human_in_the_loop_helper(self, context: LanggraphRunContext) -> HumanInTheLoopHelper: + """Create the human-in-the-loop helper for the current run. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The human-in-the-loop helper. + :rtype: HumanInTheLoopHelper + """ if self._custom_human_in_the_loop_helper_factory: return self._custom_human_in_the_loop_helper_factory(context) return HumanInTheLoopJsonHelper(context) @@ -180,7 +256,7 @@ async def _convert_request_input_with_history( prev_state, context.agent_run.request.get("input") ) if command is not None: - logger.info(f"HITL command detected for conversation {conversation_id}") + logger.info("HITL command detected for conversation %s", conversation_id) return command # Convert current request input @@ -190,7 +266,7 @@ async def _convert_request_input_with_history( # Check if checkpoint exists has_checkpoint = prev_state is not None and prev_state.values is not None and len(prev_state.values) > 0 if has_checkpoint: - logger.info(f"Checkpoint found for conversation {conversation_id}, using existing state") + logger.info("Checkpoint found for conversation %s, using existing state", conversation_id) return current_input # No checkpoint - try to fetch historical items from AIProjectClient @@ -198,11 +274,11 @@ async def _convert_request_input_with_history( logger.debug("No conversation_id provided, skipping historical items fetch") return current_input - logger.info(f"No checkpoint found for conversation {conversation_id}, fetching historical items") + logger.info("No checkpoint found for conversation %s, fetching historical items", conversation_id) historical_messages = await self._fetch_historical_items(conversation_id) if not historical_messages: - logger.info(f"No historical items found for conversation {conversation_id}") + logger.info("No historical items found for conversation %s", conversation_id) return current_input # Merge historical messages with current input, avoiding duplicates @@ -238,7 +314,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage from openai import AsyncOpenAI from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider - logger.debug(f"Creating AsyncOpenAI client for endpoint: {endpoint}/openai") + logger.debug("Creating AsyncOpenAI client for endpoint: %s/openai", endpoint) credential = DefaultAzureCredential() token_provider = get_bearer_token_provider(credential, "https://ai.azure.com/.default") @@ -252,7 +328,7 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage items.append(item) items.reverse() - logger.info(f"Fetched {len(items)} historical items from conversation {conversation_id}") + logger.info("Fetched %s historical items from conversation %s", len(items), conversation_id) # Convert items to LangGraph messages messages = [] @@ -267,11 +343,20 @@ async def _fetch_historical_items(self, conversation_id: str) -> List[AnyMessage return messages - except ImportError as e: - logger.warning(f"OpenAI or Azure Identity not available, cannot fetch historical items: {e}", exc_info=True) + except ImportError as error: + logger.warning( + "OpenAI or 
Azure Identity not available, cannot fetch historical items: %s", + error, + exc_info=True, + ) return [] - except Exception as e: # pylint: disable=broad-except - logger.warning(f"Failed to fetch historical items for conversation {conversation_id}: {e}", exc_info=True) + except Exception as error: # pylint: disable=broad-except + logger.warning( + "Failed to fetch historical items for conversation %s: %s", + conversation_id, + error, + exc_info=True, + ) return [] def _merge_messages_without_duplicates( @@ -298,8 +383,10 @@ def _merge_messages_without_duplicates( if not current_messages or not historical_messages: merged = list(historical_messages) + list(current_messages) logger.info( - f"Merged {len(historical_messages)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(historical_messages), + len(current_messages), + conversation_id, ) return merged @@ -322,36 +409,50 @@ def _merge_messages_without_duplicates( curr_content = self._normalize_content(curr_msg.content if hasattr(curr_msg, 'content') else "") logger.debug( - f"Comparing message {i}: historical({hist_type}, '{hist_content}') " - f"vs current({curr_type}, '{curr_content}')" + "Comparing message %s: historical(%s, '%s') vs current(%s, '%s')", + i, + hist_type, + hist_content, + curr_type, + curr_content, ) # Compare type and content if hist_type != curr_type: - logger.debug(f"Message {i} type mismatch: {hist_type} != {curr_type}") + logger.debug("Message %s type mismatch: %s != %s", i, hist_type, curr_type) all_match = False break if hist_content != curr_content: - logger.debug(f"Message {i} content mismatch") + logger.debug("Message %s content mismatch", i) all_match = False break if all_match: # Remove the last N historical messages (they're duplicates) filtered_historical = filtered_historical[:-n] - logger.info(f"Filtered {n} duplicate items from end of historical items") + logger.info("Filtered %s duplicate items from end of historical items", n) # Prepend historical messages to current messages merged = filtered_historical + list(current_messages) logger.info( - f"Merged {len(filtered_historical)} historical items with {len(current_messages)} " - f"current items for conversation {conversation_id}" + "Merged %s historical items with %s current items for conversation %s", + len(filtered_historical), + len(current_messages), + conversation_id, ) return merged async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnapshot]: + """Fetch the persisted checkpoint state for the current conversation. + + :param context: The run context for the current request. + :type context: LanggraphRunContext + + :return: The persisted state snapshot, if available. 
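
The duplicate-merge logic above boils down to: if the last N historical messages match the N current messages by type and normalized content, drop that historical suffix before concatenating. A simplified sketch over (type, content) tuples (the real code compares message objects and normalizes their content first):

```python
from typing import List, Tuple

Message = Tuple[str, str]  # (type, content) stand-in for AnyMessage


def merge_without_duplicates(historical: List[Message], current: List[Message]) -> List[Message]:
    n = len(current)
    if n and len(historical) >= n and historical[-n:] == current:
        historical = historical[:-n]  # the tail is a re-send of the current input
    return historical + current


hist = [("human", "hi"), ("ai", "hello"), ("human", "plan my day")]
curr = [("human", "plan my day")]
assert merge_without_duplicates(hist, curr) == [
    ("human", "hi"), ("ai", "hello"), ("human", "plan my day")]
```
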
+ :rtype: Optional[StateSnapshot] + """ thread_id = context.agent_run.conversation_id if not thread_id: logger.debug("No conversation_id provided, skipping checkpoint lookup") @@ -360,12 +461,12 @@ async def _aget_state(self, context: LanggraphRunContext) -> Optional[StateSnaps configurable={"thread_id": thread_id}, ) if self._graph.checkpointer: - logger.debug(f"Checking for existing checkpoint for conversation {thread_id}") + logger.debug("Checking for existing checkpoint for conversation %s", thread_id) state = await self._graph.aget_state(config=config) if state and state.values: - logger.debug(f"Checkpoint state retrieved for conversation {thread_id}") + logger.debug("Checkpoint state retrieved for conversation %s", thread_id) else: - logger.debug(f"No checkpoint state found for conversation {thread_id}") + logger.debug("No checkpoint state found for conversation %s", thread_id) return state logger.debug("No checkpointer configured for graph, skipping checkpoint lookup") return None @@ -420,7 +521,7 @@ def _filter_incomplete_tool_calls(self, messages: List[AnyMessage]) -> List[AnyM result.append(msg) if removed_count > 0: - logger.info(f"Filtered {removed_count} messages with incomplete tool call sequences") + logger.info("Filtered %s messages with incomplete tool call sequences", removed_count) return result diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py similarity index 67% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py index 7ec8bdf14f1a..5dc1d648cbef 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_non_stream_response_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_non_stream_response_converter.py @@ -1,22 +1,20 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
 # ---------------------------------------------------------
-# pylint: disable=logging-fstring-interpolation,broad-exception-caught,logging-not-lazy
-# mypy: disable-error-code="valid-type,call-overload,attr-defined"
 import copy
 from abc import ABC, abstractmethod
-from typing import Any, Collection, Iterable, List, Union
+from typing import Any, Collection, Iterable, List, Optional, Union
 
 from langchain_core import messages
 from langchain_core.messages import AnyMessage
 
 from azure.ai.agentserver.core.logger import get_logger
-from azure.ai.agentserver.core.models import projects as project_models
-from .human_in_the_loop_helper import (
+from azure.ai.agentserver.core.models import _projects as project_models
+from ._human_in_the_loop_helper import (
     HumanInTheLoopHelper,
     INTERRUPT_NODE_NAME,
 )
-from .utils import extract_function_call
+from ._utils import extract_function_call
 from .._context import LanggraphRunContext
 
 logger = get_logger()
@@ -48,13 +46,28 @@ class ResponseAPIMessagesNonStreamResponseConverter(ResponseAPINonStreamResponse
 
     def __init__(self, context: LanggraphRunContext, hitl_helper: HumanInTheLoopHelper):
+        """Initialize the non-stream response converter.
+
+        :param context: The run context for the current request.
+        :type context: LanggraphRunContext
+        :param hitl_helper: The helper used for interrupt conversion.
+        :type hitl_helper: HumanInTheLoopHelper
+        """
         self.context = context
         self.hitl_helper = hitl_helper
 
     def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.ItemResource]:
+        """Convert graph output into response item resources.
+
+        :param output: The graph output to convert.
+        :type output: Union[dict[str, Any], Any]
+
+        :return: The converted response item resources.
+        :rtype: list[project_models.ItemResource]
+        """
         res: list[project_models.ItemResource] = []
         if not isinstance(output, list):
-            logger.error(f"Expected output to be a list, got {type(output)}: {output}")
+            logger.error("Expected output to be a list, got %s: %s", type(output), output)
             raise ValueError(f"Invalid output format. Expected a list, got {type(output)}.")
         for step in output:
             for node_name, node_output in step.items():
@@ -65,12 +78,22 @@ def convert(self, output: Union[dict[str, Any], Any]) -> list[project_models.Ite
 
     def _convert_node_output(
         self, node_name: str, node_output: Any
     ) -> Iterable[project_models.ItemResource]:
+        """Convert a single node update into response item resources.
+
+        :param node_name: The name of the node that produced the output.
+        :type node_name: str
+        :param node_output: The node output payload.
+        :type node_output: Any
+
+        :return: An iterable of converted item resources.
+        :rtype: Iterable[project_models.ItemResource]
+        """
         if node_name == INTERRUPT_NODE_NAME:
             yield from self.hitl_helper.convert_interrupts(node_output)
         else:
             message_arr = node_output.get("messages")
             if not message_arr or not isinstance(message_arr, Collection):
-                logger.warning(f"No messages found in node {node_name} output: {node_output}")
+                logger.warning("No messages found in node %s output: %s", node_name, node_output)
                 return
 
             for message in message_arr:
@@ -78,11 +101,18 @@ def _convert_node_output(
                     converted = self.convert_output_message(message)
                     if converted:
                         yield converted
-                except Exception as e:
-                    logger.error(f"Error converting message {message}: {e}")
+                except (AttributeError, TypeError, ValueError) as error:
+                    logger.error("Error converting message %s: %s", message, error)
+
+    def convert_output_message(self, output_message: AnyMessage) -> Optional[project_models.ItemResource]:
+        """Convert a single LangChain message into a response item resource.
 
-    def convert_output_message(self, output_message: AnyMessage):  # pylint: disable=inconsistent-return-statements
-        # Implement the conversion logic for inner inputs
+        :param output_message: The message to convert.
+        :type output_message: AnyMessage
+
+        :return: The converted item resource, if supported.
+        :rtype: Optional[project_models.ItemResource]
+        """
         if isinstance(output_message, messages.HumanMessage):
             return project_models.ResponsesUserMessageItemResource(
                 content=self.convert_MessageContent(
@@ -104,11 +134,17 @@ def convert_output_message(self, output_message: AnyMessage):  # pylint: disable
                 # If there are tool calls, we assume there is only ONE function call
                 if len(output_message.tool_calls) > 1:
                     logger.warning(
-                        f"There are {len(output_message.tool_calls)} tool calls found. "
-                        + "Only the first one will be processed."
+                        "There are %s tool calls found. Only the first one will be processed.",
+                        len(output_message.tool_calls),
                     )
                 tool_call = output_message.tool_calls[0]
                 name, call_id, argument = extract_function_call(tool_call)
+                if not isinstance(call_id, str) or not call_id:
+                    raise ValueError(f"Function tool call missing call_id: {tool_call}")
+                if not isinstance(name, str) or not name:
+                    raise ValueError(f"Function tool call missing name: {tool_call}")
+                if not isinstance(argument, str):
+                    raise ValueError(f"Function tool call missing arguments: {tool_call}")
                 return project_models.FunctionToolCallItemResource(
                     call_id=call_id,
                     name=name,
@@ -124,16 +160,30 @@ def convert_output_message(self, output_message: AnyMessage):  # pylint: disable
                 status="completed",
             )
         if isinstance(output_message, messages.ToolMessage):
+            if not isinstance(output_message.content, str):
+                raise ValueError(f"Function tool output must be a string: {output_message}")
             return project_models.FunctionToolCallOutputItemResource(
                 call_id=output_message.tool_call_id,
                 output=output_message.content,
                 id=self.context.agent_run.id_generator.generate_function_output_id(),
+                status="completed",
             )
-        logger.warning(f"Unsupported message type: {type(output_message)}, {output_message}")
+        logger.warning("Unsupported message type: %s, %s", type(output_message), output_message)
+        return None
 
     def convert_MessageContent(
         self, content, role: project_models.ResponsesMessageRole
     ) -> List[project_models.ItemContent]:
+        """Convert message content into response item content objects.
+
+        :param content: The content payload to convert.
+        :type content: Any
+        :param role: The role associated with the content.
+        :type role: project_models.ResponsesMessageRole
+
+        :return: The converted item content list.
+        :rtype: List[project_models.ItemContent]
+        """
         if isinstance(content, str):
             return [self.convert_MessageContentItem(content, role)]
         return [self.convert_MessageContentItem(item, role) for item in content]
@@ -141,6 +191,16 @@ def convert_MessageContent(
     def convert_MessageContentItem(
         self, content, role: project_models.ResponsesMessageRole
     ) -> project_models.ItemContent:
+        """Convert one content item into a response item content model.
+
+        :param content: The content item to convert.
+        :type content: Any
+        :param role: The role associated with the content item.
+        :type role: project_models.ResponsesMessageRole
+
+        :return: The converted content model.
+        :rtype: project_models.ItemContent
+        """
         content_dict = copy.deepcopy(content) if isinstance(content, dict) else {"text": content}
 
         content_type = None
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py
similarity index 69%
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py
rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py
index 486545ef078a..a9f8b04221b8 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_request_converter.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_request_converter.py
@@ -1,11 +1,9 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-# pylint: disable=logging-fstring-interpolation
-# mypy: ignore-errors
 from abc import ABC, abstractmethod
 import json
-from typing import Dict, List
+from typing import Any, Dict, List, Optional, cast
 
 from langchain_core.messages import (
     AIMessage,
@@ -17,7 +15,9 @@
 from langchain_core.messages.tool import ToolCall
 
 from azure.ai.agentserver.core.logger import get_logger
-from azure.ai.agentserver.core.models import CreateResponse, openai as openai_models, projects as project_models
+from azure.ai.agentserver.core.models import (
+    CreateResponse, _openai as openai_models, _projects as project_models
+)
 
 logger = get_logger()
 
@@ -38,7 +38,7 @@
 }
 
 
-def convert_item_resource_to_message(item: Dict) -> AnyMessage:
+def convert_item_resource_to_message(item: Dict) -> Optional[AnyMessage]:
     """
     Convert an ItemResource (from AIProjectClient conversation items) to a LangGraph message.
@@ -67,13 +67,11 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage:
                 # Fallback: try to get any text field
                 text_content = content[0].get("text", "")
                 content = text_content
-        elif isinstance(content, str):
-            pass  # content is already a string
-        else:
+        elif not isinstance(content, str):
             content = str(content) if content else ""
 
         if role not in role_mapping:
-            logger.warning(f"Unknown role '{role}' in item resource, defaulting to USER")
+            logger.warning("Unknown role '%s' in item resource, defaulting to USER", role)
             role = project_models.ResponsesMessageRole.USER
 
         return role_mapping[role](content=content)
@@ -100,8 +98,8 @@ def convert_item_resource_to_message(item: Dict) -> AnyMessage:
             output = " ".join(text_parts)
         return ToolMessage(content=output, tool_call_id=call_id)
 
-    logger.warning(f"Unsupported item type '{item_type}' in item resource, skipping")
-    return None  # type: ignore
+    logger.warning("Unsupported item type '%s' in item resource, skipping", item_type)
+    return None
 
 
 class ResponseAPIRequestConverter(ABC):
@@ -120,12 +118,24 @@ def convert(self) -> dict:
 
 
 class ResponseAPIMessageRequestConverter(ResponseAPIRequestConverter):
+    """Convert Response API input items into LangGraph message inputs."""
+
     def __init__(self, data: CreateResponse):
+        """Initialize the request converter.
+
+        :param data: The incoming create-response payload.
+        :type data: CreateResponse
+        """
         self.data: CreateResponse = data
 
     def convert(self) -> dict:
+        """Convert the request payload into LangGraph message input.
+
+        :return: A LangGraph-compatible input dictionary.
+        :rtype: dict
+        """
         # Convert the CreateRunRequest input to a format suitable for LangGraph
-        langgraph_input = {"messages": []}
+        langgraph_input: dict[str, list[AnyMessage]] = {"messages": []}
 
         instructions = self.data.get("instructions")
         if instructions and isinstance(instructions, str):
@@ -152,17 +162,18 @@ def convert_input(self, item: openai_models.ResponseInputItemParam) -> AnyMessag
         :return: The converted LangGraph message.
         :rtype: AnyMessage
         """
-        item_type = item.get("type", project_models.ItemType.MESSAGE)
+        item_data = cast(Dict[str, Any], item)
+        item_type = item_data.get("type", project_models.ItemType.MESSAGE)
         if item_type == project_models.ItemType.MESSAGE:
             # this is a message
-            return self.convert_message(item)
+            return self.convert_message(item_data)
         if item_type == project_models.ItemType.FUNCTION_CALL:
-            return self.convert_function_call(item)
+            return self.convert_function_call(item_data)
         if item_type == project_models.ItemType.FUNCTION_CALL_OUTPUT:
-            return self.convert_function_call_output(item)
+            return self.convert_function_call_output(item_data)
         raise ValueError(f"Unsupported OpenAIItemParam type: {item_type}, {item}")
 
-    def convert_message(self, message: dict) -> AnyMessage:
+    def convert_message(self, message: Dict[str, Any]) -> AnyMessage:
         """
         Convert a message dict to a LangGraph message
 
@@ -182,31 +193,53 @@ def convert_message(self, message: dict) -> AnyMessage:
             return role_mapping[role](content=self.convert_OpenAIItemContentList(content))
         raise ValueError(f"Unsupported ResponseMessagesItemParam content type: {type(content)}, {content}")
 
-    def convert_function_call(self, item: dict) -> AnyMessage:
+    def convert_function_call(self, item: Dict[str, Any]) -> AnyMessage:
+        """Convert a function call input item into an AI message.
+
+        :param item: The function call item payload.
+        :type item: dict
+
+        :return: The converted AI message.
+        :rtype: AnyMessage
+        """
+        call_id = item.get("call_id")
+        name = item.get("name")
+        argument = item.get("arguments")
+
+        if not isinstance(call_id, str) or not call_id:
+            raise ValueError(f"Function call item missing call_id: {item}")
+        if not isinstance(name, str) or not name:
+            raise ValueError(f"Function call item missing name: {item}")
+        if argument is not None and not isinstance(argument, str):
+            raise ValueError(f"Function call arguments must be a string: {item}")
+
         try:
-            item = openai_models.ResponseFunctionToolCallParam(**item)
-            argument = item.get("arguments", None)
             args = json.loads(argument) if argument else {}
-        except json.JSONDecodeError as e:
-            raise ValueError(f"Invalid JSON in function call arguments: {item}") from e
-        except Exception as e:
-            raise ValueError(f"Invalid function call item: {item}") from e
-        return AIMessage(tool_calls=[ToolCall(id=item.get("call_id"), name=item.get("name"), args=args)], content="")
+        except json.JSONDecodeError as error:
+            raise ValueError(f"Invalid JSON in function call arguments: {item}") from error
+        return AIMessage(tool_calls=[ToolCall(id=call_id, name=name, args=args)], content="")
 
-    def convert_function_call_output(self, item: dict) -> ToolMessage:
-        try:
-            item = openai_models.response_input_item_param.FunctionCallOutput(**item)  # pylint: disable=no-member
-        except Exception as e:
-            raise ValueError(f"Invalid function call output item: {item}") from e
+    def convert_function_call_output(self, item: Dict[str, Any]) -> ToolMessage:
+        """Convert a function call output item into a tool message.
+
+        :param item: The function call output payload.
+        :type item: dict
+
+        :return: The converted tool message.
+        :rtype: ToolMessage
+        """
+        call_id = item.get("call_id")
+        if not isinstance(call_id, str) or not call_id:
+            raise ValueError(f"Function call output item missing call_id: {item}")
         output = item.get("output", None)
         if isinstance(output, str):
-            return ToolMessage(content=output, tool_call_id=item.get("call_id"))
+            return ToolMessage(content=output, tool_call_id=call_id)
         if isinstance(output, list):
-            return ToolMessage(content=self.convert_OpenAIItemContentList(output), tool_call_id=item.get("call_id"))
+            return ToolMessage(content=self.convert_OpenAIItemContentList(output), tool_call_id=call_id)
         raise ValueError(f"Unsupported function call output type: {type(output)}, {output}")
 
-    def convert_OpenAIItemContentList(self, content: List[Dict]) -> List[Dict]:
+    def convert_OpenAIItemContentList(self, content: List[Dict[str, Any]]) -> List[str | Dict[str, Any]]:
         """
         Convert ItemContent to a list format
 
@@ -216,12 +249,12 @@ def convert_OpenAIItemContentList(self, content: List[Dict]) -> List[Dict]:
         :return: The converted list of ItemContent.
         :rtype: List[Dict]
         """
-        result = []
+        result: List[str | Dict[str, Any]] = []
         for item in content:
             result.append(self.convert_OpenAIItemContent(item))
         return result
 
-    def convert_OpenAIItemContent(self, content: Dict) -> Dict:
+    def convert_OpenAIItemContent(self, content: Dict[str, Any]) -> Dict[str, Any]:
         """
         Convert ItemContent to a dict format
 
@@ -233,5 +266,7 @@ def convert_OpenAIItemContent(self, content: Dict) -> Dict:
         """
         res = content.copy()
         content_type = content.get("type")
+        if content_type is None:
+            return res
         res["type"] = item_content_type_mapping.get(content_type, content_type)
         return res
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py
similarity index 57%
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py
rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py
index 02f79c589a96..1ce63d1f5bcd 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_api_stream_response_converter.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_response_api_stream_response_converter.py
@@ -1,16 +1,16 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-# pylint: disable=logging-fstring-interpolation,C4751
-# mypy: disable-error-code="assignment,valid-type"
+# pylint: disable=C4751
 from abc import ABC, abstractmethod
-from typing import Any, List, Union
+from typing import Any, List, Optional, Union
 
 from langchain_core.messages import AnyMessage
+from langgraph.types import Interrupt
 
 from azure.ai.agentserver.core.logger import get_logger
 from azure.ai.agentserver.core.models import ResponseStreamEvent
-from .human_in_the_loop_helper import HumanInTheLoopHelper
+from ._human_in_the_loop_helper import HumanInTheLoopHelper
 from .response_event_generators import (
     ResponseEventGenerator,
     ResponseStreamEventGenerator,
@@ -51,28 +51,53 @@ def finalize(self, graph_state=None):
 
 
 class ResponseAPIMessagesStreamResponseConverter(ResponseAPIStreamResponseConverter):
+    """Convert LangGraph streaming message events into Responses API stream events."""
+
     def __init__(self, context: LanggraphRunContext, *, hitl_helper: HumanInTheLoopHelper):
+        """Initialize the stream response converter.
+
+        :param context: The run context for the current request.
+        :type context: LanggraphRunContext
+        :keyword hitl_helper: The helper used for interrupt conversion.
+        :type hitl_helper: HumanInTheLoopHelper
+        """
         # self.stream = stream
         self.context = context
         self.hitl_helper = hitl_helper
         self.stream_state = StreamEventState()
-        self.current_generator: ResponseEventGenerator = None
+        self.current_generator: Optional[ResponseEventGenerator] = None
 
     def convert(self, event: Union[AnyMessage, dict, Any, None]):
+        """Convert a single streamed LangGraph event.
+
+        :param event: The event to convert.
+        :type event: Union[AnyMessage, dict, Any, None]
+
+        :return: The converted response stream events.
+        :rtype: List[ResponseStreamEvent]
+        """
         try:
             if self.current_generator is None:
                 self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper)
-            if event is None or not hasattr(event, '__getitem__'):
+            if event is None or not hasattr(event, "__getitem__"):
                 raise ValueError(f"Event is not indexable: {event}")
             message = event[0]  # expect a tuple
            converted = self.try_process_message(message, self.context)
             return converted
-        except Exception as e:
-            logger.error(f"Error converting message {event}: {e}")
-            raise ValueError(f"Error converting message {event}") from e
+        except (IndexError, KeyError, TypeError, ValueError) as error:
+            logger.error("Error converting message %s: %s", event, error)
+            raise ValueError(f"Error converting message {event}") from error
 
     def finalize(self, graph_state=None):
+        """Emit final stream events after graph execution completes.
+
+        :param graph_state: The final graph state snapshot.
+        :type graph_state: Any
+
+        :return: The final response stream events.
+        :rtype: List[ResponseStreamEvent]
+        """
         logger.info("Stream ended, finalizing response.")
         res = []
         # check and convert interrupts
@@ -86,29 +111,48 @@ def finalize(self, graph_state=None):
         return res
 
     def try_process_message(
-        self, event: Union[AnyMessage, Any, None], context: LanggraphRunContext
+        self, event: Union[AnyMessage, Interrupt, Any, None], context: LanggraphRunContext
     ) -> List[ResponseStreamEvent]:
+        """Process one message through the current event-generator chain.
+
+        :param event: The message or interrupt to process.
+        :type event: Union[AnyMessage, Interrupt, Any, None]
+        :param context: The run context for the current request.
+        :type context: LanggraphRunContext
+
+        :return: The generated response stream events.
+        :rtype: List[ResponseStreamEvent]
+        """
         if event and not self.current_generator:
             self.current_generator = ResponseStreamEventGenerator(logger, None, hitl_helper=self.hitl_helper)
+        if self.current_generator is None:
+            return []
+
         is_processed = False
-        next_processor = self.current_generator
+        next_processor: Optional[ResponseEventGenerator] = self.current_generator
         returned_events = []
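+        # Walk the generator chain: each generator either consumes the message,
+        # hands it off to a parent or child generator, or reports that no
+        # progress is possible, in which case the loop stops instead of spinning.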
         while not is_processed:
-            is_processed, next_processor, processed_events = self.current_generator.try_process_message(
+            current_generator = self.current_generator
+            if current_generator is None:
+                break
+            is_processed, next_processor, processed_events = current_generator.try_process_message(
                 event, context, self.stream_state
             )
             returned_events.extend(processed_events)
-            if not is_processed and next_processor == self.current_generator:
+            if not is_processed and next_processor == current_generator:
                 logger.warning(
-                    f"Message can not be processed by current generator {type(self.current_generator).__name__}:"
-                    + f" {type(event)}: {event}"
+                    "Message cannot be processed by current generator %s: %s: %s",
+                    type(current_generator).__name__,
+                    type(event),
+                    event,
                 )
                 break
-            if next_processor != self.current_generator:
+            if next_processor != current_generator:
                 logger.info(
-                    f"Switching processor from {type(self.current_generator).__name__} "
-                    + f"to {type(next_processor).__name__}"
+                    "Switching processor from %s to %s",
+                    type(current_generator).__name__,
+                    type(next_processor).__name__ if next_processor is not None else "NoneType",
                 )
             self.current_generator = next_processor
         return returned_events
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py
similarity index 90%
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py
rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py
index d9517d8b0e8d..08fa373aafea 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/utils.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/_utils.py
@@ -2,15 +2,15 @@
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
 import json
-from typing import get_type_hints
+from typing import Any, Mapping, Optional, Tuple, get_type_hints
 
 
-def extract_function_call(tool_call: dict):
+def extract_function_call(tool_call: Mapping[str, Any]) -> Tuple[Optional[str], Optional[str], Optional[str]]:
     """
     Extract function call details from tool_call dict.
 
     :param tool_call: The tool call dictionary containing function call details.
-    :type tool_call: dict
+    :type tool_call: Mapping[str, Any]
 
     :return: A tuple of (name, call_id, argument).
     :rtype: tuple[str | None, str | None, str | None]
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py
index 7b9f0362e4ba..83c9590a3b58 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/__init__.py
@@ -1,8 +1,8 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from .response_event_generator import ResponseEventGenerator, StreamEventState
-from .response_stream_event_generator import ResponseStreamEventGenerator
+from ._response_event_generator import ResponseEventGenerator, StreamEventState
+from ._response_stream_event_generator import ResponseStreamEventGenerator
 
 __all__ = [
     "ResponseEventGenerator",
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py
similarity index 52%
rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py
rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py
index ae169d866ee5..cbf4ce484255 100644
--- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_content_helpers.py
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_content_helpers.py
@@ -1,29 +1,64 @@
 # ---------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # ---------------------------------------------------------
-from azure.ai.agentserver.core.models import projects as project_models
+from typing import Any
+
+from azure.ai.agentserver.core.models import _projects as project_models
 
 
 class ItemContentHelper:
+    """Base helper for building response item content during streaming."""
+
     def __init__(self, content_type: str):
+        """Initialize the content helper.
+
+        :param content_type: The response item content type handled by this helper.
+        :type content_type: str
+        """
         self.content_type = content_type
         self.has_aggregated_content = False
 
     def create_item_content(self) -> project_models.ItemContent:
+        """Create the current response item content model.
+
+        :return: The current item content model.
+        :rtype: project_models.ItemContent
+        """
         return project_models.ItemContent(
             type=self.content_type,
         )
 
+    def aggregate_content(self, _item: Any) -> None:
+        """Accumulate additional content into the helper state.
+
+        :param _item: The content fragment to aggregate.
+        :type _item: Any
+        """
+        raise NotImplementedError
+
 
 class InputTextItemContentHelper(ItemContentHelper):
+    """Helper for aggregating input-text content parts."""
+
     def __init__(self):
+        """Initialize the input-text content helper."""
         super().__init__(project_models.ItemContentType.INPUT_TEXT)
         self.text = ""
 
     def create_item_content(self):
+        """Create the aggregated input-text content model.
+
+        :return: The aggregated input-text item content.
+        :rtype: project_models.ItemContentInputText
+        """
         return project_models.ItemContentInputText(text=self.text)
 
     def aggregate_content(self, item):
+        """Accumulate additional input-text content.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
         self.has_aggregated_content = True
         if isinstance(item, str):
             self.text += item
@@ -36,13 +71,21 @@ def aggregate_content(self, item):
 
 
 class OutputTextItemContentHelper(ItemContentHelper):
+    """Helper for aggregating output-text content parts."""
+
     def __init__(self):
+        """Initialize the output-text content helper."""
         super().__init__(project_models.ItemContentType.OUTPUT_TEXT)
         self.text = ""
         self.annotations = []
         self.logprobs = []
 
     def create_item_content(self):
+        """Create the aggregated output-text content model.
+
+        :return: The aggregated output-text item content.
+        :rtype: project_models.ItemContentOutputText
+        """
         return project_models.ItemContentOutputText(
             text=self.text,
             annotations=self.annotations,
@@ -50,6 +93,11 @@ def create_item_content(self):
         )
 
     def aggregate_content(self, item):
+        """Accumulate additional output-text content.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
         self.has_aggregated_content = True
         if isinstance(item, str):
             self.text += item
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py
new file mode 100644
index 000000000000..752f6b6e6eb4
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_item_resource_helpers.py
@@ -0,0 +1,287 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
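+"""Helpers that assemble Responses API item resources during streaming."""
+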
+from abc import ABC, abstractmethod
+from typing import Any, Mapping, Optional
+
+from langgraph.types import Interrupt
+
+from azure.ai.agentserver.core.models import _projects as project_models
+
+from .._human_in_the_loop_helper import HumanInTheLoopHelper
+from .._utils import extract_function_call
+
+
+class ItemResourceHelper(ABC):
+    """Base helper for constructing response item resources during streaming."""
+
+    def __init__(self, item_type: str, item_id: Optional[str] = None):
+        """Initialize the item-resource helper.
+
+        :param item_type: The response item type handled by this helper.
+        :type item_type: str
+        :param item_id: The optional response item identifier.
+        :type item_id: Optional[str]
+        """
+        self.item_type = item_type
+        self.item_id = item_id
+
+    @abstractmethod
+    def create_item_resource(self, is_done: bool) -> Optional[project_models.ItemResource]:
+        """Create the current item resource representation.
+
+        :param is_done: Whether the created item should be marked completed.
+        :type is_done: bool
+        :return: The current item resource or None if not applicable.
+        :rtype: Optional[project_models.ItemResource]
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def add_aggregate_content(self, item: Any) -> None:
+        """Accumulate child content into the helper state.
+
+        :param item: The child content to aggregate.
+        :type item: Any
+        :return: None
+        :rtype: None
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_aggregated_content(self) -> Optional[project_models.ItemResource]:
+        """Return the aggregated item resource representation.
+
+        :return: The aggregated item resource or None if not applicable.
+        :rtype: Optional[project_models.ItemResource]
+        """
+        raise NotImplementedError
+
+
+class FunctionCallItemResourceHelper(ItemResourceHelper):
+    """Helper for streaming function-call item resources."""
+
+    def __init__(self, item_id: Optional[str] = None, tool_call: Optional[Mapping[str, Any]] = None):
+        """Initialize the function-call item helper.
+
+        :param item_id: The response item identifier.
+        :type item_id: Optional[str]
+        :param tool_call: The initial tool-call payload, if available.
+        :type tool_call: Optional[Mapping[str, Any]]
+        """
+        super().__init__(project_models.ItemType.FUNCTION_CALL, item_id)
+        self.call_id: Optional[str] = None
+        self.name: Optional[str] = None
+        self.arguments = ""
+        if tool_call:
+            self.name, self.call_id, _ = extract_function_call(tool_call)
+
+    def create_item_resource(self, is_done: bool):
+        """Create the current function-call item resource.
+
+        :param is_done: Whether the item is complete.
+        :type is_done: bool
+
+        :return: The current item resource.
+        :rtype: project_models.ItemResource
+        """
+        content = {
+            "id": self.item_id,
+            "type": self.item_type,
+            "call_id": self.call_id,
+            "name": self.name,
+            "arguments": self.arguments if self.arguments else "",
+            "status": "in_progress" if not is_done else "completed",
+        }
+        return project_models.ItemResource(content)
+
+    def add_aggregate_content(self, item: Any) -> None:
+        """Accumulate additional function-call arguments.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
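+        # Fragments arrive either as raw argument-string deltas or as
+        # function-call payload mappings; both append to self.arguments.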
+        if isinstance(item, str):
+            self.arguments += item
+            return
+        if not isinstance(item, Mapping):
+            return
+        if item.get("type") != project_models.ItemType.FUNCTION_CALL:
+            return
+        _, _, argument = extract_function_call(item)
+        if argument:
+            self.arguments += argument
+
+    def get_aggregated_content(self):
+        """Return the completed function-call item resource.
+
+        :return: The completed item resource.
+        :rtype: project_models.ItemResource
+        """
+        return self.create_item_resource(is_done=True)
+
+
+class FunctionCallInterruptItemResourceHelper(ItemResourceHelper):
+    """Helper for converting interrupt payloads into function-call resources."""
+
+    def __init__(self,
+                 item_id: Optional[str] = None,
+                 hitl_helper: Optional[HumanInTheLoopHelper] = None,
+                 interrupt: Optional[Interrupt] = None):
+        """Initialize the interrupt item helper.
+
+        :param item_id: The response item identifier.
+        :type item_id: Optional[str]
+        :param hitl_helper: The helper used to convert interrupts.
+        :type hitl_helper: Optional[HumanInTheLoopHelper]
+        :param interrupt: The interrupt being represented.
+        :type interrupt: Optional[Interrupt]
+        """
+        super().__init__(project_models.ItemType.FUNCTION_CALL, item_id)
+        self.hitl_helper = hitl_helper
+        self.interrupt = interrupt
+
+    def create_item_resource(self, is_done: bool):
+        """Create the interrupt-backed item resource.
+
+        :param is_done: Whether the item is complete.
+        :type is_done: bool
+
+        :return: The current interrupt item resource, if available.
+        :rtype: Optional[project_models.ItemResource]
+        """
+        if self.hitl_helper is None or self.interrupt is None:
+            return None
+        item_resource = self.hitl_helper.convert_interrupt(self.interrupt)
+        if item_resource is not None and not is_done:
+            item_resource_data = item_resource.as_dict()
+            if "arguments" in item_resource_data:
+                item_resource_data["arguments"] = ""
+            return project_models.ItemResource(item_resource_data)
+        return item_resource
+
+    def add_aggregate_content(self, item: Any) -> None:
+        """Ignore aggregated content for interrupt-backed items.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
+        return None
+
+    def get_aggregated_content(self):
+        """Return the completed interrupt-backed item resource.
+
+        :return: The completed item resource, if available.
+        :rtype: Optional[project_models.ItemResource]
+        """
+        return self.create_item_resource(is_done=True)
+
+
+class FunctionCallOutputItemResourceHelper(ItemResourceHelper):
+    """Helper for streaming function-call-output item resources."""
+
+    def __init__(self, item_id: Optional[str] = None, call_id: Optional[str] = None):
+        """Initialize the function-call-output helper.
+
+        :param item_id: The response item identifier.
+        :type item_id: Optional[str]
+        :param call_id: The function call identifier.
+        :type call_id: Optional[str]
+        """
+        super().__init__(project_models.ItemType.FUNCTION_CALL_OUTPUT, item_id)
+        self.call_id = call_id
+        self.content = ""
+
+    def create_item_resource(self, is_done: bool):
+        """Create the current function-call-output item resource.
+
+        :param is_done: Whether the item is complete.
+        :type is_done: bool
+
+        :return: The current item resource.
+        :rtype: project_models.ItemResource
+        """
+        content = {
+            "id": self.item_id,
+            "type": self.item_type,
+            "status": "in_progress" if not is_done else "completed",
+            "call_id": self.call_id,
+            "output": self.content,
+        }
+        return project_models.ItemResource(content)
+
+    def add_aggregate_content(self, item: Any) -> None:
+        """Accumulate additional function-call-output content.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
+        if isinstance(item, str):
+            self.content += item
+            return
+        if not isinstance(item, Mapping):
+            return
+        content = item.get("text")
+        if isinstance(content, str):
+            self.content += content
+
+    def get_aggregated_content(self):
+        """Return the completed function-call-output item resource.
+
+        :return: The completed item resource.
+        :rtype: project_models.ItemResource
+        """
+        return self.create_item_resource(is_done=True)
+
+
+class MessageItemResourceHelper(ItemResourceHelper):
+    """Helper for streaming message item resources."""
+
+    def __init__(self, item_id: str, role: project_models.ResponsesMessageRole):
+        """Initialize the message item helper.
+
+        :param item_id: The response item identifier.
+        :type item_id: str
+        :param role: The response message role.
+        :type role: project_models.ResponsesMessageRole
+        """
+        super().__init__(project_models.ItemType.MESSAGE, item_id)
+        self.role = role
+        self.content: list[project_models.ItemContent] = []
+
+    def create_item_resource(self, is_done: bool):
+        """Create the current message item resource.
+
+        :param is_done: Whether the item is complete.
+        :type is_done: bool
+
+        :return: The current item resource.
+        :rtype: project_models.ItemResource
+        """
+        content = {
+            "id": self.item_id,
+            "type": self.item_type,
+            "status": "in_progress" if not is_done else "completed",
+            "content": self.content,
+            "role": self.role,
+        }
+        return project_models.ItemResource(content)
+
+    def add_aggregate_content(self, item: Any) -> None:
+        """Accumulate additional message content.
+
+        :param item: The content fragment to aggregate.
+        :type item: Any
+        """
+        if isinstance(item, dict):
+            item = project_models.ItemContent(item)
+        if isinstance(item, project_models.ItemContent):
+            self.content.append(item)
+
+    def get_aggregated_content(self):
+        """Return the completed message item resource.
+
+        :return: The completed item resource.
+        :rtype: project_models.ItemResource
+        """
+        return self.create_item_resource(is_done=True)
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py
new file mode 100644
index 000000000000..60aa29e5c8c2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_content_part_event_generator.py
@@ -0,0 +1,267 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from typing import Any, Optional
+
+from langchain_core import messages as langgraph_messages
+
+from azure.ai.agentserver.core.models import _projects as project_models
+
+from . import _item_content_helpers as item_content_helpers
+from ._response_event_generator import (
+    ResponseEventGenerator,
+    ResponseGeneratorEvents,
+    ResponseGeneratorMessage,
+    ResponseGeneratorResult,
+    StreamEventState,
+)
+from ._response_output_text_event_generator import ResponseOutputTextEventGenerator
+from ..._context import LanggraphRunContext
+
+
+class ResponseContentPartEventGenerator(ResponseEventGenerator):
+    """Generate content-part events for a single response item."""
+
+    def __init__(
+        self,
+        logger,
+        parent: ResponseEventGenerator,
+        item_id: str,
+        message_id: Optional[str],
+        output_index: int,
+        content_index: int,
+    ):
+        """Initialize the content-part event generator.
+
+        :param logger: The logger used for diagnostics.
+        :type logger: logging.Logger
+        :param parent: The parent generator in the event chain.
+        :type parent: ResponseEventGenerator
+        :param item_id: The response item identifier.
+        :type item_id: str
+        :param message_id: The originating message identifier.
+        :type message_id: Optional[str]
+        :param output_index: The output item index.
+        :type output_index: int
+        :param content_index: The content part index within the item.
+        :type content_index: int
+        """
+        super().__init__(logger, parent)
+        self.output_index = output_index
+        self.content_index = content_index
+        self.item_id = item_id
+        self.message_id = message_id
+        self.aggregated_content = ""
+        self.item_content_helper: Optional[item_content_helpers.ItemContentHelper] = None
+
+    def try_process_message(
+        self, message: ResponseGeneratorMessage, context: LanggraphRunContext, stream_state: StreamEventState
+    ) -> ResponseGeneratorResult:
+        """Process a message into content-part events.
+
+        :param message: The message to process.
+        :type message: ResponseGeneratorMessage
+        :param context: The run context for the current request.
+        :type context: LanggraphRunContext
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Processing status, next generator, and emitted events.
+        :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]
+        """
+        is_processed = False
+        events: ResponseGeneratorEvents = []
+        next_processor: Optional[ResponseEventGenerator] = self
+        if not self.item_content_helper:
+            if not self.try_create_item_content_helper(message):
+                # cannot create item content, skip this message
+                self.logger.warning("Cannot create item content helper for message: %s", message)
+                return True, self, []
+        if self.item_content_helper and not self.started:
+            self.started, start_events = self.on_start(message, context, stream_state)
+            if not self.started:
+                # could not start processing, skip this message
+                return True, self, []
+            events.extend(start_events)
+
+        if self.should_end(message):
+            _, complete_events = self.on_end(message, context, stream_state)
+            events.extend(complete_events)
+            next_processor = self.parent
+            is_processed = self.has_finish_reason(message) if message else False
+            return is_processed, next_processor, events
+
+        child_processor = self.create_child_processor(message)
+        if child_processor:
+            next_processor = child_processor
+
+        return is_processed, next_processor, events
+
+    def on_start(
+        self, _event: ResponseGeneratorMessage, _run_details: LanggraphRunContext, stream_state: StreamEventState
+    ) -> tuple[bool, ResponseGeneratorEvents]:
+        """Emit the content-part-added event.
+
+        :param _event: The current message.
+        :type _event: Any
+        :param _run_details: The run context, unused by this generator.
+        :type _run_details: Any
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Start status and emitted events.
+        :rtype: tuple[bool, List[project_models.ResponseStreamEvent]]
+        """
+        if self.started:
+            return False, []
+        if self.item_content_helper is None:
+            return False, []
+
+        start_event = project_models.ResponseContentPartAddedEvent(
+            item_id=self.item_id,
+            output_index=self.output_index,
+            content_index=self.content_index,
+            part=self.item_content_helper.create_item_content(),
+            sequence_number=stream_state.sequence_number,
+        )
+        stream_state.sequence_number += 1
+        self.started = True
+
+        return True, [start_event]
+
+    def on_end(
+        self, _message: ResponseGeneratorMessage, _context: LanggraphRunContext, stream_state: StreamEventState
+    ) -> tuple[bool, ResponseGeneratorEvents]:
+        """Emit the content-part-done event.
+
+        :param _message: The terminal message.
+        :type _message: Any
+        :param _context: The run context, unused by this generator.
+        :type _context: Any
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Completion status and the completion events.
+        :rtype: tuple[bool, List[project_models.ResponseStreamEvent]]
+        """
+        if self.item_content_helper is None:
+            return False, []
+        aggregated_content = self.item_content_helper.create_item_content()
+        done_event = project_models.ResponseContentPartDoneEvent(
+            item_id=self.item_id,
+            output_index=self.output_index,
+            content_index=self.content_index,
+            part=aggregated_content,
+            sequence_number=stream_state.sequence_number,
+        )
+        stream_state.sequence_number += 1
+        if self.parent:
+            self.parent.aggregate_content(aggregated_content.as_dict())
+        return True, [done_event]
+
+    def try_create_item_content_helper(self, message: ResponseGeneratorMessage) -> bool:
+        """Create the content helper that matches the message payload.
+
+        :param message: The message to inspect.
+        :type message: ResponseGeneratorMessage
+
+        :return: True when a helper was created.
+        :rtype: bool
+        """
+        if isinstance(message, (langgraph_messages.AIMessage, langgraph_messages.ToolMessage)):
+            if self.is_text_content(message.content):
+                self.item_content_helper = item_content_helpers.OutputTextItemContentHelper()
+                return True
+        if isinstance(message, (langgraph_messages.HumanMessage, langgraph_messages.SystemMessage)):
+            if self.is_text_content(message.content):
+                self.item_content_helper = item_content_helpers.InputTextItemContentHelper()
+                return True
+        return False
+
+    def aggregate_content(self, content: Any) -> None:
+        """Aggregate child content into the current content helper.
+
+        :param content: The child content to aggregate.
+        :type content: Any
+        """
+        if self.item_content_helper is None:
+            return None
+        self.item_content_helper.aggregate_content(content)
+        return None
+
+    def is_text_content(self, content: Any) -> bool:
+        """Check whether the message content can be treated as plain text.
+
+        :param content: The content payload to inspect.
+        :type content: Any
+
+        :return: True when the content is representable as text.
+        :rtype: bool
+        """
+        if isinstance(content, str):
+            return True
+        if isinstance(content, list) and all(isinstance(c, str) for c in content):
+            return True
+        return False
+
+    def create_child_processor(self, _message: ResponseGeneratorMessage) -> Optional[ResponseEventGenerator]:
+        """Create the child generator for the current content helper.
+
+        :param _message: The originating message, unused by this generator.
+        :type _message: Any
+
+        :return: The child generator.
+        :rtype: Optional[ResponseEventGenerator]
+        """
+        if self.item_content_helper is None:
+            return None
+        if self.item_content_helper.content_type in (
+            project_models.ItemContentType.INPUT_TEXT,
+            project_models.ItemContentType.OUTPUT_TEXT,
+        ):
+            return ResponseOutputTextEventGenerator(
+                logger=self.logger,
+                parent=self,
+                content_index=self.content_index,
+                output_index=self.output_index,
+                item_id=self.item_id,
+                message_id=self.message_id,
+            )
+        raise ValueError(f"Unsupported item content type for child processor: {self.item_content_helper.content_type}")
+
+    def has_finish_reason(self, message: ResponseGeneratorMessage) -> bool:
+        """Check whether the message contains a finish reason.
+
+        :param message: The message to inspect.
+        :type message: ResponseGeneratorMessage
+
+        :return: True when a finish reason is present.
+        :rtype: bool
+        """
+        if not isinstance(message, langgraph_messages.BaseMessageChunk):
+            return False
+        if message.response_metadata and message.response_metadata.get("finish_reason"):
+            return True
+        return False
+
+    def should_end(self, event: ResponseGeneratorMessage) -> bool:
+        """Determine whether content generation for this item should end.
+
+        :param event: The current message or chunk.
+        :type event: ResponseGeneratorMessage
+
+        :return: True when the generator should stop.
+        :rtype: bool
+        """
+        if event is None:
+            return True
+        if not isinstance(event, langgraph_messages.BaseMessage):
+            return True
+        if event.id != self.message_id:
+            return True
+        # if is Message not MessageChunk, should create child and end in the second iteration
+        if not isinstance(event, langgraph_messages.BaseMessageChunk):
+            if self.item_content_helper is None:
+                return True
+            return self.item_content_helper.has_aggregated_content
+        return False
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py
new file mode 100644
index 000000000000..ba6c0e251e4c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_event_generator.py
@@ -0,0 +1,119 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from abc import ABC, abstractmethod
+from typing import Any, List, Optional, Tuple, Union
+
+from langchain_core.messages import AnyMessage
+from langgraph.types import Interrupt
+
+from azure.ai.agentserver.core.models import _projects as project_models
+from ..._context import LanggraphRunContext
+
+ResponseGeneratorMessage = Optional[Union[AnyMessage, Interrupt]]
+ResponseGeneratorEvents = List[project_models.ResponseStreamEvent]
+ResponseGeneratorResult = Tuple[bool, Optional["ResponseEventGenerator"], ResponseGeneratorEvents]
+
+
+class StreamEventState:
+    """
+    :meta private:
+    State information for the stream event processing.
+    """
+
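+    # Monotonic counter shared by every event emitted for one response stream;
+    # each event is stamped with the current value, which is then incremented.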
+    sequence_number: int = 0
+
+
+class ResponseEventGenerator(ABC):
+    """
+    :meta private:
+    Abstract base class for response event generators.
+    """
+
+    started: bool = False
+
+    def __init__(self, logger, parent: Optional["ResponseEventGenerator"]):
+        """Initialize the response event generator.
+
+        :param logger: The logger used for diagnostics.
+        :type logger: logging.Logger
+        :param parent: The parent generator in the event chain.
+        :type parent: Optional[ResponseEventGenerator]
+        """
+        self.logger = logger
+        self.parent = parent  # parent generator
+
+    @abstractmethod
+    def try_process_message(
+        self,
+        message: ResponseGeneratorMessage,
+        context: LanggraphRunContext,
+        stream_state: StreamEventState,
+    ) -> ResponseGeneratorResult:
+        """
+        Try to process the incoming message.
+
+        :param message: The incoming message to process.
+        :type message: Optional[Union[AnyMessage, Interrupt]]
+        :param context: The agent run context.
+        :type context: LanggraphRunContext
+        :param stream_state: The current stream event state.
+        :type stream_state: StreamEventState
+
+        :return: tuple of (is_processed, next_processor, events)
+        :rtype: Tuple[bool, Optional[ResponseEventGenerator], List[ResponseStreamEvent]]
+        """
+        raise NotImplementedError
+
+    def on_start(
+        self,
+        _message: ResponseGeneratorMessage,
+        _context: LanggraphRunContext,
+        _stream_state: StreamEventState,
+    ) -> Tuple[bool, ResponseGeneratorEvents]:
+        """
+        Generate the starting events for this layer.
+
+        :param _message: The incoming message to process.
+        :type _message: Optional[Union[AnyMessage, Interrupt]]
+        :param _context: The agent run context.
+        :type _context: LanggraphRunContext
+        :param _stream_state: The current stream event state.
+        :type _stream_state: StreamEventState
+
+        :return: tuple of (started, events)
+        :rtype: tuple[bool, List[ResponseStreamEvent]]
+        """
+        return False, []
+
+    def on_end(
+        self,
+        _message: ResponseGeneratorMessage,
+        _context: LanggraphRunContext,
+        _stream_state: StreamEventState,
+    ) -> Tuple[bool, ResponseGeneratorEvents]:
+        """
+        Generate the ending events for this layer.
+        TODO: handle different end conditions, e.g. normal end, error end, etc.
+
+        :param _message: The incoming message to process.
+        :type _message: Optional[Union[AnyMessage, Interrupt]]
+        :param _context: The agent run context.
+        :type _context: LanggraphRunContext
+        :param _stream_state: The current stream event state.
+        :type _stream_state: StreamEventState
+
+        :return: tuple of (ended, events)
+        :rtype: tuple[bool, List[ResponseStreamEvent]]
+        """
+        return False, []
+
+    def aggregate_content(self, _content: Any) -> None:
+        """
+        Aggregate the content for this layer.
+        It is called by its child processor to pass up aggregated content.
+
+        :param _content: The content contributed by a child generator.
+        :type _content: Any
+        """
+        return None
diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py
new file mode 100644
index 000000000000..a4abed66f043
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_function_call_argument_event_generator.py
@@ -0,0 +1,244 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from typing import List, Optional, Union
+
+from langchain_core import messages as langgraph_messages
+from langchain_core.messages import AnyMessage
+from langgraph.types import Interrupt
+
+from azure.ai.agentserver.core.models import _projects as project_models
+from ._response_event_generator import (
+    ResponseEventGenerator,
+    ResponseGeneratorEvents,
+    ResponseGeneratorMessage,
+    ResponseGeneratorResult,
+    StreamEventState,
+)
+from .._human_in_the_loop_helper import HumanInTheLoopHelper
+from .._utils import extract_function_call
+from ..._context import LanggraphRunContext
+
+
+class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator):  # pylint: disable=C4751
+    """Generate function-call-argument delta and done events."""
+
+    def __init__(
+        self,
+        logger,
+        parent: ResponseEventGenerator,
+        item_id,
+        message_id,
+        output_index: int,
+        *,
+        hitl_helper: Optional[HumanInTheLoopHelper] = None,
+    ):
+        """Initialize the function-call-argument generator.
+
+        :param logger: The logger used for diagnostics.
+        :type logger: logging.Logger
+        :param parent: The parent generator in the event chain.
+        :type parent: ResponseEventGenerator
+        :param item_id: The response item identifier.
+        :type item_id: str
+        :param message_id: The originating message identifier.
+        :type message_id: str
+        :param output_index: The output item index.
+        :type output_index: int
+        :param hitl_helper: Optional helper for interrupt conversion.
+        :type hitl_helper: HumanInTheLoopHelper
+        """
+        super().__init__(logger, parent)
+        self.item_id = item_id
+        self.output_index = output_index
+        self.aggregated_content = ""
+        self.message_id = message_id
+        self.hitl_helper = hitl_helper
+
+    def try_process_message(
+        self, message: ResponseGeneratorMessage, context: LanggraphRunContext, stream_state: StreamEventState
+    ) -> ResponseGeneratorResult:
+        """Process one message into function-call argument events.
+
+        :param message: The message or interrupt to process.
+        :type message: Union[langgraph_messages.AnyMessage, Interrupt]
+        :param context: The run context for the current request.
+        :type context: LanggraphRunContext
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Processing status, next generator, and emitted events.
+        :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]
+        """
+        is_processed = False
+        events: ResponseGeneratorEvents = []
+        next_processor: Optional[ResponseEventGenerator] = self
+        if not self.started:
+            self.started = True  # does not need to do anything special on start
+
+        is_processed, next_processor, processed_events = self.process(message, context, stream_state)
+        if not is_processed:
+            self.logger.warning("FunctionCallArgumentEventGenerator did not process message: %s", message)
+        events.extend(processed_events)
+
+        if self.should_end(message):
+            has_finish_reason = self.has_finish_reason(message)
+            is_processed, complete_events = self.on_end(message, context, stream_state)
+            events.extend(complete_events)
+            next_processor = self.parent
+            is_processed = has_finish_reason  # if has finish reason, mark as processed and stop further processing
+
+        return is_processed, next_processor, events
+
+    def on_start(
+        self, _event: ResponseGeneratorMessage, _run_details, _stream_state: StreamEventState
+    ) -> tuple[bool, ResponseGeneratorEvents]:
+        """Start argument generation for the current function call.
+
+        :param _event: The current message.
+        :type _event: AnyMessage
+        :param _run_details: The run context, unused by this generator.
+        :type _run_details: LanggraphRunContext
+        :param _stream_state: The mutable stream state, unused on start.
+        :type _stream_state: StreamEventState
+
+        :return: Start status and emitted events.
+        :rtype: tuple[bool, List[project_models.ResponseStreamEvent]]
+        """
+        if self.started:
+            return True, []
+        self.started = True
+        return True, []
+
+    def process(
+        self,
+        message: ResponseGeneratorMessage,
+        _run_details,
+        stream_state: StreamEventState,
+    ) -> ResponseGeneratorResult:
+        """Convert one message into function-call argument delta events.
+
+        :param message: The message or interrupt to process.
+        :type message: Union[langgraph_messages.AnyMessage, Interrupt]
+        :param _run_details: The run context, unused by this generator.
+        :type _run_details: LanggraphRunContext
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Processing status, current generator, and emitted events.
+        :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]
+        """
+        if self.should_end(message):
+            return False, self, []
+
+        argument = None
+        if isinstance(message, Interrupt):
+            if self.hitl_helper:
+                _, _, argument = self.hitl_helper.interrupt_to_function_call(message)
+            else:
+                argument = None
+        elif isinstance(message, langgraph_messages.BaseMessage):
+            tool_call = self.get_tool_call_info(message)
+            if tool_call:
+                _, _, argument = extract_function_call(tool_call)
+        if argument:
+            argument_delta_event = project_models.ResponseFunctionCallArgumentsDeltaEvent(
+                item_id=self.item_id,
+                output_index=self.output_index,
+                delta=argument,
+                sequence_number=stream_state.sequence_number,
+            )
+            stream_state.sequence_number += 1
+            self.aggregated_content += argument
+            return True, self, [argument_delta_event]
+        return False, self, []
+
+    def has_finish_reason(self, message: ResponseGeneratorMessage) -> bool:
+        """Check whether the message marks completion for this argument stream.
+
+        :param message: The message to inspect.
+        :type message: AnyMessage
+
+        :return: True when the argument stream should finish.
+        :rtype: bool
+        """
+        if not message or message.id != self.message_id:
+            return False
+        if isinstance(message, langgraph_messages.AIMessageChunk):
+            if not message.tool_call_chunks:
+                # new tool call started, end this argument processing
+                return True
+            if message.response_metadata.get("finish_reason"):
+                # tool call finished
+                return True
+        elif isinstance(message, langgraph_messages.AIMessage):
+            return True
+        return False
+
+    def should_end(self, event: ResponseGeneratorMessage) -> bool:
+        """Determine whether this generator should stop processing.
+
+        :param event: The current event.
+        :type event: AnyMessage
+
+        :return: True when processing should stop.
+        :rtype: bool
+        """
+        if event is None:
+            return True
+        if event.id != self.message_id:
+            return True
+        return False
+
+    def on_end(
+        self, _message: ResponseGeneratorMessage, _context: LanggraphRunContext, stream_state: StreamEventState
+    ) -> tuple[bool, ResponseGeneratorEvents]:
+        """Emit the final function-call-arguments-done event.
+
+        :param _message: The terminal message for the argument stream.
+        :type _message: AnyMessage
+        :param _context: The run context, unused by this generator.
+        :type _context: LanggraphRunContext
+        :param stream_state: The mutable stream state.
+        :type stream_state: StreamEventState
+
+        :return: Completion status and final events.
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ + done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( + item_id=self.item_id, + output_index=self.output_index, + arguments=self.aggregated_content, + sequence_number=stream_state.sequence_number, + ) + stream_state.sequence_number += 1 + if self.parent: + self.parent.aggregate_content(self.aggregated_content) # pass aggregated content to parent + return True, [done_event] + + def get_tool_call_info(self, message: ResponseGeneratorMessage): + """Extract the first tool call from a message when present. + + :param message: The message to inspect. + :type message: Union[langgraph_messages.AnyMessage, Interrupt] + + :return: The first tool call payload, if any. + :rtype: Optional[dict] + """ + if isinstance(message, langgraph_messages.AIMessageChunk): + if message.tool_call_chunks: + if len(message.tool_call_chunks) > 1: + self.logger.warning( + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_call_chunks), + ) + return message.tool_call_chunks[0] + elif isinstance(message, langgraph_messages.AIMessage): + if message.tool_calls: + if len(message.tool_calls) > 1: + self.logger.warning( + "There are %s tool calls found. Only the first one will be processed.", + len(message.tool_calls), + ) + return message.tool_calls[0] + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py new file mode 100644 index 000000000000..c69ada315afd --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_item_event_generator.py @@ -0,0 +1,267 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# --------------------------------------------------------- +from typing import Any, Optional, Union + +from langchain_core import messages as langgraph_messages +from langchain_core.messages import AnyMessage +from langgraph.types import Interrupt + +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.core.server.common.id_generator._id_generator import IdGenerator +from . import _item_resource_helpers as item_resource_helpers +from ._response_event_generator import ( + ResponseEventGenerator, + ResponseGeneratorEvents, + ResponseGeneratorMessage, + ResponseGeneratorResult, + StreamEventState, +) +from ._response_content_part_event_generator import ResponseContentPartEventGenerator +from ._response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator +from .._human_in_the_loop_helper import HumanInTheLoopHelper +from ..._context import LanggraphRunContext + + +class ResponseOutputItemEventGenerator(ResponseEventGenerator): + """Generate output-item added and done events for one streamed message.""" + + def __init__(self, logger, parent: ResponseEventGenerator, + output_index: int, message_id: Optional[str] = None, + *, hitl_helper: Optional[HumanInTheLoopHelper] = None): + """Initialize the output-item event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. 
+ :type parent: ResponseEventGenerator + :param output_index: The output item index. + :type output_index: int + :param message_id: The originating message identifier. + :type message_id: str + :param hitl_helper: Optional helper for human-in-the-loop interrupts. + :type hitl_helper: HumanInTheLoopHelper + """ + super().__init__(logger, parent) + self.output_index = output_index + self.message_id = message_id + self.item_resource_helper: Optional[item_resource_helpers.ItemResourceHelper] = None + self.hitl_helper = hitl_helper + + def try_process_message( + self, message: ResponseGeneratorMessage, context: LanggraphRunContext, stream_state: StreamEventState + ) -> ResponseGeneratorResult: + """Process one streamed message into output-item events. + + :param message: The message or interrupt to process. + :type message: Union[AnyMessage, Interrupt, None] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ + is_processed = False + next_processor: Optional[ResponseEventGenerator] = self + events: ResponseGeneratorEvents = [] + if self.item_resource_helper is None: + if not self.try_create_item_resource_helper(message, context.agent_run.id_generator): + # cannot create item resource, skip this message + self.logger.warning("Cannot create item resource helper for message: %s, skipping.", message) + return True, self, [] + + if self.item_resource_helper and not self.started: + self.started, start_events = self.on_start(message, context, stream_state) + if not self.started: + # could not start processing, skip this message + self.logger.warning("Cannot create start events for message: %s, skipping.", message) + return True, self, [] + events.extend(start_events) + + if self.should_end(message): + # not the message this processor is handling + _, complete_events = self.on_end(message, context, stream_state) + is_processed = self.message_id == message.id if message else False + next_processor = self.parent + events.extend(complete_events) + return is_processed, next_processor, events + + child_processor = self.create_child_processor(message) + if child_processor: + self.logger.info("Created child processor: %s", child_processor) + return False, child_processor, events + + if message and not isinstance(message, Interrupt): + # no child processor, process the content directly + self.aggregate_content(message.content) + is_processed = True + + return is_processed, next_processor, events + + def on_start( + self, _event: ResponseGeneratorMessage, _context: LanggraphRunContext, stream_state: StreamEventState + ) -> tuple[bool, ResponseGeneratorEvents]: + """Emit the output-item-added event for this message. + + :param _event: The current message or interrupt. + :type _event: Union[AnyMessage, Interrupt] + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Start status and emitted events. 
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ + if self.started: + return True, [] + if self.item_resource_helper is None: + return False, [] + + item_resource = self.item_resource_helper.create_item_resource(is_done=False) + if item_resource is None: + # cannot know what item resource to create + return False, [] + item_added_event = project_models.ResponseOutputItemAddedEvent( + output_index=self.output_index, + sequence_number=stream_state.sequence_number, + item=item_resource, + ) + stream_state.sequence_number += 1 + self.started = True + return True, [item_added_event] + + def should_end(self, event: ResponseGeneratorMessage) -> bool: + """Determine whether this output-item generator should end. + + :param event: The current message or interrupt. + :type event: Union[AnyMessage, Interrupt] + + :return: True when the generator should end. + :rtype: bool + """ + if event is None: + self.logger.info("Received None event, ending processor.") + return True + if event.id != self.message_id: + return True + return False + + def on_end( + self, _message: ResponseGeneratorMessage, _context: LanggraphRunContext, stream_state: StreamEventState + ) -> tuple[bool, ResponseGeneratorEvents]: + """Emit the output-item-done event for this generator. + + :param _message: The terminal message or interrupt. + :type _message: Union[AnyMessage, Interrupt] + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Completion status and the emitted completion events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ + if not self.started or self.item_resource_helper is None: # should not happen + return False, [] + + item_resource = self.item_resource_helper.create_item_resource(is_done=True) + # response item done event + done_event = project_models.ResponseOutputItemDoneEvent( + output_index=self.output_index, + sequence_number=stream_state.sequence_number, + item=item_resource, + ) + stream_state.sequence_number += 1 + if self.parent: + self.parent.aggregate_content(item_resource) # pass aggregated content to parent + return True, [done_event] + + def aggregate_content(self, content: Any) -> None: + """Aggregate child content into the current item resource helper. + + :param content: The child content to aggregate. + :type content: Any + """ + if self.item_resource_helper is None: + return None + self.item_resource_helper.add_aggregate_content(content) + + def try_create_item_resource_helper(self, event: ResponseGeneratorMessage, id_generator: IdGenerator) -> bool: + """Create the item-resource helper for the current message type. + + :param event: The message or interrupt to inspect. + :type event: Optional[Union[AnyMessage, Interrupt]] + :param id_generator: The identifier generator for new item ids. + :type id_generator: IdGenerator + + :return: True when a helper was created. 
+ :rtype: bool + """ + helper: Optional[item_resource_helpers.ItemResourceHelper] = None + if isinstance(event, langgraph_messages.AIMessageChunk) and event.tool_call_chunks: + helper = item_resource_helpers.FunctionCallItemResourceHelper( + item_id=id_generator.generate_function_call_id(), tool_call=event.tool_call_chunks[0] + ) + elif isinstance(event, langgraph_messages.AIMessage) and event.tool_calls: + helper = item_resource_helpers.FunctionCallItemResourceHelper( + item_id=id_generator.generate_function_call_id(), tool_call=event.tool_calls[0] + ) + elif isinstance(event, langgraph_messages.AIMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( + item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.ASSISTANT + ) + elif isinstance(event, langgraph_messages.HumanMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( + item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.USER + ) + elif isinstance(event, langgraph_messages.SystemMessage) and event.content: + helper = item_resource_helpers.MessageItemResourceHelper( + item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.SYSTEM + ) + elif isinstance(event, langgraph_messages.ToolMessage): + helper = item_resource_helpers.FunctionCallOutputItemResourceHelper( + item_id=id_generator.generate_function_output_id(), call_id=event.tool_call_id + ) + elif isinstance(event, Interrupt): + helper = item_resource_helpers.FunctionCallInterruptItemResourceHelper( + item_id=id_generator.generate_function_output_id(), + hitl_helper=self.hitl_helper, + interrupt=event, + ) + + if helper is None: + return False + + self.item_resource_helper = helper + return True + + def create_child_processor(self, message: ResponseGeneratorMessage) -> Optional[ResponseEventGenerator]: + """Create the child generator for the current item resource type. + + :param message: The originating message or interrupt. + :type message: Optional[Union[AnyMessage, Interrupt]] + + :return: The child generator, if one is required. + :rtype: Optional[ResponseEventGenerator] + """ + if self.item_resource_helper is None or message is None: + return None + if self.item_resource_helper.item_type == project_models.ItemType.FUNCTION_CALL: + return ResponseFunctionCallArgumentEventGenerator( + self.logger, + self, + item_id=self.item_resource_helper.item_id, + message_id=message.id, + output_index=self.output_index, + hitl_helper=self.hitl_helper, + ) + if self.item_resource_helper.item_type == project_models.ItemType.MESSAGE: + if self.item_resource_helper.item_id is None or self.message_id is None: + return None + return ResponseContentPartEventGenerator( + self.logger, self, self.item_resource_helper.item_id, message.id, self.output_index, content_index=0 + ) + return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py new file mode 100644 index 000000000000..af42c282c33a --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_output_text_event_generator.py @@ -0,0 +1,183 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# --------------------------------------------------------- +from typing import List, Optional + +from langchain_core import messages as langgraph_messages + +from azure.ai.agentserver.core.models import _projects as project_models +from ._response_event_generator import ( + ResponseEventGenerator, + ResponseGeneratorEvents, + ResponseGeneratorMessage, + ResponseGeneratorResult, + StreamEventState, +) +from ..._context import LanggraphRunContext + + +class ResponseOutputTextEventGenerator(ResponseEventGenerator): + """Generate text delta and done events for one response content part.""" + + def __init__( + self, + logger, + parent: ResponseEventGenerator, + content_index: int, + output_index: int, + item_id: str, + message_id: Optional[str], + ): + """Initialize the output-text event generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator in the event chain. + :type parent: ResponseEventGenerator + :param content_index: The content index within the output item. + :type content_index: int + :param output_index: The output item index. + :type output_index: int + :param item_id: The response item identifier. + :type item_id: str + :param message_id: The originating message identifier. + :type message_id: Optional[str] + """ + super().__init__(logger, parent) + self.output_index = output_index + self.content_index = content_index + self.item_id = item_id + self.message_id = message_id + self.aggregated_content = "" + + def try_process_message( + self, message: ResponseGeneratorMessage, _context, stream_state: StreamEventState + ) -> ResponseGeneratorResult: + """Process a message into text delta and completion events. + + :param message: The message chunk to process. + :type message: AnyMessage + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ + is_processed = False + events: ResponseGeneratorEvents = [] + next_processor: Optional[ResponseEventGenerator] = self + if not self.started: + self.started = True + + if isinstance(message, langgraph_messages.BaseMessage): + is_processed, next_processor, processed_events = self.process(message, stream_state) + if not is_processed: + self.logger.warning("OutputTextEventGenerator did not process message: %s", message) + events.extend(processed_events) + + if self.should_end(message): + is_processed, complete_events = self.on_end(message, _context, stream_state) + events.extend(complete_events) + next_processor = self.parent + + return is_processed, next_processor, events + + def process( + self, message: langgraph_messages.BaseMessage, stream_state: StreamEventState + ) -> ResponseGeneratorResult: + """Convert message content into text delta events. + + :param message: The message containing text content. + :type message: AnyMessage + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, current generator, and emitted events. 
+ :rtype: tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]] + """ + if message and message.content: + content = [message.content] if isinstance(message.content, str) else message.content + res: ResponseGeneratorEvents = [] + for item in content: + if not isinstance(item, str): + self.logger.warning("Skipping non-string content item: %s", item) + continue + # create an event for each content item + chunk_event = project_models.ResponseTextDeltaEvent( + item_id=self.item_id, + output_index=self.output_index, + content_index=self.content_index, + delta=item, + sequence_number=stream_state.sequence_number, + ) + self.aggregated_content += item + stream_state.sequence_number += 1 + res.append(chunk_event) + return True, self, res + return False, self, [] + + def has_finish_reason(self, message: ResponseGeneratorMessage) -> bool: + """Check whether the message marks completion for this text stream. + + :param message: The message to inspect. + :type message: AnyMessage + + :return: True when the message carries a finish reason. + :rtype: bool + """ + if not isinstance(message, langgraph_messages.BaseMessage) or message.id != self.message_id: + return False + if message.response_metadata and message.response_metadata.get("finish_reason"): + return True + return False + + def should_end(self, message: ResponseGeneratorMessage) -> bool: + """Determine whether text streaming for this item should end. + + :param message: The message to inspect. + :type message: AnyMessage + + :return: True when the generator should end. + :rtype: bool + """ + if message is None: + return True + if not isinstance(message, langgraph_messages.BaseMessage): + return True + if message.id != self.message_id: + return True + return False + + def on_end( + self, message: ResponseGeneratorMessage, _context: LanggraphRunContext, stream_state: StreamEventState + ) -> tuple[bool, ResponseGeneratorEvents]: + """Emit the final text-done event for the current content part. + + :param message: The terminal message for this text stream. + :type message: AnyMessage + :param _context: The run context, unused by this generator. + :type _context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Completion status and final events. 
+ :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ + if not self.started: + return False, [] + + # finalize the item resource + done_event = project_models.ResponseTextDoneEvent( + item_id=self.item_id, + output_index=self.output_index, + content_index=self.content_index, + text=self.aggregated_content, + sequence_number=stream_state.sequence_number, + ) + stream_state.sequence_number += 1 + if self.parent: + self.parent.aggregate_content(self.aggregated_content) + has_finish = self.has_finish_reason(message) + return has_finish, [done_event] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py similarity index 50% rename from sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py rename to sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py index f19629eba94b..f84d5b234653 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_stream_event_generator.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/_response_stream_event_generator.py @@ -1,19 +1,20 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors import time -from typing import List +from typing import Any, List, Optional, Union from langchain_core import messages as langgraph_messages -from azure.ai.agentserver.core.models import projects as project_models -from .response_event_generator import ( +from azure.ai.agentserver.core.models import _projects as project_models +from ._response_event_generator import ( ResponseEventGenerator, + ResponseGeneratorEvents, + ResponseGeneratorMessage, + ResponseGeneratorResult, StreamEventState, ) -from .response_output_item_event_generator import ResponseOutputItemEventGenerator +from ._response_output_item_event_generator import ResponseOutputItemEventGenerator from ..._context import LanggraphRunContext @@ -24,13 +25,35 @@ class ResponseStreamEventGenerator(ResponseEventGenerator): """ def __init__(self, logger, parent, *, hitl_helper=None): + """Initialize the top-level response stream generator. + + :param logger: The logger used for diagnostics. + :type logger: logging.Logger + :param parent: The parent generator, if any. + :type parent: ResponseEventGenerator | None + :keyword hitl_helper: Optional helper for human-in-the-loop interrupts. + :type hitl_helper: Any + """ super().__init__(logger, parent) self.hitl_helper = hitl_helper self.aggregated_contents: List[project_models.ItemResource] = [] def on_start( - self, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: + self, + _message: ResponseGeneratorMessage, + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> tuple[bool, ResponseGeneratorEvents]: + """Emit the initial response-created and in-progress stream events. 
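+
+ :param _message: The current message, unused on start.
+ :type _message: Optional[langgraph_messages.AnyMessage]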
+ + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Whether generation started and the emitted events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ if self.started: return True, [] agent_id = context.agent_run.get_agent_id_object() @@ -70,23 +93,45 @@ def on_start( return True, [created_event, in_progress_event] def should_complete(self, event: langgraph_messages.AnyMessage) -> bool: - # Determine if the event indicates completion + """Determine whether the current event represents stream completion. + + :param event: The current stream event. + :type event: langgraph_messages.AnyMessage + + :return: True when the stream should be considered complete. + :rtype: bool + """ if event is None: return True return False def try_process_message( - self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: + self, + message: ResponseGeneratorMessage, + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> ResponseGeneratorResult: + """Process a streamed message or transition to a child generator. + + :param message: The streamed message to process. + :type message: Optional[langgraph_messages.AnyMessage] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Processing status, next generator, and emitted events. + :rtype: tuple[bool, Optional[ResponseEventGenerator], List[project_models.ResponseStreamEvent]] + """ is_processed = False - next_processor = self - events = [] + next_processor: Optional[ResponseEventGenerator] = self + events: ResponseGeneratorEvents = [] if not self.started: - self.started, start_events = self.on_start(context, stream_state) + self.started, start_events = self.on_start(message, context, stream_state) events.extend(start_events) - if message: + if message is not None: # create a child processor next_processor = ResponseOutputItemEventGenerator( self.logger, self, len(self.aggregated_contents), message.id, hitl_helper=self.hitl_helper @@ -95,21 +140,43 @@ def try_process_message( if self.should_end(message): # received a None message, indicating end of the stream - done_events = self.on_end(message, context, stream_state) + is_processed, done_events = self.on_end(message, context, stream_state) events.extend(done_events) - is_processed = True next_processor = None return is_processed, next_processor, events - def should_end(self, event: langgraph_messages.AnyMessage) -> bool: - # Determine if the event indicates end of the stream + def should_end(self, event: ResponseGeneratorMessage) -> bool: + """Determine whether the stream should end for the current event. + + :param event: The current stream event. + :type event: langgraph_messages.AnyMessage + + :return: True when the generator should end. + :rtype: bool + """ if event is None: return True return False - def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunContext, - stream_state: StreamEventState): + def on_end( + self, + _message: ResponseGeneratorMessage, + context: LanggraphRunContext, + stream_state: StreamEventState, + ) -> tuple[bool, ResponseGeneratorEvents]: + """Emit the final response-completed event for the stream. 
+ + :param _message: The terminal message for the stream. + :type _message: Optional[langgraph_messages.AnyMessage] + :param context: The run context for the current request. + :type context: LanggraphRunContext + :param stream_state: The mutable stream state. + :type stream_state: StreamEventState + + :return: Completion status and the final stream events. + :rtype: tuple[bool, List[project_models.ResponseStreamEvent]] + """ agent_id = context.agent_run.get_agent_id_object() conversation = context.agent_run.get_conversation_object() response_dict = { @@ -128,13 +195,18 @@ def on_end(self, message: langgraph_messages.AnyMessage, context: LanggraphRunCo stream_state.sequence_number += 1 if self.parent: self.parent.aggregate_content(self.aggregated_contents) - return [done_event] + return True, [done_event] + + def aggregate_content(self, content: Any) -> None: + """Collect item resources produced by child generators. - def aggregate_content(self, content): - # aggregate content from children + :param content: The child content to aggregate. + :type content: Any + """ if isinstance(content, list): for c in content: self.aggregate_content(c) + return if isinstance(content, project_models.ItemResource): self.aggregated_contents.append(content) else: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py deleted file mode 100644 index 8502ec13069b..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/item_resource_helpers.py +++ /dev/null @@ -1,144 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# --------------------------------------------------------- -# mypy: disable-error-code="assignment" -from typing import Optional - -from langgraph.types import Interrupt - -from azure.ai.agentserver.core.models import projects as project_models - -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call - - -class ItemResourceHelper: - def __init__(self, item_type: str, item_id: Optional[str] = None): - self.item_type = item_type - self.item_id = item_id - - def create_item_resource(self, is_done: bool): - pass - - def add_aggregate_content(self, item): - pass - - def get_aggregated_content(self): - pass - - -class FunctionCallItemResourceHelper(ItemResourceHelper): - def __init__(self, item_id: str = None, tool_call: dict = None): - super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) - self.call_id = None - self.name = None - self.arguments = "" - if tool_call: - self.name, self.call_id, _ = extract_function_call(tool_call) - - def create_item_resource(self, is_done: bool): - content = { - "id": self.item_id, - "type": self.item_type, - "call_id": self.call_id, - "name": self.name, - "arguments": self.arguments if self.arguments else "", - "status": "in_progress" if not is_done else "completed", - } - return project_models.ItemResource(content) - - def add_aggregate_content(self, item): - if isinstance(item, str): - self.arguments += item - return - if not isinstance(item, dict): - return - if item.get("type") != project_models.ItemType.FUNCTION_CALL: - return - _, _, argument = extract_function_call(item) - if argument: - self.arguments += argument - - def get_aggregated_content(self): - return self.create_item_resource(is_done=True) - - -class FunctionCallInterruptItemResourceHelper(ItemResourceHelper): - def __init__(self, - item_id: Optional[str] = None, - hitl_helper: Optional[HumanInTheLoopHelper] = None, - interrupt: Optional[Interrupt] = None): - super().__init__(project_models.ItemType.FUNCTION_CALL, item_id) - self.hitl_helper = hitl_helper - self.interrupt = interrupt - - def create_item_resource(self, is_done: bool): - if self.hitl_helper is None or self.interrupt is None: - return None - item_resource = self.hitl_helper.convert_interrupt(self.interrupt) - if item_resource is not None and not is_done: - if hasattr(item_resource, 'arguments'): - item_resource.arguments = "" # type: ignore[union-attr] - return item_resource - - def add_aggregate_content(self, item): - pass - - def get_aggregated_content(self): - return self.create_item_resource(is_done=True) - - -class FunctionCallOutputItemResourceHelper(ItemResourceHelper): - def __init__(self, item_id: str = None, call_id: str = None): - super().__init__(project_models.ItemType.FUNCTION_CALL_OUTPUT, item_id) - self.call_id = call_id - self.content = "" - - def create_item_resource(self, is_done: bool): - content = { - "id": self.item_id, - "type": self.item_type, - "status": "in_progress" if not is_done else "completed", - "call_id": self.call_id, - "output": self.content, - } - return project_models.ItemResource(content) - - def add_aggregate_content(self, item): - if isinstance(item, str): - self.content += item - return - if not isinstance(item, dict): - return - content = item.get("text") - if isinstance(content, str): - self.content += content - - def get_aggregated_content(self): - return self.create_item_resource(is_done=True) - - -class MessageItemResourceHelper(ItemResourceHelper): - def __init__(self, item_id: str, role: 
project_models.ResponsesMessageRole): - super().__init__(project_models.ItemType.MESSAGE, item_id) - self.role = role - self.content: list[project_models.ItemContent] = [] - - def create_item_resource(self, is_done: bool): - content = { - "id": self.item_id, - "type": self.item_type, - "status": "in_progress" if not is_done else "completed", - "content": self.content, - "role": self.role, - } - return project_models.ItemResource(content) - - def add_aggregate_content(self, item): - if isinstance(item, dict): - item = project_models.ItemContent(item) - if isinstance(item, project_models.ItemContent): - self.content.append(item) - - def get_aggregated_content(self): - return self.create_item_resource(is_done=True) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py deleted file mode 100644 index 4823de4411ae..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_content_part_event_generator.py +++ /dev/null @@ -1,154 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=unused-argument,consider-using-in,consider-merging-isinstance -# mypy: ignore-errors -from typing import List - -from langchain_core import messages as langgraph_messages - -from azure.ai.agentserver.core.models import projects as project_models - -from . import item_content_helpers -from .response_event_generator import ResponseEventGenerator, StreamEventState -from .response_output_text_event_generator import ResponseOutputTextEventGenerator - - -class ResponseContentPartEventGenerator(ResponseEventGenerator): - def __init__( - self, - logger, - parent: ResponseEventGenerator, - item_id: str, - message_id: str, - output_index: int, - content_index: int, - ): - super().__init__(logger, parent) - self.output_index = output_index - self.content_index = content_index - self.item_id = item_id - self.message_id = message_id - self.aggregated_content = "" - self.item_content_helper = None - - def try_process_message( - self, message, context, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - is_processed = False - events = [] - next_processor = self - if not self.item_content_helper: - if not self.try_create_item_content_helper(message): - # cannot create item content, skip this message - self.logger.warning(f"Cannot create item content helper for message: {message}") - return True, self, [] - if self.item_content_helper and not self.started: - self.started, start_events = self.on_start(message, context, stream_state) - if not self.started: - # could not start processing, skip this message - return True, self, [] - events.extend(start_events) - - if self.should_end(message): - complete_events = self.on_end(message, context, stream_state) - events.extend(complete_events) - next_processor = self.parent - is_processed = self.has_finish_reason(message) if message else False - return is_processed, next_processor, events - - child_processor = self.create_child_processor(message) - if child_processor: - next_processor = child_processor - - return is_processed, 
next_processor, events - - def on_start( # mypy: ignore[override] - self, event, run_details, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - if self.started: - return False, [] - - start_event = project_models.ResponseContentPartAddedEvent( - item_id=self.item_id, - output_index=self.output_index, - content_index=self.content_index, - part=self.item_content_helper.create_item_content(), - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - self.started = True - - return True, [start_event] - - def on_end( - self, message, context, stream_state: StreamEventState - ) -> List[project_models.ResponseStreamEvent]: # mypy: ignore[override] - aggregated_content = self.item_content_helper.create_item_content() - done_event = project_models.ResponseContentPartDoneEvent( - item_id=self.item_id, - output_index=self.output_index, - content_index=self.content_index, - part=aggregated_content, - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - if self.parent: - self.parent.aggregate_content(aggregated_content.as_dict()) - return [done_event] - - def try_create_item_content_helper(self, message): - if isinstance(message, langgraph_messages.AIMessage) or isinstance(message, langgraph_messages.ToolMessage): - if self.is_text_content(message.content): - self.item_content_helper = item_content_helpers.OutputTextItemContentHelper() - return True - if isinstance(message, langgraph_messages.HumanMessage) or isinstance( - message, langgraph_messages.SystemMessage - ): - if self.is_text_content(message.content): - self.item_content_helper = item_content_helpers.InputTextItemContentHelper() - return True - return False - - def aggregate_content(self, content): - return self.item_content_helper.aggregate_content(content) - - def is_text_content(self, content): - if isinstance(content, str): - return True - if isinstance(content, list) and all(isinstance(c, str) for c in content): - return True - return False - - def create_child_processor(self, message) -> ResponseEventGenerator: - if ( - self.item_content_helper.content_type == project_models.ItemContentType.INPUT_TEXT - or self.item_content_helper.content_type == project_models.ItemContentType.OUTPUT_TEXT - ): - return ResponseOutputTextEventGenerator( - logger=self.logger, - parent=self, - content_index=self.content_index, - output_index=self.output_index, - item_id=self.item_id, - message_id=self.message_id, - ) - raise ValueError(f"Unsupported item content type for child processor: {self.item_content_helper.content_type}") - - def has_finish_reason(self, message) -> bool: - if not isinstance(message, langgraph_messages.BaseMessageChunk): - return False - if message.response_metadata and message.response_metadata.get("finish_reason"): - return True - return False - - def should_end(self, event) -> bool: - # Determine if the event indicates end of the stream for this item - if event is None: - return True - if event.id != self.message_id: - return True - # if is Message not MessageChunk, should create child and end in the second iteration - if not isinstance(event, langgraph_messages.BaseMessageChunk): - return self.item_content_helper.has_aggregated_content - return False diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py 
b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py deleted file mode 100644 index cd161b99d152..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_event_generator.py +++ /dev/null @@ -1,92 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=unused-argument,unnecessary-pass -# mypy: disable-error-code="valid-type" -from typing import List - -from langchain_core.messages import AnyMessage - -from azure.ai.agentserver.core.models import projects as project_models -from ..._context import LanggraphRunContext - - -class StreamEventState: - """ - :meta private: - State information for the stream event processing. - """ - - sequence_number: int = 0 - - -class ResponseEventGenerator: - """ - :meta private: - Abstract base class for response event generators. - """ - - started: bool = False - - def __init__(self, logger, parent): - self.logger = logger - self.parent = parent # parent generator - - def try_process_message( - self, - message: AnyMessage, # mypy: ignore[valid-type] - context: LanggraphRunContext, - stream_state: StreamEventState, - ): # mypy: ignore[empty-body] - """ - Try to process the incoming message. - - :param message: The incoming message to process. - :type message: AnyMessage - :param context: The agent run context. - :type context: LanggraphRunContext - :param stream_state: The current stream event state. - :type stream_state: StreamEventState - - :return: tuple of (is_processed, next_processor, events) - :rtype: tuple[bool, ResponseEventGenerator, List[ResponseStreamEvent]] - """ - pass - - def on_start(self) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - """ - Generate the starting events for this layer. - - :return: tuple of (started, events) - :rtype: tuple[bool, List[ResponseStreamEvent]] - """ - return False, [] - - def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - """ - Generate the ending events for this layer. - TODO: handle different end conditions, e.g. normal end, error end, etc. - - :param message: The incoming message to process. - :type message: AnyMessage - :param context: The agent run context. - :type context: LanggraphRunContext - :param stream_state: The current stream event state. - :type stream_state: StreamEventState - - :return: tuple of (started, events) - :rtype: tuple[bool, List[ResponseStreamEvent]] - """ - return False, [] - - def aggregate_content(self): - """ - Aggregate the content for this layer. - It is called by its child processor to pass up aggregated content. 
- - :return: content from child processor - :rtype: str | dict - """ - pass diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py deleted file mode 100644 index 56c3bde68632..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_function_call_argument_event_generator.py +++ /dev/null @@ -1,147 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=unused-argument,name-too-long -# mypy: ignore-errors -from typing import List, Union - -from langchain_core import messages as langgraph_messages -from langchain_core.messages import AnyMessage -from langgraph.types import Interrupt - -from azure.ai.agentserver.core.models import projects as project_models -from . import ResponseEventGenerator, StreamEventState -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..utils import extract_function_call -from ..._context import LanggraphRunContext - - -class ResponseFunctionCallArgumentEventGenerator(ResponseEventGenerator): - def __init__( - self, - logger, - parent: ResponseEventGenerator, - item_id, - message_id, - output_index: int, - *, - hitl_helper: HumanInTheLoopHelper = None, - ): - super().__init__(logger, parent) - self.item_id = item_id - self.output_index = output_index - self.aggregated_content = "" - self.message_id = message_id - self.hitl_helper = hitl_helper - - def try_process_message( - self, message, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - is_processed = False - events = [] - next_processor = self - if not self.started: - self.started = True # does not need to do anything special on start - - is_processed, next_processor, processed_events = self.process(message, context, stream_state) - if not is_processed: - self.logger.warning(f"FunctionCallArgumentEventGenerator did not process message: {message}") - events.extend(processed_events) - - if self.should_end(message): - has_finish_reason = self.has_finish_reason(message) - complete_events = self.on_end(message, context, stream_state) - events.extend(complete_events) - next_processor = self.parent - is_processed = has_finish_reason # if has finish reason, mark as processed and stop further processing - - return is_processed, next_processor, events - - def on_start( - self, event: AnyMessage, run_details, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - if self.started: - return True, [] - self.started = True - return True, [] - - def process( - self, message: Union[langgraph_messages.AnyMessage, Interrupt], run_details, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - if self.should_end(message): - return False, self, [] - - argument = None - if isinstance(message, Interrupt): - if self.hitl_helper: - _, _, argument = self.hitl_helper.interrupt_to_function_call(message) - else: - argument = None - else: - tool_call = self.get_tool_call_info(message) - if tool_call: - _, _, argument 
= extract_function_call(tool_call) - if argument: - argument_delta_event = project_models.ResponseFunctionCallArgumentsDeltaEvent( - item_id=self.item_id, - output_index=self.output_index, - delta=argument, - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - self.aggregated_content += argument - return True, self, [argument_delta_event] - return False, self, [] - - def has_finish_reason(self, message: AnyMessage) -> bool: - if not message or message.id != self.message_id: - return False - if isinstance(message, langgraph_messages.AIMessageChunk): - if not message.tool_call_chunks: - # new tool call started, end this argument processing - return True - if message.response_metadata.get("finish_reason"): - # tool call finished - return True - elif isinstance(message, langgraph_messages.AIMessage): - return True - return False - - def should_end(self, event: AnyMessage) -> bool: - if event is None: - return True - if event.id != self.message_id: - return True - return False - - def on_end( - self, message: AnyMessage, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - done_event = project_models.ResponseFunctionCallArgumentsDoneEvent( - item_id=self.item_id, - output_index=self.output_index, - arguments=self.aggregated_content, - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - self.parent.aggregate_content(self.aggregated_content) # pass aggregated content to parent - return [done_event] - - def get_tool_call_info(self, message: Union[langgraph_messages.AnyMessage, Interrupt]): - if isinstance(message, langgraph_messages.AIMessageChunk): - if message.tool_call_chunks: - if len(message.tool_call_chunks) > 1: - self.logger.warning( - f"There are {len(message.tool_call_chunks)} tool calls found. " - + "Only the first one will be processed." - ) - return message.tool_call_chunks[0] - elif isinstance(message, langgraph_messages.AIMessage): - if message.tool_calls: - if len(message.tool_calls) > 1: - self.logger.warning( - f"There are {len(message.tool_calls)} tool calls found. " - + "Only the first one will be processed." - ) - return message.tool_calls[0] - return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py deleted file mode 100644 index 14eee3c571b2..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_item_event_generator.py +++ /dev/null @@ -1,175 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: ignore-errors -from typing import List, Union - -from langchain_core import messages as langgraph_messages -from langchain_core.messages import AnyMessage -from langgraph.types import Interrupt - -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.core.server.common.id_generator.id_generator import IdGenerator -from . 
import ResponseEventGenerator, StreamEventState, item_resource_helpers -from .response_content_part_event_generator import ResponseContentPartEventGenerator -from .response_function_call_argument_event_generator import ResponseFunctionCallArgumentEventGenerator -from ..human_in_the_loop_helper import HumanInTheLoopHelper -from ..._context import LanggraphRunContext - - -class ResponseOutputItemEventGenerator(ResponseEventGenerator): - def __init__(self, logger, parent: ResponseEventGenerator, - output_index: int, message_id: str = None, - *, hitl_helper: HumanInTheLoopHelper = None): - super().__init__(logger, parent) - self.output_index = output_index - self.message_id = message_id - self.item_resource_helper = None - self.hitl_helper = hitl_helper - - def try_process_message( - self, message: Union[AnyMessage, Interrupt, None], context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - is_processed = False - next_processor = self - events = [] - if self.item_resource_helper is None: - if not self.try_create_item_resource_helper(message, context.agent_run.id_generator): - # cannot create item resource, skip this message - self.logger.warning(f"Cannot create item resource helper for message: {message}, skipping.") - return True, self, [] - - if self.item_resource_helper and not self.started: - self.started, start_events = self.on_start(message, context, stream_state) - if not self.started: - # could not start processing, skip this message - self.logger.warning(f"Cannot create start events for message: {message}, skipping.") - return True, self, [] - events.extend(start_events) - - if self.should_end(message): - # not the message this processor is handling - complete_events = self.on_end(message, context, stream_state) - is_processed = self.message_id == message.id if message else False - next_processor = self.parent - events.extend(complete_events) - return is_processed, next_processor, events - - child_processor = self.create_child_processor(message) - if child_processor: - self.logger.info(f"Created child processor: {child_processor}") - return False, child_processor, events - - if message: - # no child processor, process the content directly - self.aggregate_content(message.content) - is_processed = True - - return is_processed, next_processor, events - - def on_start( - self, event: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - if self.started: - return True, [] - - item_resource = self.item_resource_helper.create_item_resource(is_done=False) - if item_resource is None: - # cannot know what item resource to create - return False, None - item_added_event = project_models.ResponseOutputItemAddedEvent( - output_index=self.output_index, - sequence_number=stream_state.sequence_number, - item=item_resource, - ) - stream_state.sequence_number += 1 - self.started = True - return True, [item_added_event] - - def should_end(self, event: Union[AnyMessage, Interrupt]) -> bool: - if event is None: - self.logger.info("Received None event, ending processor.") - return True - if event.id != self.message_id: - return True - return False - - def on_end( - self, message: Union[AnyMessage, Interrupt], context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - if not self.started: # should not happen - return [] - - item_resource = 
self.item_resource_helper.create_item_resource(is_done=True) - # response item done event - done_event = project_models.ResponseOutputItemDoneEvent( - output_index=self.output_index, - sequence_number=stream_state.sequence_number, - item=item_resource, - ) - stream_state.sequence_number += 1 - self.parent.aggregate_content(item_resource) # pass aggregated content to parent - return [done_event] - - def aggregate_content(self, content): - # aggregate content from child processor - self.item_resource_helper.add_aggregate_content(content) - - def try_create_item_resource_helper(self, event: Union[AnyMessage, Interrupt], id_generator: IdGenerator): # pylint: disable=too-many-return-statements - if isinstance(event, langgraph_messages.AIMessageChunk) and event.tool_call_chunks: - self.item_resource_helper = item_resource_helpers.FunctionCallItemResourceHelper( - item_id=id_generator.generate_function_call_id(), tool_call=event.tool_call_chunks[0] - ) - return True - if isinstance(event, langgraph_messages.AIMessage) and event.tool_calls: - self.item_resource_helper = item_resource_helpers.FunctionCallItemResourceHelper( - item_id=id_generator.generate_function_call_id(), tool_call=event.tool_calls[0] - ) - return True - if isinstance(event, langgraph_messages.AIMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( - item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.ASSISTANT - ) - return True - if isinstance(event, langgraph_messages.HumanMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( - item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.USER - ) - return True - if isinstance(event, langgraph_messages.SystemMessage) and event.content: - self.item_resource_helper = item_resource_helpers.MessageItemResourceHelper( - item_id=id_generator.generate_message_id(), role=project_models.ResponsesMessageRole.SYSTEM - ) - return True - if isinstance(event, langgraph_messages.ToolMessage): - self.item_resource_helper = item_resource_helpers.FunctionCallOutputItemResourceHelper( - item_id=id_generator.generate_function_output_id(), call_id=event.tool_call_id - ) - return True - if isinstance(event, Interrupt): - self.item_resource_helper = item_resource_helpers.FunctionCallInterruptItemResourceHelper( - item_id=id_generator.generate_function_output_id(), - hitl_helper=self.hitl_helper, - interrupt=event, - ) - return True - return False - - def create_child_processor(self, message: Union[AnyMessage, Interrupt]): - if self.item_resource_helper is None: - return None - if self.item_resource_helper.item_type == project_models.ItemType.FUNCTION_CALL: - return ResponseFunctionCallArgumentEventGenerator( - self.logger, - self, - item_id=self.item_resource_helper.item_id, - message_id=message.id, - output_index=self.output_index, - hitl_helper=self.hitl_helper, - ) - if self.item_resource_helper.item_type == project_models.ItemType.MESSAGE: - return ResponseContentPartEventGenerator( - self.logger, self, self.item_resource_helper.item_id, message.id, self.output_index, content_index=0 - ) - return None diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py deleted file mode 100644 
index 8d0e62650a2d..000000000000 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/models/response_event_generators/response_output_text_event_generator.py +++ /dev/null @@ -1,111 +0,0 @@ -# --------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# --------------------------------------------------------- -# pylint: disable=unused-argument -# mypy: disable-error-code="return-value,assignment" -from typing import List - -from azure.ai.agentserver.core.models import projects as project_models -from .response_event_generator import ( - ResponseEventGenerator, - StreamEventState, -) -from ..._context import LanggraphRunContext - - -class ResponseOutputTextEventGenerator(ResponseEventGenerator): - def __init__( - self, - logger, - parent: ResponseEventGenerator, - content_index: int, - output_index: int, - item_id: str, - message_id: str, - ): - super().__init__(logger, parent) - self.output_index = output_index - self.content_index = content_index - self.item_id = item_id - self.message_id = message_id - self.aggregated_content = "" - - def try_process_message( - self, message, context, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - is_processed = False - events = [] - next_processor = self - if not self.started: - self.started = True - - if message: - is_processed, next_processor, processed_events = self.process(message, context, stream_state) - if not is_processed: - self.logger.warning(f"OutputTextEventGenerator did not process message: {message}") - events.extend(processed_events) - - if self.should_end(message): - is_processed, complete_events = self.on_end(message, context, stream_state) - events.extend(complete_events) - next_processor = self.parent - - return is_processed, next_processor, events - - def process( - self, message, run_details, stream_state: StreamEventState - ) -> tuple[bool, ResponseEventGenerator, List[project_models.ResponseStreamEvent]]: - if message and message.content: - content = [message.content] if isinstance(message.content, str) else message.content - res = [] - for item in content: - if not isinstance(item, str): - self.logger.warning(f"Skipping non-string content item: {item}") - continue - # create an event for each content item - chunk_event = project_models.ResponseTextDeltaEvent( - item_id=self.item_id, - output_index=self.output_index, - content_index=self.content_index, - delta=item, - sequence_number=stream_state.sequence_number, - ) - self.aggregated_content += item - stream_state.sequence_number += 1 - res.append(chunk_event) - return True, self, res # mypy: ignore[return-value] - return False, self, [] - - def has_finish_reason(self, message) -> bool: - if not message or message.id != self.message_id: - return False - if message.response_metadata and message.response_metadata.get("finish_reason"): - return True - return False - - def should_end(self, message) -> bool: - # Determine if the message indicates end of the stream for this item - if message is None: - return True - if message.id != self.message_id: - return True - return False - - def on_end( # mypy: ignore[override] - self, message, context: LanggraphRunContext, stream_state: StreamEventState - ) -> tuple[bool, List[project_models.ResponseStreamEvent]]: - if not self.started: - return False, [] - - # finalize the item resource - done_event = project_models.ResponseTextDoneEvent( - item_id=self.item_id, - 
output_index=self.output_index, - content_index=self.content_index, - text=self.aggregated_content, - sequence_number=stream_state.sequence_number, - ) - stream_state.sequence_number += 1 - self.parent.aggregate_content(self.aggregated_content) - has_finish = self.has_finish_reason(message) - return has_finish, [done_event] diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py index 0ea9a2da80f2..4d6da83cb23b 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_builder.py @@ -1,7 +1,7 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- -from typing import List, Optional, Union, overload +from typing import List, Optional, Union, cast, overload from langchain_core.language_models import BaseChatModel @@ -54,10 +54,10 @@ def use_foundry_tools( # pylint: disable=C4743 if isinstance(model_or_tools, BaseChatModel): if tools is None: raise ValueError("Tools must be provided when a model is given.") - foundry_tools = [ensure_foundry_tool(tool) for tool in tools] + foundry_tools = cast(List[FoundryToolLike], [ensure_foundry_tool(tool) for tool in tools]) get_registry().extend(foundry_tools) return FoundryToolLateBindingChatModel(model_or_tools, runtime=None, foundry_tools=foundry_tools) - foundry_tools = [ensure_foundry_tool(tool) for tool in model_or_tools] + foundry_tools = cast(List[FoundryToolLike], [ensure_foundry_tool(tool) for tool in model_or_tools]) get_registry().extend(foundry_tools) return FoundryToolBindingMiddleware(foundry_tools) diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py index 4ca422b88c41..08d7e5cf3a32 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_chat_model.py @@ -31,6 +31,15 @@ class FoundryToolLateBindingChatModel(BaseChatModel): """ def __init__(self, delegate: BaseChatModel, runtime: Optional[Runtime], foundry_tools: List[FoundryToolLike]): + """Initialize the late-binding chat model wrapper. + + :param delegate: The underlying chat model. + :type delegate: BaseChatModel + :param runtime: The active LangGraph runtime, if available. + :type runtime: Optional[Runtime] + :param foundry_tools: The Foundry tools to resolve and bind at call time. + :type foundry_tools: List[FoundryToolLike] + """ super().__init__() self._delegate = delegate self._runtime = runtime @@ -75,8 +84,6 @@ def bind_tools(self, # pylint: disable=C4758 :type tools: Sequence[Dict[str, Any] | type | Callable | BaseTool] :keyword tool_choice: Optional tool choice strategy. :type tool_choice: str | None - :keyword kwargs: Additional keyword arguments for tool binding. - :type kwargs: Any :return: A Runnable with the tools bound for later invocation. 
:rtype: Runnable[LanguageModelInput, AIMessage] """ @@ -89,6 +96,14 @@ def bind_tools(self, # pylint: disable=C4758 return self def _bound_delegate_for_call(self, config: Optional[RunnableConfig]) -> Runnable[LanguageModelInput, AIMessage]: + """Resolve and bind all tools before invoking the delegate model. + + :param config: The runnable config carrying the LangGraph run context. + :type config: Optional[RunnableConfig] + + :return: The delegate model with all applicable tools bound. + :rtype: Runnable[LanguageModelInput, AIMessage] + """ from .._context import LanggraphRunContext foundry_tools: Iterable[BaseTool] = [] @@ -108,24 +123,92 @@ def _bound_delegate_for_call(self, config: Optional[RunnableConfig]) -> Runnable bound_kwargs = self._bound_kwargs or {} return self._delegate.bind_tools(all_tools, **bound_kwargs) - def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: + def invoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: # pylint: disable=C4758 + """Invoke the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional invocation keyword arguments. + :type kwargs: Any + + :return: The model result. + :rtype: Any + """ return self._bound_delegate_for_call(config).invoke(input, config=config, **kwargs) - async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: + async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any) -> Any: # pylint: disable=C4758 + """Asynchronously invoke the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional invocation keyword arguments. + :type kwargs: Any + + :return: The model result. + :rtype: Any + """ return await self._bound_delegate_for_call(config).ainvoke(input, config=config, **kwargs) - def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + def stream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): # pylint: disable=C4758 + """Stream results from the wrapped chat model with late-bound tools. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional streaming keyword arguments. + :type kwargs: Any + + :return: A synchronous iterator of streamed outputs. + :rtype: Iterator[Any] + """ yield from self._bound_delegate_for_call(config).stream(input, config=config, **kwargs) - async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): + async def astream(self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any): # pylint: disable=C4758 + """Asynchronously stream results from the wrapped chat model. + + :param input: The model input. + :type input: Any + :param config: Optional runnable config. + :type config: Optional[RunnableConfig] + :keyword kwargs: Additional streaming keyword arguments. + :type kwargs: Any + + :return: An async iterator of streamed outputs. + :rtype: AsyncIterator[Any] + """ async for x in self._bound_delegate_for_call(config).astream(input, config=config, **kwargs): yield x @property def _llm_type(self) -> str: + """Return the descriptive model type for LangChain integrations. + + :return: The logical model type name.
+ :rtype: str + """ return f"foundry_tool_binding_model({getattr(self._delegate, '_llm_type', type(self._delegate).__name__)})" - def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None, + def _generate(self, messages: list[BaseMessage], stop: list[str] | None = None, # pylint: disable=C4758 run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any) -> ChatResult: + """Disallow direct LangChain generation on the wrapper. + + :param messages: The prompt messages. + :type messages: list[BaseMessage] + :param stop: Optional stop sequences. + :type stop: list[str] | None + :param run_manager: Optional LangChain run manager. + :type run_manager: CallbackManagerForLLMRun | None + :keyword kwargs: Additional generation keyword arguments. + :type kwargs: Any + + :raises NotImplementedError: Always raised because calls should route through the delegate methods. + :return: This method never returns. + :rtype: ChatResult + """ # should never be called as invoke/ainvoke/stream/astream are redirected to delegate raise NotImplementedError() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py index c226e51e72ac..019d13c6c254 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_middleware.py @@ -27,6 +27,11 @@ class FoundryToolBindingMiddleware(AgentMiddleware): _DummyToolName: ClassVar[str] = "__dummy_tool_by_foundry_middleware__" def __init__(self, foundry_tools: List[FoundryToolLike]): + """Initialize the middleware with the Foundry tools to bind. + + :param foundry_tools: The Foundry tools that should be bound at runtime. + :type foundry_tools: List[FoundryToolLike] + """ super().__init__() # to ensure `create_agent()` will create a tool node when there are foundry tools to bind @@ -38,6 +43,11 @@ def __init__(self, foundry_tools: List[FoundryToolLike]): @classmethod def _dummy_tool(cls) -> BaseTool: + """Create a placeholder tool so agent creation wires a tool node. + + :return: The placeholder tool instance. + :rtype: BaseTool + """ return Tool(name=cls._DummyToolName, func=lambda x: None, description="__dummy_tool_by_foundry_middleware__") diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py index 5c77b1339132..c6da0220e5be 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_resolver.py @@ -21,6 +21,11 @@ class ResolvedTools(Iterable[BaseTool]): :type tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] """ def __init__(self, tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]]): + """Initialize the resolved-tools view. + + :param tools: The resolved tool pairs to index by Foundry tool id. 
+ :type tools: Iterable[Tuple[ResolvedFoundryTool, BaseTool]] + """ self._by_source_id: Dict[str, List[BaseTool]] = defaultdict(list) for rt, t in tools: self._by_source_id[rt.definition.id].append(t) @@ -74,6 +79,11 @@ def get(self, tool: Union[FoundryToolLike, Iterable[FoundryToolLike], None] = No yield from self._by_source_id.get(ft.id, []) def __iter__(self): + """Iterate over all resolved LangChain tools. + + :return: An iterator over the resolved tools. + :rtype: Iterator[BaseTool] + """ for tool_list in self._by_source_id.values(): yield from tool_list @@ -85,6 +95,11 @@ class FoundryLangChainToolResolver: :type name_resolver: Optional[ToolNameResolver] """ def __init__(self, name_resolver: Optional[ToolNameResolver] = None): + """Initialize the Foundry-to-LangChain tool resolver. + + :param name_resolver: Optional resolver for stable tool names. + :type name_resolver: Optional[ToolNameResolver] + """ self._name_resolver = name_resolver or ToolNameResolver() async def resolve_from_registry(self) -> ResolvedTools: @@ -108,6 +123,14 @@ async def resolve(self, foundry_tools: List[FoundryToolLike]) -> ResolvedTools: return ResolvedTools(tools=((tool, self._create_structured_tool(tool)) for tool in resolved_foundry_tools)) def _create_structured_tool(self, resolved_tool: ResolvedFoundryTool) -> StructuredTool: + """Create a LangChain structured tool from a resolved Foundry tool. + + :param resolved_tool: The resolved Foundry tool descriptor. + :type resolved_tool: ResolvedFoundryTool + + :return: The structured tool wrapper. + :rtype: StructuredTool + """ name = self._name_resolver.resolve(resolved_tool) args_schema = self._create_pydantic_model(name, resolved_tool.input_schema) @@ -127,6 +150,16 @@ async def _tool_func(**kwargs: Any) -> str: @classmethod def _create_pydantic_model(cls, tool_name: str, input_schema: SchemaDefinition) -> type[BaseModel]: + """Create a Pydantic model for a Foundry tool input schema. + + :param tool_name: The tool name used to derive the model name. + :type tool_name: str + :param input_schema: The Foundry schema definition. + :type input_schema: SchemaDefinition + + :return: The generated Pydantic model type. + :rtype: type[BaseModel] + """ field_definitions: Dict[str, Any] = {} required_fields = input_schema.required or set() for prop_name, prop in input_schema.properties.items(): diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py index 1bfef8c39f81..ffcb8de2e911 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/azure/ai/agentserver/langgraph/tools/_tool_node.py @@ -35,7 +35,13 @@ class FoundryToolNodeWrappers(TypedDict): class FoundryToolCallWrapper: """A ToolCallWrapper that tries to resolve invokable foundry tools from context if tool is not resolved yet.""" + def __init__(self, foundry_tools: List[FoundryToolLike]): + """Initialize the wrapper with the allowed Foundry tools. + + :param foundry_tools: The Foundry tools that may be resolved at runtime. 
+ :type foundry_tools: List[FoundryToolLike] + """ self._allowed_foundry_tools = foundry_tools def as_wrappers(self) -> FoundryToolNodeWrappers: @@ -74,6 +80,14 @@ async def call_tool_async(self, request: ToolCallRequest, invocation: AsyncToolI return await invocation(self._maybe_calling_foundry_tool(request)) def _maybe_calling_foundry_tool(self, request: ToolCallRequest) -> ToolCallRequest: + """Attach a resolved Foundry tool to the tool call request when possible. + + :param request: The incoming tool call request. + :type request: ToolCallRequest + + :return: The request with a resolved tool when a matching Foundry tool is available. + :rtype: ToolCallRequest + """ from .._context import LanggraphRunContext if (request.tool diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.checkpointer.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.checkpointer.rst new file mode 100644 index 000000000000..884fc699fd66 --- /dev/null +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.checkpointer.rst @@ -0,0 +1,7 @@ +azure.ai.agentserver.langgraph.checkpointer package +=================================================== + +.. automodule:: azure.ai.agentserver.langgraph.checkpointer + :inherited-members: + :members: + :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst index af7cc69bd859..86fdd4571cca 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.response_event_generators.rst @@ -5,70 +5,3 @@ azure.ai.agentserver.langgraph.models.response\_event\_generators package :inherited-members: :members: :undoc-members: - -Submodules ----------- - -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_content\_helpers module ------------------------------------------------------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_content_helpers - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.item\_resource\_helpers module ------------------------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.item_resource_helpers - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_content\_part\_event\_generator module ------------------------------------------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_content_part_event_generator - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_event\_generator module ---------------------------------------------------------------------------------------------------- - -.. 
automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_event_generator - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_function\_call\_argument\_event\_generator module ------------------------------------------------------------------------------------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_function_call_argument_event_generator - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_item\_event\_generator module ------------------------------------------------------------------------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_item_event_generator - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_output\_text\_event\_generator module ------------------------------------------------------------------------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_output_text_event_generator - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_event\_generators.response\_stream\_event\_generator module ------------------------------------------------------------------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.models.response_event_generators.response_stream_event_generator - :inherited-members: - :members: - :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst index aba857c3b64a..151165d50e08 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.models.rst @@ -13,70 +13,3 @@ Subpackages :maxdepth: 4 azure.ai.agentserver.langgraph.models.response_event_generators - -Submodules ----------- - -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_helper module -------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_helper - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.human\_in\_the\_loop\_json\_helper module -------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.human_in_the_loop_json_helper - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_api\_converter module ---------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_converter - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_api\_default\_converter module ------------------------------------------------------------------------------- - -.. 
automodule:: azure.ai.agentserver.langgraph.models.response_api_default_converter - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_api\_non\_stream\_response\_converter module --------------------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_non_stream_response_converter - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_api\_request\_converter module ------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_request_converter - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.response\_api\_stream\_response\_converter module ---------------------------------------------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.response_api_stream_response_converter - :inherited-members: - :members: - :undoc-members: - -azure.ai.agentserver.langgraph.models.utils module --------------------------------------------------- - -.. automodule:: azure.ai.agentserver.langgraph.models.utils - :inherited-members: - :members: - :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst index deefeb67fa96..77cba05b5ed7 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.rst @@ -12,16 +12,6 @@ Subpackages .. toctree:: :maxdepth: 4 + azure.ai.agentserver.langgraph.checkpointer azure.ai.agentserver.langgraph.models azure.ai.agentserver.langgraph.tools - -Submodules ----------- - -azure.ai.agentserver.langgraph.langgraph module ------------------------------------------------ - -.. automodule:: azure.ai.agentserver.langgraph.langgraph - :inherited-members: - :members: - :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst index 17f7ef6d2ab7..ef161b660661 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/doc/azure.ai.agentserver.langgraph.tools.rst @@ -2,5 +2,6 @@ azure.ai.agentserver.langgraph.tools package ============================================ .. 
automodule:: azure.ai.agentserver.langgraph.tools + :inherited-members: BaseChatModel, Runnable, RunnableSerializable, BaseModel :members: :undoc-members: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml index 62ba88994bcb..1949acae11c0 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/pyproject.toml @@ -66,5 +66,3 @@ pyright = false verifytypes = false # incompatible python version for -core verify_keywords = false whl_no_aio = false -mypy = false -apistub = false \ No newline at end of file diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py index ec45dceccfc8..5a5b22993a19 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/samples/custom_state/main.py @@ -12,8 +12,7 @@ from azure.ai.agentserver.core.models import Response, ResponseStreamEvent from azure.ai.agentserver.langgraph import LanggraphRunContext, from_langgraph -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIRequestConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter, ResponseAPIRequestConverter load_dotenv() diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py index ffc1e2fcc4c1..50a4458856ec 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/mocks/mock_checkpoint_client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional -from azure.ai.agentserver.core.checkpoints.client import ( +from azure.ai.agentserver.core.checkpoints import ( CheckpointItem, CheckpointItemId, CheckpointSession, diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py index 6f8ff173e6c3..727b501b9b3d 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_conversation_id_optional.py @@ -5,7 +5,7 @@ import pytest -from azure.ai.agentserver.langgraph.models.response_api_default_converter import ResponseAPIDefaultConverter +from azure.ai.agentserver.langgraph.models import ResponseAPIDefaultConverter class DummyGraphState: diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py index bc7cfbee93e4..2b28ae158c4f 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_historical_items_fetch.py @@ -4,7 +4,7 @@ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage -from azure.ai.agentserver.langgraph.models.response_api_request_converter 
import convert_item_resource_to_message +from azure.ai.agentserver.langgraph.models import convert_item_resource_to_message @pytest.mark.unit @@ -113,7 +113,7 @@ def test_convert_message_with_empty_content_list(self): def _create_converter(): """Helper to create a ResponseAPIDefaultConverter with mocked graph.""" - from azure.ai.agentserver.langgraph.models.response_api_default_converter import ( + from azure.ai.agentserver.langgraph.models import ( ResponseAPIDefaultConverter, ) @@ -122,7 +122,7 @@ def _create_converter(): mock_graph.checkpointer = None with patch( - "azure.ai.agentserver.langgraph.models.utils.is_state_schema_valid", + "azure.ai.agentserver.langgraph.models._utils.is_state_schema_valid", return_value=True, ): return ResponseAPIDefaultConverter(graph=mock_graph) @@ -381,7 +381,7 @@ async def test_fetch_returns_empty_when_no_endpoint(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value=None, ): result = await converter._fetch_historical_items("conv_123") @@ -393,7 +393,7 @@ async def test_fetch_returns_empty_on_import_error(self): converter = _create_converter() with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch.dict("sys.modules", {"openai": None}): @@ -421,7 +421,7 @@ async def mock_list(*args, **kwargs): mock_client.conversations.items.list = mock_list with patch( - "azure.ai.agentserver.langgraph.models.response_api_default_converter.get_project_endpoint", + "azure.ai.agentserver.langgraph.models._response_api_default_converter.get_project_endpoint", return_value="https://test.endpoint.com", ): with patch( diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py index b1894f7350d5..a68e5be13f13 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/test_langgraph_request_converter.py @@ -2,8 +2,8 @@ from langchain_core import messages as langgraph_messages from azure.ai.agentserver.core import models -from azure.ai.agentserver.core.models import projects as project_models -from azure.ai.agentserver.langgraph.models.response_api_request_converter import ResponseAPIMessageRequestConverter +from azure.ai.agentserver.core.models import _projects as project_models +from azure.ai.agentserver.langgraph.models import ResponseAPIMessageRequestConverter @pytest.mark.unit diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py index 7efc298559c1..f4e962d9d4c5 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/conftest.py @@ -21,7 +21,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from 
azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext from azure.ai.agentserver.langgraph.tools._resolver import ResolvedTools diff --git a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py index eea917e54fd4..5f9115db66cf 100644 --- a/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py +++ b/sdk/agentserver/azure-ai-agentserver-langgraph/tests/unit_tests/tools/test_agent_integration.py @@ -23,7 +23,7 @@ SchemaProperty, SchemaType, ) -from azure.ai.agentserver.core.server.common.agent_run_context import AgentRunContext +from azure.ai.agentserver.core.server.common._agent_run_context import AgentRunContext from azure.ai.agentserver.langgraph._context import LanggraphRunContext from azure.ai.agentserver.langgraph.tools import use_foundry_tools from azure.ai.agentserver.langgraph.tools._context import FoundryToolContext
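# A minimal sketch of the import surface this diff converges on: the samples and
# tests above now import public names from the package roots, which re-export them
# from the renamed private modules (_projects, _response_api_default_converter,
# _utils, and so on), instead of reaching into deep module paths. Runnable wherever
# the azure-ai-agentserver packages are installed; every name below appears in the
# updated hunks.
from azure.ai.agentserver.core.checkpoints import (  # previously ...core.checkpoints.client
    CheckpointItem,
    CheckpointItemId,
    CheckpointSession,
)
from azure.ai.agentserver.langgraph.models import (  # previously one deep module per converter
    ResponseAPIDefaultConverter,
    ResponseAPIMessageRequestConverter,
    ResponseAPIRequestConverter,
    convert_item_resource_to_message,
)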
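# A generic, self-contained sketch of the late-binding pattern used by
# FoundryToolLateBindingChatModel in the _chat_model.py hunk: tools are not bound
# when the wrapper is constructed, but resolved from the current run context and
# bound to the delegate on every call. The class and callables here are
# illustrative stand-ins, not the SDK's API.
from typing import Callable, List


class LateBindingModel:
    def __init__(
        self,
        delegate: Callable[[str, List[str]], str],
        resolve_tools: Callable[[], List[str]],
    ):
        self._delegate = delegate            # the underlying model call
        self._resolve_tools = resolve_tools  # consulted per call, not at init

    def invoke(self, prompt: str) -> str:
        tools = self._resolve_tools()        # late binding happens here
        return self._delegate(prompt, tools)


# Usage: the tool set can change between calls without rebuilding the wrapper.
model = LateBindingModel(
    delegate=lambda prompt, tools: f"{prompt} (tools: {tools})",
    resolve_tools=lambda: ["search", "calculator"],
)
print(model.invoke("hello"))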
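# A hedged sketch of building a Pydantic args schema from a JSON-schema-like
# definition, in the spirit of _create_pydantic_model in the _resolver.py hunk
# (which walks input_schema.properties and input_schema.required). The plain
# dict/set inputs below stand in for the SDK's SchemaDefinition type, which is
# not reproduced here.
from typing import Any, Dict, Optional, Set

from pydantic import BaseModel, create_model


def make_args_model(
    tool_name: str, properties: Dict[str, type], required: Set[str]
) -> type[BaseModel]:
    fields: Dict[str, Any] = {}
    for name, annotation in properties.items():
        if name in required:
            fields[name] = (annotation, ...)            # required field
        else:
            fields[name] = (Optional[annotation], None)  # optional, defaults to None
    return create_model(f"{tool_name}_args", **fields)


Args = make_args_model("search", {"query": str, "top": int}, required={"query"})
print(Args(query="docs"))  # top defaults to None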