From 5e877858132f9a2360566c4aaa906af8c60cbe11 Mon Sep 17 00:00:00 2001 From: Justin Bowen Date: Fri, 13 Feb 2026 00:57:42 +0000 Subject: [PATCH 1/5] Add Azure OpenAI provider docs page and update provider listings PR #301 added first-class Azure OpenAI provider support but shipped without a dedicated documentation page. This adds: - docs/providers/azure.md with full configuration, usage, and parameter reference - Azure OpenAI entry in sidebar navigation - Azure listed as first-class provider in providers overview - OpenAI docs updated to link to dedicated Azure page instead of inlining outdated configuration cc @TheRealNeil Co-Authored-By: Claude Opus 4.6 --- docs/.vitepress/config.mts | 1 + docs/providers.md | 9 +- docs/providers/azure.md | 166 +++++++++++++++++++++++++++++++++++++ docs/providers/open_ai.md | 17 +--- 4 files changed, 177 insertions(+), 16 deletions(-) create mode 100644 docs/providers/azure.md diff --git a/docs/.vitepress/config.mts b/docs/.vitepress/config.mts index 02262704..e1123a15 100644 --- a/docs/.vitepress/config.mts +++ b/docs/.vitepress/config.mts @@ -129,6 +129,7 @@ export default defineConfig({ text: 'Providers', items: [ { text: 'Anthropic', link: '/providers/anthropic' }, + { text: 'Azure OpenAI', link: '/providers/azure' }, { text: 'Ollama', link: '/providers/ollama' }, { text: 'OpenAI', link: '/providers/open_ai' }, { text: 'OpenRouter', link: '/providers/open_router' }, diff --git a/docs/providers.md b/docs/providers.md index 9cfe45b7..d1e95a60 100644 --- a/docs/providers.md +++ b/docs/providers.md @@ -38,12 +38,19 @@ Run Llama 3, Mistral, Gemma, CodeLlama, and other open models locally. No API ke **Choose when:** Data cannot leave your infrastructure, you're developing offline, or you want to avoid API costs. Requires local setup. +### [Azure OpenAI](/providers/azure) +**Best for:** Enterprise deployments, data residency requirements, existing Azure infrastructure + +Azure-hosted OpenAI models with dedicated endpoints, Azure-specific authentication (api-key header), and deployment-based model selection. Uses Chat Completions API. + +**Choose when:** Your organization requires Azure compliance, data residency, or you already use Azure infrastructure. Same models as OpenAI but hosted on Azure. + ### [OpenAI](/providers/open_ai) **Best for:** Production applications, advanced reasoning, vision tasks GPT-4o, GPT-4.1, GPT-5, and o3 models. Two APIs available: Responses API (default) with built-in web search, image generation, and MCP integration, or Chat Completions API for standard interactions. 128K-200K context windows. -**Choose when:** You need reliable, high-quality responses with strong reasoning. Vision support and structured output work well. Azure OpenAI compatible. +**Choose when:** You need reliable, high-quality responses with strong reasoning. Vision support and structured output work well. ### [OpenRouter](/providers/open_router) **Best for:** Multi-model flexibility, cost optimization, experimentation diff --git a/docs/providers/azure.md b/docs/providers/azure.md new file mode 100644 index 00000000..77cac00a --- /dev/null +++ b/docs/providers/azure.md @@ -0,0 +1,166 @@ +--- +title: Azure OpenAI Provider +description: Integration with Azure OpenAI Service using dedicated provider with deployment-based endpoints, Azure-specific authentication, and OpenAI-compatible API features. +--- +# {{ $frontmatter.title }} + +The Azure OpenAI provider enables integration with Azure-hosted OpenAI models using a dedicated provider class. 
It handles Azure-specific authentication (api-key header), deployment-based endpoints, and API versioning while supporting the same features as the standard OpenAI Chat provider. + +## Configuration + +### Basic Setup + +Configure Azure OpenAI in your agent: + +```ruby +class MyAgent < ApplicationAgent + generate_with :azure_openai, + api_key: ENV["AZURE_OPENAI_API_KEY"], + azure_resource: "mycompany", + deployment_id: "gpt-4-deployment" +end +``` + +### Configuration File + +Set up Azure OpenAI in `config/active_agent.yml`: + +```yaml +azure_openai: + service: "AzureOpenAI" + api_key: <%= ENV["AZURE_OPENAI_API_KEY"] %> + azure_resource: "mycompany" + deployment_id: "gpt-4-deployment" + api_version: "2024-10-21" +``` + +### Environment Variables + +The provider checks these environment variables as fallbacks: + +| Variable | Purpose | +|----------|---------| +| `AZURE_OPENAI_API_KEY` | API key for authentication | +| `AZURE_OPENAI_ACCESS_TOKEN` | Alternative to API key | +| `AZURE_OPENAI_API_VERSION` | API version (default: `2024-10-21`) | + +## Key Differences from OpenAI + +| Feature | OpenAI | Azure OpenAI | +|---------|--------|--------------| +| **Authentication** | `Authorization: Bearer` header | `api-key` header | +| **Endpoint** | `api.openai.com` | `{resource}.openai.azure.com/openai/deployments/{deployment}/` | +| **Model selection** | Model name (e.g., `gpt-4o`) | Deployment name from Azure portal | +| **API version** | Not required | Required query parameter (e.g., `2024-10-21`) | +| **Provider name** | `:openai` | `:azure_openai` | + +## Provider-Specific Parameters + +### Required Parameters + +- **`api_key`** - Azure OpenAI API key (also accepts `access_token`) +- **`azure_resource`** - Your Azure resource name (e.g., `"mycompany"`) +- **`deployment_id`** - Your Azure deployment name (e.g., `"gpt-4-deployment"`) + +### Optional Parameters + +- **`api_version`** - Azure API version (default: `"2024-10-21"`) +- **`model`** - Model identifier for request payload +- **`max_retries`** - Maximum retry attempts +- **`timeout`** - Request timeout in seconds + +### Inherited from OpenAI + +Azure OpenAI inherits all Chat Completions API features from the OpenAI provider: + +- **Sampling parameters** - `temperature`, `max_tokens`, `top_p`, `frequency_penalty`, `presence_penalty` +- **Response configuration** - `response_format` for structured output +- **Tools** - Function calling with the common tools format +- **Embeddings** - Text embedding generation + +## Usage + +### Basic Generation + +```ruby +class SupportAgent < ApplicationAgent + generate_with :azure_openai, + api_key: ENV["AZURE_OPENAI_API_KEY"], + azure_resource: "mycompany", + deployment_id: "gpt-4-deployment", + model: "gpt-4" + + def answer + prompt(message: "How can I help you?") + end +end + +response = SupportAgent.answer.generate_now +response.message.content #=> "I'm here to help! ..." 
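+
+# For reference: with the settings above, requests go to an Azure endpoint of the
+# form described in "Key Differences from OpenAI" below, e.g.
+#   https://mycompany.openai.azure.com/openai/deployments/gpt-4-deployment/chat/completions?api-version=2024-10-21
+# rather than api.openai.com.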
+``` + +### With Tools + +```ruby +class ToolAgent < ApplicationAgent + generate_with :azure_openai, + api_key: ENV["AZURE_OPENAI_API_KEY"], + azure_resource: "mycompany", + deployment_id: "gpt-4-deployment" + + def search_and_answer + prompt( + message: "Find information about Ruby on Rails", + tools: [ + { + name: "search", + description: "Search for information", + parameters: { + type: "object", + properties: { + query: { type: "string", description: "Search query" } + }, + required: ["query"] + } + } + ] + ) + end + + def search(query:) + SearchService.find(query) + end +end +``` + +## Provider Name Variants + +Azure OpenAI can be referenced using several naming conventions: + +```ruby +# All equivalent +generate_with :azure_openai +generate_with :azure_open_ai +generate_with :azure + +# In config/active_agent.yml +azure_openai: + service: "AzureOpenAI" +``` + +::: tip +Azure OpenAI may lag behind OpenAI's latest models and features. Check [Azure's model availability](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models) before planning deployments. +::: + +::: warning Responses API +Azure OpenAI currently uses the Chat Completions API. The OpenAI Responses API features (web search, image generation, MCP) are not available through Azure. +::: + +## Related Documentation + +- [Providers Overview](/providers) - Compare all available providers +- [OpenAI Provider](/providers/open_ai) - Standard OpenAI provider documentation +- [Tools](/actions/tools) - Function calling with common format +- [Structured Output](/actions/structured_output) - JSON schema validation +- [Configuration](/framework/configuration) - Environment-specific settings +- [Azure OpenAI Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/) - Official Azure docs diff --git a/docs/providers/open_ai.md b/docs/providers/open_ai.md index 2033bff8..142d8cbe 100644 --- a/docs/providers/open_ai.md +++ b/docs/providers/open_ai.md @@ -287,22 +287,9 @@ For similarity search, batch processing, and advanced embedding patterns, see th ## Azure OpenAI -ActiveAgent supports Azure OpenAI Service with custom endpoint configuration. +ActiveAgent provides a dedicated Azure OpenAI provider with first-class support for Azure-specific authentication, deployment-based endpoints, and API versioning. -### Configuration - -<<< @/../test/docs/providers/open_ai_examples_test.rb#azure_configuration{ruby:line-numbers} - -### Key Differences - -- **Deployment Names**: Use your Azure deployment name instead of OpenAI model names -- **API Versions**: Azure uses date-based API versions (e.g., "2024-02-01") -- **Authentication**: Use Azure-specific API keys from your Azure portal -- **Endpoints**: Custom host URL based on your Azure resource name - -::: tip -Azure OpenAI may lag behind OpenAI's latest models and features. Check Azure's model availability before planning deployments. -::: +See the **[Azure OpenAI Provider](/providers/azure)** documentation for full configuration and usage details. ## Error Handling From f5d4ad7cbad256bf1607663b21a83b83e286e1ab Mon Sep 17 00:00:00 2001 From: Justin Bowen Date: Fri, 13 Feb 2026 00:58:04 +0000 Subject: [PATCH 2/5] Document developer role handling in messages PR #290 added support for dropping OpenAI's "developer" role messages in common format (same as "system" messages), but the messages documentation did not mention this role. 
This updates: - Message roles list to include developer role - Note about system messages to also mention developer role cc @TheRealNeil Co-Authored-By: Claude Opus 4.6 --- docs/actions/messages.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/actions/messages.md b/docs/actions/messages.md index b1a6f5e8..a4d67fe9 100644 --- a/docs/actions/messages.md +++ b/docs/actions/messages.md @@ -13,6 +13,7 @@ Understanding roles helps you structure conversations correctly: - **User** - Input from the user to the agent (text, images, documents) - **Assistant** - Responses from the agent, including tool call requests - **System** - Instructions that guide agent behavior (set via `instructions` option) +- **Developer** - OpenAI's alternative to system role; treated identically to system messages in common format (dropped and replaced by instructions) - **Tool** - Results from tool executions (handled automatically) Most of the time you'll send user messages and inspect assistant/tool responses. @@ -44,7 +45,7 @@ Set explicit roles using hashes. The default role is `:user`: <<< @/../test/docs/actions/messages_examples_test.rb#messages_with_roles_agent {ruby:line-numbers} -**Note:** Use the `instructions` option for system messages. System role messages are dropped in common format and replaced by instructions. [Learn about instructions →](/agents/instructions) +**Note:** Use the `instructions` option for system messages. Both `system` and `developer` role messages are dropped in common format and replaced by instructions. [Learn about instructions →](/agents/instructions) ## Images and Documents From b3498263e1f1bda21f8e472e82bf6ed19064659c Mon Sep 17 00:00:00 2001 From: Justin Bowen Date: Fri, 13 Feb 2026 00:58:22 +0000 Subject: [PATCH 3/5] Add tests for remaining Anthropic streaming convenience events PR #299 added handling for higher-level gem convenience events (:text, :input_json, :citation, :thinking, :signature) but only tested :thinking and :signature. This adds coverage for the remaining three event types (:text, :input_json, :citation) to ensure they are all handled as no-ops without raising. cc @TheRealNeil Co-Authored-By: Claude Opus 4.6 --- .../streaming_convenience_events_test.rb | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 test/providers/anthropic/streaming_convenience_events_test.rb diff --git a/test/providers/anthropic/streaming_convenience_events_test.rb b/test/providers/anthropic/streaming_convenience_events_test.rb new file mode 100644 index 00000000..93700bd6 --- /dev/null +++ b/test/providers/anthropic/streaming_convenience_events_test.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require "test_helper" +require_relative "../../../lib/active_agent/providers/anthropic_provider" + +module Providers + module Anthropic + # Tests for higher-level convenience events emitted by the anthropic gem's + # MessageStream (PR #299). These events are no-ops since the underlying data + # is already handled via :content_block_delta, but they must not raise. 
+ class StreamingConvenienceEventsTest < ActiveSupport::TestCase + setup do + @provider = ActiveAgent::Providers::AnthropicProvider.new( + service: "Anthropic", + model: "claude-sonnet-4-5", + messages: [ { role: "user", content: "Hello" } ], + stream_broadcaster: ->(message, delta, event_type) { } + ) + + @provider.send(:message_stack).push({ + role: "assistant", + content: [ { type: "text", text: "" } ] + }) + end + + MockEvent = Struct.new(:type, keyword_init: true) do + def [](key) + send(key) if respond_to?(key) + end + end + + test "handles :text event without raising" do + event = MockEvent.new(type: :text) + + assert_nothing_raised do + @provider.send(:process_stream_chunk, event) + end + end + + test "handles :input_json event without raising" do + event = MockEvent.new(type: :input_json) + + assert_nothing_raised do + @provider.send(:process_stream_chunk, event) + end + end + + test "handles :citation event without raising" do + event = MockEvent.new(type: :citation) + + assert_nothing_raised do + @provider.send(:process_stream_chunk, event) + end + end + end + end +end From b34f257aefbe3aa070310450e3a9fcdf8b8af1c8 Mon Sep 17 00:00:00 2001 From: Justin Bowen Date: Fri, 13 Feb 2026 00:58:45 +0000 Subject: [PATCH 4/5] Add unit tests for ToolChoiceClearing concern PR #278 (Universal Tools & MCP Support) introduced the ToolChoiceClearing concern (62 lines) for preventing infinite tool-calling loops, but shipped without unit tests. This adds tests covering: - No-op when tool_choice is nil - Clearing when required mode and tools were used - Clearing when specific tool forced and that tool was used - Preserving when different/no tools were used - Preserving when tool_choice is auto - NotImplementedError for unimplemented abstract methods cc @TheRealNeil Co-Authored-By: Claude Opus 4.6 --- .../concerns/tool_choice_clearing_test.rb | 143 ++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 test/providers/concerns/tool_choice_clearing_test.rb diff --git a/test/providers/concerns/tool_choice_clearing_test.rb b/test/providers/concerns/tool_choice_clearing_test.rb new file mode 100644 index 00000000..4058bbff --- /dev/null +++ b/test/providers/concerns/tool_choice_clearing_test.rb @@ -0,0 +1,143 @@ +# frozen_string_literal: true + +require "test_helper" +require "active_agent/providers/concerns/tool_choice_clearing" + +module ActiveAgent + module Providers + class ToolChoiceClearingTest < ActiveSupport::TestCase + # Mock request object with tool_choice attribute + MockRequest = Struct.new(:tool_choice, keyword_init: true) + + # Mock provider that includes the concern + class MockProvider + include ActiveAgent::Providers::ToolChoiceClearing + + attr_accessor :request, :used_function_names, :forces_required, :forces_specific + + def initialize(tool_choice:, used_function_names: [], forces_required: false, forces_specific: [false, nil]) + @request = MockRequest.new(tool_choice: tool_choice) + @used_function_names = used_function_names + @forces_required = forces_required + @forces_specific = forces_specific + end + + private + + def extract_used_function_names + @used_function_names + end + + def tool_choice_forces_required? + @forces_required + end + + def tool_choice_forces_specific? 
+ @forces_specific + end + end + + test "does nothing when tool_choice is nil" do + provider = MockProvider.new(tool_choice: nil) + + provider.prepare_prompt_request_tools + + assert_nil provider.request.tool_choice + end + + test "does nothing when no tools were used and required is set" do + provider = MockProvider.new( + tool_choice: "required", + forces_required: true, + used_function_names: [] + ) + + provider.prepare_prompt_request_tools + + assert_equal "required", provider.request.tool_choice + end + + test "clears tool_choice when required and tools were used" do + provider = MockProvider.new( + tool_choice: "required", + forces_required: true, + used_function_names: ["get_weather"] + ) + + provider.prepare_prompt_request_tools + + assert_nil provider.request.tool_choice + end + + test "clears tool_choice when specific tool was forced and that tool was used" do + provider = MockProvider.new( + tool_choice: { type: "function", function: { name: "search" } }, + forces_specific: [true, "search"], + used_function_names: ["search"] + ) + + provider.prepare_prompt_request_tools + + assert_nil provider.request.tool_choice + end + + test "does not clear tool_choice when specific tool was forced but different tool was used" do + original_choice = { type: "function", function: { name: "search" } } + provider = MockProvider.new( + tool_choice: original_choice, + forces_specific: [true, "search"], + used_function_names: ["calculate"] + ) + + provider.prepare_prompt_request_tools + + assert_equal original_choice, provider.request.tool_choice + end + + test "does not clear tool_choice when specific tool was forced but no tools were used" do + original_choice = { type: "function", function: { name: "search" } } + provider = MockProvider.new( + tool_choice: original_choice, + forces_specific: [true, "search"], + used_function_names: [] + ) + + provider.prepare_prompt_request_tools + + assert_equal original_choice, provider.request.tool_choice + end + + test "does not clear when tool_choice is auto and tools were used" do + provider = MockProvider.new( + tool_choice: "auto", + forces_required: false, + forces_specific: [false, nil], + used_function_names: ["get_weather"] + ) + + provider.prepare_prompt_request_tools + + assert_equal "auto", provider.request.tool_choice + end + + # Verify the abstract methods raise NotImplementedError + class BareProvider + include ActiveAgent::Providers::ToolChoiceClearing + + attr_accessor :request + + def initialize + @request = MockRequest.new(tool_choice: "required") + end + end + + test "extract_used_function_names raises NotImplementedError" do + provider = BareProvider.new + + assert_raises(NotImplementedError) do + provider.prepare_prompt_request_tools + end + end + end + end +end From 1b933f8294b5365b4dfe28d843c419a8ae4c45c3 Mon Sep 17 00:00:00 2001 From: Justin Bowen Date: Fri, 13 Feb 2026 00:59:29 +0000 Subject: [PATCH 5/5] Add Usage addition and multi-turn accumulation tests PR #274 (Improved Usage Tracking) introduced Usage#+ for combining statistics across multi-turn tool-calling conversations, but shipped without tests for this feature. 
This adds: - Basic addition combining two usage objects - Addition with nil optional fields - Addition with nil operand returns self - Chained addition simulating multi-turn accumulation cc @TheRealNeil Co-Authored-By: Claude Opus 4.6 --- test/providers/common/usage_test.rb | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/test/providers/common/usage_test.rb b/test/providers/common/usage_test.rb index c8b187fb..1bf2cdfe 100644 --- a/test/providers/common/usage_test.rb +++ b/test/providers/common/usage_test.rb @@ -242,6 +242,54 @@ class UsageTest < ActiveSupport::TestCase assert_equal 1000, usage.provider_details[:cache_creation][:ephemeral_5m_input_tokens] assert_equal 2, usage.provider_details[:server_tool_use][:web_fetch_requests] end + + # --- Usage addition (multi-turn accumulation) --- + + test "addition combines two usage objects" do + usage1 = Usage.new(input_tokens: 100, output_tokens: 50, cached_tokens: 20) + usage2 = Usage.new(input_tokens: 75, output_tokens: 25, cached_tokens: 10) + + combined = usage1 + usage2 + + assert_equal 175, combined.input_tokens + assert_equal 75, combined.output_tokens + assert_equal 250, combined.total_tokens + assert_equal 30, combined.cached_tokens + end + + test "addition handles nil optional fields" do + usage1 = Usage.new(input_tokens: 100, output_tokens: 50) + usage2 = Usage.new(input_tokens: 75, output_tokens: 25, reasoning_tokens: 10) + + combined = usage1 + usage2 + + assert_equal 175, combined.input_tokens + assert_equal 75, combined.output_tokens + assert_equal 10, combined.reasoning_tokens + assert_nil combined.cached_tokens + end + + test "addition with nil returns self" do + usage = Usage.new(input_tokens: 100, output_tokens: 50) + + combined = usage + nil + + assert_equal 100, combined.input_tokens + assert_equal 50, combined.output_tokens + end + + test "chained addition accumulates across multiple turns" do + turn1 = Usage.new(input_tokens: 50, output_tokens: 20) + turn2 = Usage.new(input_tokens: 80, output_tokens: 30, cached_tokens: 10) + turn3 = Usage.new(input_tokens: 60, output_tokens: 25, cached_tokens: 5) + + cumulative = turn1 + turn2 + turn3 + + assert_equal 190, cumulative.input_tokens + assert_equal 75, cumulative.output_tokens + assert_equal 265, cumulative.total_tokens + assert_equal 15, cumulative.cached_tokens + end end end end