Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
92 changes: 92 additions & 0 deletions api/anthropic_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
"""Anthropic Claude ModelClient integration."""

import os
import logging
import re
from typing import Dict, Optional, Any

from adalflow.core.model_client import ModelClient
from adalflow.core.types import ModelType, GeneratorOutput, CompletionUsage

log = logging.getLogger(__name__)


class AnthropicClient(ModelClient):
    """AdalFlow ModelClient wrapper for the Anthropic Messages API.

    Only ``ModelType.LLM`` is supported. AdalFlow renders prompts as a single
    string with ``<START_OF_SYSTEM_PROMPT>``/``<START_OF_USER_PROMPT>`` tags;
    Anthropic's Messages API instead takes a separate top-level ``system``
    field plus a ``messages`` list, so this client splits the tagged prompt
    before calling the API.
    """

    # Tag markers injected by AdalFlow's default prompt template.
    _SYSTEM_START = "<START_OF_SYSTEM_PROMPT>"
    _SYSTEM_END = "<END_OF_SYSTEM_PROMPT>"
    _USER_START = "<START_OF_USER_PROMPT>"
    _USER_END = "<END_OF_USER_PROMPT>"

    def __init__(self, api_key: Optional[str] = None):
        """Create a client.

        Args:
            api_key: Anthropic API key. Falls back to the ``ANTHROPIC_API_KEY``
                environment variable when not given.

        Raises:
            ValueError: if no API key is available from either source.
        """
        super().__init__()
        self._api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
        if not self._api_key:
            raise ValueError("ANTHROPIC_API_KEY environment variable must be set")
        self.sync_client = self._init_client()
        # Async client is created lazily on the first acall().
        self._async_client = None

    def _init_client(self):
        # Imported lazily so the anthropic package is only required when
        # this client is actually instantiated.
        import anthropic
        return anthropic.Anthropic(api_key=self._api_key)

    @classmethod
    def _split_prompt(cls, text: str):
        """Extract ``(system_prompt, user_content)`` from a tagged prompt.

        The system and user sections are located independently rather than
        with one strict combined regex, so leading text before the system
        tag or a missing user section no longer causes the raw tags to be
        sent verbatim as the user message.

        Returns:
            Tuple of (system prompt or ``None``, user content string).
        """
        system_prompt = None
        user_content = text

        sys_match = re.search(
            re.escape(cls._SYSTEM_START) + r"\s*(.*?)\s*" + re.escape(cls._SYSTEM_END),
            text,
            re.DOTALL,
        )
        if sys_match:
            system_prompt = sys_match.group(1).strip()

        user_match = re.search(
            re.escape(cls._USER_START) + r"\s*(.*?)\s*" + re.escape(cls._USER_END),
            text,
            re.DOTALL,
        )
        if user_match:
            user_content = user_match.group(1).strip()
        elif sys_match:
            # No user tags: treat everything outside the system section as
            # the user content so the system tags never leak into a message.
            user_content = (text[:sys_match.start()] + text[sys_match.end():]).strip()

        return system_prompt, user_content

    def convert_inputs_to_api_kwargs(
        self,
        input: Optional[Any] = None,
        model_kwargs: Optional[Dict] = None,
        model_type: ModelType = ModelType.UNDEFINED,
    ) -> Dict:
        """Build Anthropic Messages API kwargs from AdalFlow inputs.

        Args:
            input: the rendered prompt (possibly containing AdalFlow tags).
            model_kwargs: extra API parameters (model, temperature, ...).
                Copied, never mutated; ``None`` means no extras.
            model_type: must be ``ModelType.LLM``.

        Returns:
            Dict ready to splat into ``messages.create(**kwargs)``.

        Raises:
            ValueError: for any model type other than ``ModelType.LLM``.
        """
        if model_type != ModelType.LLM:
            raise ValueError(f"AnthropicClient only supports LLM model type, got {model_type}")

        # Copy so the caller's dict is never mutated (also avoids the
        # mutable-default-argument trap).
        kwargs = dict(model_kwargs or {})
        # max_tokens is mandatory for the Messages API; keep the original default.
        kwargs.setdefault("max_tokens", 8096)

        system_prompt = None
        user_content = input
        if isinstance(input, str) and self._SYSTEM_START in input:
            system_prompt, user_content = self._split_prompt(input)

        kwargs["messages"] = [{"role": "user", "content": user_content}]
        if system_prompt:
            kwargs["system"] = system_prompt

        return kwargs

    def parse_chat_completion(self, completion) -> GeneratorOutput:
        """Convert an Anthropic completion into an AdalFlow GeneratorOutput.

        On any parsing failure the error is logged and returned inside the
        GeneratorOutput rather than raised, matching the ModelClient contract.
        """
        try:
            # Messages API returns a list of content blocks; the first one
            # carries the generated text for plain-text responses.
            content = completion.content[0].text
            usage = CompletionUsage(
                prompt_tokens=completion.usage.input_tokens,
                completion_tokens=completion.usage.output_tokens,
                total_tokens=completion.usage.input_tokens + completion.usage.output_tokens,
            )
            return GeneratorOutput(data=None, error=None, raw_response=content, usage=usage)
        except Exception as e:
            log.error(f"Error parsing Anthropic completion: {e}")
            return GeneratorOutput(data=None, error=str(e), raw_response=str(completion))

    def call(self, api_kwargs: Optional[Dict] = None, model_type: ModelType = ModelType.UNDEFINED):
        """Synchronous Messages API call.

        Raises:
            ValueError: for any model type other than ``ModelType.LLM``.
        """
        if model_type != ModelType.LLM:
            raise ValueError(f"AnthropicClient only supports LLM, got {model_type}")

        # Copy before popping so the caller's dict is left untouched.
        kwargs = dict(api_kwargs or {})
        kwargs.pop("stream", None)  # Anthropic streaming not used in sync path
        return self.sync_client.messages.create(**kwargs)

    async def acall(self, api_kwargs: Optional[Dict] = None, model_type: ModelType = ModelType.UNDEFINED):
        """Asynchronous Messages API call; lazily creates the async client.

        Raises:
            ValueError: for any model type other than ``ModelType.LLM``.
        """
        if model_type != ModelType.LLM:
            raise ValueError(f"AnthropicClient only supports LLM, got {model_type}")
        if self._async_client is None:
            import anthropic
            self._async_client = anthropic.AsyncAnthropic(api_key=self._api_key)
        return await self._async_client.messages.create(**(api_kwargs or {}))
2 changes: 1 addition & 1 deletion api/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -398,7 +398,7 @@ def generate_json_export(repo_url: str, pages: List[WikiPage]) -> str:
app.add_api_route("/chat/completions/stream", chat_completions_stream, methods=["POST"])

# Add the WebSocket endpoint
app.add_websocket_route("/ws/chat", handle_websocket_chat)
app.add_api_websocket_route("/ws/chat", handle_websocket_chat)

# --- Wiki Cache Helper Functions ---

Expand Down
12 changes: 9 additions & 3 deletions api/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from api.google_embedder_client import GoogleEmbedderClient
from api.azureai_client import AzureAIClient
from api.dashscope_client import DashscopeClient
from api.anthropic_client import AnthropicClient
from adalflow import GoogleGenAIClient, OllamaClient

# Get API keys from environment variables
Expand All @@ -24,6 +25,7 @@
AWS_SESSION_TOKEN = os.environ.get('AWS_SESSION_TOKEN')
AWS_REGION = os.environ.get('AWS_REGION')
AWS_ROLE_ARN = os.environ.get('AWS_ROLE_ARN')
ANTHROPIC_API_KEY = os.environ.get('ANTHROPIC_API_KEY')

# Set keys in environment (in case they're needed elsewhere in the code)
if OPENAI_API_KEY:
Expand All @@ -42,6 +44,8 @@
os.environ["AWS_REGION"] = AWS_REGION
if AWS_ROLE_ARN:
os.environ["AWS_ROLE_ARN"] = AWS_ROLE_ARN
if ANTHROPIC_API_KEY:
os.environ["ANTHROPIC_API_KEY"] = ANTHROPIC_API_KEY

# Wiki authentication settings
raw_auth_mode = os.environ.get('DEEPWIKI_AUTH_MODE', 'False')
Expand All @@ -63,7 +67,8 @@
"OllamaClient": OllamaClient,
"BedrockClient": BedrockClient,
"AzureAIClient": AzureAIClient,
"DashscopeClient": DashscopeClient
"DashscopeClient": DashscopeClient,
"AnthropicClient": AnthropicClient,
}

def replace_env_placeholders(config: Union[Dict[str, Any], List[Any], str, Any]) -> Union[Dict[str, Any], List[Any], str, Any]:
Expand Down Expand Up @@ -131,15 +136,16 @@ def load_generator_config():
if provider_config.get("client_class") in CLIENT_CLASSES:
provider_config["model_client"] = CLIENT_CLASSES[provider_config["client_class"]]
# Fall back to default mapping based on provider_id
elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure", "dashscope"]:
elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure", "dashscope", "anthropic"]:
default_map = {
"google": GoogleGenAIClient,
"openai": OpenAIClient,
"openrouter": OpenRouterClient,
"ollama": OllamaClient,
"bedrock": BedrockClient,
"azure": AzureAIClient,
"dashscope": DashscopeClient
"dashscope": DashscopeClient,
"anthropic": AnthropicClient,
}
provider_config["model_client"] = default_map[provider_id]
else:
Expand Down
25 changes: 25 additions & 0 deletions api/config/generator.json
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,13 @@
"top_p": 0.8,
"num_ctx": 32000
}
},
"qwen3.5:0.8b": {
"options": {
"temperature": 0.7,
"top_p": 0.8,
"num_ctx": 32000
}
}
}
},
Expand Down Expand Up @@ -171,6 +178,24 @@
}
}
},
"anthropic": {
"default_model": "claude-sonnet-4-6",
"supportsCustomModel": true,
"models": {
"claude-opus-4-6": {
"temperature": 1.0,
"max_tokens": 16000
},
"claude-sonnet-4-6": {
"temperature": 1.0,
"max_tokens": 16000
},
"claude-haiku-4-5-20251001": {
"temperature": 1.0,
"max_tokens": 8096
}
}
},
"azure": {
"client_class": "AzureAIClient",
"default_model": "gpt-4o",
Expand Down
Loading