Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion custom_components/openclaw/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@
# URL at which the card JS is served (registered via register_static_path)
_CARD_STATIC_URL = f"/openclaw/{_CARD_FILENAME}"
# Versioned URL used for Lovelace resource registration to avoid stale browser cache
_CARD_URL = f"{_CARD_STATIC_URL}?v=0.1.62"
_CARD_URL = f"{_CARD_STATIC_URL}?v=0.1.63"

OpenClawConfigEntry = ConfigEntry

Expand Down Expand Up @@ -838,6 +838,14 @@ def websocket_get_settings(
DEFAULT_THINKING_TIMEOUT,
),
"language": hass.config.language,
"conversation_entity_id": entry_data.get("conversation_entity_id")
if entry_data
else None,
"legacy_conversation_agent_id": entry_data.get(
"legacy_conversation_agent_id"
)
if entry_data
else None,
},
)

Expand Down
235 changes: 195 additions & 40 deletions custom_components/openclaw/conversation.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@
from datetime import datetime, timezone
import logging
import re
from typing import Any
from typing import Any, AsyncIterator

from homeassistant.components import conversation
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import chat_session, intent
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers import intent

from .api import OpenClawApiClient, OpenClawApiError
from .const import (
Expand Down Expand Up @@ -57,30 +57,53 @@ async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the OpenClaw conversation agent."""
agent = OpenClawConversationAgent(hass, entry)
conversation.async_set_agent(hass, entry, agent)
async_add_entities([OpenClawConversationAgent(hass, entry)])


async def async_unload_entry(
hass: HomeAssistant,
entry: ConfigEntry,
) -> bool:
"""Unload the conversation agent."""
conversation.async_unset_agent(hass, entry)
return True


class OpenClawConversationAgent(conversation.AbstractConversationAgent):
class OpenClawConversationAgent(
conversation.ConversationEntity,
conversation.AbstractConversationAgent,
):
"""Conversation agent that routes messages through OpenClaw.

Enables OpenClaw to appear as a selectable agent in the Assist pipeline,
allowing use with Voice PE, satellites, and the built-in HA Assist dialog.
"""

_attr_supports_streaming = True

def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize the conversation agent.

Args:
hass: The Home Assistant instance this entity runs under.
entry: The config entry that owns this agent; its id doubles as
the entity's unique id and its title as the display name.
"""
self.hass = hass
self.entry = entry
# One agent entity per config entry, so the entry id is a stable unique id.
self._attr_unique_id = entry.entry_id
# Fall back to a fixed name when the entry has no user-supplied title.
self._attr_name = entry.title or "OpenClaw"

async def async_added_to_hass(self) -> None:
"""Expose the entity id for frontend pipeline selection.

Caches this entity's id (and the legacy entry-id-based agent id) in
``hass.data[DOMAIN][entry_id]`` so other parts of the integration —
e.g. the settings websocket handler — can report which conversation
entity backs this config entry.
"""
await super().async_added_to_hass()
# entry_data may be absent if the integration's setup has not populated
# hass.data yet; in that case there is nothing to cache.
entry_data = self.hass.data.get(DOMAIN, {}).get(self.entry.entry_id)
if entry_data is not None:
entry_data["conversation_entity_id"] = self.entity_id
entry_data["legacy_conversation_agent_id"] = self.entry.entry_id

async def async_will_remove_from_hass(self) -> None:
"""Remove the cached entity id when the agent unloads.

Mirror of ``async_added_to_hass``: clears the ids cached in
``hass.data`` so stale references are not served after unload.
"""
entry_data = self.hass.data.get(DOMAIN, {}).get(self.entry.entry_id)
if entry_data is not None:
# Only pop entries this instance wrote, in case another entity has
# already replaced them (e.g. during a reload race).
if entry_data.get("conversation_entity_id") == self.entity_id:
entry_data.pop("conversation_entity_id", None)
if entry_data.get("legacy_conversation_agent_id") == self.entry.entry_id:
entry_data.pop("legacy_conversation_agent_id", None)
await super().async_will_remove_from_hass()

@property
def attribution(self) -> dict[str, str]:
Expand Down Expand Up @@ -132,7 +155,6 @@ async def async_process(
)
)
resolved_agent_id = voice_agent_id or configured_agent_id
conversation_id = self._resolve_conversation_id(user_input, resolved_agent_id)
active_model = self._normalize_optional_text(options.get("active_model"))
include_context = options.get(
CONF_INCLUDE_EXPOSED_CONTEXT,
Expand All @@ -154,15 +176,20 @@ async def async_process(
system_prompt = "\n\n".join(
part for part in (exposed_context, extra_system_prompt) if part
) or None
backend_conversation_id = self._resolve_conversation_id(
user_input,
resolved_agent_id,
)

try:
full_response = await self._get_response(
client,
message,
conversation_id,
resolved_agent_id,
system_prompt,
active_model,
full_response, result = await self._async_process_with_chat_log(
user_input=user_input,
client=client,
backend_conversation_id=backend_conversation_id,
message=message,
agent_id=resolved_agent_id,
system_prompt=system_prompt,
model=active_model,
)
except OpenClawApiError as err:
_LOGGER.error("OpenClaw conversation error: %s", err)
Expand All @@ -173,13 +200,14 @@ async def async_process(
refreshed = await refresh_fn()
if refreshed:
try:
full_response = await self._get_response(
client,
message,
conversation_id,
resolved_agent_id,
system_prompt,
active_model,
full_response, result = await self._async_process_with_chat_log(
user_input=user_input,
client=client,
backend_conversation_id=backend_conversation_id,
message=message,
agent_id=resolved_agent_id,
system_prompt=system_prompt,
model=active_model,
)
except OpenClawApiError as retry_err:
return self._error_result(
Expand All @@ -202,21 +230,15 @@ async def async_process(
EVENT_MESSAGE_RECEIVED,
{
ATTR_MESSAGE: full_response,
ATTR_SESSION_ID: conversation_id,
ATTR_MODEL: coordinator.data.get(DATA_MODEL) if coordinator.data else None,
ATTR_SESSION_ID: backend_conversation_id,
ATTR_MODEL: (
coordinator.data.get(DATA_MODEL) if coordinator.data else None
),
ATTR_TIMESTAMP: datetime.now(timezone.utc).isoformat(),
},
)
coordinator.update_last_activity()

intent_response = intent.IntentResponse(language=user_input.language)
intent_response.async_set_speech(full_response)

return conversation.ConversationResult(
response=intent_response,
conversation_id=conversation_id,
continue_conversation=self._should_continue(full_response),
)
return result

def _resolve_conversation_id(
self,
Expand Down Expand Up @@ -269,30 +291,163 @@ async def _get_response(
system_prompt: str | None = None,
model: str | None = None,
) -> str:
"""Get a response from OpenClaw, trying streaming first."""
full_response = ""
async for chunk in client.async_stream_message(
"""Get a non-streaming response from OpenClaw."""
response = await client.async_send_message(
message=message,
session_id=conversation_id,
model=model,
system_prompt=system_prompt,
agent_id=agent_id,
extra_headers=_VOICE_REQUEST_HEADERS,
)
return extract_text_recursive(response) or ""

async def _async_process_with_chat_log(
self,
user_input: conversation.ConversationInput,
client: OpenClawApiClient,
backend_conversation_id: str,
message: str,
agent_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
) -> tuple[str, conversation.ConversationResult]:
"""Write the assistant response into the Home Assistant chat log.

Opens (or resumes) an HA chat session, populates its chat log with
the OpenClaw response, and builds the conversation result from the
log. Returns ``(full_response_text, result)``.

Note: the HA-side chat-log id prefers the id supplied by the caller's
``user_input``; the OpenClaw backend session keeps using
``backend_conversation_id``, so the two ids may differ.
"""
chat_log_conversation_id = user_input.conversation_id or backend_conversation_id

# Both context managers must stay open while the log is populated and
# the result is derived from it.
with (
chat_session.async_get_chat_session(
self.hass,
chat_log_conversation_id,
) as session,
conversation.async_get_chat_log(
self.hass,
session,
user_input,
) as chat_log,
):
full_response = await self._async_populate_chat_log(
chat_log=chat_log,
client=client,
message=message,
conversation_id=backend_conversation_id,
agent_id=agent_id,
system_prompt=system_prompt,
model=model,
)
result = conversation.async_get_result_from_chat_log(user_input, chat_log)
# Keep HA's own continue decision, but also continue when the
# response text itself asks a follow-up (see _should_continue).
result.continue_conversation = (
self._should_continue(full_response) or result.continue_conversation
)
return full_response, result

async def _async_populate_chat_log(
self,
chat_log: conversation.ChatLog,
client: OpenClawApiClient,
message: str,
conversation_id: str,
agent_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
) -> str:
"""Populate the HA chat log from streaming or fallback responses.

Tries the streaming path first; if streaming raises
``OpenClawApiError`` or yields an empty response, falls back to a
single non-streaming request and appends that as one final assistant
message. Returns the full response text either way.
"""
try:
full_response = await self._async_stream_response_to_chat_log(
chat_log=chat_log,
client=client,
message=message,
conversation_id=conversation_id,
agent_id=agent_id,
system_prompt=system_prompt,
model=model,
)
# An empty stream result is treated like a failure and falls
# through to the non-streaming request below.
if full_response:
return full_response
except OpenClawApiError as err:
# Deliberate best-effort: streaming failure is downgraded to a
# warning because the non-streaming path can still answer.
_LOGGER.warning(
"OpenClaw streaming failed, falling back to non-streaming response: %s",
err,
)

full_response = await self._get_response(
client=client,
message=message,
conversation_id=conversation_id,
agent_id=agent_id,
system_prompt=system_prompt,
model=model,
)
self._add_final_response_to_chat_log(chat_log, full_response)
return full_response

async def _async_stream_response_to_chat_log(
self,
chat_log: conversation.ChatLog,
client: OpenClawApiClient,
message: str,
conversation_id: str,
agent_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
) -> str:
"""Stream OpenClaw deltas into the HA chat log."""
full_response_parts: list[str] = []
async for content in chat_log.async_add_delta_content_stream(
self._chat_log_agent_id,
self._async_openclaw_delta_stream(
client=client,
message=message,
conversation_id=conversation_id,
agent_id=agent_id,
system_prompt=system_prompt,
model=model,
),
):
full_response += chunk
if isinstance(content, conversation.AssistantContent) and content.content:
full_response_parts.append(content.content)
return "".join(full_response_parts)

if full_response:
return full_response
async def _async_openclaw_delta_stream(
self,
client: OpenClawApiClient,
message: str,
conversation_id: str,
agent_id: str | None = None,
system_prompt: str | None = None,
model: str | None = None,
) -> AsyncIterator[dict[str, Any]]:
"""Map OpenClaw SSE chunks to the delta format expected by HA."""
yield {"role": "assistant"}

response = await client.async_send_message(
async for chunk in client.async_stream_message(
message=message,
session_id=conversation_id,
model=model,
system_prompt=system_prompt,
agent_id=agent_id,
extra_headers=_VOICE_REQUEST_HEADERS,
):
if chunk:
yield {"content": chunk}

@property
def _chat_log_agent_id(self) -> str:
"""Return the assistant identifier used for HA chat log messages.

Prefers the entity id; falls back to the config entry id before the
entity has been registered (entity_id falsy).
"""
return self.entity_id or self.entry.entry_id

def _add_final_response_to_chat_log(
self,
chat_log: conversation.ChatLog,
full_response: str,
) -> None:
"""Append a non-streaming final assistant message to the chat log.

Used on the fallback path when streaming produced nothing; adds the
complete response as a single assistant entry.
"""
chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id=self._chat_log_agent_id,
# Empty responses are stored as None rather than "".
content=full_response or None,
)
)
return extract_text_recursive(response) or ""

@staticmethod
def _should_continue(response: str) -> bool:
Expand Down
2 changes: 1 addition & 1 deletion custom_components/openclaw/manifest.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"iot_class": "local_polling",
"issue_tracker": "https://github.com/techartdev/OpenClawHomeAssistantIntegration/issues",
"requirements": [],
"version": "0.1.62",
"version": "0.1.63",
"dependencies": ["conversation"],
"after_dependencies": ["hassio", "lovelace"]
}
Loading