diff --git a/api/ai/ai_response_model.py b/api/ai/ai_response_model.py
index 4beab39..0d9ef33 100644
--- a/api/ai/ai_response_model.py
+++ b/api/ai/ai_response_model.py
@@ -55,4 +55,11 @@ class ResponseMetadata(BaseModel):
 class StreamResponse(BaseModel):
     results: List[WorkflowResult]
     metadata: ResponseMetadata
-    errors: List[Any]
\ No newline at end of file
+    errors: List[Any]
+
+class EnhanceRequest(BaseModel):
+    prompt: str
+    model: str = "claude-sonnet-4-20250514"
+
+class EnhanceResponse(BaseModel):
+    enhanced_prompt: str
diff --git a/api/ai/ai_service.py b/api/ai/ai_service.py
index 1bd7b65..1875de4 100644
--- a/api/ai/ai_service.py
+++ b/api/ai/ai_service.py
@@ -12,11 +12,13 @@
     WorkflowResult,
     ResponseMetadata,
     AvailableModelsResponse,
-    ModelInfo
+    ModelInfo,
+    EnhanceResponse
 )
 from api.db.pg_database import SessionLocal
 from api.llm.router import get_model_router
 from api.external_api import get_related_segment_ids, get_segment_content
+from api.ai.prompts import ENHANCE_META_PROMPT
 from fastapi import HTTPException
 
 
@@ -154,4 +156,18 @@ def get_available_models_service() -> AvailableModelsResponse:
             context_window=model_data.get("context_window")
         )
 
-    return AvailableModelsResponse(models=models)
\ No newline at end of file
+    return AvailableModelsResponse(models=models)
+
+async def enhance_prompt_service(prompt: str, model: str = "claude-sonnet-4-20250514") -> EnhanceResponse:
+    model_router = get_model_router()
+    if not model_router.validate_model_availability(model):
+        raise HTTPException(
+            status_code=503,
+            detail=f"Model '{model}' is not available. Check API key configuration."
+        )
+
+    llm = model_router.get_model(model, temperature=0.7, max_tokens=4096)
+    response = await llm.ainvoke(ENHANCE_META_PROMPT.format(prompt=prompt))
+    enhanced_text = response.content if hasattr(response, "content") else str(response)
+
+    return EnhanceResponse(enhanced_prompt=enhanced_text)
diff --git a/api/ai/ai_view.py b/api/ai/ai_view.py
index 29af25a..201d675 100644
--- a/api/ai/ai_view.py
+++ b/api/ai/ai_view.py
@@ -1,5 +1,5 @@
-from api.ai.ai_response_model import StreamRequest, AvailableModelsResponse
-from api.ai.ai_service import run_workflow_service, stream_workflow_service, get_available_models_service
+from api.ai.ai_response_model import StreamRequest, AvailableModelsResponse, EnhanceRequest, EnhanceResponse
+from api.ai.ai_service import run_workflow_service, stream_workflow_service, get_available_models_service, enhance_prompt_service
 from fastapi import APIRouter, HTTPException, Depends
 from fastapi.responses import StreamingResponse
 from starlette import status
@@ -82,3 +82,15 @@ async def stream_workflow(
         media_type="text/event-stream",
         headers=SSE_HEADERS
     )
+
+@ai_router.post("/enhance", status_code=status.HTTP_200_OK, response_model=EnhanceResponse)
+async def enhance_prompt(
+    payload: EnhanceRequest,
+    authentication_credential: Annotated[
+        HTTPAuthorizationCredentials, Depends(oauth2_scheme)
+    ],
+):
+    return await enhance_prompt_service(
+        prompt=payload.prompt,
+        model=payload.model,
+    )
diff --git a/api/ai/prompts.py b/api/ai/prompts.py
index 05396fe..6bad3a2 100644
--- a/api/ai/prompts.py
+++ b/api/ai/prompts.py
@@ -37,3 +37,19 @@ def get_specialized_prompt(
         additional_context_section=additional_context_section,
         source_text_block=source_text_block,
     )
+
+
+ENHANCE_META_PROMPT = """You are an expert prompt engineer. Your task is to enhance and improve the given system prompt while preserving the user's original intent and purpose. 
+
+Guidelines for enhancement:
+- Make the instructions clearer and more specific
+- Add structure (e.g., role definition, constraints, output format) where beneficial
+- Remove ambiguity and vagueness
+- Maintain the original language and tone the user intended
+- Keep it concise — don't add unnecessary verbosity
+- If the prompt references specific domains or tasks, sharpen those references
+
+Return ONLY the enhanced system prompt text. Do not include any explanation, commentary, or markdown formatting around it.
+
+Original system prompt to enhance:
+{prompt}"""