Skip to content
Merged
2 changes: 2 additions & 0 deletions backend/app/core/providers.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ class Provider(str, Enum):
LANGFUSE = "langfuse"
GOOGLE = "google"
SARVAMAI = "sarvamai"
ELEVENLABS = "elevenlabs"


@dataclass
Expand All @@ -34,6 +35,7 @@ class ProviderConfig:
),
Provider.GOOGLE: ProviderConfig(required_fields=["api_key"]),
Provider.SARVAMAI: ProviderConfig(required_fields=["api_key"]),
Provider.ELEVENLABS: ProviderConfig(required_fields=["api_key"]),
}


Expand Down
11 changes: 6 additions & 5 deletions backend/app/models/llm/request.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
from pydantic import HttpUrl, model_validator
from sqlalchemy.dialects.postgresql import JSONB
from sqlmodel import Field, Index, SQLModel, text

from app.core.util import now


Expand Down Expand Up @@ -55,8 +54,8 @@ class STTLLMParams(SQLModel):

class TTSLLMParams(SQLModel):
model: str
voice: str
language: str
voice: str | None = None
language: str | None = None
response_format: Literal["mp3", "wav", "ogg"] | None = "wav"


Expand Down Expand Up @@ -194,7 +193,9 @@ class NativeCompletionConfig(SQLModel):
Supports any LLM provider's native API format.
"""

provider: Literal["openai-native", "google-native", "sarvamai-native"] = Field(
provider: Literal[
"openai-native", "google-native", "sarvamai-native", "elevenlabs-native"
] = Field(
...,
description="Native provider type (e.g., openai-native)",
)
Expand All @@ -214,7 +215,7 @@ class KaapiCompletionConfig(SQLModel):
Supports multiple providers: OpenAI, Claude, Gemini, etc.
"""

provider: Literal["openai", "google", "sarvamai"] = Field(
provider: Literal["openai", "google", "sarvamai", "elevenlabs"] = Field(
..., description="LLM provider (openai, google, sarvamai)"
)

Expand Down
5 changes: 4 additions & 1 deletion backend/app/services/llm/jobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,7 @@ def execute_llm_call(
session=session, project_id=project_id, config_id=config.id
)
config_blob, error = resolve_config_blob(config_crud, config)
logger.info(f"----the resolved config blob is {config_blob}")
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Debug log should be cleaned up or reformatted.

This log statement appears to be temporary debug code that should be removed or reformatted before merging:

  1. Uses ---- prefix instead of the required [execute_llm_call] prefix
  2. Logs the entire config_blob object which may contain sensitive configuration data
  3. Inconsistent with the logging pattern used throughout this file
Suggested fix

Either remove this debug line, or reformat it properly:

-                logger.info(f"----the resolved config blob is {config_blob}")
+                logger.debug(
+                    f"[execute_llm_call] Resolved config blob | "
+                    f"provider={config_blob.completion.provider if config_blob else None}, "
+                    f"job_id={job_id}"
+                )

As per coding guidelines: "Prefix all log messages with the function name in square brackets."

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
logger.info(f"----the resolved config blob is {config_blob}")
logger.debug(
f"[execute_llm_call] Resolved config blob | "
f"provider={config_blob.completion.provider if config_blob else None}, "
f"job_id={job_id}"
)
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/services/llm/jobs.py` at line 354, the debug logger.info line
that prints "----the resolved config blob is {config_blob}" in execute_llm_call
should be removed or replaced with a safe, consistent log message: either delete
the line, or change it to a logger.debug call with the required "[execute_llm_call]"
prefix that logs only a non-sensitive summary (e.g., keys, masked values, or a
short status) instead of the full config_blob object, to avoid leaking secrets
and to match the file's logging pattern.

if error:
return BlockResult(error=error)
else:
Expand Down Expand Up @@ -520,7 +521,7 @@ def execute_job(
callback_url_str = str(request.callback_url) if request.callback_url else None

logger.info(
f"[execute_job] Starting LLM job execution | job_id={job_id}, task_id={task_id}"
f"[execute_job] Starting LLM job execution | job_id={job_id}, task_id={task_id}, callback_url {callback_url_str}"
)

try:
Expand Down Expand Up @@ -548,6 +549,8 @@ def execute_job(
include_provider_raw_response=request.include_provider_raw_response,
)

logger.info(f"[execute_job] results: {result.error}")

if result.success:
callback_response = APIResponse.success_response(
data=result.response, metadata=result.metadata
Expand Down
Loading
Loading