Documentation Index
Fetch the complete documentation index at: https://docs.tracia.io/llms.txt
Use this file to discover all available pages before exploring further.
result = client.prompts.run(slug, variables?, options?)
# Async
result = await client.prompts.arun(slug, variables?, options?)
Execute a prompt with variable substitution and get the generated response. Tracia handles template rendering, LLM API calls, and automatically logs a span.
Parameters
| Parameter | Type | Required | Description |
|---|---|---|---|
| slug | str | Yes | The prompt slug |
| variables | dict[str, str] | No | Template variables |
| options | RunOptions | No | Additional options |
| options.model | str | No | Override the default model |
| options.tags | list[str] | No | Tags for filtering spans |
| options.user_id | str | No | End user identifier |
| options.session_id | str | No | Session identifier |
| options.trace_id | str | No | Group related spans together (session) |
| options.parent_span_id | str | No | Link to parent span |
| options.version | int | No | Run a specific prompt version (uses latest if omitted) |
| options.messages | list[LocalPromptMessage] | No | Full conversation messages for multi-turn (skips template rendering) |
Response
class RunResult(BaseModel):
    text: str | None  # The generated text (None when only tool calls)
    span_id: str  # Unique span identifier
    trace_id: str  # Trace identifier for session grouping
    prompt_version: int  # Version of the prompt used
    latency_ms: int  # Request latency in milliseconds
    usage: TokenUsage
    cost: float  # Cost in USD
    finish_reason: FinishReason | None  # Why the model stopped ("stop", "length", "tool_calls")
    tool_calls: list[ToolCall] | None  # Tool calls made by the model
    structured_output: dict | None  # Structured output when using JSON schema
    messages: list[LocalPromptMessage] | None  # Full conversation for multi-turn continuation
Examples
Basic Usage
result = client.prompts.run("welcome-email", {
    "name": "Alice",
    "product": "Tracia",
})
print(result.text)
# "Dear Alice, Welcome to Tracia!..."
With Options
from tracia import RunOptions
result = client.prompts.run(
    "welcome-email",
    {"name": "Alice", "product": "Tracia"},
    RunOptions(
        model="gpt-4",
        tags=["onboarding", "email"],
        user_id="user_123",
        session_id="session_abc",
    ),
)
Pinning a Version
Run a specific prompt version instead of the latest. Useful for production deployments where you want to lock a known-good version while continuing to iterate on the prompt.
from tracia import RunOptions
result = client.prompts.run(
    "welcome-email",
    {"name": "Alice", "product": "Tracia"},
    RunOptions(version=3),
)
print(result.prompt_version) # 3
result = client.prompts.run("welcome-email", {"name": "Alice"})
print(f"Latency: {result.latency_ms}ms")
print(f"Tokens: {result.usage.total_tokens}")
print(f"Cost: ${result.cost:.4f}")
print(f"Span ID: {result.span_id}")
print(f"Prompt Version: {result.prompt_version}")
Async Usage
result = await client.prompts.arun("welcome-email", {"name": "Alice"})
print(result.text)
When a prompt has tools configured, the model may return tool_calls instead of text. Use messages to continue the conversation with tool results.
import json
from tracia import RunOptions, LocalPromptMessage
# 1. First call - model returns tool calls
result = client.prompts.run("assistant", {"question": "Weather in Tokyo?"})
if result.finish_reason == "tool_calls" and result.tool_calls:
    # 2. Execute tools locally
    weather_data = get_weather(result.tool_calls[0].arguments["location"])

    # 3. Continue with tool results
    result2 = client.prompts.run(
        "assistant",
        None,
        RunOptions(
            messages=[
                *result.messages,
                LocalPromptMessage(
                    role="tool",
                    tool_call_id=result.tool_calls[0].id,
                    tool_name="get_weather",
                    content=json.dumps(weather_data),
                ),
            ],
            trace_id=result.trace_id,
            parent_span_id=result.span_id,
        ),
    )

    print(result2.text)
    # "The weather in Tokyo is 22°C and sunny."
Trace Grouping
Use trace_id and parent_span_id to group related runs in the dashboard.
from tracia import RunOptions
result1 = client.prompts.run("step-1", {"input": "data"})
result2 = client.prompts.run(
    "step-2",
    {"input": result1.text},
    RunOptions(
        trace_id=result1.trace_id,
        parent_span_id=result1.span_id,
    ),
)
Error Handling
from tracia import TraciaError, TraciaErrorCode
try:
    result = client.prompts.run("welcome-email", {"name": "Alice"})
except TraciaError as error:
    if error.code == TraciaErrorCode.NOT_FOUND:
        print("Prompt does not exist")
    elif error.code == TraciaErrorCode.MISSING_VARIABLES:
        print("Missing required variables")
    elif error.code == TraciaErrorCode.PROVIDER_ERROR:
        print(f"LLM provider error: {error.message}")