// Tool definitions shared by the initial call and the follow-up call,
// so both requests advertise the same tools. (Previously the follow-up
// passed an empty array behind a `/* same tools */` placeholder.)
const tools = [{
  name: 'get_weather',
  description: 'Get current weather for a location',
  parameters: {
    type: 'object',
    properties: {
      location: { type: 'string' }
    },
    required: ['location']
  }
}];

// Conversation history so far; reused verbatim in the follow-up request.
const messages = [{ role: 'user', content: 'What is the weather in Tokyo?' }];

const stream = tracia.runLocal({
  model: 'gpt-4o',
  messages,
  stream: true,
  tools,
});

// Text chunks still stream as normal
for await (const chunk of stream) {
  process.stdout.write(chunk);
}

const result = await stream.result;

// Handle tool calls
if (result.finishReason === 'tool_calls') {
  // Execute EVERY requested tool call (not just the first): the protocol
  // requires one `role: 'tool'` result message per tool call in
  // `result.message`, otherwise the follow-up conversation is malformed.
  // NOTE(review): only one tool is registered, so every call is routed to
  // getWeather — add dispatch by `toolCall.name` if more tools are added.
  const toolMessages = await Promise.all(
    result.toolCalls.map(async (toolCall) => {
      const weatherData = await getWeather(toolCall.arguments.location);
      return {
        role: 'tool',
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        content: JSON.stringify(weatherData),
      };
    })
  );

  // Continue conversation: original history, then the assistant's
  // tool-call message, then the tool results.
  const followUp = tracia.runLocal({
    model: 'gpt-4o',
    messages: [
      ...messages,
      result.message, // Assistant's message with tool calls
      ...toolMessages,
    ],
    stream: true,
    tools,
  });

  for await (const chunk of followUp) {
    process.stdout.write(chunk);
  }
}