// Basic usage: create a Tracia client from an environment API key,
// run a single completion locally, and print the text and provider.
import { Tracia } from 'tracia';

const tracia = new Tracia({ apiKey: process.env.TRACIA_API_KEY });

const result = await tracia.runLocal({
  model: 'claude-sonnet-4-20250514',
  messages: [
    { role: 'system', content: 'You are a creative writing assistant.' },
    { role: 'user', content: 'Write a short story opening about a time traveler.' },
  ],
  temperature: 0.9,
  maxOutputTokens: 1000,
});

console.log(result.text);
console.log(`Provider: ${result.provider}`);
Include previous messages to maintain conversation context:
Copy
// Multi-turn call: earlier user/assistant turns are included in `messages`
// so the model can answer the follow-up question with full context.
const result = await tracia.runLocal({
  model: 'gpt-4o',
  messages: [
    { role: 'system', content: 'You are a math tutor.' },
    { role: 'user', content: 'What is 15% of 80?' },
    { role: 'assistant', content: '15% of 80 is 12.' },
    { role: 'user', content: 'How did you calculate that?' },
  ],
});
Enable streaming to receive responses in real-time:
Copy
// Streaming: note runLocal is NOT awaited here — the returned handle is
// available synchronously, so `stream.spanId` can be read before any
// chunks arrive. Chunks are consumed via async iteration, and the final
// result (with usage stats) is awaited from `stream.result` afterwards.
const stream = tracia.runLocal({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Write a short poem about coding.' }],
  stream: true,
});

// Span ID is available immediately
console.log(`Span: ${stream.spanId}`);

// Iterate to receive chunks
for await (const chunk of stream) {
  process.stdout.write(chunk);
}

// Get final result with usage stats
const result = await stream.result;
console.log(`\nTokens: ${result.usage.totalTokens}`);