import Together from "together-ai";
import { HeliconeManualLogger } from "@helicone/helpers";
/**
 * Sends a streaming chat-completion request to Together AI, logs the raw
 * response stream to Helicone, and prints the decoded chunks to stdout.
 *
 * @returns the application-side branch of the tee'd Together stream.
 *          NOTE: it has been fully consumed by the time it is returned
 *          (same as the original behavior) — callers receive an
 *          exhausted stream.
 * @throws if the Together request fails or if Helicone logging rejects.
 */
export async function main() {
  // Initialize the Helicone logger
  const heliconeLogger = new HeliconeManualLogger({
    apiKey: process.env.HELICONE_API_KEY!,
    headers: {}, // You can add custom headers here
  });

  // Initialize the Together client (reads TOGETHER_API_KEY from env)
  const together = new Together();

  // Create your request body
  const body = {
    model: "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
    messages: [{ role: "user", content: "Your question here" }],
    stream: true,
  } as Together.Chat.CompletionCreateParamsStreaming & { stream: true };

  // Make the streaming request
  const response = await together.chat.completions.create(body);

  // Split the stream into two branches: one for Helicone logging, one
  // for application processing.
  const [stream1, stream2] = response.tee();

  // Start Helicone logging concurrently. Keep the promise instead of
  // firing-and-forgetting: the original dropped it, so a logging failure
  // surfaced as an unhandled rejection. We await it AFTER draining our
  // own branch so both tee() branches are consumed in parallel (tee()
  // must buffer the slower branch otherwise).
  const logPromise = heliconeLogger.logStream(body, async (resultRecorder) => {
    resultRecorder.attachStream(stream1.toReadableStream());
  });

  // Process the stream for your application.
  // { stream: true } makes TextDecoder buffer a multi-byte UTF-8
  // sequence that straddles a chunk boundary instead of emitting
  // replacement characters (the original decoded each chunk in
  // isolation, which can garble non-ASCII output).
  const textDecoder = new TextDecoder();
  for await (const chunk of stream2.toReadableStream()) {
    console.log(textDecoder.decode(chunk, { stream: true }));
  }
  // Flush any partial sequence still buffered in the decoder.
  const tail = textDecoder.decode();
  if (tail) console.log(tail);

  // Propagate any Helicone logging failure to the caller.
  await logPromise;

  return stream2;
}