{ "request": { "method": "POST", "url": "http://localhost:11434/v1/v1/completions", "headers": {}, "body": { "model": "llama3.2:3b-instruct-fp16", "messages": [ { "role": "user", "content": "Test OpenAI telemetry creation" } ], "stream": false }, "endpoint": "/v1/completions", "model": "llama3.2:3b-instruct-fp16" }, "response": { "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { "id": "chatcmpl-297", "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "import openai\n\n# You can replace this with your own API key\nAPI_KEY = \"your_openai_api_key\"\n\n# Create an OpenAI instance\nopenai_client = openai.Client(api_key=API_KEY)\n\n# Test the telemetry endpoint by creating a new telemetry instance\ntelemetry = openai_client.create_telemetry()\n\nprint(telemetry)", "refusal": null, "role": "assistant", "annotations": null, "audio": null, "function_call": null, "tool_calls": null } } ], "created": 1754051845, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion", "service_tier": null, "system_fingerprint": "fp_ollama", "usage": { "completion_tokens": 72, "prompt_tokens": 30, "total_tokens": 102, "completion_tokens_details": null, "prompt_tokens_details": null } } }, "is_streaming": false } }