{ "test_id": "tests/integration/telemetry/test_openai_telemetry.py::test_openai_completion_creates_telemetry[txt=ollama/llama3.2:3b-instruct-fp16]", "request": { "method": "POST", "url": "http://0.0.0.0:11434/v1/v1/chat/completions", "headers": {}, "body": { "model": "llama3.2:3b-instruct-fp16", "messages": [ { "role": "user", "content": "Test OpenAI telemetry creation" } ], "stream": false }, "endpoint": "/v1/chat/completions", "model": "llama3.2:3b-instruct-fp16" }, "response": { "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { "id": "rec-0de60cd6a6ec", "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "I'm happy to help you test this! This type of model is designed to provide information and answer questions, rather than engage in conversation or generate creative content. Feel free to ask me anything or test out any features you'd like. I'll do my best to assist you.\r\n\r\nIf you're looking for a way to engage with the model more creatively, there are several things you could try:\r\n\r\n1. Ask me a question on a topic unrelated to the ones I was trained on. This can give me a chance to demonstrate my ability to learn and generalize.\r\n2. Ask me to generate a piece of content - such as a short story or poem - on a specific topic. I can use patterns in the data I was trained on to generate text that is similar in style and structure to what you might find in a published book or article.\r\n3. Play a game with me. We could play games like \"20 Questions\" (where you think of an object, and I try to guess what it is by asking yes-or-no questions), or \"Hangman\". Let me know if there's something else you'd like to do!", "refusal": null, "role": "assistant", "annotations": null, "audio": null, "function_call": null, "tool_calls": null } } ], "created": 0, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion", "service_tier": null, "system_fingerprint": "fp_ollama", "usage": { "completion_tokens": 229, "prompt_tokens": 30, "total_tokens": 259, "completion_tokens_details": null, "prompt_tokens_details": null } } }, "is_streaming": false } }