{ "test_id": "tests/integration/telemetry/test_completions.py::test_telemetry_format_completeness[txt=ollama/llama3.2:3b-instruct-fp16]", "request": { "method": "POST", "url": "http://0.0.0.0:11434/v1/v1/chat/completions", "headers": {}, "body": { "model": "llama3.2:3b-instruct-fp16", "messages": [ { "role": "user", "content": "Test trace openai with temperature 0.7" } ], "max_tokens": 100, "stream": false, "temperature": 0.7 }, "endpoint": "/v1/chat/completions", "model": "llama3.2:3b-instruct-fp16" }, "response": { "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { "id": "rec-1fcfd86d8111", "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "message": { "content": "I can guide you through the process, but please note that testing OpenAI's models requires a bit of setup and potential API limits.\n\nOpenAI's API is designed to be used in a controlled environment for research purposes. You need an account on their platform and access to the API key.\n\nHere's how you can test a model with temperature 0.7 using Python:\n\n```python\nimport numpy as np\n\n# Replace 'YOUR_API_KEY' with your actual OpenAI API key.\nAPI_KEY", "refusal": null, "role": "assistant", "annotations": null, "audio": null, "function_call": null, "tool_calls": null } } ], "created": 0, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion", "service_tier": null, "system_fingerprint": "fp_ollama", "usage": { "completion_tokens": 100, "prompt_tokens": 35, "total_tokens": 135, "completion_tokens_details": null, "prompt_tokens_details": null } } }, "is_streaming": false }, "id_normalization_mapping": {} }