llama-stack-mirror/tests/integration/recordings/responses/a5187d9d5057.json
Charlie Doern e461246eed test: add telemetry tests for openai
ensure openai completion routes produce metrics in the existing telemetry suite

Signed-off-by: Charlie Doern <cdoern@redhat.com>
2025-08-06 15:55:28 -04:00
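For context, this fixture records the request/response pair behind a telemetry check roughly like the sketch below. The base URL, model name, prompt, and token counts come from the recording itself; the bare OpenAI client call and the final print are illustrative only, not the suite's actual test code, and the telemetry assertions that llama-stack performs on these metrics are omitted here.

# Minimal sketch of the call this recording captures, assuming an
# OpenAI-compatible server at localhost:11434 (Ollama's default port).
# The telemetry suite would additionally assert that the token counts
# below are emitted as metrics; that plumbing is llama-stack specific
# and not shown in this sketch.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="none")

resp = client.chat.completions.create(
    model="llama3.2:3b-instruct-fp16",
    messages=[{"role": "user", "content": "Test trace openai 1"}],
    stream=False,
)

# These fields correspond to the "usage" block in the recorded response
# (31 prompt tokens, 129 completion tokens, 160 total for this run).
print(resp.usage.prompt_tokens, resp.usage.completion_tokens, resp.usage.total_tokens)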


{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "messages": [
        {
          "role": "user",
          "content": "Test trace openai 1"
        }
      ],
      "stream": false
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "chatcmpl-64",
        "choices": [
          {
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "You want to test the capabilities of the OpenAI Text-to-Text model (also known as T0).\n\nPlease note that I'll be using a pre-trained model, so my responses might not be entirely customized to your specific prompt or context. That being said, I'll do my best to mimic the behavior of the original model.\n\nWhat would you like to test or ask? Please provide a prompt or question, and I'll respond accordingly.\n\n(Note: if you'd like to run a longer experiment or try out specific models like text completion or code generation, feel free to let me know and we can figure out a way to collaborate.)",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": null
            }
          }
        ],
        "created": 1754510052,
        "model": "llama3.2:3b-instruct-fp16",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 129,
          "prompt_tokens": 31,
          "total_tokens": 160,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}