llama-stack-mirror/tests/integration/recordings/responses/bf79a89cc37f.json
Charlie Doern ddefd158e9 test: add integration tests for query_metrics
the integration tests check that all of the metrics we currently support are actually queryable after inference requests

this also tests things like aggregation, label filtering, etc.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
2025-08-22 15:54:33 -04:00
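
For context, a minimal sketch of the kind of check the commit message describes might look like the following: run one inference call (mirroring the request recorded in this file), then assert the resulting token-count metrics can be queried back with a label filter and aggregated. This is only an illustration; the metrics endpoint, metric names, and port 8321 used below are assumptions, not the actual llama-stack API added by this commit.

    # Hedged sketch only: the /telemetry/metrics endpoint, metric name, and port 8321
    # are placeholders for illustration, not confirmed llama-stack APIs.
    import requests

    MODEL = "llama3.2:3b"
    CHAT_URL = "http://localhost:11434/v1/v1/chat/completions"  # URL copied from this recording

    def test_completion_tokens_metric_is_queryable():
        # 1. Issue an inference request so usage metrics get recorded.
        resp = requests.post(
            CHAT_URL,
            json={
                "model": MODEL,
                "messages": [{"role": "user", "content": "OpenAI test 3"}],
                "stream": False,
            },
            timeout=60,
        )
        resp.raise_for_status()
        usage = resp.json()["usage"]

        # 2. Query the metric back with a label filter (placeholder endpoint and names).
        metrics = requests.get(
            "http://localhost:8321/telemetry/metrics",
            params={"metric": "completion_tokens", "label": f"model_id={MODEL}"},
            timeout=30,
        )
        metrics.raise_for_status()
        points = metrics.json().get("data", [])

        # 3. The aggregated value should cover at least the tokens counted in step 1.
        assert sum(p["value"] for p in points) >= usage["completion_tokens"]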


{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b",
      "messages": [
        {
          "role": "user",
          "content": "OpenAI test 3"
        }
      ],
      "stream": false
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "chatcmpl-48",
        "choices": [
          {
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "I'm happy to help, but it seems you want me to engage in a basic conversation as OpenAI's new chat model, right? I can do that!\n\nHere's my response:\n\nHello! How are you today? Is there something specific on your mind that you'd like to talk about or any particular topic you'd like to explore together?\n\nWhat is it that you're curious about?",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": null
            }
          }
        ],
        "created": 1755891524,
        "model": "llama3.2:3b",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 80,
          "prompt_tokens": 30,
          "total_tokens": 110,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}