llama-stack-mirror/tests/integration/recordings/responses/bf79a89cc37f.json
Ashwin Bharambe 6f1afabcdc chore(tests): normalize recording IDs and timestamps to reduce git diff noise
IDs are now deterministic hashes based on request content, and timestamps are normalized to constants, eliminating spurious changes when re-recording tests.
2025-10-03 16:13:14 -07:00
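The "id" field in the recording below ("rec-bf79a89cc37f") and the zeroed "created" timestamp reflect the normalization described in the commit message: the ID is a deterministic hash of the request content, and volatile timestamps are pinned to constants. The following is a minimal, hypothetical Python sketch of how such an ID could be derived; the choice of SHA-256, the 12-character truncation, and which request fields feed the hash are assumptions for illustration, not the actual llama-stack implementation.

# Hypothetical sketch: derive a stable recording ID from the request content.
# Assumptions (not confirmed by the source): SHA-256 over the canonicalized
# request, truncated to 12 hex characters and prefixed with "rec-".
import hashlib
import json

def recording_id(request: dict) -> str:
    # Canonicalize with sorted keys so semantically identical requests
    # always produce the same digest across re-recordings.
    canonical = json.dumps(request, sort_keys=True, separators=(",", ":"))
    digest = hashlib.sha256(canonical.encode("utf-8")).hexdigest()
    return f"rec-{digest[:12]}"

example_request = {
    "method": "POST",
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b",
    "body": {
        "model": "llama3.2:3b",
        "messages": [{"role": "user", "content": "OpenAI test 3"}],
        "stream": False,
    },
}
print(recording_id(example_request))  # stable output for the same request content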


{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b",
      "messages": [
        {
          "role": "user",
          "content": "OpenAI test 3"
        }
      ],
      "stream": false
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "rec-bf79a89cc37f",
        "choices": [
          {
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "I'm happy to help, but it seems you want me to engage in a basic conversation as OpenAI's new chat model, right? I can do that!\n\nHere's my response:\n\nHello! How are you today? Is there something specific on your mind that you'd like to talk about or any particular topic you'd like to explore together?\n\nWhat is it that you're curious about?",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": null
            }
          }
        ],
        "created": 0,
        "model": "llama3.2:3b",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 80,
          "prompt_tokens": 30,
          "total_tokens": 110,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}