Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-10 11:39:47 +00:00)
- Add _create_metric_stub_from_protobuf method to correctly parse protobuf metrics
- Add _extract_attributes_from_data_point helper method
- Change metric handling to use protobuf-specific parsing instead of OpenTelemetry native parsing
- Add missing typing import
- Add OTEL_METRIC_EXPORT_INTERVAL environment variable for test configuration

This fixes the CI failure where metrics were not being properly extracted from protobuf data in server-mode tests. A sketch of what the protobuf parsing helpers might look like follows below.
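The helper names above come from the commit message; the bodies below are only a hedged sketch of how such protobuf parsing could work, not the actual llama-stack implementation. The protobuf field names (`Metric.name`, `Metric.unit`, the `data` oneof, `data_points`, and `KeyValue`/`AnyValue` attributes) are part of the real opentelemetry-proto schema; the stub dict shape is an assumption for illustration.

```python
# Hedged sketch, not the actual llama-stack code. Only the opentelemetry-proto
# field names are real; the stub dict layout is hypothetical.
from typing import Any


def _extract_attributes_from_data_point(data_point) -> dict[str, Any]:
    """Convert a data point's repeated KeyValue attributes into a plain dict.

    Each attribute value is an AnyValue whose payload sits behind a protobuf
    oneof (string_value, int_value, double_value, bool_value, ...).
    """
    attributes: dict[str, Any] = {}
    for kv in data_point.attributes:
        kind = kv.value.WhichOneof("value")  # e.g. "string_value"
        attributes[kv.key] = getattr(kv.value, kind) if kind else None
    return attributes


def _create_metric_stub_from_protobuf(metric) -> dict[str, Any]:
    """Build a simple dict "stub" from an opentelemetry.proto.metrics.v1.Metric."""
    data_kind = metric.WhichOneof("data")  # "sum", "gauge", "histogram", ...
    data_points = getattr(metric, data_kind).data_points if data_kind else []
    return {
        "name": metric.name,
        "unit": metric.unit,
        "attributes": [_extract_attributes_from_data_point(dp) for dp in data_points],
    }
```

As for OTEL_METRIC_EXPORT_INTERVAL: this is the standard OpenTelemetry SDK setting (in milliseconds) controlling how often the periodic metric reader exports; presumably the tests shorten it so metrics flush quickly enough to be asserted on.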
60 lines · 2.1 KiB · JSON · Generated
{
  "test_id": "tests/integration/telemetry/test_completions.py::test_telemetry_format_completeness[txt=ollama/llama3.2:3b-instruct-fp16]",
  "request": {
    "method": "POST",
    "url": "http://0.0.0.0:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "messages": [
        {
          "role": "user",
          "content": "Test trace openai with temperature 0.7"
        }
      ],
      "max_tokens": 100,
      "stream": false,
      "temperature": 0.7
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "rec-1fcfd86d8111",
        "choices": [
          {
            "finish_reason": "length",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "To test the trace function from OpenAI's API with a temperature of 0.7, you can use the following Python code:\n\n```python\nimport json\n\n# Import the required libraries\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\n# Set the API endpoint and model name\nmodel_name = \"dalle-mini\"\n\n# Initialize the model and tokenizer\nmodel = AutoModelForCausalLM.from_pretrained(model_name)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\n",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": null
            }
          }
        ],
        "created": 0,
        "model": "llama3.2:3b-instruct-fp16",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 100,
          "prompt_tokens": 35,
          "total_tokens": 135,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  },
  "id_normalization_mapping": {}
}
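For context, here is a minimal sketch of the client call this recording corresponds to, assuming the standard openai Python client pointed at an Ollama OpenAI-compatible endpoint. The base_url and api_key values are assumptions inferred from the recorded URL (Ollama ignores the key); the model, messages, and sampling parameters mirror the recorded request body.

```python
# Hedged sketch: not part of the recording, just an illustration of the
# request captured above.
from openai import OpenAI

client = OpenAI(base_url="http://0.0.0.0:11434/v1", api_key="unused")

completion = client.chat.completions.create(
    model="llama3.2:3b-instruct-fp16",
    messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],
    max_tokens=100,
    stream=False,
    temperature=0.7,
)
print(completion.usage)  # recorded run reports 35 prompt + 100 completion tokens
```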