# What does this PR do?

Use Together's new base64 support.

## Test Plan

Recordings for:

./scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup together --subdirs inference --pattern openai
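For reference, here is a minimal sketch (not part of the recording file) of the kind of client call this recording captures, assuming the standard `openai` Python client pointed at Together's OpenAI-compatible endpoint; the model, message, and sampling parameters are taken directly from the recorded request body below, and the API key is a placeholder.

```python
# Hypothetical reproduction of the recorded request; not the test harness itself.
from openai import OpenAI

client = OpenAI(
    base_url="https://api.together.xyz/v1",  # Together's OpenAI-compatible endpoint
    api_key="<TOGETHER_API_KEY>",            # placeholder, supply a real key
)

response = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
    messages=[{"role": "user", "content": "Test trace openai with temperature 1"}],
    max_tokens=100,
    stream=False,
    temperature=0.7,
)
print(response.choices[0].message.content)
```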
{
  "request": {
    "method": "POST",
    "url": "https://api.together.xyz/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
      "messages": [
        {
          "role": "user",
          "content": "Test trace openai with temperature 1"
        }
      ],
      "max_tokens": 100,
      "stream": false,
      "temperature": 0.7
    },
    "endpoint": "/v1/chat/completions",
    "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "oCfxJ43-4Yz4kd-984c2b99bc6a9045",
        "choices": [
          {
            "finish_reason": "length",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "I'll do my best to generate a response with a temperature of 1. \n\nIn the context of language models like myself, temperature is a parameter that controls the level of randomness or creativity in the generated text. A temperature of 1 is relatively high, which means the model will produce more diverse and potentially less coherent text.\n\nHere's a response with a temperature of 1:\n\n\"The stars shone brightly in the midnight sky, like diamonds scattered across the velvet expanse. The world was bath",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": []
            },
            "seed": 8314651915728863000
          }
        ],
        "created": 1758820596,
        "model": "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": null,
        "usage": {
          "completion_tokens": 100,
          "prompt_tokens": 43,
          "total_tokens": 143,
          "completion_tokens_details": null,
          "prompt_tokens_details": null,
          "cached_tokens": 0
        },
        "prompt": []
      }
    },
    "is_streaming": false
  }
}