Propagate test IDs from client to server via HTTP headers to maintain proper test isolation when running with server-based stack configs. Without this, recorded/replayed inference requests in server mode would leak across tests.

Changes:
- Patch client _prepare_request to inject test ID into provider data header
- Sync test context from provider data on server side before storage operations
- Set LLAMA_STACK_TEST_STACK_CONFIG_TYPE env var based on stack config
- Configure console width for cleaner log output in CI
- Add SQLITE_STORE_DIR temp directory for test data isolation
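A minimal sketch of the client-side half of this change, assuming provider data travels in the X-LlamaStack-Provider-Data header; the __test_id key and the get_current_test_id() helper are hypothetical names for illustration, not the actual implementation:

    import json

    PROVIDER_DATA_HEADER = "X-LlamaStack-Provider-Data"  # assumed provider-data header name

    def install_test_id_propagation(client_cls, get_current_test_id):
        """Wrap client_cls._prepare_request so every outgoing request
        carries the current test ID inside the provider-data header."""
        original = client_cls._prepare_request

        def _prepare_request(self, request):
            test_id = get_current_test_id()  # hypothetical: returns the active test's ID, or None
            if test_id is not None:
                # Merge the test ID into any provider data already on the request.
                data = json.loads(request.headers.get(PROVIDER_DATA_HEADER, "{}"))
                data["__test_id"] = test_id  # hypothetical key name
                request.headers[PROVIDER_DATA_HEADER] = json.dumps(data)
            return original(self, request)

        client_cls._prepare_request = _prepare_request

On the server side, the matching sync step reads this header back and sets the test context before any storage operation, so recordings land in, and replay from, a per-test bucket rather than leaking across tests.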
57 lines
3.5 KiB
JSON
{
  "test_id": null,
  "request": {
    "method": "POST",
    "url": "http://0.0.0.0:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "messages": [
        {
          "role": "user",
          "content": "Test trace openai 1"
        }
      ],
      "stream": false
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "rec-1098240ef53b",
        "choices": [
          {
            "finish_reason": "stop",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "I'd be happy to help you test the OpenAI API, specifically version 1. To start, we'll need to complete some basic setup steps.\n\nPlease note that the OpenAI platform is still developing and has a lot of limitations on their API keys for new accounts (the first few tiers). \n\nHere's an example of how you can use the `python-openai` library in python to test the OpenAI API v. 1 (Note: The free version only has a limited set of options):\n\n### Required installations\n\nBefore we begin, make sure you have the following installed:\n\n* Python 3.7 or higher\n* pip\n* `python-openai`\n\nYou can install the required packages by running the following command in your terminal:\n\n```bash\npip install python-openai\n```\n\nPlease make sure to get an API key from [OpenAI website](https://www.openai.com/), as you will need it for testing.\n\n### Getting Started with Testing\n\nBelow is a sample code example demonstrating how to call some of the `python-openai` functions:\n\n```python\nfrom openai import api\n\n\n# Load api key into an object.\ndef load_model(api_key):\n \"\"\"Load and store the API key in order to use the API.\"\"\"\n model = api.Model()\n model.api_key = api_key\n\n return model\n\n\n\nif __name__ == \"__main__\":\n \n # Define your OpenAI API KEY\n API_KEY = \"YOUR_API_KEY_HERE\"\n\n load_model(api_key=API_KEY)\n \n\n # Perform an API call\n \n model = load_model()\n print(\"This will be printed if successful \")\n \n\n\n\n# Don't forget to close and clean up the model when finished testing.\nmodel.__enter__()\nmodel.close()\nmodel.__exit__()\n\n```\n\nPlease replace `\"YOUR_API_KEY_HERE\"` with your actual OpenAI username.\n\nTo do so, you'll need an API key: go to https://www.openai.com and register a new account first.\n\nIf your tests run as expected without any errors, it means that your OpenAI API token is working. Make sure to store the token securely for use in future projects or testing code for optimal results.\nPlease let me know if there are any issues at all during testing or if I can assist with anything else related to getting started",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": null
            }
          }
        ],
        "created": 0,
        "model": "llama3.2:3b-instruct-fp16",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 476,
          "prompt_tokens": 31,
          "total_tokens": 507,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}
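Note that "test_id" is null in this recording: that is exactly the server-mode gap the change above closes, since without header propagation the server stores recordings under no test ID and replays can leak across tests. For illustration, here is a rough sketch of how a stored recording like this one could be looked up during replay; the file layout and hashing scheme are assumptions for the example, not llama-stack's actual scheme:

    import hashlib
    import json
    from pathlib import Path

    def request_key(method: str, url: str, body: dict) -> str:
        # Derive a stable key from the normalized request. The response id
        # above ("rec-1098240ef53b") looks like a short hash, but the exact
        # derivation is assumed here.
        canonical = json.dumps(
            {"method": method.upper(), "url": url, "body": body}, sort_keys=True
        )
        return hashlib.sha256(canonical.encode()).hexdigest()[:12]

    def replay(recordings_dir: Path, method: str, url: str, body: dict) -> dict:
        # Load the recording whose key matches the incoming request and
        # return the serialized response body for deserialization.
        path = recordings_dir / f"rec-{request_key(method, url, body)}.json"
        record = json.loads(path.read_text())
        return record["response"]["body"]["__data__"]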