mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 12:07:34 +00:00
fix: environment variable typo in inference recorder error message (#3374)
The error message was referencing LLAMA_STACK_INFERENCE_MODE instead of the correct LLAMA_STACK_TEST_INFERENCE_MODE environment variable.
This commit is contained in:
parent
ad6ea7fb91
commit
ef02b9ea10
1 changed file with 1 addition and 1 deletion
|
@@ -292,7 +292,7 @@ async def _patched_inference_method(original_method, self, client_type, endpoint
                 f"No recorded response found for request hash: {request_hash}\n"
                 f"Request: {method} {url} {body}\n"
                 f"Model: {body.get('model', 'unknown')}\n"
-                f"To record this response, run with LLAMA_STACK_INFERENCE_MODE=record"
+                f"To record this response, run with LLAMA_STACK_TEST_INFERENCE_MODE=record"
             )

         elif _current_mode == InferenceMode.RECORD:
|
Loading…
Add table
Add a link
Reference in a new issue