Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-17 18:12:43 +00:00
Commit b5af8ac901 ("try once more"), parent 707ff21c84
2 changed files with 1 addition and 2 deletions
@@ -261,7 +261,7 @@ async def _patched_inference_method(original_method, self, client_type, endpoint
        else:
            raise RuntimeError(
                f"No recorded response found for request hash: {request_hash}\n"
                f"Endpoint: {endpoint}\n"
                f"Request: {method} {url} {body}\n"
                f"Model: {body.get('model', 'unknown')}\n"
                f"To record this response, run with LLAMA_STACK_INFERENCE_MODE=record"
            )
@@ -7,7 +7,6 @@
from typing import Any
from uuid import uuid4

import llama_stack_client as lsc_package
import pytest
import requests
from llama_stack_client import Agent, AgentEventLogger, Document
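
For context, the first hunk shows the replay-mode failure path of the patched inference method: each request is keyed by a hash, and replay raises when no recording matches that hash. Below is a minimal, self-contained sketch of that lookup pattern, not the repository's actual implementation; the helper names (ResponseStore, _hash_request, replay_or_fail) and the SHA-256 canonicalization are assumptions, while the error message and the LLAMA_STACK_INFERENCE_MODE hint are taken from the diff.

# A minimal sketch of the record/replay lookup pattern behind the error above.
# Hypothetical names: ResponseStore, _hash_request, replay_or_fail. Only the
# error message and the LLAMA_STACK_INFERENCE_MODE hint come from the diff.
import hashlib
import json
from typing import Any


def _hash_request(method: str, url: str, body: dict[str, Any]) -> str:
    # Canonicalize the request so identical requests map to the same key.
    payload = json.dumps({"method": method, "url": url, "body": body}, sort_keys=True)
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()


class ResponseStore:
    """In-memory stand-in for the harness's recorded-response storage."""

    def __init__(self) -> None:
        self._recordings: dict[str, Any] = {}

    def record(self, request_hash: str, response: Any) -> None:
        self._recordings[request_hash] = response

    def lookup(self, request_hash: str) -> Any | None:
        return self._recordings.get(request_hash)


def replay_or_fail(store: ResponseStore, endpoint: str, method: str, url: str, body: dict[str, Any]) -> Any:
    request_hash = _hash_request(method, url, body)
    recorded = store.lookup(request_hash)
    if recorded is not None:
        return recorded
    # Same failure path as the hunk above: replay mode found no recording,
    # so tell the user how to create one.
    raise RuntimeError(
        f"No recorded response found for request hash: {request_hash}\n"
        f"Endpoint: {endpoint}\n"
        f"Request: {method} {url} {body}\n"
        f"Model: {body.get('model', 'unknown')}\n"
        f"To record this response, run with LLAMA_STACK_INFERENCE_MODE=record"
    )


if __name__ == "__main__":
    store = ResponseStore()
    url = "http://localhost:8321/v1/chat/completions"
    body = {"model": "llama-3", "messages": [{"role": "user", "content": "hi"}]}
    store.record(_hash_request("POST", url, body), {"content": "hello"})
    # Replays the recorded response; an unrecorded request would raise RuntimeError.
    print(replay_or_fail(store, "/v1/chat/completions", "POST", url, body))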