chore: remove recordable mock (#2088)
# What does this PR do?

We've disabled the recordable mock for a while, since it hasn't worked as well as expected: llama_stack_client changes frequently, and the mock requires both repos to stay in sync.

## Test Plan

Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
commit 4597145011 (parent a5d151e912)
5 changed files with 36 additions and 57965 deletions
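As the description above notes, the recordable mock only works while llama_stack_client and this repo agree on the client interface. The sketch below is illustrative only — the names (`RecordableClient`, `cassette_path`) are hypothetical and not the deleted implementation — but it shows the usual record/replay shape and why it is brittle: the replay cache is keyed on the exact method name and arguments, so any change to the client's interface invalidates the recordings.

```python
# Illustrative sketch only; not the removed code. Assumes responses are JSON-serializable.
import hashlib
import json
from pathlib import Path


class RecordableClient:
    """Record responses from a real client on first run, replay them on later runs."""

    def __init__(self, real_client, cassette_path: Path):
        self._client = real_client
        self._path = cassette_path
        self._cache = json.loads(self._path.read_text()) if self._path.exists() else {}

    def call(self, method: str, **kwargs):
        # The cache key bakes in the method name and arguments, so a renamed method or a
        # new required parameter misses the cache and falls back to live calls -- the
        # "both repos must be in sync" problem described in the PR body.
        key = hashlib.sha256(f"{method}:{json.dumps(kwargs, sort_keys=True)}".encode()).hexdigest()
        if key not in self._cache:
            self._cache[key] = getattr(self._client, method)(**kwargs)
            self._path.write_text(json.dumps(self._cache, indent=2))
        return self._cache[key]
```

With the mock gone, the tests in the diff below simply take the plain `llama_stack_client` fixture and run against live inference.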
@@ -56,8 +56,8 @@ def get_boiling_point_with_metadata(liquid_name: str, celcius: bool = True) -> d


 @pytest.fixture(scope="session")
-def agent_config(llama_stack_client_with_mocked_inference, text_model_id):
-    available_shields = [shield.identifier for shield in llama_stack_client_with_mocked_inference.shields.list()]
+def agent_config(llama_stack_client, text_model_id):
+    available_shields = [shield.identifier for shield in llama_stack_client.shields.list()]
     available_shields = available_shields[:1]
     agent_config = dict(
         model=text_model_id,
@@ -77,8 +77,8 @@ def agent_config(llama_stack_client_with_mocked_inference, text_model_id):
     return agent_config


-def test_agent_simple(llama_stack_client_with_mocked_inference, agent_config):
-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+def test_agent_simple(llama_stack_client, agent_config):
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     simple_hello = agent.create_turn(
@@ -179,7 +179,7 @@ def test_agent_name(llama_stack_client, text_model_id):
     assert "hello" in agent_logs[0]["output"].lower()


-def test_tool_config(llama_stack_client_with_mocked_inference, agent_config):
+def test_tool_config(agent_config):
     common_params = dict(
         model="meta-llama/Llama-3.2-3B-Instruct",
         instructions="You are a helpful assistant",
@@ -235,7 +235,7 @@ def test_tool_config(llama_stack_client_with_mocked_inference, agent_config):
     Server__AgentConfig(**agent_config)


-def test_builtin_tool_web_search(llama_stack_client_with_mocked_inference, agent_config):
+def test_builtin_tool_web_search(llama_stack_client, agent_config):
     agent_config = {
         **agent_config,
         "instructions": "You are a helpful assistant that can use web search to answer questions.",
@@ -243,7 +243,7 @@ def test_builtin_tool_web_search(llama_stack_client_with_mocked_inference, agent
             "builtin::websearch",
         ],
     }
-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     response = agent.create_turn(
@@ -266,14 +266,14 @@ def test_builtin_tool_web_search(llama_stack_client_with_mocked_inference, agent
     assert found_tool_execution


-def test_builtin_tool_code_execution(llama_stack_client_with_mocked_inference, agent_config):
+def test_builtin_tool_code_execution(llama_stack_client, agent_config):
     agent_config = {
         **agent_config,
         "tools": [
             "builtin::code_interpreter",
         ],
     }
-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     response = agent.create_turn(
@@ -296,7 +296,7 @@ def test_builtin_tool_code_execution(llama_stack_client_with_mocked_inference, a
 # server, this means the _server_ must have `bwrap` available. If you are using library client, then
 # you must have `bwrap` available in test's environment.
 @pytest.mark.skip(reason="Code interpreter is currently disabled in the Stack")
-def test_code_interpreter_for_attachments(llama_stack_client_with_mocked_inference, agent_config):
+def test_code_interpreter_for_attachments(llama_stack_client, agent_config):
     agent_config = {
         **agent_config,
         "tools": [
@@ -304,7 +304,7 @@ def test_code_interpreter_for_attachments(llama_stack_client_with_mocked_inferen
         ],
     }

-    codex_agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    codex_agent = Agent(llama_stack_client, **agent_config)
     session_id = codex_agent.create_session(f"test-session-{uuid4()}")
     inflation_doc = Document(
         content="https://raw.githubusercontent.com/meta-llama/llama-stack-apps/main/examples/resources/inflation.csv",
@@ -332,14 +332,14 @@ def test_code_interpreter_for_attachments(llama_stack_client_with_mocked_inferen
     assert "Tool:code_interpreter" in logs_str


-def test_custom_tool(llama_stack_client_with_mocked_inference, agent_config):
+def test_custom_tool(llama_stack_client, agent_config):
     client_tool = get_boiling_point
     agent_config = {
         **agent_config,
         "tools": [client_tool],
     }

-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     response = agent.create_turn(
@@ -358,7 +358,7 @@ def test_custom_tool(llama_stack_client_with_mocked_inference, agent_config):
     assert "get_boiling_point" in logs_str


-def test_custom_tool_infinite_loop(llama_stack_client_with_mocked_inference, agent_config):
+def test_custom_tool_infinite_loop(llama_stack_client, agent_config):
     client_tool = get_boiling_point
     agent_config = {
         **agent_config,
@@ -367,7 +367,7 @@ def test_custom_tool_infinite_loop(llama_stack_client_with_mocked_inference, age
         "max_infer_iters": 5,
     }

-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     response = agent.create_turn(
@@ -385,25 +385,21 @@ def test_custom_tool_infinite_loop(llama_stack_client_with_mocked_inference, age
     assert num_tool_calls <= 5


-def test_tool_choice_required(llama_stack_client_with_mocked_inference, agent_config):
-    tool_execution_steps = run_agent_with_tool_choice(
-        llama_stack_client_with_mocked_inference, agent_config, "required"
-    )
+def test_tool_choice_required(llama_stack_client, agent_config):
+    tool_execution_steps = run_agent_with_tool_choice(llama_stack_client, agent_config, "required")
     assert len(tool_execution_steps) > 0


-def test_tool_choice_none(llama_stack_client_with_mocked_inference, agent_config):
-    tool_execution_steps = run_agent_with_tool_choice(llama_stack_client_with_mocked_inference, agent_config, "none")
+def test_tool_choice_none(llama_stack_client, agent_config):
+    tool_execution_steps = run_agent_with_tool_choice(llama_stack_client, agent_config, "none")
     assert len(tool_execution_steps) == 0


-def test_tool_choice_get_boiling_point(llama_stack_client_with_mocked_inference, agent_config):
+def test_tool_choice_get_boiling_point(llama_stack_client, agent_config):
     if "llama" not in agent_config["model"].lower():
         pytest.xfail("NotImplemented for non-llama models")

-    tool_execution_steps = run_agent_with_tool_choice(
-        llama_stack_client_with_mocked_inference, agent_config, "get_boiling_point"
-    )
+    tool_execution_steps = run_agent_with_tool_choice(llama_stack_client, agent_config, "get_boiling_point")
     assert len(tool_execution_steps) >= 1 and tool_execution_steps[0].tool_calls[0].tool_name == "get_boiling_point"


@@ -435,7 +431,7 @@ def run_agent_with_tool_choice(client, agent_config, tool_choice):


 @pytest.mark.parametrize("rag_tool_name", ["builtin::rag/knowledge_search", "builtin::rag"])
-def test_rag_agent(llama_stack_client_with_mocked_inference, agent_config, rag_tool_name):
+def test_rag_agent(llama_stack_client, agent_config, rag_tool_name):
     urls = ["chat.rst", "llama3.rst", "memory_optimizations.rst", "lora_finetune.rst"]
     documents = [
         Document(
@@ -447,12 +443,12 @@ def test_rag_agent(llama_stack_client_with_mocked_inference, agent_config, rag_t
         for i, url in enumerate(urls)
     ]
     vector_db_id = f"test-vector-db-{uuid4()}"
-    llama_stack_client_with_mocked_inference.vector_dbs.register(
+    llama_stack_client.vector_dbs.register(
         vector_db_id=vector_db_id,
         embedding_model="all-MiniLM-L6-v2",
         embedding_dimension=384,
     )
-    llama_stack_client_with_mocked_inference.tool_runtime.rag_tool.insert(
+    llama_stack_client.tool_runtime.rag_tool.insert(
         documents=documents,
         vector_db_id=vector_db_id,
         # small chunks help to get specific info out of the docs
@@ -469,7 +465,7 @@ def test_rag_agent(llama_stack_client_with_mocked_inference, agent_config, rag_t
             )
         ],
     }
-    rag_agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    rag_agent = Agent(llama_stack_client, **agent_config)
     session_id = rag_agent.create_session(f"test-session-{uuid4()}")
     user_prompts = [
         (
@@ -494,7 +490,7 @@ def test_rag_agent(llama_stack_client_with_mocked_inference, agent_config, rag_t
         assert expected_kw in response.output_message.content.lower()


-def test_rag_agent_with_attachments(llama_stack_client_with_mocked_inference, agent_config):
+def test_rag_agent_with_attachments(llama_stack_client, agent_config):
     urls = ["llama3.rst", "lora_finetune.rst"]
     documents = [
         # passign as url
@@ -517,7 +513,7 @@ def test_rag_agent_with_attachments(llama_stack_client_with_mocked_inference, ag
             metadata={},
         ),
     ]
-    rag_agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    rag_agent = Agent(llama_stack_client, **agent_config)
     session_id = rag_agent.create_session(f"test-session-{uuid4()}")
     user_prompts = [
         (
@@ -553,7 +549,7 @@ def test_rag_agent_with_attachments(llama_stack_client_with_mocked_inference, ag


 @pytest.mark.skip(reason="Code interpreter is currently disabled in the Stack")
-def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_config):
+def test_rag_and_code_agent(llama_stack_client, agent_config):
     if "llama-4" in agent_config["model"].lower():
         pytest.xfail("Not working for llama4")

@@ -578,12 +574,12 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_conf
             )
         )
     vector_db_id = f"test-vector-db-{uuid4()}"
-    llama_stack_client_with_mocked_inference.vector_dbs.register(
+    llama_stack_client.vector_dbs.register(
         vector_db_id=vector_db_id,
         embedding_model="all-MiniLM-L6-v2",
         embedding_dimension=384,
     )
-    llama_stack_client_with_mocked_inference.tool_runtime.rag_tool.insert(
+    llama_stack_client.tool_runtime.rag_tool.insert(
         documents=documents,
         vector_db_id=vector_db_id,
         chunk_size_in_tokens=128,
@@ -598,7 +594,7 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_conf
             "builtin::code_interpreter",
         ],
     }
-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     user_prompts = [
         (
             "when was Perplexity the company founded?",
@@ -632,7 +628,7 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_conf
     "client_tools",
     [(get_boiling_point, False), (get_boiling_point_with_metadata, True)],
 )
-def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_config, client_tools):
+def test_create_turn_response(llama_stack_client, agent_config, client_tools):
     client_tool, expects_metadata = client_tools
     agent_config = {
         **agent_config,
@@ -641,7 +637,7 @@ def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_co
         "tools": [client_tool],
     }

-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     input_prompt = f"Call {client_tools[0].__name__} tool and answer What is the boiling point of polyjuice?"
@@ -677,7 +673,7 @@ def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_co
             last_step_completed_at = step.completed_at


-def test_multi_tool_calls(llama_stack_client_with_mocked_inference, agent_config):
+def test_multi_tool_calls(llama_stack_client, agent_config):
     if "gpt" not in agent_config["model"]:
         pytest.xfail("Only tested on GPT models")

@@ -686,7 +682,7 @@ def test_multi_tool_calls(llama_stack_client_with_mocked_inference, agent_config
         "tools": [get_boiling_point],
     }

-    agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
+    agent = Agent(llama_stack_client, **agent_config)
     session_id = agent.create_session(f"test-session-{uuid4()}")

     response = agent.create_turn(