Mirror of https://github.com/meta-llama/llama-stack.git
debug

commit 52776ca897
parent 07806f85c7

1 changed file with 4 additions and 0 deletions
@@ -67,6 +67,7 @@ async def agents_stack(request, inference_model, safety_shield):
     for key in ["inference", "safety", "memory", "agents"]:
         fixture = request.getfixturevalue(f"{key}_{fixture_dict[key]}")
         providers[key] = fixture.providers
+        print(f"fixture {key} providers: {providers[key]}")
         if key == "inference":
             providers[key].append(
                 Provider(
@@ -81,6 +82,9 @@ async def agents_stack(request, inference_model, safety_shield):
     inference_models = (
         inference_model if isinstance(inference_model, list) else [inference_model]
     )
+    print(providers)
+
+    print(inference_models, safety_shield)
     models = [
         ModelInput(
             model_id=model,
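For context, both hunks sit inside the `agents_stack` pytest fixture, which resolves provider fixtures by name via `request.getfixturevalue(f"{key}_{fixture_dict[key]}")` before this commit adds its debug prints. Below is a minimal, self-contained sketch of that dynamic-resolution pattern; the fixture names and return values are hypothetical placeholders, not taken from the repository.

# Minimal sketch of the dynamic-fixture pattern used by agents_stack.
# Fixture names and return values here are hypothetical placeholders.
import pytest


@pytest.fixture
def inference_stub():
    # Hypothetical stand-in for a concrete provider fixture (e.g. an inference backend).
    return {"providers": ["stub-inference-provider"]}


@pytest.fixture
def resolve_providers(request):
    # Factory fixture: looks up other fixtures by name at test time,
    # mirroring request.getfixturevalue(f"{key}_{fixture_dict[key]}") in the diff.
    def _resolve(fixture_dict):
        providers = {}
        for key in fixture_dict:
            fixture = request.getfixturevalue(f"{key}_{fixture_dict[key]}")
            providers[key] = fixture["providers"]
            # Same style of debug print the commit adds.
            print(f"fixture {key} providers: {providers[key]}")
        return providers

    return _resolve


def test_dynamic_fixture_resolution(resolve_providers):
    providers = resolve_providers({"inference": "stub"})
    assert providers["inference"] == ["stub-inference-provider"]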