Fixes to the llama stack configure script + inference adapters

This commit is contained in:
Ashwin Bharambe 2024-09-03 23:22:21 -07:00
parent 4869f2b983
commit 1380d78c19
11 changed files with 124 additions and 37 deletions

View file

@@ -42,8 +42,8 @@ def available_inference_providers() -> List[ProviderSpec]:
pip_packages=[
"fireworks-ai",
],
-module="llama_toolchain.inference.fireworks",
-config_class="llama_toolchain.inference.fireworks.FireworksImplConfig",
+module="llama_toolchain.inference.adapters.fireworks",
+config_class="llama_toolchain.inference.adapters.fireworks.FireworksImplConfig",
),
),
remote_provider_spec(
@@ -53,8 +53,8 @@ def available_inference_providers() -> List[ProviderSpec]:
pip_packages=[
"together",
],
-module="llama_toolchain.inference.together",
-config_class="llama_toolchain.inference.together.TogetherImplConfig",
+module="llama_toolchain.inference.adapters.together",
+config_class="llama_toolchain.inference.adapters.together.TogetherImplConfig",
),
),
]