Codemod from llama_toolchain -> llama_stack

- added providers/registry
- cleaned up api/ subdirectories and moved impls away
- restructured api/api.py
- from llama_stack.apis.<api> import foo should work now
- updated imports to use llama_stack.apis.<api>
- updated many other imports
- added __init__, fixed some registry imports
- updated registry imports
- create_agentic_system -> create_agent
- AgenticSystem -> Agent (a sketch of these rewrites follows below)
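
As a rough illustration of what the rename means in practice, here is a minimal sketch of the kind of mechanical rewrite this codemod performs. The old llama_toolchain.<api>.api layout and the ChatCompletionRequest symbol are assumptions made for illustration; only the new llama_stack.apis.<api> paths and the create_agent/Agent renames are established by this commit.

import re

# Ordered rewrite rules; the most specific pattern runs first.
RENAMES = [
    # assumed old layout: llama_toolchain.<api>.api -> llama_stack.apis.<api>
    (r"\bllama_toolchain\.(\w+)\.api\b", r"llama_stack.apis.\1"),
    # catch-all for any remaining references to the old package name
    (r"\bllama_toolchain\b", "llama_stack"),
    # API renames from this commit
    (r"\bcreate_agentic_system\b", "create_agent"),
    (r"\bAgenticSystem\b", "Agent"),
]


def rewrite(source: str) -> str:
    for pattern, repl in RENAMES:
        source = re.sub(pattern, repl, source)
    return source


print(rewrite("from llama_toolchain.inference.api import ChatCompletionRequest"))
# -> from llama_stack.apis.inference import ChatCompletionRequest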
Ashwin Bharambe 2024-09-16 17:34:07 -07:00
parent 2cf731faea
commit 76b354a081
128 changed files with 381 additions and 376 deletions

@@ -1,69 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_stack.core.datatypes import *  # noqa: F403


def available_providers() -> List[ProviderSpec]:
    """Registry of inference providers: one inline (in-process)
    implementation plus remote adapters for hosted inference services."""
    return [
        # Meta's reference implementation, which runs inference in-process.
        InlineProviderSpec(
            api=Api.inference,
            provider_id="meta-reference",
            pip_packages=[
                "accelerate",
                "blobfile",
                "codeshield",
                "fairscale",
                "fbgemm-gpu==0.8.0",
                "torch",
                "transformers",
                "zmq",
            ],
            module="llama_stack.inference.meta_reference",
            config_class="llama_stack.inference.meta_reference.MetaReferenceImplConfig",
        ),
        # Remote adapters: each one proxies requests to a third-party service.
        remote_provider_spec(
            api=Api.inference,
            adapter=AdapterSpec(
                adapter_id="ollama",
                pip_packages=["ollama"],
                module="llama_stack.inference.adapters.ollama",
            ),
        ),
        remote_provider_spec(
            api=Api.inference,
            adapter=AdapterSpec(
                adapter_id="tgi",
                pip_packages=["huggingface_hub"],
                module="llama_stack.inference.adapters.tgi",
                config_class="llama_stack.inference.adapters.tgi.TGIImplConfig",
            ),
        ),
        remote_provider_spec(
            api=Api.inference,
            adapter=AdapterSpec(
                adapter_id="fireworks",
                pip_packages=["fireworks-ai"],
                module="llama_stack.inference.adapters.fireworks",
                config_class="llama_stack.inference.adapters.fireworks.FireworksImplConfig",
            ),
        ),
        remote_provider_spec(
            api=Api.inference,
            adapter=AdapterSpec(
                adapter_id="together",
                pip_packages=["together"],
                module="llama_stack.inference.adapters.together",
                config_class="llama_stack.inference.adapters.together.TogetherImplConfig",
            ),
        ),
    ]
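
The hunk header shows this registry file being deleted; per the commit message, its contents move under the new providers/registry tree. A hypothetical consumer sketch follows to show how the registry's fields fit together. The import path reflects the new providers/registry layout but is an assumption, as is treating pip_packages as a field on every returned spec (it is only visible here as a constructor argument).

from typing import List

from llama_stack.providers.registry.inference import available_providers  # assumed new location


def pip_packages_for(provider_id: str) -> List[str]:
    # Scan the registry for a matching provider and report what must be
    # pip-installed before that provider can be used.
    for spec in available_providers():
        if spec.provider_id == provider_id:
            return spec.pip_packages
    raise KeyError(f"unknown provider: {provider_id}")


print(pip_packages_for("meta-reference"))
# ['accelerate', 'blobfile', 'codeshield', 'fairscale', ...]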