Merge branch 'meta-llama:main' into main

Commit 4136accf48 by Chacksu, 2024-11-21 19:49:53 -05:00, committed via GitHub.
6 changed files with 7 additions and 14 deletions

@@ -1,5 +1,4 @@
-# Llama Stack Distributions
+# Building Llama Stacks
 ```{toctree}
 :maxdepth: 2
@@ -12,6 +11,7 @@ ondevice_distro/index
 ## Introduction
 Llama Stack Distributions are pre-built Docker containers/Conda environments that assemble APIs and Providers to provide a consistent whole to the end application developer.
 These distributions allow you to mix-and-match providers - some could be backed by local code and some could be remote. This flexibility enables you to choose the optimal setup for your use case, such as serving a small model locally while using a cloud provider for larger models, all while maintaining a consistent API interface for your application.
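The "consistent API interface" point in that paragraph is visible from the client side: the same application code can target a local or a remote distribution just by swapping the endpoint. A minimal sketch, assuming a llama-stack-client install of this era; the base URLs and model name are placeholders, and exact method signatures may vary by client version:

```python
from llama_stack_client import LlamaStackClient

# Placeholder endpoints: one distribution served locally, one hosted remotely.
local = LlamaStackClient(base_url="http://localhost:5000")
remote = LlamaStackClient(base_url="https://my-hosted-stack.example.com")

# The application code is identical either way; only base_url differs.
for client in (local, remote):
    response = client.inference.chat_completion(
        model="Llama3.2-3B-Instruct",  # placeholder model identifier
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response)
```

Only the endpoint selection changes between the local and hosted setups; nothing in the request-building code needs to know which provider serves the model.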

@@ -149,7 +149,6 @@ if __name__ == "__main__":
 ## Next Steps
-- You can mix and match different providers for inference, memory, agents, evals etc. See [Building custom distributions](../distributions/index.md)
-- [Developer Cookbook](developer_cookbook.md)
+You can mix and match different providers for inference, memory, agents, evals etc. See [Building Llama Stacks](../distributions/index.md)
 For example applications and more detailed tutorials, visit our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repository.

@@ -396,12 +396,6 @@ class ChatAgent(ShieldRunnerMixin):
         n_iter = 0
         while True:
             msg = input_messages[-1]
-            if msg.role == Role.user.value:
-                color = "blue"
-            elif msg.role == Role.ipython.value:
-                color = "yellow"
-            else:
-                color = None
             if len(str(msg)) > 1000:
                 msg_str = f"{str(msg)[:500]}...<more>...{str(msg)[-500:]}"
             else:
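This hunk drops the per-role terminal coloring; the length-based truncation below it survives. For reference, that surviving logic amounts to something like the following standalone sketch (the helper name and parameters are hypothetical, not from the repo):

```python
def truncate_for_log(msg: str, limit: int = 1000, keep: int = 500) -> str:
    # Keep only the first and last `keep` characters of very long
    # messages so debug output stays readable.
    if len(msg) > limit:
        return f"{msg[:keep]}...<more>...{msg[-keep:]}"
    return msg

print(truncate_for_log("x" * 2000))  # prints "xxx...<more>...xxx"-style output
```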

@@ -18,4 +18,4 @@ class LogFormat(Enum):
 @json_schema_type
 class ConsoleConfig(BaseModel):
-    log_format: LogFormat = LogFormat.JSON
+    log_format: LogFormat = LogFormat.TEXT
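The effect of this default change, sketched with stand-in definitions (the enum values below are assumptions for illustration, not copied from the repo): a bare ConsoleConfig() now formats logs as plain text, and JSON output becomes opt-in.

```python
from enum import Enum
from pydantic import BaseModel

class LogFormat(Enum):
    TEXT = "text"  # assumed enum values, for illustration only
    JSON = "json"

class ConsoleConfig(BaseModel):
    log_format: LogFormat = LogFormat.TEXT  # the new default

print(ConsoleConfig().log_format)                           # LogFormat.TEXT
print(ConsoleConfig(log_format=LogFormat.JSON).log_format)  # opt back in to JSON
```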

@@ -2,8 +2,8 @@ blobfile
 fire
 httpx
 huggingface-hub
-llama-models>=0.0.53
-llama-stack-client>=0.0.53
+llama-models>=0.0.54
+llama-stack-client>=0.0.54
 prompt-toolkit
 python-dotenv
 pydantic>=2

@@ -16,7 +16,7 @@ def read_requirements():
 setup(
     name="llama_stack",
-    version="0.0.53",
+    version="0.0.54",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama Stack",