Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-02 00:34:44 +00:00
Merge branch 'meta-llama:main' into main
Commit: 4136accf48
6 changed files with 7 additions and 14 deletions
````diff
@@ -1,5 +1,4 @@
-# Llama Stack Distributions
+# Building Llama Stacks
 
 ```{toctree}
 :maxdepth: 2
````
```diff
@@ -12,6 +11,7 @@ ondevice_distro/index
 
 ## Introduction
 
 Llama Stack Distributions are pre-built Docker containers/Conda environments that assemble APIs and Providers to provide a consistent whole to the end application developer.
 
+These distributions allow you to mix-and-match providers - some could be backed by local code and some could be remote. This flexibility enables you to choose the optimal setup for your use case, such as serving a small model locally while using a cloud provider for larger models, all while maintaining a consistent API interface for your application.
 
```
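Because every distribution serves the same API, application code does not change when providers are swapped. A minimal sketch using the llama-stack-client package pinned later in this commit; the base URL and model name are placeholders, and the exact method signature may differ across client versions:

```python
# Minimal sketch: call a running Llama Stack distribution.
# Assumes a distribution is already serving on localhost:5000;
# the port and model name are placeholders for your run config.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

# The same call works whether the inference provider behind this
# endpoint runs locally or is a remote/cloud provider.
response = client.inference.chat_completion(
    model="Llama3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)
```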
```diff
@@ -149,7 +149,6 @@ if __name__ == "__main__":
 
 ## Next Steps
 
-- You can mix and match different providers for inference, memory, agents, evals etc. See [Building custom distributions](../distributions/index.md)
-- [Developer Cookbook](developer_cookbook.md)
+You can mix and match different providers for inference, memory, agents, evals etc. See [Building Llama Stacks](../distributions/index.md)
 
 For example applications and more detailed tutorials, visit our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repository.
```
```diff
@@ -396,12 +396,6 @@ class ChatAgent(ShieldRunnerMixin):
         n_iter = 0
         while True:
-            msg = input_messages[-1]
-            if msg.role == Role.user.value:
-                color = "blue"
-            elif msg.role == Role.ipython.value:
-                color = "yellow"
-            else:
-                color = None
-            if len(str(msg)) > 1000:
-                msg_str = f"{str(msg)[:500]}...<more>...{str(msg)[-500:]}"
-            else:
```
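The deleted block inlined role-based coloring and two-ended truncation of long messages for console output. As a standalone illustration of that truncation idiom (the helper name is hypothetical; the 1000/500 limits mirror the deleted code):

```python
def preview(msg: str, limit: int = 1000, keep: int = 500) -> str:
    # Hypothetical helper illustrating the idiom the deleted lines
    # inlined: keep both ends of an over-long message for logging.
    if len(msg) <= limit:
        return msg
    return f"{msg[:keep]}...<more>...{msg[-keep:]}"

print(preview("x" * 2500))  # 500 x's, "...<more>...", then the last 500 x's
```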
```diff
@@ -18,4 +18,4 @@ class LogFormat(Enum):
 
 @json_schema_type
 class ConsoleConfig(BaseModel):
-    log_format: LogFormat = LogFormat.JSON
+    log_format: LogFormat = LogFormat.TEXT
```
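The behavioral effect of the flipped default, as a self-contained sketch; the enum member values and the omitted @json_schema_type decorator are assumptions, and only the names shown in the hunk come from the source:

```python
from enum import Enum

from pydantic import BaseModel


class LogFormat(Enum):
    TEXT = "text"  # member values are assumed; only the names appear in the hunk
    JSON = "json"


class ConsoleConfig(BaseModel):
    log_format: LogFormat = LogFormat.TEXT  # was LogFormat.JSON before this commit


print(ConsoleConfig().log_format)  # LogFormat.TEXT
```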
```diff
@@ -2,8 +2,8 @@ blobfile
 fire
 httpx
 huggingface-hub
-llama-models>=0.0.53
-llama-stack-client>=0.0.53
+llama-models>=0.0.54
+llama-stack-client>=0.0.54
 prompt-toolkit
 python-dotenv
 pydantic>=2
```
setup.py
```diff
@@ -16,7 +16,7 @@ def read_requirements():
 
 setup(
     name="llama_stack",
-    version="0.0.53",
+    version="0.0.54",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama Stack",
```
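The package version moves in lockstep with the llama-models and llama-stack-client pins above. A sketch of how read_requirements() plausibly feeds setup(); the function body and the find_packages/install_requires lines are assumptions, and only the fields visible in the hunk are from the source:

```python
from setuptools import find_packages, setup


def read_requirements():
    # Assumed body: read requirements.txt, skipping blanks and comments.
    with open("requirements.txt") as f:
        return [line.strip() for line in f if line.strip() and not line.startswith("#")]


setup(
    name="llama_stack",
    version="0.0.54",  # bumped from 0.0.53 in this commit
    author="Meta Llama",
    author_email="llama-oss@meta.com",
    description="Llama Stack",
    packages=find_packages(),
    install_requires=read_requirements(),
)
```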