mirror of https://github.com/meta-llama/llama-stack.git

address feedback

This commit is contained in: parent 15200e23ba, commit efdc67e5b7
7 changed files with 322 additions and 30 deletions

@@ -1,14 +1,12 @@
# Getting Started with Llama Stack

```{toctree}
:maxdepth: 2
:hidden:
```

In this guide, we'll walk through using ollama as the inference provider and build a simple Python application that uses the Llama Stack Client SDK.

Llama Stack consists of a distribution server and an accompanying client SDK. The distribution server can be configured with different providers for inference, memory, agents, evals, etc. This configuration is defined in a YAML file called `run.yaml`.

Running inference on the underlying Llama model is one of the most critical requirements. Depending on the hardware you have available, you have several options, each with different prerequisites. We will use ollama as the inference provider, as it is the easiest to get started with.

### Step 1. Start the inference server
```bash
export LLAMA_STACK_PORT=5001

@@ -33,12 +31,11 @@ docker run \
```

### Step 3. Use the Llama Stack client SDK
```bash
pip install llama-stack-client
```

#### Check the connectivity to the server
We will use the `llama-stack-client` CLI to check connectivity to the server. It is installed in your environment along with the SDK.
```bash
llama-stack-client --endpoint http://localhost:5001 models list

@@ -49,7 +46,12 @@ llama-stack-client --endpoint http://localhost:5001 models list
└──────────────────────────────────┴─────────────┴───────────────────────────┴──────────┘
```
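
You can run the same check through the client SDK instead of the CLI. The sketch below assumes the server from the previous steps is reachable on `localhost:5001`; it lists the registered models just like the CLI command above.

```python
# Connectivity check via the client SDK instead of the CLI.
# Assumes the Llama Stack server is listening on localhost:5001.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")

# models.list() returns the models registered with the distribution;
# printing their identifiers mirrors `llama-stack-client models list`.
for model in client.models.list():
    print(model.identifier)
```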

Chat completion using the CLI:
```bash
llama-stack-client --endpoint http://localhost:5001 inference chat_completion --message "hello, what model are you?"
```

A simple Python example using the client SDK:
```python
from llama_stack_client import LlamaStackClient

@@ -70,13 +72,136 @@ response = client.inference.chat_completion(
print(response.completion_message.content)
```
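
The snippet above is truncated by the diff. For reference, here is a self-contained sketch of the same call under a few assumptions: the server runs on `localhost:5001`, the model identifier is a placeholder for one returned by `models list`, and depending on your `llama-stack-client` version the model keyword may be `model` or `model_id`.

```python
# Self-contained sketch of the chat completion example above.
# base_url and the model identifier are placeholders: use the port you exported
# in Step 1 and one of the identifiers returned by `models list`.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")

response = client.inference.chat_completion(
    messages=[
        {"role": "system", "content": "You are a friendly assistant."},
        {"role": "user", "content": "Write a two-sentence poem about llamas."},
    ],
    # Newer client versions may name this keyword `model_id` instead of `model`.
    model="meta-llama/Llama-3.2-3B-Instruct",
)

print(response.completion_message.content)
```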

### Step 4. Your first RAG agent

The script below builds a small RAG agent: it attaches the Torchtune documentation pages and then answers questions about them. A similar, complete example, [rag_with_memory_bank.py](https://github.com/meta-llama/llama-stack-apps/blob/main/examples/agents/rag_with_memory_bank.py), is available in the llama-stack-apps repository.

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio

import fire

from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from llama_stack_client.types import Attachment
from llama_stack_client.types.agent_create_params import AgentConfig


async def run_main(host: str, port: int, disable_safety: bool = False):
    # Torchtune documentation pages that the agent will use as its knowledge source.
    urls = [
        "memory_optimizations.rst",
        "chat.rst",
        "llama3.rst",
        "datasets.rst",
        "qat_finetune.rst",
        "lora_finetune.rst",
    ]

    attachments = [
        Attachment(
            content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}",
            mime_type="text/plain",
        )
        for url in urls
    ]

    client = LlamaStackClient(
        base_url=f"http://{host}:{port}",
    )

    # Use whatever safety shields the distribution exposes, if any.
    available_shields = [shield.identifier for shield in client.shields.list()]
    if not available_shields:
        print("No available shields. Disabling safety.")
    else:
        print(f"Available shields found: {available_shields}")

    # Pick the first model registered with the distribution.
    available_models = [model.identifier for model in client.models.list()]
    if not available_models:
        raise ValueError("No available models")
    else:
        selected_model = available_models[0]
        print(f"Using model: {selected_model}")

    # Configure an agent with a memory tool so the attached documents can be retrieved (RAG).
    agent_config = AgentConfig(
        model=selected_model,
        instructions="You are a helpful assistant",
        sampling_params={
            "strategy": "greedy",
            "temperature": 1.0,
            "top_p": 0.9,
        },
        tools=[
            {
                "type": "memory",
                "memory_bank_configs": [],
                "query_generator_config": {"type": "default", "sep": " "},
                "max_tokens_in_context": 4096,
                "max_chunks": 10,
            },
        ],
        tool_choice="auto",
        tool_prompt_format="json",
        input_shields=available_shields if available_shields else [],
        output_shields=available_shields if available_shields else [],
        enable_session_persistence=False,
    )

    agent = Agent(client, agent_config)
    session_id = agent.create_session("test-session")
    print(f"Created session_id={session_id} for Agent({agent.agent_id})")

    # Each prompt is a (message, attachments) pair; only the first turn attaches documents.
    user_prompts = [
        (
            "I am attaching some documentation for Torchtune. Help me answer questions I will ask next.",
            attachments,
        ),
        (
            "What are the top 5 topics that were explained? Only list succinct bullet points.",
            None,
        ),
        (
            "Was anything related to 'Llama3' discussed, if so what?",
            None,
        ),
        (
            "Tell me how to use LoRA",
            None,
        ),
        (
            "What about Quantization?",
            None,
        ),
    ]

    for prompt in user_prompts:
        response = agent.create_turn(
            messages=[
                {
                    "role": "user",
                    "content": prompt[0],
                }
            ],
            attachments=prompt[1],
            session_id=session_id,
        )

        # Stream and pretty-print the agent's events for this turn.
        async for log in EventLogger().log(response):
            log.print()


def main(host: str, port: int):
    asyncio.run(run_main(host, port))


if __name__ == "__main__":
    fire.Fire(main)
```
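
Because the script hands `main` to `fire.Fire`, the usual way to run it is to save it to a file and pass the host and port on the command line. For a quick test you can also call `run_main` directly from Python, as in this sketch; the filename `rag_agent.py` is only an assumed name for the saved script.

```python
# Drive the agent above without the fire CLI wrapper.
# Assumes the example was saved as rag_agent.py (hypothetical filename) and the
# Llama Stack server is listening on localhost:5001.
import asyncio

from rag_agent import run_main

asyncio.run(run_main("localhost", 5001))
```
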
## Next Steps
For more advanced topics, check out:
- You can mix and match different providers for inference, memory, agents, evals, etc. See [Building custom distributions](../distributions/index.md).
- [Developer Cookbook](developer_cookbook.md)