mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-02 08:44:44 +00:00
enable session persistence
This commit is contained in:
parent
0185d620eb
commit
a2fc114b64
1 changed files with 2 additions and 1 deletions
|
@@ -62,7 +62,7 @@ llama-stack-client --endpoint http://localhost:$LLAMA_STACK_PORT models list

 You can test basic Llama inference completion using the CLI too.

 ```bash
 llama-stack-client --endpoint http://localhost:$LLAMA_STACK_PORT \
-inference chat-completion \
+inference chat_completion \
 --message "hello, what model are you?"
 ```
@@ -118,6 +118,7 @@ async def run_main():
         model=os.environ["INFERENCE_MODEL"],
         instructions="You are a helpful assistant",
         tools=[{"type": "memory"}],  # enable Memory aka RAG
+        enable_session_persistence=True,
     )

     agent = Agent(client, agent_config)
Loading…
Add table
Add a link
Reference in a new issue