From 4afa23dbff1a2fc1df39b223433637f4c9418a0f Mon Sep 17 00:00:00 2001 From: Omar Abdelwahab Date: Wed, 8 Oct 2025 13:54:59 -0700 Subject: [PATCH] Updated Test Chat Completion section for step 4 of method 3 --- .../configuring_and_launching_llama_stack.md | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/docs/docs/getting_started/configuring_and_launching_llama_stack.md b/docs/docs/getting_started/configuring_and_launching_llama_stack.md index 349d8c1cd..5f92fdd7e 100644 --- a/docs/docs/getting_started/configuring_and_launching_llama_stack.md +++ b/docs/docs/getting_started/configuring_and_launching_llama_stack.md @@ -322,19 +322,30 @@ llama-stack-client providers list ``` #### Test Chat Completion +Verify with the client (recommended): + ```bash -# Basic HTTP test +# Verify providers are configured correctly (recommended first step) +uv run --with llama-stack-client llama-stack-client providers list + +# Test chat completion using the client +uv run --with llama-stack-client llama-stack-client inference chat-completion \ --model llama3.1:8b \ --message "Hello!" + +# Alternatively, if you have llama-stack-client installed +llama-stack-client providers list +llama-stack-client inference chat-completion \ --model llama3.1:8b \ --message "Hello!" + +# Or using a basic HTTP test curl -X POST http://localhost:8321/v1/chat/completions \ -H "Content-Type: application/json" \ -d '{ "model": "llama3.1:8b", "messages": [{"role": "user", "content": "Hello!"}] }' - -# Or using the client (more robust) -uv run --with llama-stack-client llama-stack-client inference chat-completion \ --model llama3.1:8b \ --message "Hello!" ``` ## Configuration Management