mirror of https://github.com/meta-llama/llama-stack.git
Merge branch 'main' into patch-1
commit 92c2edd61c
35 changed files with 1916 additions and 1589 deletions
```diff
@@ -13,7 +13,7 @@ llama stack build --template starter --image-type venv
 from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
 
 client = LlamaStackAsLibraryClient(
-    "ollama",
+    "starter",
     # provider_data is optional, but if you need to pass in any provider specific data, you can do so here.
     provider_data={"tavily_search_api_key": os.environ["TAVILY_SEARCH_API_KEY"]},
)
```
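For context, here is how the changed snippet reads in full after this hunk: a minimal runnable sketch that assumes `TAVILY_SEARCH_API_KEY` is exported in your environment. The trailing `client.initialize()` call is an assumption based on the usual library-client usage pattern, not part of this hunk.

```python
# Minimal sketch of the snippet after this change. Assumes the
# TAVILY_SEARCH_API_KEY environment variable is set before running.
import os

from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

client = LlamaStackAsLibraryClient(
    "starter",
    # provider_data is optional, but if you need to pass in any provider
    # specific data, you can do so here.
    provider_data={"tavily_search_api_key": os.environ["TAVILY_SEARCH_API_KEY"]},
)
client.initialize()  # assumption: the library client is initialized before use
```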
````diff
@@ -12,8 +12,7 @@ To enable external providers, you need to add `module` into your build yaml, all
 an example entry in your build.yaml should look like:
 
 ```
-- provider_id: ramalama
-  provider_type: remote::ramalama
+- provider_type: remote::ramalama
   module: ramalama_stack
 ```
````
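The `module` key names a pip-installable package that supplies the external provider. A conceptual sketch of what that implies, shown below; this is not llama-stack's actual loader code, just an illustration that the value must be an importable package name.

```python
# Conceptual sketch only; NOT llama-stack's actual provider loader.
# It illustrates what the `module` key in build.yaml implies: the value
# names an importable Python package that provides the external provider.
import importlib

entry = {"provider_type": "remote::ramalama", "module": "ramalama_stack"}

# Requires `pip install ramalama_stack` to have been run beforehand.
provider_pkg = importlib.import_module(entry["module"])
print(provider_pkg.__name__)  # -> ramalama_stack
```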
```diff
@@ -255,8 +254,7 @@ distribution_spec:
   container_image: null
   providers:
     inference:
-    - provider_id: ramalama
-      provider_type: remote::ramalama
+    - provider_type: remote::ramalama
       module: ramalama_stack==0.3.0a0
 image_type: venv
 image_name: null
```
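Note that `module` also carries a pip-style version pin here (`ramalama_stack==0.3.0a0`). If your own tooling needs to split such a pin into name and specifier, a hedged sketch using the third-party `packaging` library follows; this is an assumption about your tooling, not something llama-stack does in this hunk.

```python
# Hedged sketch: parse a pip-style pin like the one in the hunk above.
# Uses the third-party `packaging` library; tooling of our own choosing,
# not part of llama-stack itself.
from packaging.requirements import Requirement

req = Requirement("ramalama_stack==0.3.0a0")
print(req.name)            # -> ramalama_stack
print(str(req.specifier))  # -> ==0.3.0a0
```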
````diff
@@ -9,11 +9,13 @@ OpenAI inference provider for accessing GPT models and other OpenAI services.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `api_key` | `str \| None` | No | | API key for OpenAI models |
+| `base_url` | `<class 'str'>` | No | https://api.openai.com/v1 | Base URL for OpenAI API |
 
 ## Sample Configuration
 
 ```yaml
 api_key: ${env.OPENAI_API_KEY:=}
+base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1}
 
 ```
````
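The `${env.VAR:=default}` syntax in the sample configuration reads the variable from the environment and falls back to the text after `:=` (empty for `api_key`). A small Python sketch that mimics this substitution is below; it illustrates the semantics and is not llama-stack's actual resolver.

```python
# Mimics the ${env.VAR:=default} substitution shown in the sample config.
# Illustration only; not llama-stack's actual environment resolver.
import os

api_key = os.environ.get("OPENAI_API_KEY", "")  # ${env.OPENAI_API_KEY:=}
base_url = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1")

print(f"api_key set: {bool(api_key)}, base_url: {base_url}")
```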