Merge remote-tracking branch 'origin/main' into resp_branching
@@ -102,7 +102,7 @@ You can start a chroma-db easily using docker.
 # This is where the indices are persisted
 mkdir -p $HOME/chromadb

-podman run --rm -it \
+docker run --rm -it \
 --network host \
 --name chromadb \
 -v $HOME/chromadb:/chroma/chroma \

@@ -127,7 +127,7 @@ docker run -it \
 -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
 -v $HOME/.llama:/root/.llama \
 # NOTE: mount the llama-stack / llama-model directories if testing local changes else not needed
--v /home/hjshah/git/llama-stack:/app/llama-stack-source -v /home/hjshah/git/llama-models:/app/llama-models-source \
+-v $HOME/git/llama-stack:/app/llama-stack-source -v $HOME/git/llama-models:/app/llama-models-source \
 # localhost/distribution-dell:dev if building / testing locally
 llamastack/distribution-dell\
 --port $LLAMA_STACK_PORT \

@@ -14,13 +14,13 @@ Llama Stack is the open-source framework for building generative AI applications

 :::tip Llama 4 is here!

-Check out [Getting Started with Llama 4](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started_llama4.ipynb)
+Check out [Getting Started with Llama 4](https://colab.research.google.com/github/llamastack/llama-stack/blob/main/docs/getting_started_llama4.ipynb)

 :::

 :::tip News

-Llama Stack is now available! See the [release notes](https://github.com/meta-llama/llama-stack/releases) for more details.
+Llama Stack is now available! See the [release notes](https://github.com/llamastack/llama-stack/releases) for more details.

 :::

@@ -15,6 +15,50 @@ const config: Config = {
   onBrokenMarkdownLinks: "warn",
   favicon: "img/favicon.ico",

+  // Enhanced favicon and meta configuration
+  headTags: [
+    {
+      tagName: 'link',
+      attributes: {
+        rel: 'icon',
+        type: 'image/png',
+        sizes: '32x32',
+        href: '/img/favicon-32x32.png',
+      },
+    },
+    {
+      tagName: 'link',
+      attributes: {
+        rel: 'icon',
+        type: 'image/png',
+        sizes: '16x16',
+        href: '/img/favicon-16x16.png',
+      },
+    },
+    {
+      tagName: 'link',
+      attributes: {
+        rel: 'apple-touch-icon',
+        sizes: '180x180',
+        href: '/img/llama-stack-logo.png',
+      },
+    },
+    {
+      tagName: 'meta',
+      attributes: {
+        name: 'theme-color',
+        content: '#7C3AED', // Purple color from your logo
+      },
+    },
+    {
+      tagName: 'link',
+      attributes: {
+        rel: 'manifest',
+        href: '/site.webmanifest',
+      },
+    },
+  ],
+
   // GitHub pages deployment config.
   organizationName: 'reluctantfuturist',
   projectName: 'llama-stack',

@@ -34,10 +34,17 @@ def str_presenter(dumper, data):
     return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style)


-def generate_spec(output_dir: Path, stability_filter: str = None, main_spec: bool = False):
+def generate_spec(output_dir: Path, stability_filter: str = None, main_spec: bool = False, combined_spec: bool = False):
     """Generate OpenAPI spec with optional stability filtering."""

-    if stability_filter:
+    if combined_spec:
+        # Special case for combined stable + experimental APIs
+        title_suffix = " - Stable & Experimental APIs"
+        filename_prefix = "stainless-"
+        description_suffix = "\n\n**🔗 COMBINED**: This specification includes both stable production-ready APIs and experimental pre-release APIs. Use stable APIs for production deployments and experimental APIs for testing new features."
+        # Use the special "stainless" filter to include stable + experimental APIs
+        stability_filter = "stainless"
+    elif stability_filter:
         title_suffix = {
             "stable": " - Stable APIs" if not main_spec else "",
             "experimental": " - Experimental APIs",

@@ -125,6 +132,9 @@ def main(output_dir: str):
     generate_spec(output_dir, "experimental")
     generate_spec(output_dir, "deprecated")

+    print("Generating combined stable + experimental specification...")
+    generate_spec(output_dir, combined_spec=True)
+

 if __name__ == "__main__":
     fire.Fire(main)

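For orientation, a minimal standalone sketch of what the new flag does, using a hypothetical helper name: combined_spec=True overrides any explicit stability filter, routes generation through the special "stainless" filter, and prefixes the output filenames with "stainless-". The real generate_spec() also builds titles, descriptions, and output paths that are not shown in this hunk.

# Sketch only (hypothetical helper): mirrors the combined_spec branch added above.
def resolve_combined(stability_filter=None, combined_spec=False):
    if combined_spec:
        # Combined stable + experimental APIs route through the "stainless" filter
        # and the output files get a "stainless-" name prefix.
        return "stainless", "stainless-"
    return stability_filter, None  # other filters are handled elsewhere (not shown)

assert resolve_combined(combined_spec=True) == ("stainless", "stainless-")
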
@@ -948,6 +948,10 @@ class Generator:
                     # Include only deprecated endpoints
                     if deprecated:
                         filtered_operations.append(op)
+                elif self.options.stability_filter == "stainless":
+                    # Include both stable (v1 non-deprecated) and experimental (v1alpha, v1beta) endpoints
+                    if (stability_level == "v1" and not deprecated) or stability_level in ["v1alpha", "v1beta"]:
+                        filtered_operations.append(op)

             operations = filtered_operations
             print(

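To make the selection rules easier to see in one place, here is an illustrative restatement of the endpoint filter as a standalone predicate. The function name is hypothetical; stability_level and deprecated mirror the per-operation variables in the diff, and the stable/experimental cases are inferred from the comment on the new "stainless" branch rather than shown in this hunk.

# Illustrative only; the real Generator applies this per operation while
# building filtered_operations.
def include_endpoint(stability_filter, stability_level, deprecated):
    if stability_filter == "stainless":
        # Stable (v1, non-deprecated) plus experimental (v1alpha, v1beta) endpoints.
        return (stability_level == "v1" and not deprecated) or stability_level in ["v1alpha", "v1beta"]
    if stability_filter == "deprecated":
        # Only deprecated endpoints.
        return deprecated
    if stability_filter == "stable":
        # Assumed: production v1 endpoints that are not deprecated.
        return stability_level == "v1" and not deprecated
    if stability_filter == "experimental":
        # Assumed: pre-release v1alpha / v1beta endpoints.
        return stability_level in ["v1alpha", "v1beta"]
    return True  # no filter: keep everything

assert include_endpoint("stainless", "v1alpha", deprecated=False)
assert not include_endpoint("stainless", "v1", deprecated=True)
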
@@ -16,7 +16,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Getting Started',
-      collapsed: false,
+      collapsed: true,
       items: [
         'getting_started/quickstart',
         'getting_started/detailed_tutorial',

@@ -26,7 +26,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Concepts',
-      collapsed: false,
+      collapsed: true,
       items: [
         'concepts/index',
         'concepts/architecture',

@@ -48,7 +48,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Distributions',
-      collapsed: false,
+      collapsed: true,
       items: [
         'distributions/index',
         'distributions/list_of_distributions',

@@ -93,7 +93,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Providers',
-      collapsed: false,
+      collapsed: true,
       items: [
         'providers/index',
         {

@@ -276,7 +276,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Building Applications',
-      collapsed: false,
+      collapsed: true,
       items: [
         'building_applications/index',
         'building_applications/rag',

@@ -293,7 +293,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Advanced APIs',
-      collapsed: false,
+      collapsed: true,
       items: [
         'advanced_apis/post_training',
         'advanced_apis/evaluation',

@@ -303,7 +303,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Deploying',
-      collapsed: false,
+      collapsed: true,
       items: [
         'deploying/index',
         'deploying/kubernetes_deployment',

@@ -313,7 +313,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'Contributing',
-      collapsed: false,
+      collapsed: true,
       items: [
         'contributing/index',
         'contributing/new_api_provider',

@@ -324,7 +324,7 @@ const sidebars: SidebarsConfig = {
     {
       type: 'category',
       label: 'References',
-      collapsed: false,
+      collapsed: true,
       items: [
         'references/index',
         'references/llama_cli_reference/index',

docs/static/deprecated-llama-stack-spec.html (vendored, 7163 lines changed; diff not shown)
docs/static/deprecated-llama-stack-spec.yaml (vendored, 5435 lines changed; diff not shown)
docs/static/img/favicon-16x16.png (vendored, new binary file, 657 B)
docs/static/img/favicon-32x32.png (vendored, new binary file, 1.9 KiB)
docs/static/img/favicon-48x48.png (vendored, new binary file, 3.3 KiB)
docs/static/img/favicon-64x64.png (vendored, new binary file, 4.9 KiB)
docs/static/img/favicon.ico (vendored, new binary file, 679 B)
docs/static/img/favicon.png (vendored, new binary file, 1.9 KiB)
docs/static/img/llama-stack.png (vendored, binary file modified, 71 KiB -> 196 KiB)

docs/static/llama-stack-spec.html (vendored, 42 lines changed)

@@ -1310,16 +1310,11 @@
       "post": {
         "responses": {
           "200": {
-            "description": "An OpenAIResponseObject.",
+            "description": "A ListOpenAIResponseObject.",
             "content": {
               "application/json": {
                 "schema": {
-                  "$ref": "#/components/schemas/OpenAIResponseObject"
-                }
-              },
-              "text/event-stream": {
-                "schema": {
-                  "$ref": "#/components/schemas/OpenAIResponseObjectStream"
+                  "$ref": "#/components/schemas/ListOpenAIResponseObject"
                 }
               }
             }

@@ -1340,14 +1335,14 @@
         "tags": [
           "Agents"
         ],
-        "summary": "Create a new OpenAI response.",
-        "description": "Create a new OpenAI response.",
+        "summary": "List all OpenAI responses.",
+        "description": "List all OpenAI responses.",
         "parameters": [],
         "requestBody": {
           "content": {
             "application/json": {
               "schema": {
-                "$ref": "#/components/schemas/CreateOpenaiResponseRequest"
+                "$ref": "#/components/schemas/ListOpenaiResponsesRequest"
               }
             }
           },

@@ -8238,6 +8233,33 @@
        ],
        "title": "OpenAIResponseObjectStreamResponseWebSearchCallSearching"
      },
+      "ListOpenaiResponsesRequest": {
+        "type": "object",
+        "properties": {
+          "after": {
+            "type": "string",
+            "description": "The ID of the last response to return."
+          },
+          "limit": {
+            "type": "integer",
+            "description": "The number of responses to return."
+          },
+          "model": {
+            "type": "string",
+            "description": "The model to filter responses by."
+          },
+          "order": {
+            "type": "string",
+            "enum": [
+              "asc",
+              "desc"
+            ],
+            "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc')."
+          }
+        },
+        "additionalProperties": false,
+        "title": "ListOpenaiResponsesRequest"
+      },
      "OpenAIDeleteResponseObject": {
        "type": "object",
        "properties": {

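For illustration, a client request that conforms to the new ListOpenaiResponsesRequest schema could look like the sketch below. The base URL, port, and the example ID and model values are assumptions, not part of the spec; the path is inferred from the /v1/responses operation this hunk modifies.

import requests  # any HTTP client works; requests is used here for brevity

# Fields taken from the ListOpenaiResponsesRequest schema above; all are optional.
body = {
    "after": "resp_abc123",    # hypothetical: ID of the last response already seen
    "limit": 20,               # number of responses to return
    "model": "example-model",  # hypothetical model ID to filter by
    "order": "desc",           # sort by created_at, newest first
}

# Assumption: a Llama Stack server listening on localhost:8321.
resp = requests.post("http://localhost:8321/v1/responses", json=body)
print(resp.json())  # expected to be a ListOpenAIResponseObject per the 200 response above
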
docs/static/llama-stack-spec.yaml (vendored, 34 lines changed)

@@ -967,14 +967,11 @@ paths:
     post:
       responses:
         '200':
-          description: An OpenAIResponseObject.
+          description: A ListOpenAIResponseObject.
          content:
            application/json:
              schema:
-                $ref: '#/components/schemas/OpenAIResponseObject'
-            text/event-stream:
-              schema:
-                $ref: '#/components/schemas/OpenAIResponseObjectStream'
+                $ref: '#/components/schemas/ListOpenAIResponseObject'
         '400':
           $ref: '#/components/responses/BadRequest400'
         '429':

@@ -987,14 +984,14 @@
           $ref: '#/components/responses/DefaultError'
       tags:
         - Agents
-      summary: Create a new OpenAI response.
-      description: Create a new OpenAI response.
+      summary: List all OpenAI responses.
+      description: List all OpenAI responses.
       parameters: []
       requestBody:
         content:
           application/json:
             schema:
-              $ref: '#/components/schemas/CreateOpenaiResponseRequest'
+              $ref: '#/components/schemas/ListOpenaiResponsesRequest'
         required: true
       deprecated: false
   /v1/responses/{response_id}:

@@ -6199,6 +6196,27 @@ components:
       - type
       title: >-
         OpenAIResponseObjectStreamResponseWebSearchCallSearching
+    ListOpenaiResponsesRequest:
+      type: object
+      properties:
+        after:
+          type: string
+          description: The ID of the last response to return.
+        limit:
+          type: integer
+          description: The number of responses to return.
+        model:
+          type: string
+          description: The model to filter responses by.
+        order:
+          type: string
+          enum:
+            - asc
+            - desc
+          description: >-
+            The order to sort responses by when sorted by created_at ('asc' or 'desc').
+      additionalProperties: false
+      title: ListOpenaiResponsesRequest
     OpenAIDeleteResponseObject:
       type: object
       properties:

docs/static/llama-stack.png (vendored, binary file deleted, 196 KiB)

docs/static/site.webmanifest (vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
+{
+  "name": "Llama Stack",
+  "short_name": "Llama Stack",
+  "description": "The open-source framework for building generative AI applications",
+  "start_url": "/",
+  "display": "standalone",
+  "theme_color": "#7C3AED",
+  "background_color": "#ffffff",
+  "icons": [
+    {
+      "src": "/img/favicon-16x16.png",
+      "sizes": "16x16",
+      "type": "image/png"
+    },
+    {
+      "src": "/img/favicon-32x32.png",
+      "sizes": "32x32",
+      "type": "image/png"
+    },
+    {
+      "src": "/img/favicon-48x48.png",
+      "sizes": "48x48",
+      "type": "image/png"
+    },
+    {
+      "src": "/img/favicon-64x64.png",
+      "sizes": "64x64",
+      "type": "image/png"
+    },
+    {
+      "src": "/img/llama-stack-logo.png",
+      "sizes": "200x200",
+      "type": "image/png"
+    }
+  ]
+}