Merge branch 'main' into elasticsearch-integration

commit 765cf1c9f5
Enrico Zimuel, 2025-12-02 07:42:29 +01:00 (committed by GitHub)
5 changed files with 66765 additions and 7 deletions


@@ -64,6 +64,7 @@ jobs:
           ref: ${{ github.event.pull_request.base.ref }}
           path: 'base'
+      # Cache oasdiff to avoid checksum failures and speed up builds
       - name: Cache oasdiff
         if: steps.skip-check.outputs.skip != 'true'
@@ -136,6 +137,23 @@ jobs:
         run: |
           oasdiff breaking --fail-on ERR $BASE_SPEC $CURRENT_SPEC --match-path '^/v1/'
+      # Run oasdiff to detect breaking changes in the API specification compared to the OpenAI OpenAPI spec
+      - name: Run OpenAPI Breaking Change Diff Against OpenAI API
+        if: steps.skip-check.outputs.skip != 'true'
+        continue-on-error: true
+        shell: bash
+        run: |
+          OPENAI_SPEC=docs/static/openai-spec-2.3.0.yml
+          LLAMA_STACK_SPEC=docs/static/llama-stack-spec.yaml
+          # Compare the Llama Stack spec against the OpenAI spec.
+          # This finds breaking changes in our implementation of common endpoints.
+          # By using our spec as the base, we avoid errors for endpoints we don't implement.
+          oasdiff breaking --fail-on ERR \
+            "$LLAMA_STACK_SPEC" \
+            "$OPENAI_SPEC" \
+            --strip-prefix-base "/v1"
       # Report when test is skipped
       - name: Report skip reason
         if: steps.skip-check.outputs.skip == 'true'
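
The new step can be reproduced locally before pushing. A minimal sketch, assuming oasdiff is already installed and the command is run from the repository root (the paths and flags are taken from the step above):

```bash
# Compare the Llama Stack spec (base) against the vendored OpenAI spec
# (revision); --strip-prefix-base "/v1" removes the /v1 prefix from our
# paths so they line up with the OpenAI spec's paths.
OPENAI_SPEC=docs/static/openai-spec-2.3.0.yml
LLAMA_STACK_SPEC=docs/static/llama-stack-spec.yaml

oasdiff breaking --fail-on ERR \
  "$LLAMA_STACK_SPEC" \
  "$OPENAI_SPEC" \
  --strip-prefix-base "/v1"
```

Because the step sets continue-on-error: true, a non-zero exit here reports drift from the OpenAI spec without failing the build.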

.pre-commit-config.yaml

@@ -19,6 +19,7 @@ repos:
       - id: no-commit-to-branch
       - id: check-yaml
         args: ["--unsafe"]
+        exclude: 'docs/static/openai-spec-2.3.0.yml'
       - id: detect-private-key
       - id: mixed-line-ending
         args: [--fix=lf] # Force line endings to LF (line feed)
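
The exclude keeps the check-yaml hook from parsing the ~66,000-line vendored spec on every commit. A quick way to confirm the hook still passes after the change, assuming pre-commit is installed locally:

```bash
# Run only the check-yaml hook across the repository; the vendored
# OpenAI spec is now skipped by the exclude pattern above.
pre-commit run check-yaml --all-files
```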


@@ -28,7 +28,7 @@ Llama Stack provides several pre-configured distributions to help you get started
 - Run locally with Ollama for development
 ```bash
-docker pull llama-stack/distribution-starter
+docker pull llamastack/distribution-starter
 ```
 **Guides:** [Starter Distribution Guide](self_hosted_distro/starter)
@@ -41,7 +41,7 @@ docker pull llama-stack/distribution-starter
 - Need to run inference locally
 ```bash
-docker pull llama-stack/distribution-meta-reference-gpu
+docker pull llamastack/distribution-meta-reference-gpu
 ```
 **Guides:** [Meta Reference GPU Guide](self_hosted_distro/meta-reference-gpu)
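
Both hunks fix the Docker Hub organization: the images are published under llamastack, not llama-stack. A quick smoke test of the corrected name; note that the 8321 port mapping is an assumption based on Llama Stack's default server port and may need adjusting for your distribution:

```bash
# Pull the image under the correct organization and start the server,
# exposing the (assumed) default Llama Stack port 8321 on the host.
docker pull llamastack/distribution-starter
docker run -p 8321:8321 llamastack/distribution-starter
```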

docs/static/openai-spec-2.3.0.yml (vendored, new file, 66,741 additions)

File diff suppressed because it is too large.


@@ -4,8 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from __future__ import annotations
-
 import uuid
 from datetime import UTC, datetime
 from typing import TYPE_CHECKING, Annotated, Any, cast
@@ -39,7 +37,7 @@ from .config import S3FilesImplConfig
 # TODO: provider data for S3 credentials
-def _create_s3_client(config: S3FilesImplConfig) -> S3Client:
+def _create_s3_client(config: S3FilesImplConfig) -> "S3Client":
     try:
         s3_config = {
             "region_name": config.region,
@@ -66,7 +64,7 @@ def _create_s3_client(config: S3FilesImplConfig) -> S3Client:
         raise RuntimeError(f"Failed to initialize S3 client: {e}") from e
 
-async def _create_bucket_if_not_exists(client: S3Client, config: S3FilesImplConfig) -> None:
+async def _create_bucket_if_not_exists(client: "S3Client", config: S3FilesImplConfig) -> None:
     try:
         client.head_bucket(Bucket=config.bucket_name)
     except ClientError as e:
@@ -192,7 +190,7 @@ class S3FilesImpl(Files):
         pass
 
     @property
-    def client(self) -> S3Client:
+    def client(self) -> "S3Client":
         assert self._client is not None, "Provider not initialized"
         return self._client
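
The quoted "S3Client" return types go hand in hand with dropping from __future__ import annotations: S3Client is only imported for type checking (note the TYPE_CHECKING import above), so the annotation must not be evaluated at runtime. A self-contained sketch of the pattern; the mypy_boto3_s3 import path is illustrative and may not be the stub package this provider actually uses:

```python
from typing import TYPE_CHECKING

import boto3

if TYPE_CHECKING:
    # Evaluated only by type checkers, never at runtime, so the stub
    # package is not a runtime dependency.
    from mypy_boto3_s3 import S3Client


def make_client() -> "S3Client":
    # The quoted annotation is a plain string, so Python never evaluates
    # the name S3Client at runtime. Unquoted (and without
    # `from __future__ import annotations`) it would raise NameError
    # when this function is defined.
    return boto3.client("s3")
```

Quoting individual annotations has the same effect as the module-wide future import, but applies the deferred evaluation only where it is actually needed.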