Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 01:48:05 +00:00

Merge branch 'main' into elasticsearch-integration

Commit 765cf1c9f5: 5 changed files with 66765 additions and 7 deletions
.github/workflows/conformance.yml (vendored): 18 changes

@@ -64,6 +64,7 @@ jobs:
           ref: ${{ github.event.pull_request.base.ref }}
           path: 'base'
 
+
       # Cache oasdiff to avoid checksum failures and speed up builds
       - name: Cache oasdiff
         if: steps.skip-check.outputs.skip != 'true'
@@ -136,6 +137,23 @@ jobs:
         run: |
           oasdiff breaking --fail-on ERR $BASE_SPEC $CURRENT_SPEC --match-path '^/v1/'
 
+      # Run oasdiff to detect breaking changes in the API specification when compared to the OpenAI openAPI spec
+      - name: Run OpenAPI Breaking Change Diff Against OpenAI API
+        if: steps.skip-check.outputs.skip != 'true'
+        continue-on-error: true
+        shell: bash
+        run: |
+          OPENAI_SPEC=docs/static/openai-spec-2.3.0.yml
+          LLAMA_STACK_SPEC=docs/static/llama-stack-spec.yaml
+
+          # Compare Llama Stack spec against OpenAI spec.
+          # This finds breaking changes in our implementation of common endpoints.
+          # By using our spec as the base, we avoid errors for endpoints we don't implement.
+          oasdiff breaking --fail-on ERR \
+            "$LLAMA_STACK_SPEC" \
+            "$OPENAI_SPEC" \
+            --strip-prefix-base "/v1"
+
       # Report when test is skipped
       - name: Report skip reason
         if: steps.skip-check.outputs.skip == 'true'
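For local debugging, the same comparison can be reproduced outside CI. A minimal sketch, assuming `oasdiff` is installed and on PATH and the script is run from the repository root so the two spec paths from the workflow resolve:

```python
import subprocess
import sys

# Paths taken verbatim from the workflow step above.
LLAMA_STACK_SPEC = "docs/static/llama-stack-spec.yaml"
OPENAI_SPEC = "docs/static/openai-spec-2.3.0.yml"

# Same invocation as the CI step: our spec is the base, so endpoints we
# don't implement are ignored; only drift on common endpoints can fail.
result = subprocess.run(
    [
        "oasdiff", "breaking", "--fail-on", "ERR",
        LLAMA_STACK_SPEC,
        OPENAI_SPEC,
        "--strip-prefix-base", "/v1",
    ],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
sys.exit(result.returncode)  # non-zero when ERR-level breaking changes exist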
.pre-commit-config.yaml:

@@ -19,6 +19,7 @@ repos:
       - id: no-commit-to-branch
       - id: check-yaml
         args: ["--unsafe"]
+        exclude: 'docs/static/openai-spec-2.3.0.yml'
       - id: detect-private-key
       - id: mixed-line-ending
         args: [--fix=lf] # Forces to replace line ending by LF (line feed)
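The new `exclude` keeps `check-yaml` from parsing the 66k-line vendored spec. pre-commit treats `exclude` as a Python regular expression searched against each candidate path; a rough sketch of that filtering, with hypothetical candidate paths and not pre-commit's actual implementation:

```python
import re

# `exclude` behaves like a Python regex searched against each file path.
EXCLUDE = re.compile(r"docs/static/openai-spec-2.3.0.yml")

for path in [
    "docs/static/openai-spec-2.3.0.yml",   # vendored spec: skipped
    ".github/workflows/conformance.yml",   # still checked
]:
    status = "skip " if EXCLUDE.search(path) else "check"
    print(status, path)
```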
Docs page listing pre-configured distributions:

@@ -28,7 +28,7 @@ Llama Stack provides several pre-configured distributions to help you get started
 - Run locally with Ollama for development
 
 ```bash
-docker pull llama-stack/distribution-starter
+docker pull llamastack/distribution-starter
 ```
 
 **Guides:** [Starter Distribution Guide](self_hosted_distro/starter)
@@ -41,7 +41,7 @@ docker pull llama-stack/distribution-starter
 - Need to run inference locally
 
 ```bash
-docker pull llama-stack/distribution-meta-reference-gpu
+docker pull llamastack/distribution-meta-reference-gpu
 ```
 
 **Guides:** [Meta Reference GPU Guide](self_hosted_distro/meta-reference-gpu)
docs/static/openai-spec-2.3.0.yml (vendored, new file): 66741 additions

File diff suppressed because it is too large.
S3 files provider module:

@@ -4,8 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from __future__ import annotations
-
 import uuid
 from datetime import UTC, datetime
 from typing import TYPE_CHECKING, Annotated, Any, cast
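The quoting changes in the hunks below follow from this removal: without `from __future__ import annotations`, annotations are evaluated at runtime again, so a name imported only under `typing.TYPE_CHECKING` must be written as a string literal. A self-contained sketch of the pattern (the stub module name is an assumption for illustration, not necessarily what the provider uses):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by type checkers; never imported at runtime.
    from mypy_boto3_s3 import S3Client


class Holder:
    def __init__(self) -> None:
        self._client: "S3Client | None" = None

    @property
    def client(self) -> "S3Client":
        # Quoted, so Python never evaluates the name that is missing
        # at runtime.
        assert self._client is not None, "Provider not initialized"
        return self._client
```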
@@ -39,7 +37,7 @@ from .config import S3FilesImplConfig
 # TODO: provider data for S3 credentials
 
 
-def _create_s3_client(config: S3FilesImplConfig) -> S3Client:
+def _create_s3_client(config: S3FilesImplConfig) -> "S3Client":
     try:
         s3_config = {
             "region_name": config.region,
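For context on `_create_s3_client`, a hedged sketch of how such a client is typically built with boto3. The `endpoint_url` field is an assumption (common for S3-compatible stores); the provider's real config may differ:

```python
import boto3


def create_s3_client(region: str | None, endpoint_url: str | None = None):
    # Mirrors the pattern in the hunk above: collect optional settings
    # in a dict, then hand them to boto3.
    s3_config = {"region_name": region}
    if endpoint_url is not None:
        s3_config["endpoint_url"] = endpoint_url
    return boto3.client("s3", **s3_config)
```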
@@ -66,7 +64,7 @@ def _create_s3_client(config: S3FilesImplConfig) -> S3Client:
         raise RuntimeError(f"Failed to initialize S3 client: {e}") from e
 
 
-async def _create_bucket_if_not_exists(client: S3Client, config: S3FilesImplConfig) -> None:
+async def _create_bucket_if_not_exists(client: "S3Client", config: S3FilesImplConfig) -> None:
     try:
         client.head_bucket(Bucket=config.bucket_name)
     except ClientError as e:
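`_create_bucket_if_not_exists` probes with `head_bucket` and reacts to the `ClientError`. A minimal sketch of that boto3 pattern, with the error handling simplified (the provider may gate creation behind config):

```python
from botocore.exceptions import ClientError


def create_bucket_if_not_exists(client, bucket_name: str) -> None:
    # head_bucket raises ClientError (HTTP 404) when the bucket is absent.
    try:
        client.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        if e.response["Error"]["Code"] == "404":
            # Note: outside us-east-1, real code must also pass
            # CreateBucketConfiguration={"LocationConstraint": region}.
            client.create_bucket(Bucket=bucket_name)
        else:
            raise
```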
@@ -192,7 +190,7 @@ class S3FilesImpl(Files):
         pass
 
     @property
-    def client(self) -> S3Client:
+    def client(self) -> "S3Client":
         assert self._client is not None, "Provider not initialized"
         return self._client
 