Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
feat: split API and provider specs into separate llama-stack-api pkg
Extract API definitions, models, and provider specifications into a standalone llama-stack-api package that can be published to PyPI independently of the main llama-stack server.

Motivation

External providers currently import from llama-stack, which overrides the installed version and causes dependency conflicts. This separation allows external providers to:

- Install only the type definitions they need without server dependencies
- Avoid version conflicts with the installed llama-stack package
- Be versioned and released independently

This enables us to re-enable external provider module tests that were previously blocked by these import conflicts.

Changes

- Created llama-stack-api package with minimal dependencies (pydantic, jsonschema)
- Moved APIs, providers datatypes, strong_typing, and schema_utils
- Updated all imports from llama_stack.* to llama_stack_api.*
- Preserved git history using git mv for moved files
- Configured local editable install for development workflow
- Updated linting and type-checking configuration for both packages
- Rebased on top of upstream src/ layout changes

Testing

Package builds successfully and can be imported independently. All pre-commit hooks pass with expected exclusions maintained.

Next Steps

- Publish llama-stack-api to PyPI
- Update external provider dependencies
- Re-enable external provider module tests

Signed-off-by: Charlie Doern <cdoern@redhat.com>
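To make the "Updated all imports" bullet concrete, here is a rough sketch of how an external provider's imports would change, assuming the moved modules keep their relative paths under the new top-level package. The module paths and the get_provider_spec entry point shown are illustrative assumptions, not taken from this commit:

```python
# Sketch of the import change for an external provider module.
# Paths below are assumptions based on the llama_stack.* -> llama_stack_api.*
# rename described in the commit message; the real layout may differ.

# Before the split: types came from the llama-stack server package, pulling in
# server dependencies and potentially shadowing the installed llama-stack version.
# from llama_stack.providers.datatypes import ProviderSpec
# from llama_stack.apis.inference import Inference

# After the split: the same type definitions come from the standalone
# llama-stack-api package.
from llama_stack_api.providers.datatypes import ProviderSpec  # assumed path
from llama_stack_api.apis.inference import Inference  # assumed path


def get_provider_spec() -> ProviderSpec:
    """Illustrative provider entry point built only against llama-stack-api types."""
    ...
```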
This commit is contained in:
parent e5a55f3677
commit 85d407c2a0
359 changed files with 1259 additions and 980 deletions
uv.lock (generated): 23 changed lines
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.12"
 resolution-markers = [
     "(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
@@ -1945,6 +1945,7 @@ dependencies = [
     { name = "httpx" },
     { name = "jinja2" },
     { name = "jsonschema" },
+    { name = "llama-stack-api" },
     { name = "openai" },
     { name = "opentelemetry-exporter-otlp-proto-http" },
     { name = "opentelemetry-sdk" },
@@ -2094,6 +2095,7 @@ requires-dist = [
     { name = "httpx" },
     { name = "jinja2", specifier = ">=3.1.6" },
     { name = "jsonschema" },
+    { name = "llama-stack-api", editable = "src/llama-stack-api" },
     { name = "llama-stack-client", marker = "extra == 'client'", specifier = ">=0.3.0" },
     { name = "openai", specifier = ">=2.5.0" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
@@ -2226,6 +2228,25 @@ unit = [
     { name = "together" },
 ]
 
+[[package]]
+name = "llama-stack-api"
+version = "0.1.0"
+source = { editable = "src/llama-stack-api" }
+dependencies = [
+    { name = "jsonschema" },
+    { name = "opentelemetry-exporter-otlp-proto-http" },
+    { name = "opentelemetry-sdk" },
+    { name = "pydantic" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "jsonschema" },
+    { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
+    { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
+    { name = "pydantic", specifier = ">=2.11.9" },
+]
+
 [[package]]
 name = "llama-stack-client"
 version = "0.3.0"
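The lockfile entry above lists the new package's runtime dependencies (jsonschema, the OpenTelemetry exporter and SDK, and pydantic). Below is a minimal sketch of the kind of standalone-import check the Testing note refers to, assuming an environment where llama-stack-api is installed but the llama-stack server is not; this script is illustrative and not part of the commit:

```python
# Minimal standalone-import smoke check (illustrative, not from the repository).
# Assumes only llama-stack-api and its dependencies are installed.
import importlib.util

# The server package should not be required for the API package to import.
if importlib.util.find_spec("llama_stack") is not None:
    print("note: llama-stack server package is also installed")

import llama_stack_api  # the standalone API/type package introduced here

print("llama_stack_api imported from", llama_stack_api.__file__)
```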