chore: remove openai dependency from providers (#3398)

# What does this PR do?

The openai package is already a dependency of the llama-stack project itself, so let
the project dictate which openai version we need and avoid potential breakage from
unsatisfiable dependency resolution.
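
For illustration, this is roughly what an affected registry entry looks like after the change. The sketch below mirrors the vLLM entry touched in the diff but is not an excerpt of the actual file; the `remote_provider_spec`/`AdapterSpec` helpers and their import path are assumed.

```python
# Hypothetical sketch of a provider registry entry after this PR (import path assumed).
from llama_stack.providers.datatypes import AdapterSpec, Api, remote_provider_spec

vllm_spec = remote_provider_spec(
    api=Api.inference,
    adapter=AdapterSpec(
        adapter_type="vllm",
        # "openai" no longer needs to be listed per provider: the llama-stack
        # package itself pins openai in pyproject.toml, so every provider
        # environment already resolves a single, compatible version.
        pip_packages=[],
        module="llama_stack.providers.remote.inference.vllm",
        config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
        description="Remote vLLM inference provider for connecting to vLLM servers.",
    ),
)
```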

Signed-off-by: Sébastien Han <seb@redhat.com>
Sébastien Han 2025-09-11 10:19:59 +02:00 committed by GitHub
parent 0c7f49490c
commit 8e05c68d15
5 changed files with 13 additions and 23 deletions


@@ -13,7 +13,7 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.batches,
             provider_type="inline::reference",
-            pip_packages=["openai"],
+            pip_packages=[],
             module="llama_stack.providers.inline.batches.reference",
             config_class="llama_stack.providers.inline.batches.reference.config.ReferenceBatchesImplConfig",
             api_dependencies=[


@@ -75,7 +75,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="vllm",
-            pip_packages=["openai"],
+            pip_packages=[],
             module="llama_stack.providers.remote.inference.vllm",
             config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
             description="Remote vLLM inference provider for connecting to vLLM servers.",
@@ -151,9 +151,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="databricks",
-            pip_packages=[
-                "openai",
-            ],
+            pip_packages=[],
             module="llama_stack.providers.remote.inference.databricks",
             config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig",
             description="Databricks inference provider for running models on Databricks' unified analytics platform.",
@@ -163,9 +161,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="nvidia",
-            pip_packages=[
-                "openai",
-            ],
+            pip_packages=[],
             module="llama_stack.providers.remote.inference.nvidia",
             config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig",
             description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.",
@@ -175,7 +171,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="runpod",
-            pip_packages=["openai"],
+            pip_packages=[],
             module="llama_stack.providers.remote.inference.runpod",
             config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig",
             description="RunPod inference provider for running models on RunPod's cloud GPU platform.",
@@ -207,7 +203,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="gemini",
-            pip_packages=["litellm", "openai"],
+            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.gemini",
             config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig",
             provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator",
@@ -218,7 +214,7 @@ def available_providers() -> list[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="vertexai",
-            pip_packages=["litellm", "google-cloud-aiplatform", "openai"],
+            pip_packages=["litellm", "google-cloud-aiplatform"],
             module="llama_stack.providers.remote.inference.vertexai",
             config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig",
             provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator",
@@ -248,7 +244,7 @@ Available Models:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="groq",
-            pip_packages=["litellm", "openai"],
+            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.groq",
             config_class="llama_stack.providers.remote.inference.groq.GroqConfig",
             provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator",
@@ -270,7 +266,7 @@ Available Models:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_type="sambanova",
-            pip_packages=["litellm", "openai"],
+            pip_packages=["litellm"],
             module="llama_stack.providers.remote.inference.sambanova",
             config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
             provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator",


@@ -38,7 +38,7 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.scoring,
             provider_type="inline::braintrust",
-            pip_packages=["autoevals", "openai"],
+            pip_packages=["autoevals"],
             module="llama_stack.providers.inline.scoring.braintrust",
             config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig",
             api_dependencies=[


@@ -32,7 +32,7 @@ dependencies = [
     "jinja2>=3.1.6",
     "jsonschema",
     "llama-stack-client>=0.2.21",
-    "openai>=1.99.6",
+    "openai>=1.100.0", # for expires_after support
     "prompt-toolkit",
     "python-dotenv",
     "python-jose[cryptography]",
@@ -80,7 +80,6 @@ dev = [
 unit = [
     "sqlite-vec",
     "ollama",
-    "openai",
     "aiosqlite",
     "aiohttp",
     "psycopg2-binary>=2.9.0",
@@ -105,7 +104,6 @@ unit = [
 # separately. If you are using "uv" to execute your tests, you can use the "--group" flag to specify extra
 # dependencies.
 test = [
-    "openai>=1.100.0", # for expires_after support
     "aiosqlite",
     "aiohttp",
     "torch>=2.6.0",

uv.lock (generated), 8 changes

@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.12"
 resolution-markers = [
     "(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
@@ -1839,7 +1839,6 @@ test = [
     { name = "datasets" },
     { name = "mcp" },
     { name = "milvus-lite" },
-    { name = "openai" },
     { name = "psycopg2-binary" },
     { name = "pymilvus" },
     { name = "pypdf" },
@@ -1865,7 +1864,6 @@ unit = [
     { name = "milvus-lite" },
     { name = "moto", extra = ["s3"] },
     { name = "ollama" },
-    { name = "openai" },
     { name = "psycopg2-binary" },
     { name = "pymilvus" },
     { name = "pypdf" },
@@ -1889,7 +1887,7 @@ requires-dist = [
     { name = "jsonschema" },
     { name = "llama-stack-client", specifier = ">=0.2.21" },
     { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.21" },
-    { name = "openai", specifier = ">=1.99.6" },
+    { name = "openai", specifier = ">=1.100.0" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
     { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
     { name = "pandas", marker = "extra == 'ui'" },
@@ -1959,7 +1957,6 @@ test = [
     { name = "datasets", specifier = ">=4.0.0" },
     { name = "mcp" },
     { name = "milvus-lite", specifier = ">=2.5.0" },
-    { name = "openai", specifier = ">=1.100.0" },
     { name = "psycopg2-binary", specifier = ">=2.9.0" },
     { name = "pymilvus", specifier = ">=2.6.1" },
     { name = "pypdf" },
@@ -1984,7 +1981,6 @@ unit = [
     { name = "milvus-lite", specifier = ">=2.5.0" },
     { name = "moto", extras = ["s3"], specifier = ">=5.1.10" },
     { name = "ollama" },
-    { name = "openai" },
     { name = "psycopg2-binary", specifier = ">=2.9.0" },
     { name = "pymilvus", specifier = ">=2.6.1" },
     { name = "pypdf" },