From 27d6becfd0f2b3071fc650eee0ae8e15a7ae8115 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Mon, 18 Aug 2025 12:20:50 -0700
Subject: [PATCH] fix(misc): pin openai dependency to < 1.100.0 (#3192)

This OpenAI client release
https://github.com/openai/openai-python/commit/0843a1116498bc3312db9904adf71a4fb0a0a77e
breaks litellm:
https://github.com/BerriAI/litellm/blob/169a17400f1f5e36815c7d89128754975cd0584d/litellm/types/llms/openai.py#L40

Update the dependency pin to exclude that release. Also make the imports
more defensive, in case something else during `llama stack build` ends up
moving openai back to an older version (a standalone sketch of the import
fallback appears after the diff).

## Test Plan

Run the pre-release script integration tests.
---
 .../utils/inference/openai_compat.py | 12 +++++++++---
 pyproject.toml                       |  2 +-
 scripts/integration-tests.sh         | 19 ++++++++++++++++---
 uv.lock                              |  2 +-
 4 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py
index 6297cc2ed..5e6c26884 100644
--- a/llama_stack/providers/utils/inference/openai_compat.py
+++ b/llama_stack/providers/utils/inference/openai_compat.py
@@ -31,9 +31,15 @@ from openai.types.chat import (
 from openai.types.chat import (
     ChatCompletionContentPartTextParam as OpenAIChatCompletionContentPartTextParam,
 )
-from openai.types.chat import (
-    ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
-)
+
+try:
+    from openai.types.chat import (
+        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
+except ImportError:
+    from openai.types.chat.chat_completion_message_tool_call import (
+        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
 from openai.types.chat import (
     ChatCompletionMessageParam as OpenAIChatCompletionMessage,
 )
diff --git a/pyproject.toml b/pyproject.toml
index db0ad1f00..f02c02c41 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,7 +33,7 @@ dependencies = [
     "jsonschema",
     "llama-stack-client>=0.2.17",
     "llama-api-client>=0.1.2",
-    "openai>=1.99.6",
+    "openai>=1.99.6,<1.100.0",
     "prompt-toolkit",
     "python-dotenv",
     "python-jose[cryptography]",
diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
index e9a5283e1..66e6d8e57 100755
--- a/scripts/integration-tests.sh
+++ b/scripts/integration-tests.sh
@@ -144,6 +144,19 @@ else
   export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
 fi
 
+# check if "llama" and "pytest" are available. this script does not use `uv run` given
+# it can be used in a pre-release environment where we have not been able to tell
+# uv about pre-release dependencies properly (yet).
+if ! command -v llama &> /dev/null; then
+    echo "llama could not be found, ensure llama-stack is installed"
+    exit 1
+fi
+
+if ! command -v pytest &> /dev/null; then
+    echo "pytest could not be found, ensure pytest is installed"
+    exit 1
+fi
+
 # Start Llama Stack Server if needed
 if [[ "$STACK_CONFIG" == *"server:"* ]]; then
   # check if server is already running
@@ -151,7 +164,7 @@ if [[ "$STACK_CONFIG" == *"server:"* ]]; then
     echo "Llama Stack Server is already running, skipping start"
   else
     echo "=== Starting Llama Stack Server ==="
-    nohup uv run llama stack run ci-tests --image-type venv > server.log 2>&1 &
+    nohup llama stack run ci-tests --image-type venv > server.log 2>&1 &
 
     echo "Waiting for Llama Stack Server to start..."
     for i in {1..30}; do
@@ -189,7 +202,7 @@ fi
 if [[ "$RUN_VISION_TESTS" == "true" ]]; then
   echo "Running vision tests..."
   set +e
-  uv run pytest -s -v tests/integration/inference/test_vision_inference.py \
+  pytest -s -v tests/integration/inference/test_vision_inference.py \
     --stack-config="$STACK_CONFIG" \
     -k "$PYTEST_PATTERN" \
     --vision-model=ollama/llama3.2-vision:11b \
@@ -257,7 +270,7 @@ echo "=== Running all collected tests in a single pytest command ==="
 echo "Total test files: $(echo $TEST_FILES | wc -w)"
 
 set +e
-uv run pytest -s -v $TEST_FILES \
+pytest -s -v $TEST_FILES \
     --stack-config="$STACK_CONFIG" \
     -k "$PYTEST_PATTERN" \
     --text-model="$TEXT_MODEL" \
diff --git a/uv.lock b/uv.lock
index a09406770..3e3bf7e24 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1856,7 +1856,7 @@ requires-dist = [
     { name = "llama-api-client", specifier = ">=0.1.2" },
     { name = "llama-stack-client", specifier = ">=0.2.17" },
     { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.17" },
-    { name = "openai", specifier = ">=1.99.6" },
+    { name = "openai", specifier = ">=1.99.6,<1.100.0" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
     { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
     { name = "pandas", marker = "extra == 'ui'" },
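
For reference, a minimal standalone sketch of the defensive-import pattern the
patch applies in `openai_compat.py`: alias whichever tool-call type the
installed openai client exports, so the rest of the module only ever touches
the alias. The module paths come from the diff above; the `describe_tool_call`
helper is hypothetical, added only to show that call sites stay
version-agnostic.

```python
try:
    # Newer openai clients export the function-specific tool-call type.
    from openai.types.chat import (
        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )
except ImportError:
    # Older clients predate the rename and only ship the generic type.
    from openai.types.chat.chat_completion_message_tool_call import (
        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )


def describe_tool_call(tool_call: OpenAIChatCompletionMessageFunctionToolCall) -> str:
    # Hypothetical helper: both aliased types are pydantic models exposing
    # `.id` and `.function.name`, so this runs on either client version.
    return f"{tool_call.id} -> {tool_call.function.name}"
```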
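And a quick sanity check (not part of the patch; assumes the `packaging`
library is available) that an environment actually honors the new pin:

```python
from importlib.metadata import version

from packaging.specifiers import SpecifierSet

# The specifier string mirrors the pin added to pyproject.toml.
pin = SpecifierSet(">=1.99.6,<1.100.0")
installed = version("openai")
print(f"openai {installed} satisfies '{pin}': {installed in pin}")
```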