fix(misc): pin openai dependency to < 1.100.0 (#3192)

This OpenAI client release (0843a11164) ends up breaking litellm (see 169a17400f/litellm/types/llms/openai.py (L40)).

Update the dependency pin. Also make the imports a bit more defensive, in case
something else during `llama stack build` ends up moving openai back to a
previous version.
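
Concretely, the defensive import tries the new type name first and binds the
older generic type to the same alias when it is unavailable (condensed from
the diff below):

```python
try:
    # newer openai releases expose the function-specific tool-call type
    from openai.types.chat import (
        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )
except ImportError:
    # older openai releases only ship the generic tool-call type; bind it
    # to the same alias so downstream code is unaffected
    from openai.types.chat.chat_completion_message_tool_call import (
        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )
```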

## Test Plan

Run pre-release script integration tests.
Author: Ashwin Bharambe, 2025-08-18 12:20:50 -07:00 (committed by GitHub)
Parent: f8398d25ff
Commit: 27d6becfd0
4 changed files with 27 additions and 8 deletions


@@ -31,9 +31,15 @@ from openai.types.chat import (
 from openai.types.chat import (
     ChatCompletionContentPartTextParam as OpenAIChatCompletionContentPartTextParam,
 )
-from openai.types.chat import (
-    ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
-)
+try:
+    from openai.types.chat import (
+        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
+except ImportError:
+    from openai.types.chat.chat_completion_message_tool_call import (
+        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
 from openai.types.chat import (
     ChatCompletionMessageParam as OpenAIChatCompletionMessage,
 )
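
Because both import paths bind the same alias, downstream code needs no
version checks of its own. A sketch of the effect (the helper below is
illustrative, not part of the change, and assumes the try/except import
above is in scope):

```python
def is_tool_call(part: object) -> bool:
    # one isinstance check works on either side of the openai version split,
    # since the alias points at whichever class was importable
    return isinstance(part, OpenAIChatCompletionMessageFunctionToolCall)
```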


pyproject.toml

@@ -33,7 +33,7 @@ dependencies = [
     "jsonschema",
     "llama-stack-client>=0.2.17",
     "llama-api-client>=0.1.2",
-    "openai>=1.99.6",
+    "openai>=1.99.6,<1.100.0",
     "prompt-toolkit",
     "python-dotenv",
     "python-jose[cryptography]",


@@ -144,6 +144,19 @@ else
     export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
 fi
 
+# check if "llama" and "pytest" are available. this script does not use `uv run` given
+# it can be used in a pre-release environment where we have not been able to tell
+# uv about pre-release dependencies properly (yet).
+if ! command -v llama &> /dev/null; then
+    echo "llama could not be found, ensure llama-stack is installed"
+    exit 1
+fi
+
+if ! command -v pytest &> /dev/null; then
+    echo "pytest could not be found, ensure pytest is installed"
+    exit 1
+fi
+
 # Start Llama Stack Server if needed
 if [[ "$STACK_CONFIG" == *"server:"* ]]; then
     # check if server is already running
@@ -151,7 +164,7 @@ if [[ "$STACK_CONFIG" == *"server:"* ]]; then
         echo "Llama Stack Server is already running, skipping start"
     else
         echo "=== Starting Llama Stack Server ==="
-        nohup uv run llama stack run ci-tests --image-type venv > server.log 2>&1 &
+        nohup llama stack run ci-tests --image-type venv > server.log 2>&1 &
 
         echo "Waiting for Llama Stack Server to start..."
         for i in {1..30}; do
@@ -189,7 +202,7 @@ fi
 if [[ "$RUN_VISION_TESTS" == "true" ]]; then
     echo "Running vision tests..."
     set +e
-    uv run pytest -s -v tests/integration/inference/test_vision_inference.py \
+    pytest -s -v tests/integration/inference/test_vision_inference.py \
         --stack-config="$STACK_CONFIG" \
         -k "$PYTEST_PATTERN" \
         --vision-model=ollama/llama3.2-vision:11b \
@@ -257,7 +270,7 @@ echo "=== Running all collected tests in a single pytest command ==="
 echo "Total test files: $(echo $TEST_FILES | wc -w)"
 set +e
 
-uv run pytest -s -v $TEST_FILES \
+pytest -s -v $TEST_FILES \
     --stack-config="$STACK_CONFIG" \
     -k "$PYTEST_PATTERN" \
     --text-model="$TEXT_MODEL" \
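
For illustration, the fail-fast guard added at the top of the script could be
expressed in Python as follows (a sketch, not part of the change; the script
itself uses `command -v` since it runs before any Python environment is
assumed):

```python
import shutil
import sys

# mirror the script's `command -v llama` / `command -v pytest` checks:
# exit early when a required CLI entry point is missing from PATH
for tool in ("llama", "pytest"):
    if shutil.which(tool) is None:
        sys.exit(f"{tool} could not be found, ensure it is installed")
```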

uv.lock (generated)

@@ -1856,7 +1856,7 @@ requires-dist = [
     { name = "llama-api-client", specifier = ">=0.1.2" },
     { name = "llama-stack-client", specifier = ">=0.2.17" },
     { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.17" },
-    { name = "openai", specifier = ">=1.99.6" },
+    { name = "openai", specifier = ">=1.99.6,<1.100.0" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
     { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
     { name = "pandas", marker = "extra == 'ui'" },