Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-21 09:23:13 +00:00
fix(misc): pin openai dependency to < 1.100.0 (#3192)
This OpenAI client release (0843a11164) ends up breaking litellm (169a17400f, litellm/types/llms/openai.py#L40).

Update the dependency pin accordingly. Also make the imports a bit more defensive, in case something else during `llama stack build` ends up moving openai back to a previous version.

## Test Plan

Run the pre-release script integration tests.
parent f8398d25ff, commit 27d6becfd0
4 changed files with 27 additions and 8 deletions
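The defensive import in the first hunk below is a plain `try`/`except ImportError` fallback: newer openai releases expose `ChatCompletionMessageFunctionToolCall`, while older ones only ship `ChatCompletionMessageToolCall`, and either way the type ends up bound to the same alias. A minimal standalone sketch of that pattern (module paths and alias taken from the diff; the rest of the real module is elided):

```python
# Sketch of the fallback-import pattern applied in the hunk below.
# On openai releases that ship ChatCompletionMessageFunctionToolCall the first
# import wins; on older releases the except branch binds the legacy
# ChatCompletionMessageToolCall type to the same alias, so downstream code can
# use one name regardless of the installed version.
try:
    from openai.types.chat import (
        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )
except ImportError:
    from openai.types.chat.chat_completion_message_tool_call import (
        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
    )

print(OpenAIChatCompletionMessageFunctionToolCall)  # resolves on either version
```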
@@ -31,9 +31,15 @@ from openai.types.chat import (
 from openai.types.chat import (
     ChatCompletionContentPartTextParam as OpenAIChatCompletionContentPartTextParam,
 )
-from openai.types.chat import (
-    ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
-)
+try:
+    from openai.types.chat import (
+        ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
+except ImportError:
+    from openai.types.chat.chat_completion_message_tool_call import (
+        ChatCompletionMessageToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+    )
 from openai.types.chat import (
     ChatCompletionMessageParam as OpenAIChatCompletionMessage,
 )
@@ -33,7 +33,7 @@ dependencies = [
     "jsonschema",
     "llama-stack-client>=0.2.17",
     "llama-api-client>=0.1.2",
-    "openai>=1.99.6",
+    "openai>=1.99.6,<1.100.0",
     "prompt-toolkit",
     "python-dotenv",
     "python-jose[cryptography]",
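A quick way to confirm that a given environment actually resolved openai inside the new range is a small version check like the sketch below (it assumes the `packaging` library is importable, which it usually is as a transitive dependency; the specifier string is the one added above):

```python
# Sketch: verify the installed openai version satisfies the pin from pyproject.toml.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

pin = SpecifierSet(">=1.99.6,<1.100.0")
installed = version("openai")
print(f"openai {installed} satisfies {pin}: {installed in pin}")
```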
@@ -144,6 +144,19 @@ else
   export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
 fi
 
+# check if "llama" and "pytest" are available. this script does not use `uv run` given
+# it can be used in a pre-release environment where we have not been able to tell
+# uv about pre-release dependencies properly (yet).
+if ! command -v llama &> /dev/null; then
+  echo "llama could not be found, ensure llama-stack is installed"
+  exit 1
+fi
+
+if ! command -v pytest &> /dev/null; then
+  echo "pytest could not be found, ensure pytest is installed"
+  exit 1
+fi
+
 # Start Llama Stack Server if needed
 if [[ "$STACK_CONFIG" == *"server:"* ]]; then
   # check if server is already running
@@ -151,7 +164,7 @@ if [[ "$STACK_CONFIG" == *"server:"* ]]; then
     echo "Llama Stack Server is already running, skipping start"
   else
     echo "=== Starting Llama Stack Server ==="
-    nohup uv run llama stack run ci-tests --image-type venv > server.log 2>&1 &
+    nohup llama stack run ci-tests --image-type venv > server.log 2>&1 &
 
     echo "Waiting for Llama Stack Server to start..."
     for i in {1..30}; do
@@ -189,7 +202,7 @@ fi
 if [[ "$RUN_VISION_TESTS" == "true" ]]; then
   echo "Running vision tests..."
   set +e
-  uv run pytest -s -v tests/integration/inference/test_vision_inference.py \
+  pytest -s -v tests/integration/inference/test_vision_inference.py \
     --stack-config="$STACK_CONFIG" \
     -k "$PYTEST_PATTERN" \
     --vision-model=ollama/llama3.2-vision:11b \
@@ -257,7 +270,7 @@ echo "=== Running all collected tests in a single pytest command ==="
 echo "Total test files: $(echo $TEST_FILES | wc -w)"
 
 set +e
-uv run pytest -s -v $TEST_FILES \
+pytest -s -v $TEST_FILES \
   --stack-config="$STACK_CONFIG" \
   -k "$PYTEST_PATTERN" \
   --text-model="$TEXT_MODEL" \
uv.lock (generated, 2 changes)
@@ -1856,7 +1856,7 @@ requires-dist = [
     { name = "llama-api-client", specifier = ">=0.1.2" },
     { name = "llama-stack-client", specifier = ">=0.2.17" },
     { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.17" },
-    { name = "openai", specifier = ">=1.99.6" },
+    { name = "openai", specifier = ">=1.99.6,<1.100.0" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
     { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
     { name = "pandas", marker = "extra == 'ui'" },