forked from phoenix/litellm-mirror
fix(Dockerfile): support mac
commit 50b741f8fa (parent 9660f0e0b1)
2 changed files with 18 additions and 18 deletions
Dockerfile (34 changes)
@@ -1,17 +1,17 @@
-# Base image
+# Base image for building
 ARG LITELLM_BUILD_IMAGE=python:3.9
 
 # Runtime image
 ARG LITELLM_RUNTIME_IMAGE=python:3.9-slim
 
 # allow users to specify, else use python 3.9
 # Builder stage
 FROM $LITELLM_BUILD_IMAGE as builder
 
 # Set the working directory to /app
 WORKDIR /app
 
 # Install build dependencies
-RUN apt-get update && \
+RUN apt-get clean && apt-get update && \
     apt-get install -y gcc python3-dev && \
     rm -rf /var/lib/apt/lists/*
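The two ARG defaults above are what the "allow users to specify, else use python 3.9" comment refers to: both base images can be swapped at build time. The `apt-get clean` added before `apt-get update` is the mac-facing fix; clearing the package cache first is a common workaround for the stale-cache and hash-mismatch errors apt can hit when building under emulation on Apple Silicon. A minimal sketch of overriding the defaults (the `litellm-proxy` tag is an example name, not from this commit):

    # Build with the defaults declared above (python:3.9 / python:3.9-slim)
    docker build -t litellm-proxy .

    # Or pin different base images via the declared build args
    docker build \
      --build-arg LITELLM_BUILD_IMAGE=python:3.11 \
      --build-arg LITELLM_RUNTIME_IMAGE=python:3.11-slim \
      -t litellm-proxy .
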
@@ -31,26 +31,26 @@ RUN pip install dist/*.whl
 RUN pip install wheel && \
     pip wheel --no-cache-dir --wheel-dir=/app/wheels -r requirements.txt
 ###############################################################################
 # Clear out any existing builds and build the package
 RUN rm -rf dist/* && python -m build
 # There should be only one wheel file now, assume the build only creates one
 RUN ls -1 dist/*.whl | head -1
 # Runtime stage
 FROM $LITELLM_RUNTIME_IMAGE as runtime
 WORKDIR /app
 # Copy the current directory contents into the container at /app
 COPY . .
-# Depending on wheel naming patterns, use a wildcard if multiple versions are possible
+# Copy the built wheel from the builder stage to the runtime stage; assumes only one wheel file is present
 COPY --from=builder /app/dist/*.whl .
 COPY --from=builder /app/wheels /app/wheels
 RUN pip install --no-index --find-links=/app/wheels -r requirements.txt
 # Trigger the Prisma CLI to be installed
 RUN prisma -v
 # Install the built wheel using pip; again using a wildcard if it's the only file
 RUN pip install *.whl && rm -f *.whl
 EXPOSE 4000/tcp
-# Start the litellm proxy, using the `litellm` cli command https://docs.litellm.ai/docs/simple_proxy
+# Start the litellm proxy with default options
 CMD ["--port", "4000"]
-# Allow users to override the CMD when running the container, allows users to pass litellm args
+# Set your entrypoint and command
 ENTRYPOINT ["litellm"]
 CMD ["--port", "4000"]
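Because the runtime stage pairs `ENTRYPOINT ["litellm"]` with `CMD ["--port", "4000"]`, the container runs `litellm --port 4000` by default, and anything placed after the image name on `docker run` replaces the CMD; that is how users pass their own litellm args. A sketch, again assuming the example `litellm-proxy` tag (the config path is illustrative):

    # Default arguments: the container runs `litellm --port 4000`
    docker run -p 4000:4000 litellm-proxy

    # Override the CMD to pass different litellm args
    docker run -p 4000:4000 litellm-proxy --port 4000 --config /app/config.yaml

The second changed file is Python; the hunk below sits inside litellm's Router class.
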
@@ -964,7 +964,7 @@ class Router:
 
         if "azure" in model_name:
             if api_base is None:
-                raise ValueError("api_base is required for Azure OpenAI. Set it on your config")
+                raise ValueError(f"api_base is required for Azure OpenAI. Set it on your config. Model - {model}")
             if api_version is None:
                 api_version = "2023-07-01-preview"
             if "gateway.ai.cloudflare.com" in api_base:
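The sharper error message matters because a Router usually carries several deployments; including the model in the ValueError identifies which model_list entry is missing its api_base. A minimal sketch of a correctly configured Azure entry, with placeholder names and endpoint (none of these values come from this commit):

    from litellm import Router

    router = Router(model_list=[
        {
            "model_name": "gpt-3.5-turbo",  # alias that callers request
            "litellm_params": {
                "model": "azure/my-deployment",  # placeholder deployment name
                "api_key": "my-azure-api-key",   # placeholder
                # Required for Azure; omitting it raises the ValueError above,
                # which now names this entry's model in the message.
                "api_base": "https://example-endpoint.openai.azure.com",
                # Optional; the code above falls back to this default.
                "api_version": "2023-07-01-preview",
            },
        },
    ])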