(security fix) - update base image for all docker images to python:3.13.1-slim (#7388)

* update base image for all docker files

* remove unused files

* fix sec vuln
Ishaan Jaff 2024-12-23 16:20:47 -08:00 committed by GitHub
parent d883241b36
commit 564ecc728d
11 changed files with 5 additions and 328 deletions


@@ -1,8 +1,8 @@
 # Base image for building
-ARG LITELLM_BUILD_IMAGE=python:3.11.8-slim
+ARG LITELLM_BUILD_IMAGE=python:3.13.1-slim
 # Runtime image
-ARG LITELLM_RUNTIME_IMAGE=python:3.11.8-slim
+ARG LITELLM_RUNTIME_IMAGE=python:3.13.1-slim
 # Builder stage
 FROM $LITELLM_BUILD_IMAGE AS builder


@@ -1,8 +1,8 @@
 # Base image for building
-ARG LITELLM_BUILD_IMAGE=python:3.11.8-slim
+ARG LITELLM_BUILD_IMAGE=python:3.13.1-slim
 # Runtime image
-ARG LITELLM_RUNTIME_IMAGE=python:3.11.8-slim
+ARG LITELLM_RUNTIME_IMAGE=python:3.13.1-slim
 # Builder stage
 FROM $LITELLM_BUILD_IMAGE AS builder
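
The two Dockerfile hunks above only change the ARG defaults, so any image rebuilt from them picks up python:3.13.1-slim automatically. As a quick sanity check (not part of this commit), a short Python script run inside the rebuilt container can confirm the interpreter was actually bumped; the image name and script path below are placeholders:

import sys

# Illustrative check only: confirm the rebuilt image runs the expected 3.13.x interpreter.
# Example invocation (hypothetical image name): docker run --rm my-litellm-image python check_python.py
assert sys.version_info[:2] == (3, 13), f"expected Python 3.13, got {sys.version}"
print(f"OK: running Python {sys.version.split()[0]}")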


@@ -1,5 +1,5 @@
 # Use the specific Node.js v20.11.0 image
-FROM node:20.18.1
+FROM node:20.18.1-alpine3.20
 # Set the working directory inside the container
 WORKDIR /app


@@ -1,6 +0,0 @@
-model_list:
-  - model_name: gpt-3.5-turbo
-    litellm_params:
-      model: openai/my-fake-model
-      api_key: my-fake-key
-      api_base: http://0.0.0.0:8090


@@ -1,20 +0,0 @@
-# Use the official Python image as the base image
-FROM python:3.9-slim
-# Set the working directory in the container
-WORKDIR /app
-# Copy the Python requirements file
-COPY requirements.txt .
-# Install the Python dependencies
-RUN pip install --no-cache-dir -r requirements.txt
-# Copy the application code
-COPY . .
-# Expose the port the app will run on
-EXPOSE 8090
-# Start the application
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8090"]


@@ -1,57 +0,0 @@
-# import sys, os
-# sys.path.insert(
-#     0, os.path.abspath("../")
-# )  # Adds the parent directory to the system path
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-import litellm
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-litellm_router = litellm.Router(
-    model_list=[
-        {
-            "model_name": "anything",  # model alias -> loadbalance between models with same `model_name`
-            "litellm_params": {  # params for litellm completion/embedding call
-                "model": "openai/anything",  # actual model name
-                "api_key": "sk-1234",
-                "api_base": "https://exampleopenaiendpoint-production.up.railway.app/",
-            },
-        }
-    ]
-)
-# for completion
-@app.post("/chat/completions")
-@app.post("/v1/chat/completions")
-async def completion(request: Request):
-    # this proxy uses the OpenAI SDK to call a fixed endpoint
-    response = await litellm_router.acompletion(
-        model="anything",
-        messages=[
-            {
-                "role": "user",
-                "content": "hello who are you",
-            }
-        ],
-    )
-    return response
-if __name__ == "__main__":
-    import uvicorn
-    # run this on 8090, 8091, 8092 and 8093
-    uvicorn.run(app, host="0.0.0.0", port=8090)


@@ -1,34 +0,0 @@
-import uuid
-from locust import HttpUser, between, task
-class MyUser(HttpUser):
-    wait_time = between(1, 5)
-    @task
-    def chat_completion(self):
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": "Bearer sk-1234",
-            # Include any additional headers you may need for authentication, etc.
-        }
-        # Customize the payload with "model" and "messages" keys
-        payload = {
-            "model": "fake-openai-endpoint",
-            "messages": [
-                {
-                    "role": "system",
-                    "content": f"{uuid.uuid4()} this is a very sweet test message from ishaan"
-                    * 100,
-                },
-                {"role": "user", "content": "Hello, how are you?"},
-            ],
-            # Add more data as necessary
-        }
-        # Make a POST request to the "chat/completions" endpoint
-        self.client.post("chat/completions", json=payload, headers=headers)
-        # Print or log the response if needed


@@ -1,50 +0,0 @@
-# import sys, os
-# sys.path.insert(
-#     0, os.path.abspath("../")
-# )  # Adds the parent directory to the system path
-import uuid
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-# for completion
-@app.post("/chat/completions")
-@app.post("/v1/chat/completions")
-async def completion(request: Request):
-    return {
-        "id": f"chatcmpl-{uuid.uuid4().hex}",
-        "object": "chat.completion",
-        "created": 1677652288,
-        "model": "gpt-3.5-turbo-0125",
-        "system_fingerprint": "fp_44709d6fcb",
-        "choices": [
-            {
-                "index": 0,
-                "message": {
-                    "role": "assistant",
-                    "content": "\n\nHello there, how may I assist you today?",
-                },
-                "logprobs": None,
-                "finish_reason": "stop",
-            }
-        ],
-        "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
-    }
-if __name__ == "__main__":
-    import uvicorn
-    # run this on 8090, 8091, 8092 and 8093
-    uvicorn.run(app, host="0.0.0.0", port=8090)


@@ -1,51 +0,0 @@
-# import sys, os
-# sys.path.insert(
-#     0, os.path.abspath("../")
-# )  # Adds the parent directory to the system path
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-from openai import AsyncOpenAI
-import litellm
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-litellm_client = AsyncOpenAI(
-    base_url="https://exampleopenaiendpoint-production.up.railway.app/",
-    api_key="sk-1234",
-)
-# for completion
-@app.post("/chat/completions")
-@app.post("/v1/chat/completions")
-async def completion(request: Request):
-    # this proxy uses the OpenAI SDK to call a fixed endpoint
-    response = await litellm.acompletion(
-        model="openai/anything",
-        messages=[
-            {
-                "role": "user",
-                "content": "hello who are you",
-            }
-        ],
-        client=litellm_client,
-    )
-    return response
-if __name__ == "__main__":
-    import uvicorn
-    # run this on 8090, 8091, 8092 and 8093
-    uvicorn.run(app, host="0.0.0.0", port=8090)


@@ -1,57 +0,0 @@
-# import sys, os
-# sys.path.insert(
-#     0, os.path.abspath("../")
-# )  # Adds the parent directory to the system path
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-import litellm
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-litellm_router = litellm.Router(
-    model_list=[
-        {
-            "model_name": "anything",  # model alias -> loadbalance between models with same `model_name`
-            "litellm_params": {  # params for litellm completion/embedding call
-                "model": "openai/anything",  # actual model name
-                "api_key": "sk-1234",
-                "api_base": "https://exampleopenaiendpoint-production.up.railway.app/",
-            },
-        }
-    ]
-)
-# for completion
-@app.post("/chat/completions")
-@app.post("/v1/chat/completions")
-async def completion(request: Request):
-    # this proxy uses the OpenAI SDK to call a fixed endpoint
-    response = await litellm_router.acompletion(
-        model="anything",
-        messages=[
-            {
-                "role": "user",
-                "content": "hello who are you",
-            }
-        ],
-    )
-    return response
-if __name__ == "__main__":
-    import uvicorn
-    # run this on 8090, 8091, 8092 and 8093
-    uvicorn.run(app, host="0.0.0.0", port=8090)


@@ -1,48 +0,0 @@
-# import sys, os
-# sys.path.insert(
-#     0, os.path.abspath("../")
-# )  # Adds the parent directory to the system path
-from fastapi import FastAPI, Request
-from fastapi.middleware.cors import CORSMiddleware
-from openai import AsyncOpenAI
-app = FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-litellm_client = AsyncOpenAI(
-    base_url="https://exampleopenaiendpoint-production.up.railway.app/",
-    api_key="sk-1234",
-)
-# for completion
-@app.post("/chat/completions")
-@app.post("/v1/chat/completions")
-async def completion(request: Request):
-    # this proxy uses the OpenAI SDK to call a fixed endpoint
-    response = await litellm_client.chat.completions.create(
-        model="anything",
-        messages=[
-            {
-                "role": "user",
-                "content": "hello who are you",
-            }
-        ],
-    )
-    return response
-if __name__ == "__main__":
-    import uvicorn
-    # run this on 8090, 8091, 8092 and 8093
-    uvicorn.run(app, host="0.0.0.0", port=8090)
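
For context, every helper server removed in this commit exposed the same OpenAI-style /chat/completions route on ports 8090-8093. A minimal sketch of the kind of request they were exercised with (the URL, key, and model name come from the removed files and are placeholders, not part of any current config):

import requests

# Hypothetical local target; the removed mock servers listened on 0.0.0.0:8090-8093.
url = "http://0.0.0.0:8090/chat/completions"
payload = {
    "model": "anything",
    "messages": [{"role": "user", "content": "hello who are you"}],
}
resp = requests.post(
    url,
    json=payload,
    headers={"Authorization": "Bearer sk-1234", "Content-Type": "application/json"},
    timeout=30,
)
print(resp.json())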