(docs) litellm ollama docker image

ishaan-jaff 2023-10-13 08:45:17 -07:00
parent f2eb1b4658
commit 5a39a3554d
4 changed files with 64 additions and 0 deletions

@@ -0,0 +1,25 @@
FROM ollama/ollama as ollama
RUN echo "auto installing llama2"
# Start the Ollama server in the background just long enough to pull llama2 into the image
RUN ollama serve & sleep 2 && ollama pull llama2
RUN echo "installing litellm"
RUN apt-get update
# Install Python
RUN apt-get install -y python3 python3-pip
# Set the working directory in the container
WORKDIR /app
# Copy the current directory contents into the container at /app
COPY . /app
# Install litellm from PyPI
RUN python3 -m pip install litellm
COPY start.sh /start.sh
ENTRYPOINT [ "/bin/bash", "/start.sh" ]
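
For context, a minimal sketch of how an image built from the Dockerfile above might be run locally. The image tag and the published ports are illustrative assumptions, not part of this commit; port 8000 matches the api_base used by the test script at the end of this commit.

# Build the image from the repository root (the tag name is an assumption)
docker build -t litellm-ollama .
# Run it, publishing the assumed proxy port (8000) and Ollama's default port (11434)
docker run -p 8000:8000 -p 11434:11434 litellm-ollama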

@@ -0,0 +1 @@
litellm

@@ -0,0 +1,2 @@
# Start the Ollama server in the background
ollama serve &
# Run the LiteLLM proxy in the foreground so the container keeps running
litellm

@@ -0,0 +1,36 @@
import openai

# Point the OpenAI SDK at the local LiteLLM proxy
api_base = "http://0.0.0.0:8000"
openai.api_base = api_base
openai.api_key = "temp-key"  # placeholder key for the local proxy
print(openai.api_base)

print('LiteLLM: response from proxy with streaming')
response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it"
        }
    ],
    stream=True
)
for chunk in response:
    print(f'LiteLLM: streaming response from proxy {chunk}')

# Same request without streaming
response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it"
        }
    ]
)
print(f'LiteLLM: response from proxy {response}')
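
The same proxy can also be exercised without the OpenAI SDK. A hedged curl sketch, assuming the container is running and the proxy serves an OpenAI-compatible chat completions route under the api_base used in the script above (the exact route path is an assumption, not something shown in this commit):

# Assumes the proxy is reachable at the api_base used by the test script
curl http://0.0.0.0:8000/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "ollama/llama2", "messages": [{"role": "user", "content": "this is a test request, acknowledge that you got it"}]}'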