Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)
delete generated Dockerfile
parent 3482adb257
commit 8731cc3304
2 changed files with 0 additions and 21 deletions
@@ -1,21 +0,0 @@
FROM python:3.10-slim
WORKDIR /app
RUN apt-get update && apt-get install -y iputils-ping net-tools iproute2 dnsutils telnet curl wget telnet procps psmisc lsof traceroute bubblewrap && rm -rf /var/lib/apt/lists/*
RUN pip install /app/llama-stack-source
RUN pip uninstall -y llama-models
RUN pip install /app/llama-models-source
RUN pip install fastapi fire httpx uvicorn accelerate blobfile fairscale fbgemm-gpu==0.8.0 torch torchvision transformers zmq codeshield transformers matplotlib pillow pandas scikit-learn aiosqlite psycopg2-binary redis blobfile chardet pypdf tqdm numpy scikit-learn scipy nltk sentencepiece transformers faiss-cpu
RUN pip install torch --index-url https://download.pytorch.org/whl/cpu
RUN pip install torch --index-url https://download.pytorch.org/whl/cpu
RUN pip install sentence-transformers --no-deps
# This would be good in production but for debugging flexibility lets not add it right now
# We need a more solid production ready entrypoint.sh anyway
#
CMD ["python", "-m", "llama_stack.distribution.server.server", "./llamastack-run.yaml"]
ADD tmp/configs/local-gpu-build.yaml ./llamastack-build.yaml
ADD tmp/configs/local-gpu-run.yaml ./llamastack-run.yaml
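For context only, a hedged sketch of how an image built from this now-deleted Dockerfile might have been exercised. The image tag, host port, and the assumption that the build context supplies tmp/configs/*.yaml plus the /app/llama-stack-source and /app/llama-models-source trees referenced above are illustrative guesses, not anything recorded in this commit.

# Hypothetical usage sketch, not part of this commit. Assumes the build
# context provides the config files and source trees the Dockerfile expects,
# and that the server listens on the port mapped here.
docker build -t llamastack-local-gpu .
docker run --rm -p 5000:5000 llamastack-local-gpu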