diff --git a/distributions/meta-reference-gpu/build.yaml~HEAD b/distributions/meta-reference-gpu/build.yaml~HEAD
new file mode 100644
index 000000000..08e034154
--- /dev/null
+++ b/distributions/meta-reference-gpu/build.yaml~HEAD
@@ -0,0 +1,14 @@
+name: meta-reference-gpu
+distribution_spec:
+  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
+  description: Use code from `llama_stack` itself to serve all llama stack APIs
+  providers:
+    inference: meta-reference
+    memory:
+    - meta-reference
+    - remote::chromadb
+    - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
+image_type: docker