Make TGI adapter compatible with HF Inference API (#97)

Lucain authored on 2024-09-25 23:08:31 +02:00, committed by GitHub
parent 851c30597a
commit 615ed4bfbc
7 changed files with 122 additions and 96 deletions


@@ -0,0 +1,10 @@
+name: local-hf-endpoint
+distribution_spec:
+  description: "Like local, but use Hugging Face Inference Endpoints for running LLM inference.\nSee https://hf.co/docs/api-endpoints."
+  providers:
+    inference: remote::hf::endpoint
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
+image_type: conda
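
Note: the `remote::hf::endpoint` provider targets a dedicated, named Inference Endpoint. As a rough sketch of the underlying `huggingface_hub` calls this flavor relies on (the endpoint name and prompt are placeholders, and this is an illustration rather than the adapter's actual code):

```python
# Minimal sketch: resolve a named HF Inference Endpoint and generate text.
# "my-endpoint-name" is a placeholder; a valid HF token must be configured.
from huggingface_hub import get_inference_endpoint

endpoint = get_inference_endpoint("my-endpoint-name")
endpoint.wait()  # block until the endpoint reports a running state

client = endpoint.client  # InferenceClient bound to the endpoint's URL
print(client.text_generation("Hello, my name is", max_new_tokens=32))
```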


@@ -0,0 +1,10 @@
+name: local-hf-serverless
+distribution_spec:
+  description: "Like local, but use Hugging Face Inference API (serverless) for running LLM inference.\nSee https://hf.co/docs/api-inference."
+  providers:
+    inference: remote::hf::serverless
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
+image_type: conda
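
Note: the serverless flavor is keyed by a model id on the shared Inference API instead of a named endpoint. A comparable sketch (the model id and prompt are placeholders; gated models also need an HF token):

```python
# Minimal sketch: serverless Inference API call addressed by model id.
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Meta-Llama-3.1-8B-Instruct")
print(client.text_generation("Hello, my name is", max_new_tokens=32))
```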


@@ -1,6 +1,6 @@
 name: local-tgi
 distribution_spec:
-  description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
+  description: Like local, but use a TGI server for running LLM inference.
   providers:
     inference: remote::tgi
     memory: meta-reference
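
Note: `remote::tgi` points at a self-hosted TGI server, and `InferenceClient` accepts a raw URL in place of a model id, which is plausibly what lets all three flavors share one client-side code path, as the commit title suggests. A sketch with a placeholder URL:

```python
# Minimal sketch: the same client API against a self-hosted TGI server.
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://localhost:8080")  # placeholder URL
print(client.text_generation("Hello, my name is", max_new_tokens=32))
```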