forked from phoenix-oss/llama-stack-mirror
[Inference] Use huggingface_hub inference client for TGI adapter (#53)
* Use huggingface_hub inference client for TGI inference
* Update the default value for TGI URL
* Use InferenceClient.text_generation for TGI inference
* Fixes post-review and split TGI adapter into local and Inference Endpoints ones
* Update CLI reference and add typing
* Rename TGI Adapter class
* Use HfApi to get the namespace when not provided in the HF endpoint name
* Remove unnecessary method argument
* Improve TGI adapter initialization condition
* Move helper into impl file + fix merging conflicts
This commit is contained in:
parent 191cd28831
commit 736092f6bc
6 changed files with 171 additions and 72 deletions
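The heart of the change: both adapters are now driven by a single TGIImplConfig instead of a raw URL. A minimal sketch of the three config fields the diff below reads, and the dispatch one might expect between the two adapters; the dataclass and the pick_adapter helper are illustrative stand-ins, not code from this commit:

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class TGIConfigSketch:
        # Hypothetical stand-in for TGIImplConfig; these are the three
        # fields the diff below actually reads.
        url: Optional[str] = None                # self-hosted TGI server URL
        api_token: Optional[str] = None          # HF token, if auth is required
        hf_endpoint_name: Optional[str] = None   # "namespace/name" or bare name


    def pick_adapter(config: TGIConfigSketch) -> str:
        # Assumed dispatch: a set hf_endpoint_name selects the managed
        # Inference Endpoints adapter, otherwise the local TGI one.
        return "InferenceEndpointAdapter" if config.hf_endpoint_name else "TGIAdapter"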
@@ -4,63 +4,68 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import AsyncGenerator, List
-
-import httpx
+from typing import Any, AsyncGenerator, Dict
+
+import requests
+
+from huggingface_hub import HfApi, InferenceClient
 from llama_models.llama3.api.chat_format import ChatFormat
-from llama_models.llama3.api.datatypes import Message, StopReason
+from llama_models.llama3.api.datatypes import StopReason
 from llama_models.llama3.api.tokenizer import Tokenizer
-from text_generation import Client
 
 from llama_toolchain.inference.api import *  # noqa: F403
 from llama_toolchain.inference.prepare_messages import prepare_messages
 
 from .config import TGIImplConfig
 
-SUPPORTED_MODELS = {
+HF_SUPPORTED_MODELS = {
     "Meta-Llama3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "Meta-Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
     "Meta-Llama3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
 }
 
 
-class TGIInferenceAdapter(Inference):
-    def __init__(self, url: str) -> None:
-        self.url = url.rstrip("/")
+class TGIAdapter(Inference):
+    def __init__(self, config: TGIImplConfig) -> None:
+        self.config = config
         self.tokenizer = Tokenizer.get_instance()
         self.formatter = ChatFormat(self.tokenizer)
-        self.model = None
-        self.max_tokens = None
+
+    @property
+    def client(self) -> InferenceClient:
+        return InferenceClient(model=self.config.url, token=self.config.api_token)
+
+    def _get_endpoint_info(self) -> Dict[str, Any]:
+        return {
+            **self.client.get_endpoint_info(),
+            "inference_url": self.config.url,
+        }
 
     async def initialize(self) -> None:
-        hf_models = {v: k for k, v in SUPPORTED_MODELS.items()}
-
         try:
-            print(f"Connecting to TGI server at: {self.url}")
-            async with httpx.AsyncClient() as client:
-                response = await client.get(f"{self.url}/info")
-                response.raise_for_status()
-                info = response.json()
-                if "model_id" not in info:
-                    raise RuntimeError("Missing model_id in model info")
-                if "max_total_tokens" not in info:
-                    raise RuntimeError("Missing max_total_tokens in model info")
-                self.max_tokens = info["max_total_tokens"]
+            info = self._get_endpoint_info()
+            if "model_id" not in info:
+                raise RuntimeError("Missing model_id in model info")
+            if "max_total_tokens" not in info:
+                raise RuntimeError("Missing max_total_tokens in model info")
+            self.max_tokens = info["max_total_tokens"]
 
-                model_id = info["model_id"]
-                if model_id not in hf_models:
-                    raise RuntimeError(
-                        f"TGI is serving model: {model_id}, use one of the supported models: {','.join(hf_models.keys())}"
-                    )
-
-                self.model = hf_models[model_id]
+            model_id = info["model_id"]
+            model_name = next(
+                (name for name, id in HF_SUPPORTED_MODELS.items() if id == model_id),
+                None,
+            )
+            if model_name is None:
+                raise RuntimeError(
+                    f"TGI is serving model: {model_id}, use one of the supported models: {', '.join(HF_SUPPORTED_MODELS.values())}"
+                )
+            self.model_name = model_name
+            self.inference_url = info["inference_url"]
         except Exception as e:
             import traceback
 
             traceback.print_exc()
-            raise RuntimeError("Could not connect to TGI server") from e
+            raise RuntimeError(f"Error initializing TGIAdapter: {e}") from e
 
     async def shutdown(self) -> None:
         pass
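With this hunk, initialize() no longer talks to the server over httpx; everything goes through InferenceClient. A rough stand-alone equivalent of what the new code relies on, with the URL as a placeholder for any running TGI server:

    from huggingface_hub import InferenceClient

    # Placeholder URL: any reachable TGI server works here.
    client = InferenceClient(model="http://localhost:8080", token=None)

    # For TGI-backed models this hits the server's /info route and returns
    # fields like model_id and max_total_tokens, which initialize() checks
    # before accepting requests.
    info = client.get_endpoint_info()
    print(info["model_id"], info["max_total_tokens"])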
@@ -68,16 +73,6 @@ class TGIInferenceAdapter(Inference):
     async def completion(self, request: CompletionRequest) -> AsyncGenerator:
         raise NotImplementedError()
 
-    def _convert_messages(self, messages: List[Message]) -> List[Message]:
-        ret = []
-        for message in messages:
-            if message.role == "ipython":
-                role = "tool"
-            else:
-                role = message.role
-            ret.append({"role": role, "content": message.content})
-        return ret
-
     def get_chat_options(self, request: ChatCompletionRequest) -> dict:
         options = {}
         if request.sampling_params is not None:
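The hunk cuts off inside get_chat_options. For orientation, one plausible completion, based on how the resulting dict is splatted into text_generation later in the diff; the exact attribute set is an assumption, not the committed code:

    def get_chat_options(request) -> dict:
        # Sketch only: forward whichever sampling parameters are set on
        # the request as TGI generation kwargs.
        options = {}
        if request.sampling_params is not None:
            for attr in ("temperature", "top_p", "top_k"):
                value = getattr(request.sampling_params, attr, None)
                if value is not None:
                    options[attr] = value
        return options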
@@ -89,47 +84,47 @@ class TGIInferenceAdapter(Inference):
     async def chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
         messages = prepare_messages(request)
-
         model_input = self.formatter.encode_dialog_prompt(messages)
         prompt = self.tokenizer.decode(model_input.tokens)
 
+        input_tokens = len(model_input.tokens)
         max_new_tokens = min(
-            request.sampling_params.max_tokens or self.max_tokens,
-            self.max_tokens - len(model_input.tokens) - 1,
+            request.sampling_params.max_tokens or (self.max_tokens - input_tokens),
+            self.max_tokens - input_tokens - 1,
         )
 
-        if request.model != self.model:
-            raise ValueError(
-                f"Model mismatch, expected: {self.model}, got: {request.model}"
-            )
+        print(f"Calculated max_new_tokens: {max_new_tokens}")
+
+        assert (
+            request.model == self.model_name
+        ), f"Model mismatch, expected {self.model_name}, got {request.model}"
 
         options = self.get_chat_options(request)
-
-        client = Client(base_url=self.url)
         if not request.stream:
-            r = client.generate(
-                prompt,
+            response = self.client.text_generation(
+                prompt=prompt,
+                stream=False,
+                details=True,
                 max_new_tokens=max_new_tokens,
                 stop_sequences=["<|eom_id|>", "<|eot_id|>"],
                 **options,
             )
-
-            if r.details.finish_reason:
-                if r.details.finish_reason == "stop":
+            stop_reason = None
+            if response.details.finish_reason:
+                if response.details.finish_reason == "stop":
                     stop_reason = StopReason.end_of_turn
-                elif r.details.finish_reason == "length":
+                elif response.details.finish_reason == "length":
                     stop_reason = StopReason.out_of_tokens
                 else:
                     stop_reason = StopReason.end_of_message
             else:
                 stop_reason = StopReason.out_of_tokens
 
             completion_message = self.formatter.decode_assistant_message_from_content(
-                r.generated_text, stop_reason
+                response.generated_text,
+                stop_reason,
             )
             yield ChatCompletionResponse(
                 completion_message=completion_message,
                 logprobs=None,
             )
 
         else:
             yield ChatCompletionResponseStreamChunk(
                 event=ChatCompletionResponseEvent(
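The reworked token budget is easiest to check with numbers: suppose the endpoint reports max_total_tokens = 4096, the encoded prompt is 1000 tokens, and the request leaves max_tokens unset:

    max_total_tokens = 4096   # from the endpoint info
    input_tokens = 1000       # len(model_input.tokens)
    requested = None          # request.sampling_params.max_tokens

    max_new_tokens = min(
        requested or (max_total_tokens - input_tokens),  # 3096
        max_total_tokens - input_tokens - 1,             # 3095
    )
    assert max_new_tokens == 3095

The old first argument (requested or self.max_tokens) evaluated to 4096 here and always collapsed to the second bound anyway; the new form makes the remaining-budget intent explicit.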
@@ -137,14 +132,15 @@ class TGIInferenceAdapter(Inference):
                     delta="",
                 )
             )
 
             buffer = ""
             ipython = False
             stop_reason = None
             tokens = []
 
-            for response in client.generate_stream(
-                prompt,
+            for response in self.client.text_generation(
+                prompt=prompt,
+                stream=True,
+                details=True,
                 max_new_tokens=max_new_tokens,
                 stop_sequences=["<|eom_id|>", "<|eot_id|>"],
                 **options,
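With stream=True and details=True, InferenceClient.text_generation yields one chunk per generated token instead of a single response. A minimal consumer, with a placeholder server URL, mirroring what the loop in the hunk above does before decoding stop tokens:

    from huggingface_hub import InferenceClient

    client = InferenceClient(model="http://localhost:8080")  # placeholder URL
    for chunk in client.text_generation(
        prompt="Hello",
        stream=True,
        details=True,
        max_new_tokens=32,
    ):
        # Each chunk carries one token; the adapter accumulates these
        # into buffer/tokens and watches for <|eom_id|> / <|eot_id|>.
        print(chunk.token.text, end="")
        if chunk.details is not None:
            # Only the final chunk carries details (e.g. finish_reason).
            print("\nfinish_reason:", chunk.details.finish_reason)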
@@ -231,3 +227,48 @@ class TGIInferenceAdapter(Inference):
                 stop_reason=stop_reason,
             )
         )
+
+
+class InferenceEndpointAdapter(TGIAdapter):
+    def __init__(self, config: TGIImplConfig) -> None:
+        super().__init__(config)
+        self.config.url = self._construct_endpoint_url()
+
+    def _construct_endpoint_url(self) -> str:
+        hf_endpoint_name = self.config.hf_endpoint_name
+        assert hf_endpoint_name.count("/") <= 1, (
+            "Endpoint name must be in the format of 'namespace/endpoint_name' "
+            "or 'endpoint_name'"
+        )
+        if "/" not in hf_endpoint_name:
+            hf_namespace: str = self.get_namespace()
+            endpoint_path = f"{hf_namespace}/{hf_endpoint_name}"
+        else:
+            endpoint_path = hf_endpoint_name
+        return f"https://api.endpoints.huggingface.cloud/v2/endpoint/{endpoint_path}"
+
+    def get_namespace(self) -> str:
+        return HfApi().whoami()["name"]
+
+    @property
+    def client(self) -> InferenceClient:
+        return InferenceClient(model=self.inference_url, token=self.config.api_token)
+
+    def _get_endpoint_info(self) -> Dict[str, Any]:
+        headers = {
+            "accept": "application/json",
+            "authorization": f"Bearer {self.config.api_token}",
+        }
+        response = requests.get(self.config.url, headers=headers)
+        response.raise_for_status()
+        endpoint_info = response.json()
+        return {
+            "inference_url": endpoint_info["status"]["url"],
+            "model_id": endpoint_info["model"]["repository"],
+            "max_total_tokens": int(
+                endpoint_info["model"]["image"]["custom"]["env"]["MAX_TOTAL_TOKENS"]
+            ),
+        }
+
+    async def initialize(self) -> None:
+        await super().initialize()
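For reference, the URL construction in _construct_endpoint_url is mechanical; with a bare endpoint name the namespace comes from HfApi().whoami(). A worked example with placeholder names:

    hf_namespace = "my-org"          # e.g. HfApi().whoami()["name"]
    hf_endpoint_name = "llama31-8b"  # bare name, no namespace

    endpoint_path = (
        hf_endpoint_name
        if "/" in hf_endpoint_name
        else f"{hf_namespace}/{hf_endpoint_name}"
    )
    url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{endpoint_path}"
    assert url == "https://api.endpoints.huggingface.cloud/v2/endpoint/my-org/llama31-8b"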