mirror of https://github.com/meta-llama/llama-stack.git
Use the lower-level generate_stream() method for correct tool calling
commit 046afcb945 (parent f355b9b844)
2 changed files with 80 additions and 70 deletions
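The switch from huggingface_hub's chat-completions wrapper to the lower-level text_generation client means the adapter streams raw tokens from TGI, so special tokens such as <|python_tag|>, <|eom_id|>, and <|eot_id|> stay visible and tool calls can be detected and parsed locally. A minimal sketch of that streaming API, separate from the adapter code in the diff below and assuming a TGI server at http://localhost:8080 (the endpoint and the prompt string are illustrative, not part of this commit):

from text_generation import Client

# Illustrative endpoint; the adapter takes its URL from the provider config.
client = Client(base_url="http://localhost:8080")

prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHello!<|eot_id|>"

buffer = ""
token_ids = []
for response in client.generate_stream(
    prompt,
    max_new_tokens=128,
    stop_sequences=["<|eom_id|>", "<|eot_id|>"],
):
    token = response.token
    # Each raw token is visible here, special tokens included, so a leading
    # "<|python_tag|>" can be spotted in-stream to flag a tool call.
    buffer += token.text
    token_ids.append(token.id)

print(buffer)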
(first changed file: the TGI inference adapter)

@@ -8,14 +8,15 @@ from typing import AsyncGenerator, List
 
 import httpx
 
-from huggingface_hub import InferenceClient
 
 from llama_models.llama3.api.chat_format import ChatFormat
 from llama_models.llama3.api.datatypes import Message, StopReason
 from llama_models.llama3.api.tokenizer import Tokenizer
 
+from text_generation import Client
 
 from llama_toolchain.inference.api import *  # noqa: F403
+from llama_toolchain.inference.prepare_messages import prepare_messages
 
 
 SUPPORTED_MODELS = {
@@ -28,26 +29,38 @@ SUPPORTED_MODELS = {
 class TGIInferenceAdapter(Inference):
     def __init__(self, url: str) -> None:
         self.url = url.rstrip("/")
-        tokenizer = Tokenizer.get_instance()
-        self.formatter = ChatFormat(tokenizer)
+        self.tokenizer = Tokenizer.get_instance()
+        self.formatter = ChatFormat(self.tokenizer)
         self.model = None
+        self.max_tokens = None
 
     async def initialize(self) -> None:
        hf_models = {v: k for k, v in SUPPORTED_MODELS.items()}
 
-        async with httpx.AsyncClient() as client:
-            response = await client.get(f"{self.url}/info")
-            response.raise_for_status()
-            info = response.json()
-            if "model_id" not in info:
-                raise RuntimeError("Missing model_id in model info")
-            model_id = info["model_id"]
-            if model_id not in hf_models:
-                raise RuntimeError(
-                    f"TGI is serving model: {model_id}, use one of the supported models: {','.join(hf_models.keys())}"
-                )
-
-            self.model = hf_models[model_id]
+        try:
+            print(f"Connecting to TGI server at: {self.url}")
+            async with httpx.AsyncClient() as client:
+                response = await client.get(f"{self.url}/info")
+                response.raise_for_status()
+                info = response.json()
+                if "model_id" not in info:
+                    raise RuntimeError("Missing model_id in model info")
+                if "max_total_tokens" not in info:
+                    raise RuntimeError("Missing max_total_tokens in model info")
+                self.max_tokens = info["max_total_tokens"]
+
+                model_id = info["model_id"]
+                if model_id not in hf_models:
+                    raise RuntimeError(
+                        f"TGI is serving model: {model_id}, use one of the supported models: {','.join(hf_models.keys())}"
+                    )
+
+                self.model = hf_models[model_id]
+        except Exception as e:
+            import traceback
+
+            traceback.print_exc()
+            raise RuntimeError("Could not connect to TGI server") from e
 
     async def shutdown(self) -> None:
         pass
@@ -75,6 +88,15 @@ class TGIInferenceAdapter(Inference):
         return options
 
     async def chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
+        messages = prepare_messages(request)
+
+        model_input = self.formatter.encode_dialog_prompt(messages)
+        prompt = self.tokenizer.decode(model_input.tokens)
+        max_new_tokens = min(
+            request.sampling_params.max_tokens or self.max_tokens,
+            self.max_tokens - len(model_input.tokens) - 1,
+        )
+
         if request.model != self.model:
             raise ValueError(
                 f"Model mismatch, expected: {self.model}, got: {request.model}"
@@ -82,23 +104,27 @@ class TGIInferenceAdapter(Inference):
 
         options = self.get_chat_options(request)
 
-        client = InferenceClient(base_url=self.url)
+        client = Client(base_url=self.url)
         if not request.stream:
-            r = client.chat.completions.create(
-                model=SUPPORTED_MODELS[self.model],
-                messages=self._convert_messages(request.messages),
-                stream=False,
+            r = client.generate(
+                prompt,
+                max_new_tokens=max_new_tokens,
+                stop_sequences=["<|eom_id|>", "<|eot_id|>"],
                 **options,
             )
-            stop_reason = None
-            if r.choices[0].finish_reason:
-                if r.choices[0].finish_reason == "stop":
+
+            if r.details.finish_reason:
+                if r.details.finish_reason == "stop":
                     stop_reason = StopReason.end_of_turn
-                elif r.choices[0].finish_reason == "length":
+                elif r.details.finish_reason == "length":
                     stop_reason = StopReason.out_of_tokens
+                else:
+                    stop_reason = StopReason.end_of_message
+            else:
+                stop_reason = StopReason.out_of_tokens
 
             completion_message = self.formatter.decode_assistant_message_from_content(
-                r.choices[0].message.content, stop_reason
+                r.generated_text, stop_reason
             )
             yield ChatCompletionResponse(
                 completion_message=completion_message,
@@ -115,30 +141,20 @@ class TGIInferenceAdapter(Inference):
             buffer = ""
             ipython = False
             stop_reason = None
+            tokens = []
 
-            response = client.chat.completions.create(
-                model=SUPPORTED_MODELS[self.model],
-                messages=self._convert_messages(request.messages),
-                stream=True,
+            for response in client.generate_stream(
+                prompt,
+                max_new_tokens=max_new_tokens,
+                stop_sequences=["<|eom_id|>", "<|eot_id|>"],
                 **options,
-            )
-            for chunk in response:
-                if chunk.choices[0].finish_reason:
-                    if stop_reason is None and chunk.choices[0].finish_reason == "stop":
-                        stop_reason = StopReason.end_of_turn
-                    elif (
-                        stop_reason is None
-                        and chunk.choices[0].finish_reason == "length"
-                    ):
-                        stop_reason = StopReason.out_of_tokens
-                    break
+            ):
+                token_result = response.token
 
-                text = chunk.choices[0].delta.content
-                if text is None:
-                    continue
+                buffer += token_result.text
+                tokens.append(token_result.id)
 
-                # check if its a tool call ( aka starts with <|python_tag|> )
-                if not ipython and text.startswith("<|python_tag|>"):
+                if not ipython and buffer.startswith("<|python_tag|>"):
                     ipython = True
                     yield ChatCompletionResponseStreamChunk(
                         event=ChatCompletionResponseEvent(
@@ -149,25 +165,27 @@ class TGIInferenceAdapter(Inference):
                             ),
                         )
                     )
-                    buffer += text
+                    buffer = buffer[len("<|python_tag|>") :]
                     continue
 
-                if ipython:
-                    if text == "<|eot_id|>":
-                        stop_reason = StopReason.end_of_turn
-                        text = ""
-                        continue
-                    elif text == "<|eom_id|>":
-                        stop_reason = StopReason.end_of_message
-                        text = ""
-                        continue
+                if token_result.text == "<|eot_id|>":
+                    stop_reason = StopReason.end_of_turn
+                    text = ""
+                elif token_result.text == "<|eom_id|>":
+                    stop_reason = StopReason.end_of_message
+                    text = ""
+                else:
+                    text = token_result.text
 
-                    buffer += text
+                if ipython:
                     delta = ToolCallDelta(
                         content=text,
                         parse_status=ToolCallParseStatus.in_progress,
                     )
+                else:
+                    delta = text
 
+                if stop_reason is None:
                     yield ChatCompletionResponseStreamChunk(
                         event=ChatCompletionResponseEvent(
                             event_type=ChatCompletionResponseEventType.progress,
@@ -175,20 +193,12 @@ class TGIInferenceAdapter(Inference):
                             stop_reason=stop_reason,
                         )
                     )
-                else:
-                    buffer += text
-                    yield ChatCompletionResponseStreamChunk(
-                        event=ChatCompletionResponseEvent(
-                            event_type=ChatCompletionResponseEventType.progress,
-                            delta=text,
-                            stop_reason=stop_reason,
-                        )
-                    )
+
+            if stop_reason is None:
+                stop_reason = StopReason.out_of_tokens
 
             # parse tool calls and report errors
-            message = self.formatter.decode_assistant_message_from_content(
-                buffer, stop_reason
-            )
+            message = self.formatter.decode_assistant_message(tokens, stop_reason)
             parsed_tool_calls = len(message.tool_calls) > 0
             if ipython and not parsed_tool_calls:
                 yield ChatCompletionResponseStreamChunk(
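The chat_completion() changes above also build the prompt locally and budget the completion against the server's max_total_tokens. A quick arithmetic sketch of that min(...) clamp, with made-up numbers:

# Hypothetical values, for illustration only.
max_total_tokens = 4096   # reported by TGI's /info endpoint
prompt_tokens = 3900      # len(model_input.tokens)
requested = 512           # request.sampling_params.max_tokens

max_new_tokens = min(requested or max_total_tokens, max_total_tokens - prompt_tokens - 1)
print(max_new_tokens)  # 195 -- the completion is clamped so prompt + output fit the context window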
(second changed file: the inference provider registry)

@@ -39,7 +39,7 @@ def available_inference_providers() -> List[ProviderSpec]:
         api=Api.inference,
         adapter=AdapterSpec(
             adapter_id="tgi",
-            pip_packages=["huggingface-hub"],
+            pip_packages=["text-generation"],
             module="llama_toolchain.inference.adapters.tgi",
         ),
     ),
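The provider spec now declares the TGI Python client as the adapter's dependency instead of huggingface-hub; if you are installing it by hand, the PyPI package name matches the spec above: pip install text-generation.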