* Use huggingface_hub inference client for TGI inference
* Update the default value for TGI URL
* Use InferenceClient.text_generation for TGI inference
* Post-review fixes; split TGI adapter into local and Inference Endpoints ones
* Update CLI reference and add typing
* Rename TGI Adapter class
* Use HfApi to get the namespace when not provided in the hf endpoint name
* Remove unnecessary method argument
* Improve TGI adapter initialization condition
* Move helper into impl file + fix merging conflicts
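A minimal sketch, assuming huggingface_hub is installed, of the calls the commit description refers to: InferenceClient.text_generation against a TGI URL, and HfApi to fill in the namespace when the endpoint name omits it. The helper names resolve_endpoint_name and generate, and the max_new_tokens value, are illustrative assumptions, not the adapter's actual code.

# Illustrative only: function names and defaults below are assumptions,
# not the adapter's real implementation.
from typing import Optional

from huggingface_hub import HfApi, InferenceClient


def resolve_endpoint_name(hf_endpoint_name: str, token: Optional[str] = None) -> str:
    # Accept either '{namespace}/{endpoint_name}' or a bare '{endpoint_name}';
    # in the latter case, look up the logged-in account's namespace via HfApi.
    if "/" in hf_endpoint_name:
        return hf_endpoint_name
    namespace = HfApi(token=token).whoami()["name"]
    return f"{namespace}/{hf_endpoint_name}"


def generate(url: str, prompt: str, token: Optional[str] = None) -> str:
    # InferenceClient accepts a TGI base URL directly as the `model` argument.
    client = InferenceClient(model=url, token=token)
    return client.text_generation(prompt, max_new_tokens=256)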
29 lines
1.1 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional

from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field


@json_schema_type
class TGIImplConfig(BaseModel):
    url: Optional[str] = Field(
        default=None,
        description="The URL for the local TGI endpoint (e.g., http://localhost:8080)",
    )
    api_token: Optional[str] = Field(
        default=None,
        description="The HF token for Hugging Face Inference Endpoints (will default to locally saved token if not provided)",
    )
    hf_endpoint_name: Optional[str] = Field(
        default=None,
        description="The name of the Hugging Face Inference Endpoint: can be either in the format of '{namespace}/{endpoint_name}' (namespace can be the username or organization name) or just '{endpoint_name}' if logged into the same account as the namespace",
    )

    def is_inference_endpoint(self) -> bool:
        # True when the config targets a Hugging Face Inference Endpoint
        # rather than a locally hosted TGI server.
        return self.hf_endpoint_name is not None