model -> model_id for TGI

commit 97dc5b68e5 (parent c2c53d0272)
2 changed files with 5 additions and 7 deletions
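The change is mechanical: the adapter's three inference entry points (`completion`, `chat_completion`, `embeddings`) now take `model_id` instead of `model`, and the request constructors forward it as `model=model_id`, so the request objects themselves keep their `model` field. A minimal, self-contained sketch of the pattern (stand-in types, not the real llama-stack classes):

```python
import asyncio
from dataclasses import dataclass


# Stand-in for the real CompletionRequest: note the field is still `model`.
@dataclass
class CompletionRequest:
    model: str
    content: str


# Stand-in for _HfAdapter.completion after this commit: the parameter is now
# `model_id`, and it feeds the request's unchanged `model` field.
async def completion(model_id: str, content: str) -> CompletionRequest:
    return CompletionRequest(model=model_id, content=content)


async def main() -> None:
    # Keyword callers must now write model_id=...; positional calls still work.
    print(await completion(model_id="meta-llama/Llama-3.1-8B", content="Hi"))


asyncio.run(main())
```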
```diff
@@ -75,8 +75,6 @@ Llama Stack already has a number of "adapters" available for some popular Infere
 - Check out [Zero to Hero](zero_to_hero_guide) guide to learn in details about how to build your first agent.
 - See how you can use [Llama Stack Distributions](distributions/index) to get started with popular inference and other service providers.
 
-Kutta
-
 We also provide a number of Client side SDKs to make it easier to connect to Llama Stack server in your preferred language.
 
 | **Language** | **Client SDK** | **Package** |
```
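The docs context above mentions the client-side SDKs. For illustration, a hedged sketch of connecting with the Python package `llama-stack-client`; the base URL and model name are placeholders, and the keyword follows the post-rename `model_id` convention, which may differ across SDK versions:

```python
from llama_stack_client import LlamaStackClient

# Placeholder endpoint; point this at your running Llama Stack server.
client = LlamaStackClient(base_url="http://localhost:5000")

# Keyword follows the post-rename convention; older SDK releases used `model=`.
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```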
```diff
@@ -74,7 +74,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
 
     async def completion(
         self,
-        model: str,
+        model_id: str,
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -82,7 +82,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         request = CompletionRequest(
-            model=model,
+            model=model_id,
             content=content,
             sampling_params=sampling_params,
             response_format=response_format,
```
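Both `completion` and `chat_completion` are annotated `-> AsyncGenerator`, i.e. callers consume streamed chunks with `async for`. A toy sketch of that consumption pattern (stand-in generator, not the real adapter):

```python
import asyncio
from typing import AsyncGenerator


# Stand-in streaming completion; the real adapter yields response chunks
# from the TGI endpoint instead of canned strings.
async def completion_stream(model_id: str, content: str) -> AsyncGenerator[str, None]:
    for chunk in (f"[{model_id}] ", content, " [done]"):
        yield chunk


async def main() -> None:
    async for chunk in completion_stream("my-model", "hello"):
        print(chunk, end="")
    print()


asyncio.run(main())
```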
```diff
@@ -176,7 +176,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
 
     async def chat_completion(
         self,
-        model: str,
+        model_id: str,
         messages: List[Message],
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         tools: Optional[List[ToolDefinition]] = None,
@@ -187,7 +187,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         request = ChatCompletionRequest(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             tools=tools or [],
```
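One detail worth noting in the context lines: `tools=tools or []` normalizes an absent (`None`) tool list to an empty list, so downstream code can iterate without a `None` check. A tiny sketch of the idiom:

```python
from typing import List, Optional


def normalize_tools(tools: Optional[List[str]]) -> List[str]:
    # `x or []` replaces both None and an already-empty list with [];
    # any non-empty list passes through unchanged.
    return tools or []


assert normalize_tools(None) == []
assert normalize_tools([]) == []
assert normalize_tools(["get_weather"]) == ["get_weather"]
```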
```diff
@@ -256,7 +256,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
 
     async def embeddings(
         self,
-        model: str,
+        model_id: str,
         contents: List[InterleavedTextMedia],
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
```
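`embeddings` keeps its body as a bare `raise NotImplementedError()`, so only the signature changes here. A small sketch (hypothetical `supports_embeddings` helper, not part of the adapter) of how a caller can treat that stub as a capability signal:

```python
import asyncio


# Toy stand-in mirroring the adapter's stub: signature renamed to model_id,
# body unchanged (still raises NotImplementedError).
async def embeddings(model_id: str, contents: list) -> list:
    raise NotImplementedError()


async def supports_embeddings(model_id: str) -> bool:
    # Hypothetical probe: interpret NotImplementedError as "not supported".
    try:
        await embeddings(model_id, [])
    except NotImplementedError:
        return False
    return True


print(asyncio.run(supports_embeddings("my-model")))  # False
```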