Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-12 20:12:33 +00:00
Remove openai embedding override
We can just use the default; RunPod's embedding endpoint for vLLM is nothing special and simply passes requests through to vLLM.
This commit is contained in:
parent b519434c88
commit 412ea00c0b
1 changed file with 0 additions and 23 deletions
@@ -7,7 +7,6 @@
 from typing import Any

 from llama_stack.apis.inference import (
-    OpenAIEmbeddingsResponse,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
 )
@@ -114,25 +113,3 @@ class RunpodInferenceAdapter(OpenAIMixin):
         )

         return model
-
-    async def openai_embeddings(
-        self,
-        model: str,
-        input: str | list[str],
-        encoding_format: str | None = "float",
-        dimensions: int | None = None,
-        user: str | None = None,
-    ) -> OpenAIEmbeddingsResponse:
-        # Resolve model_id to provider_resource_id
-        model_obj = await self.model_store.get_model(model)
-        provider_model_id = model_obj.provider_resource_id or model
-
-        response = await self.client.embeddings.create(
-            model=provider_model_id,
-            input=input,
-            encoding_format=encoding_format,
-            dimensions=dimensions,
-            user=user,
-        )
-
-        return response
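For context on the rationale: RunPod serves vLLM behind an OpenAI-compatible API, so the default embeddings path inherited from OpenAIMixin already does what the removed override did (a plain embeddings.create call against the provider's client). Below is a minimal, hypothetical sketch of that passthrough using the openai client directly; the base URL, API key, and model name are placeholders, not values from this commit.

from openai import AsyncOpenAI

# Standalone illustration of the OpenAI-compatible passthrough the default path relies on.
# Endpoint, credential, and model name are placeholders, not values from the diff.
client = AsyncOpenAI(
    base_url="https://<your-runpod-endpoint>/v1",
    api_key="<RUNPOD_API_TOKEN>",
)

async def embed(texts: list[str]) -> list[list[float]]:
    # Same call the removed override issued via self.client.embeddings.create(...)
    response = await client.embeddings.create(
        model="<embedding-model-id>",
        input=texts,
    )
    return [item.embedding for item in response.data]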