test, recording

# What does this PR do?

Threads `**kwargs: Any` through the OpenAI-compatible `openai_completion` and `openai_chat_completion` entrypoints of the inference providers and mixins, so provider-specific parameters can be accepted and forwarded to the backend: merged into `extra_body` on the OpenAI-client path, and passed as plain keyword arguments on the LiteLLM and passthrough paths.

## Test Plan
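A minimal sketch of how the new passthrough could be exercised (the fixture names and model id are hypothetical, and `guided_choice` only works against providers such as vLLM that implement it):

```python
# Hedged sketch, not the recorded test: llama_stack_client and text_model_id
# are assumed pytest fixtures providing an OpenAI-compatible client.
def test_chat_completion_forwards_extra_params(llama_stack_client, text_model_id):
    resp = llama_stack_client.chat.completions.create(
        model=text_model_id,
        messages=[{"role": "user", "content": "Answer yes or no."}],
        # provider-specific parameter, carried through the new **kwargs path
        extra_body={"guided_choice": ["yes", "no"]},
    )
    assert resp.choices[0].message.content in ("yes", "no")
```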
Eric Huang 2025-10-08 14:56:58 -07:00
parent 16db42e7e5
commit c76bf97ccf
228 changed files with 86861 additions and 64604 deletions

@@ -173,5 +173,6 @@ class MetaReferenceInferenceImpl(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by meta-reference inference provider")

@@ -96,6 +96,7 @@ class SentenceTransformersInferenceImpl(
         prompt_logprobs: int | None = None,
         # for fill-in-the-middle type completion
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by sentence transformers provider")
@@ -124,5 +125,6 @@ class SentenceTransformersInferenceImpl(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by sentence transformers provider")

@@ -158,6 +158,7 @@ class BedrockInferenceAdapter(
         prompt_logprobs: int | None = None,
         # for fill-in-the-middle type completion
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")
@@ -186,5 +187,6 @@ class BedrockInferenceAdapter(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")

@@ -63,5 +63,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -54,6 +54,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -100,6 +100,7 @@ class PassthroughInferenceAdapter(Inference):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -124,6 +125,7 @@ class PassthroughInferenceAdapter(Inference):
             user=user,
             guided_choice=guided_choice,
             prompt_logprobs=prompt_logprobs,
+            **kwargs,
         )
         return await client.inference.openai_completion(**params)
@@ -153,6 +155,7 @@ class PassthroughInferenceAdapter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -181,6 +184,7 @@ class PassthroughInferenceAdapter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
         return await client.inference.openai_chat_completion(**params)
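The effect of the `**kwargs` additions here is that arbitrary extra parameters survive the hop to the downstream stack. A minimal sketch of the pattern (names simplified; `prepare_openai_completion_params` is assumed to drop `None`-valued entries before the call):

```python
async def forward_chat_completion(client, model: str, messages: list, **kwargs):
    # Explicit OpenAI-style arguments and unknown provider-specific ones
    # land in the same flat dict, exactly as the adapter builds `params`.
    params = {"model": model, "messages": messages, **kwargs}
    return await client.inference.openai_chat_completion(**params)
```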

@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ):
         """Override to add RunPod-specific stream_options requirement."""
         if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )

@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         max_tokens = max_tokens or self.config.max_tokens
@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )

@@ -247,6 +247,7 @@ class LiteLLMOpenAIMixin(
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
@@ -271,6 +272,7 @@ class LiteLLMOpenAIMixin(
             prompt_logprobs=prompt_logprobs,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **kwargs,
         )
         return await litellm.atext_completion(**params)
@@ -299,6 +301,7 @@ class LiteLLMOpenAIMixin(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         # Add usage tracking for streaming when telemetry is active
         from llama_stack.providers.utils.telemetry.tracing import get_current_span
@@ -335,6 +338,7 @@ class LiteLLMOpenAIMixin(
             user=user,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **kwargs,
         )
         return await litellm.acompletion(**params)
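Note the asymmetry with the OpenAI-client path below: `litellm.acompletion` accepts provider-specific parameters as plain top-level kwargs, so this mixin merges `**kwargs` straight into `params` instead of nesting them under `extra_body`. A hypothetical illustration (`safe_prompt` is a Mistral-specific parameter used only as an example, not something this PR adds):

```python
import asyncio

import litellm


async def main() -> None:
    resp = await litellm.acompletion(
        model="mistral/mistral-small-latest",
        messages=[{"role": "user", "content": "hi"}],
        safe_prompt=True,  # non-OpenAI parameter; litellm forwards it to the provider
    )
    print(resp.choices[0].message.content)


asyncio.run(main())
```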

@@ -247,6 +247,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         """
         Direct OpenAI completion API call.
@@ -261,6 +262,9 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         if guided_choice:
             extra_body["guided_choice"] = guided_choice
+        # Merge any additional kwargs into extra_body
+        extra_body.update(kwargs)
         # TODO: fix openai_completion to return type compatible with OpenAI's API response
         resp = await self.client.completions.create(
             **await prepare_openai_completion_params(
@@ -313,6 +317,7 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """
         Direct OpenAI chat completion API call.
@@ -361,7 +366,10 @@ class OpenAIMixin(NeedsRequestProviderData, ABC, BaseModel):
             user=user,
         )
-        resp = await self.client.chat.completions.create(**params)
+        # Pass any additional provider-specific parameters as extra_body
+        extra_body = kwargs if kwargs else {}
+        resp = await self.client.chat.completions.create(**params, extra_body=extra_body)
         return await self._maybe_overwrite_id(resp, stream)  # type: ignore[no-any-return]
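For the OpenAI SDK, unrecognized parameters must travel via `extra_body`, which the client serializes into the JSON request body; the hunk above routes `**kwargs` there accordingly. A standalone sketch of the same call shape against an OpenAI-compatible server (URL, API key, and model id are placeholders; `guided_choice` is a vLLM extension):

```python
from openai import AsyncOpenAI


async def demo() -> None:
    # Placeholder endpoint: e.g. a local vLLM server that accepts guided_choice.
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="none")
    resp = await client.chat.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=[{"role": "user", "content": "red or blue?"}],
        # The SDK merges extra_body into the request JSON, so the server
        # sees guided_choice as a top-level field.
        extra_body={"guided_choice": ["red", "blue"]},
    )
    print(resp.choices[0].message.content)
```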