test, recording

# What does this PR do?

Adds a trailing `**kwargs: Any` to the OpenAI-compatible `openai_completion` and `openai_chat_completion` signatures across the remote inference adapters (BedrockInferenceAdapter, DatabricksInferenceAdapter, LlamaCompatInferenceAdapter, PassthroughInferenceAdapter, RunpodInferenceAdapter, VLLMInferenceAdapter). Adapters that delegate to a backend now forward the extra keyword arguments; adapters that do not support the endpoint continue to raise `NotImplementedError`. Most of the 228 changed files are re-recorded test fixtures.

## Test Plan

Re-recorded test fixtures are included alongside the signature changes.
Eric Huang 2025-10-08 14:56:58 -07:00
parent 16db42e7e5
commit c76bf97ccf
228 changed files with 86861 additions and 64604 deletions
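
Across the provider diffs below, the change is the same: each OpenAI-compatible entry point gains a trailing `**kwargs: Any`, and adapters that delegate to a backend forward those extras instead of failing with `TypeError` on parameters the signature does not name. A minimal sketch of the pattern, with illustrative names (`ExampleAdapter` and its `client` are not classes in this repo):

```python
from typing import Any


class ExampleAdapter:
    """Illustrative only: shows the **kwargs passthrough pattern."""

    def __init__(self, client) -> None:
        self.client = client  # some OpenAI-compatible async client

    async def openai_completion(
        self,
        model: str,
        prompt: str,
        max_tokens: int | None = None,
        **kwargs: Any,  # accept params this signature does not name
    ):
        # Everything the caller passed beyond the named parameters
        # rides along to the downstream service unchanged.
        return await self.client.completions.create(
            model=model,
            prompt=prompt,
            max_tokens=max_tokens,
            **kwargs,
        )
```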

@@ -158,6 +158,7 @@ class BedrockInferenceAdapter(
         prompt_logprobs: int | None = None,
         # for fill-in-the-middle type completion
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")
@@ -186,5 +187,6 @@ class BedrockInferenceAdapter(
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         raise NotImplementedError("OpenAI chat completion not supported by the Bedrock provider")

@@ -63,5 +63,6 @@ class DatabricksInferenceAdapter(OpenAIMixin):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -54,6 +54,7 @@ class LlamaCompatInferenceAdapter(OpenAIMixin):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         raise NotImplementedError()

@@ -100,6 +100,7 @@ class PassthroughInferenceAdapter(Inference):
         guided_choice: list[str] | None = None,
         prompt_logprobs: int | None = None,
         suffix: str | None = None,
+        **kwargs: Any,
     ) -> OpenAICompletion:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -124,6 +125,7 @@ class PassthroughInferenceAdapter(Inference):
             user=user,
             guided_choice=guided_choice,
             prompt_logprobs=prompt_logprobs,
+            **kwargs,
         )
         return await client.inference.openai_completion(**params)
@@ -153,6 +155,7 @@ class PassthroughInferenceAdapter(Inference):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -181,6 +184,7 @@ class PassthroughInferenceAdapter(Inference):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
         return await client.inference.openai_chat_completion(**params)
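
With the passthrough plumbing above, a keyword the signature does not enumerate still reaches the downstream server. A hypothetical call (the parameter name `vendor_priority` is made up for illustration):

```python
# `vendor_priority` is not a named parameter of openai_chat_completion;
# it is collected into **kwargs and forwarded with the rest of params.
response = await adapter.openai_chat_completion(
    model="my-model",
    messages=[{"role": "user", "content": "hello"}],
    vendor_priority="high",  # hypothetical extra parameter
)
```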

@@ -57,6 +57,7 @@ class RunpodInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ):
         """Override to add RunPod-specific stream_options requirement."""
         if stream and not stream_options:
@@ -86,4 +87,5 @@ class RunpodInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )

@@ -102,6 +102,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
         top_logprobs: int | None = None,
         top_p: float | None = None,
         user: str | None = None,
+        **kwargs: Any,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         max_tokens = max_tokens or self.config.max_tokens
@@ -136,4 +137,5 @@ class VLLMInferenceAdapter(OpenAIMixin):
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            **kwargs,
         )
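
The vLLM hunks combine the same forwarding with an existing config fallback for `max_tokens`. A condensed, self-contained sketch of that combination (the `Config` class and default value here are illustrative):

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class Config:
    max_tokens: int | None = 512  # illustrative server-side default


def build_params(config: Config, max_tokens: int | None = None, **kwargs: Any) -> dict:
    # Fall back to the configured default when the caller omits max_tokens,
    # then merge any extra keywords into the outgoing request.
    return {"max_tokens": max_tokens or config.max_tokens, **kwargs}


# Example: caller omits max_tokens and passes an extra, unnamed parameter.
assert build_params(Config(), vendor_priority="high") == {
    "max_tokens": 512,
    "vendor_priority": "high",
}
```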