Inference to use provider resource id to register and validate (#428)

This PR changes how a model id is translated into the final model name
that gets passed to the provider.
Major changes include:
1) Providers are responsible for registering an object and, as part of
that registration, returning the object with the correct provider-specific
model name set as provider_resource_id (see the sketch after this list).
2) To help with common lookups across these different names, a new
ModelLookup class is created (also sketched below).
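
A minimal, illustrative sketch of the two pieces above. This is not the actual llama-stack code: the Model shape is reduced to two fields, ExampleProvider and its PROVIDER_NAMES table are hypothetical, and the real ModelLookup and registration interfaces may differ in names and signatures.

from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class Model:
    identifier: str                             # user-facing model id
    provider_resource_id: Optional[str] = None  # provider-specific name

class ExampleProvider:
    # Hypothetical mapping from generic ids to this provider's own model names.
    PROVIDER_NAMES: Dict[str, str] = {
        "Llama3.1-8B-Instruct": "example-provider/llama-3.1-8b-instruct",
    }

    async def register_model(self, model: Model) -> Model:
        # Point 1: the provider fills in provider_resource_id during
        # registration and returns the updated object.
        resource_id = self.PROVIDER_NAMES.get(model.identifier)
        if resource_id is None:
            raise ValueError(f"{model.identifier} is not served by this provider")
        model.provider_resource_id = resource_id
        return model

class ModelLookup:
    # Point 2: index registered models under every known name so callers can
    # resolve either the generic identifier or the provider resource id.
    def __init__(self) -> None:
        self._by_name: Dict[str, Model] = {}

    def add(self, model: Model) -> None:
        self._by_name[model.identifier] = model
        if model.provider_resource_id:
            self._by_name[model.provider_resource_id] = model

    def get(self, name: str) -> Optional[Model]:
        return self._by_name.get(name)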



Tested all inference providers, including together, fireworks, vllm,
ollama, meta-reference, and bedrock.
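
For callers, the visible effect in the test diffs below is that inference methods now take model_id instead of model. A call under the new signature looks roughly like this (kwargs trimmed; sample_messages comes from the test fixtures):

response = await inference_impl.chat_completion(
    model_id="Llama3.1-8B-Instruct",  # registered identifier; the provider resolves its own resource id
    messages=sample_messages,
    stream=False,
)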
Authored by Dinesh Yeduguru on 2024-11-12 20:02:00 -08:00, committed by GitHub
parent e51107e019
commit fdff24e77a
21 changed files with 460 additions and 290 deletions


@@ -49,7 +49,7 @@ def inference_meta_reference(inference_model) -> ProviderFixture:
         providers=[
             Provider(
                 provider_id=f"meta-reference-{i}",
-                provider_type="meta-reference",
+                provider_type="inline::meta-reference",
                 config=MetaReferenceInferenceConfig(
                     model=m,
                     max_seq_len=4096,
@@ -142,6 +142,31 @@ def inference_bedrock() -> ProviderFixture:
     )
 
 
+def get_model_short_name(model_name: str) -> str:
+    """Convert model name to a short test identifier.
+
+    Args:
+        model_name: Full model name like "Llama3.1-8B-Instruct"
+
+    Returns:
+        Short name like "llama_8b" suitable for test markers
+    """
+    model_name = model_name.lower()
+    if "vision" in model_name:
+        return "llama_vision"
+    elif "3b" in model_name:
+        return "llama_3b"
+    elif "8b" in model_name:
+        return "llama_8b"
+    else:
+        return model_name.replace(".", "_").replace("-", "_")
+
+
+@pytest.fixture(scope="session")
+def model_id(inference_model) -> str:
+    return get_model_short_name(inference_model)
+
+
 INFERENCE_FIXTURES = [
     "meta_reference",
     "ollama",


@@ -96,7 +96,7 @@ class TestInference:
         response = await inference_impl.completion(
             content="Micheael Jordan is born in ",
             stream=False,
-            model=inference_model,
+            model_id=inference_model,
             sampling_params=SamplingParams(
                 max_tokens=50,
             ),
@@ -110,7 +110,7 @@ class TestInference:
             async for r in await inference_impl.completion(
                 content="Roses are red,",
                 stream=True,
-                model=inference_model,
+                model_id=inference_model,
                 sampling_params=SamplingParams(
                     max_tokens=50,
                 ),
@@ -171,7 +171,7 @@ class TestInference:
     ):
         inference_impl, _ = inference_stack
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=inference_model,
             messages=sample_messages,
             stream=False,
             **common_params,
@@ -204,7 +204,7 @@ class TestInference:
             num_seasons_in_nba: int
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=inference_model,
             messages=[
                 SystemMessage(content="You are a helpful assistant."),
                 UserMessage(content="Please give me information about Michael Jordan."),
@@ -227,7 +227,7 @@ class TestInference:
         assert answer.num_seasons_in_nba == 15
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=inference_model,
             messages=[
                 SystemMessage(content="You are a helpful assistant."),
                 UserMessage(content="Please give me information about Michael Jordan."),
@@ -250,7 +250,7 @@ class TestInference:
         response = [
             r
             async for r in await inference_impl.chat_completion(
-                model=inference_model,
+                model_id=inference_model,
                 messages=sample_messages,
                 stream=True,
                 **common_params,
@@ -286,7 +286,7 @@ class TestInference:
         ]
 
         response = await inference_impl.chat_completion(
-            model=inference_model,
+            model_id=inference_model,
             messages=messages,
             tools=[sample_tool_definition],
             stream=False,
@@ -327,7 +327,7 @@ class TestInference:
         response = [
             r
             async for r in await inference_impl.chat_completion(
-                model=inference_model,
+                model_id=inference_model,
                 messages=messages,
                 tools=[sample_tool_definition],
                 stream=True,