Fix precommit check after moving to ruff (#927)
The lint check on the main branch is failing. This fixes the lint check after we moved to ruff in https://github.com/meta-llama/llama-stack/pull/921. We need to move to a `ruff.toml` file, as well as fix and ignore some additional checks.
Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
parent 4773092dd1
commit 34ab7a3b6c
217 changed files with 981 additions and 2681 deletions
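For context, the description above says the fix moves the configuration to a `ruff.toml` file and fixes or ignores some additional checks. The snippet below is a minimal sketch of what such a file can look like; the specific rule selections and the line length are illustrative assumptions, not the exact settings added by this commit.

```toml
# Illustrative ruff.toml sketch -- the actual values in the repository may differ.
line-length = 120            # longer limit, so code previously wrapped by the formatter fits on one line
exclude = ["build"]          # directories ruff should skip entirely (assumed path)

[lint]
select = ["E", "F", "I"]     # pycodestyle errors, pyflakes, and import sorting
ignore = ["E741"]            # example of ignoring a rule instead of fixing every occurrence
```

With pre-commit, ruff is typically wired in through the `ruff` (lint) and `ruff-format` hooks from https://github.com/astral-sh/ruff-pre-commit, so `pre-commit run --all-files` applies both the lint and format checks locally. The formatting-only changes in the diff below are the kind of rewrites this produces.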
@@ -36,9 +36,7 @@ def build_model_alias(provider_model_id: str, model_descriptor: str) -> ModelAlias:
     )
 
 
-def build_model_alias_with_just_provider_model_id(
-    provider_model_id: str, model_descriptor: str
-) -> ModelAlias:
+def build_model_alias_with_just_provider_model_id(provider_model_id: str, model_descriptor: str) -> ModelAlias:
     return ModelAlias(
         provider_model_id=provider_model_id,
         aliases=[],
@@ -54,16 +52,10 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
             for alias in alias_obj.aliases:
                 self.alias_to_provider_id_map[alias] = alias_obj.provider_model_id
             # also add a mapping from provider model id to itself for easy lookup
-            self.alias_to_provider_id_map[alias_obj.provider_model_id] = (
-                alias_obj.provider_model_id
-            )
+            self.alias_to_provider_id_map[alias_obj.provider_model_id] = alias_obj.provider_model_id
             # ensure we can go from llama model to provider model id
-            self.alias_to_provider_id_map[alias_obj.llama_model] = (
-                alias_obj.provider_model_id
-            )
-            self.provider_id_to_llama_model_map[alias_obj.provider_model_id] = (
-                alias_obj.llama_model
-            )
+            self.alias_to_provider_id_map[alias_obj.llama_model] = alias_obj.provider_model_id
+            self.provider_id_to_llama_model_map[alias_obj.provider_model_id] = alias_obj.llama_model
 
     def get_provider_model_id(self, identifier: str) -> str:
         if identifier in self.alias_to_provider_id_map:
@@ -82,9 +74,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
             # embedding models are always registered by their provider model id and does not need to be mapped to a llama model
             provider_resource_id = model.provider_resource_id
         else:
-            provider_resource_id = self.get_provider_model_id(
-                model.provider_resource_id
-            )
+            provider_resource_id = self.get_provider_model_id(model.provider_resource_id)
         if provider_resource_id:
             model.provider_resource_id = provider_resource_id
         else:
@@ -100,18 +90,13 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
                         f"Provider model id '{model.provider_resource_id}' is already registered to a different llama model: '{existing_llama_model}'"
                     )
             else:
-                if (
-                    model.metadata["llama_model"]
-                    not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR
-                ):
+                if model.metadata["llama_model"] not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR:
                     raise ValueError(
                         f"Invalid llama_model '{model.metadata['llama_model']}' specified in metadata. "
                         f"Must be one of: {', '.join(ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.keys())}"
                     )
                 self.provider_id_to_llama_model_map[model.provider_resource_id] = (
-                    ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[
-                        model.metadata["llama_model"]
-                    ]
+                    ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[model.metadata["llama_model"]]
                 )
 
         return model