Fix precommit check after moving to ruff (#927)
The lint check on the main branch is failing. This fixes it after the move to ruff in https://github.com/meta-llama/llama-stack/pull/921: we need to move to a `ruff.toml` file, and to fix some of the newly flagged checks while ignoring others.

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
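For context, a minimal `ruff.toml` along these lines raises the line-length limit and selects/ignores rule families. This is an illustrative sketch only; the exact line length and rule codes chosen in the actual PR may differ:

# ruff.toml (illustrative sketch, not the exact file from this PR)
line-length = 120            # allow lines longer than Black's default 88

[lint]
select = ["E", "F", "I"]     # pycodestyle errors, pyflakes, import sorting
ignore = ["E501"]            # defer line-length enforcement to the formatter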
Parent: 4773092dd1
Commit: 34ab7a3b6c
217 changed files with 981 additions and 2681 deletions
@@ -40,9 +40,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
 
     default_models = [
         ModelInput(
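This same collapse recurs across the other distribution templates below (SentenceTransformers, NVIDIA, SambaNova): the old formatter split the dict comprehension at 88 columns, and with a larger line-length limit the one-line form is kept. A self-contained toy version of the pattern, using stand-in names rather than the real llama-stack registry API:

from dataclasses import dataclass

@dataclass
class Model:
    name: str
    huggingface_repo: str

    def descriptor(self) -> str:
        return self.name

def all_registered_models() -> list[Model]:  # stand-in for the real registry helper
    return [Model("llama3-8b-instruct", "meta-llama/Meta-Llama-3-8B-Instruct")]

# The collapsed one-liner from the diff, identical in meaning to the split form:
core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
print(core_model_to_hf_repo)  # {'llama3-8b-instruct': 'meta-llama/Meta-Llama-3-8B-Instruct'}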
@@ -49,9 +49,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=SentenceTransformersInferenceConfig.sample_run_config(),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
     default_models = [
         ModelInput(
             model_id=core_model_to_hf_repo[m.llama_model],
@@ -61,9 +61,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=FaissImplConfig.sample_run_config(f"distributions/{name}"),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
     default_models = [
         ModelInput(
             model_id=core_model_to_hf_repo[m.llama_model],
@@ -39,9 +39,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=NVIDIAConfig.sample_run_config(),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
     default_models = [
         ModelInput(
             model_id=core_model_to_hf_repo[m.llama_model],
@@ -42,9 +42,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=SambaNovaImplConfig.sample_run_config(),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
     default_models = [
         ModelInput(
             model_id=core_model_to_hf_repo[m.llama_model],
@@ -53,9 +53,7 @@ class RunConfigSettings(BaseModel):
 
             api = Api(api_str)
             if provider_type not in provider_registry[api]:
-                raise ValueError(
-                    f"Unknown provider type: {provider_type} for API: {api_str}"
-                )
+                raise ValueError(f"Unknown provider type: {provider_type} for API: {api_str}")
 
             config_class = provider_registry[api][provider_type].config_class
             assert config_class is not None, (
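The collapsed `raise` is the registry-validation path: an unknown provider type fails fast with a descriptive error. A toy version of the lookup (the registry contents here are made up):

provider_registry = {"inference": {"remote::nvidia": object(), "inline::meta-reference": object()}}

def check_provider(api_str: str, provider_type: str) -> None:
    if provider_type not in provider_registry[api_str]:
        raise ValueError(f"Unknown provider type: {provider_type} for API: {api_str}")

check_provider("inference", "remote::nvidia")  # passes silently
# check_provider("inference", "remote::typo")  # raises ValueError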
@@ -64,9 +62,7 @@ class RunConfigSettings(BaseModel):
 
             config_class = instantiate_class_type(config_class)
             if hasattr(config_class, "sample_run_config"):
-                config = config_class.sample_run_config(
-                    __distro_dir__=f"distributions/{name}"
-                )
+                config = config_class.sample_run_config(__distro_dir__=f"distributions/{name}")
             else:
                 config = {}
 
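The `hasattr` check here is an optional-hook pattern: config classes that define `sample_run_config` contribute a sample config, and everything else falls back to an empty dict. A runnable toy version, with illustrative class names that are not part of llama-stack:

class WithSample:
    @classmethod
    def sample_run_config(cls, __distro_dir__: str) -> dict:
        return {"persistence": f"{__distro_dir__}/store.db"}

class WithoutSample:
    pass

for config_class in (WithSample, WithoutSample):
    if hasattr(config_class, "sample_run_config"):
        config = config_class.sample_run_config(__distro_dir__="distributions/demo")
    else:
        config = {}
    print(config_class.__name__, config)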
@@ -79,7 +75,7 @@ class RunConfigSettings(BaseModel):
         )
 
         # Get unique set of APIs from providers
-        apis = list(sorted(providers.keys()))
+        apis = sorted(providers.keys())
 
         return StackRunConfig(
             image_name=name,
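Unlike the surrounding formatting changes, this hunk removes a genuinely redundant call: `sorted()` already returns a new list, so wrapping it in `list()` does nothing (flake8-comprehensions flags this pattern as C413, one of the rule families ruff can enforce). For example:

providers = {"inference": ["remote::nvidia"], "safety": ["inline::llama-guard"]}

apis = sorted(providers.keys())  # sorted() already returns a fresh list
assert apis == ["inference", "safety"]
assert isinstance(apis, list)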
@@ -173,9 +169,7 @@ class DistributionTemplate(BaseModel):
             )
 
         for yaml_pth, settings in self.run_configs.items():
-            run_config = settings.run_config(
-                self.name, self.providers, self.container_image
-            )
+            run_config = settings.run_config(self.name, self.providers, self.container_image)
             with open(yaml_output_dir / yaml_pth, "w") as f:
                 yaml.safe_dump(
                     run_config.model_dump(exclude_none=True),
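The surrounding code serializes each generated run config to YAML via pydantic's `model_dump` (pydantic v2, as the hunk itself shows) and PyYAML. A minimal sketch of that pattern, where RunConfig is a stand-in for the real StackRunConfig:

import yaml
from pydantic import BaseModel

class RunConfig(BaseModel):  # stand-in for the real StackRunConfig
    image_name: str
    container_image: str | None = None

cfg = RunConfig(image_name="nvidia")
with open("run.yaml", "w") as f:
    yaml.safe_dump(cfg.model_dump(exclude_none=True), f, sort_keys=False)
# run.yaml now contains just "image_name: nvidia"; the unset field is dropped.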
@@ -59,9 +59,7 @@ def get_distribution_template() -> DistributionTemplate:
         config=SentenceTransformersInferenceConfig.sample_run_config(),
     )
 
-    core_model_to_hf_repo = {
-        m.descriptor(): m.huggingface_repo for m in all_registered_models()
-    }
+    core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()}
     default_models = [
         ModelInput(
             model_id=core_model_to_hf_repo[m.llama_model],