From 85501ed8758a7b511cf972dfcb4c685ee849e368 Mon Sep 17 00:00:00 2001
From: Dinesh Yeduguru
Date: Tue, 11 Mar 2025 11:19:29 -0700
Subject: [PATCH] fix: remove Llama-3.2-1B-Instruct for fireworks (#1558)

# What does this PR do?
Remove Llama-3.2-1B-Instruct for fireworks, as it no longer appears to be
hosted on the Fireworks website.

## Test Plan
python distro_codegen.py

See the sanity-check sketch after the diff below.
---
 .../distributions/self_hosted_distro/fireworks.md    |  1 -
 .../providers/remote/inference/fireworks/models.py   |  4 ----
 llama_stack/templates/ci-tests/run.yaml              | 10 ----------
 llama_stack/templates/dev/run.yaml                   | 10 ----------
 llama_stack/templates/fireworks/run-with-safety.yaml | 10 ----------
 llama_stack/templates/fireworks/run.yaml             | 10 ----------
 6 files changed, 45 deletions(-)

diff --git a/docs/source/distributions/self_hosted_distro/fireworks.md b/docs/source/distributions/self_hosted_distro/fireworks.md
index 9592a18fe..3c8f5eec9 100644
--- a/docs/source/distributions/self_hosted_distro/fireworks.md
+++ b/docs/source/distributions/self_hosted_distro/fireworks.md
@@ -40,7 +40,6 @@ The following models are available by default:
 - `accounts/fireworks/models/llama-v3p1-8b-instruct (aliases: meta-llama/Llama-3.1-8B-Instruct)`
 - `accounts/fireworks/models/llama-v3p1-70b-instruct (aliases: meta-llama/Llama-3.1-70B-Instruct)`
 - `accounts/fireworks/models/llama-v3p1-405b-instruct (aliases: meta-llama/Llama-3.1-405B-Instruct-FP8)`
-- `accounts/fireworks/models/llama-v3p2-1b-instruct (aliases: meta-llama/Llama-3.2-1B-Instruct)`
 - `accounts/fireworks/models/llama-v3p2-3b-instruct (aliases: meta-llama/Llama-3.2-3B-Instruct)`
 - `accounts/fireworks/models/llama-v3p2-11b-vision-instruct (aliases: meta-llama/Llama-3.2-11B-Vision-Instruct)`
 - `accounts/fireworks/models/llama-v3p2-90b-vision-instruct (aliases: meta-llama/Llama-3.2-90B-Vision-Instruct)`
diff --git a/llama_stack/providers/remote/inference/fireworks/models.py b/llama_stack/providers/remote/inference/fireworks/models.py
index c90f632ff..a0dc11768 100644
--- a/llama_stack/providers/remote/inference/fireworks/models.py
+++ b/llama_stack/providers/remote/inference/fireworks/models.py
@@ -24,10 +24,6 @@ MODEL_ENTRIES = [
         "accounts/fireworks/models/llama-v3p1-405b-instruct",
         CoreModelId.llama3_1_405b_instruct.value,
     ),
-    build_hf_repo_model_entry(
-        "accounts/fireworks/models/llama-v3p2-1b-instruct",
-        CoreModelId.llama3_2_1b_instruct.value,
-    ),
     build_hf_repo_model_entry(
         "accounts/fireworks/models/llama-v3p2-3b-instruct",
         CoreModelId.llama3_2_3b_instruct.value,
diff --git a/llama_stack/templates/ci-tests/run.yaml b/llama_stack/templates/ci-tests/run.yaml
index 3a973cabf..715d7c86d 100644
--- a/llama_stack/templates/ci-tests/run.yaml
+++ b/llama_stack/templates/ci-tests/run.yaml
@@ -120,16 +120,6 @@ models:
   provider_id: fireworks
   provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
   model_type: llm
-- metadata: {}
-  model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.2-1B-Instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
   provider_id: fireworks
diff --git a/llama_stack/templates/dev/run.yaml b/llama_stack/templates/dev/run.yaml
index 71fbcb353..f908af8c3 100644
--- a/llama_stack/templates/dev/run.yaml
+++ b/llama_stack/templates/dev/run.yaml
@@ -178,16 +178,6 @@ models:
   provider_id: fireworks
   provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
   model_type: llm
-- metadata: {}
-  model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.2-1B-Instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
   provider_id: fireworks
diff --git a/llama_stack/templates/fireworks/run-with-safety.yaml b/llama_stack/templates/fireworks/run-with-safety.yaml
index 359bf0194..e04141a07 100644
--- a/llama_stack/templates/fireworks/run-with-safety.yaml
+++ b/llama_stack/templates/fireworks/run-with-safety.yaml
@@ -132,16 +132,6 @@ models:
   provider_id: fireworks
   provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
   model_type: llm
-- metadata: {}
-  model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.2-1B-Instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
   provider_id: fireworks
diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml
index 0ce3a4505..369b9ae7b 100644
--- a/llama_stack/templates/fireworks/run.yaml
+++ b/llama_stack/templates/fireworks/run.yaml
@@ -126,16 +126,6 @@ models:
   provider_id: fireworks
   provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
   model_type: llm
-- metadata: {}
-  model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.2-1B-Instruct
-  provider_id: fireworks
-  provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct
-  model_type: llm
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
   provider_id: fireworks
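
Below is a minimal sanity-check sketch for the Test Plan above. It is not part of the upstream patch: it assumes the entries built by `build_hf_repo_model_entry` in `MODEL_ENTRIES` expose a `provider_model_id` attribute.

```python
# Hedged sanity-check sketch (not part of the PR): after regenerating the
# templates, confirm the removed Fireworks 1B entry is gone.
# Assumption: each entry in MODEL_ENTRIES has a `provider_model_id` attribute.
from llama_stack.providers.remote.inference.fireworks.models import MODEL_ENTRIES

REMOVED_ID = "accounts/fireworks/models/llama-v3p2-1b-instruct"

remaining = [entry.provider_model_id for entry in MODEL_ENTRIES]
assert REMOVED_ID not in remaining, f"{REMOVED_ID} is still registered for fireworks"
print(f"{len(remaining)} fireworks entries; {REMOVED_ID} removed as expected")
```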