From 6d8f59e359d55066e420d478a439d705ebd3d4ba Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Mon, 30 Sep 2024 17:28:55 -0700
Subject: [PATCH] Cleanup

---
 .../adapters/inference/bedrock/bedrock.py     |  2 +-
 .../adapters/inference/fireworks/fireworks.py |  1 +
 .../adapters/inference/together/together.py   | 14 ++++----------
 3 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/llama_stack/providers/adapters/inference/bedrock/bedrock.py b/llama_stack/providers/adapters/inference/bedrock/bedrock.py
index de0ee84eb..9c1db4bdb 100644
--- a/llama_stack/providers/adapters/inference/bedrock/bedrock.py
+++ b/llama_stack/providers/adapters/inference/bedrock/bedrock.py
@@ -18,7 +18,7 @@ from llama_stack.providers.utils.inference.routable import RoutableProviderForMo
 from llama_stack.apis.inference import *  # noqa: F403
 from llama_stack.providers.adapters.inference.bedrock.config import BedrockConfig
 
-# mapping of Model SKUs to ollama models
+
 BEDROCK_SUPPORTED_MODELS = {
     "Llama3.1-8B-Instruct": "meta.llama3-1-8b-instruct-v1:0",
     "Llama3.1-70B-Instruct": "meta.llama3-1-70b-instruct-v1:0",
diff --git a/llama_stack/providers/adapters/inference/fireworks/fireworks.py b/llama_stack/providers/adapters/inference/fireworks/fireworks.py
index df8cee189..f6949cbdc 100644
--- a/llama_stack/providers/adapters/inference/fireworks/fireworks.py
+++ b/llama_stack/providers/adapters/inference/fireworks/fireworks.py
@@ -22,6 +22,7 @@ from llama_stack.providers.utils.inference.augment_messages import (
 
 from .config import FireworksImplConfig
 
+
 FIREWORKS_SUPPORTED_MODELS = {
     "Llama3.1-8B-Instruct": "fireworks/llama-v3p1-8b-instruct",
     "Llama3.1-70B-Instruct": "fireworks/llama-v3p1-70b-instruct",
diff --git a/llama_stack/providers/adapters/inference/together/together.py b/llama_stack/providers/adapters/inference/together/together.py
index 1db354bc3..9f73a81d1 100644
--- a/llama_stack/providers/adapters/inference/together/together.py
+++ b/llama_stack/providers/adapters/inference/together/together.py
@@ -22,6 +22,7 @@ from llama_stack.providers.utils.inference.routable import RoutableProviderForMo
 
 from .config import TogetherImplConfig
 
+
 TOGETHER_SUPPORTED_MODELS = {
     "Llama3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
     "Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
@@ -167,17 +168,10 @@ class TogetherInferenceAdapter(
             stream=True,
             **options,
         ):
-            if chunk.choices[0].finish_reason:
-                if (
-                    stop_reason is None and chunk.choices[0].finish_reason == "stop"
-                ) or (
-                    stop_reason is None and chunk.choices[0].finish_reason == "eos"
-                ):
+            if finish_reason := chunk.choices[0].finish_reason:
+                if stop_reason is None and finish_reason in ["stop", "eos"]:
                     stop_reason = StopReason.end_of_turn
-                elif (
-                    stop_reason is None
-                    and chunk.choices[0].finish_reason == "length"
-                ):
+                elif stop_reason is None and finish_reason == "length":
                     stop_reason = StopReason.out_of_tokens
                 break
 
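
Note for reviewers: the second together.py hunk is the only behavioral rewrite here; the rest is comment and whitespace cleanup. Below is a minimal standalone sketch of the simplified finish_reason mapping, assuming Together's streaming API reports finish_reason values of "stop", "eos", or "length". The StopReason enum stands in for the one in llama_stack.apis.inference, and map_finish_reason is a hypothetical helper introduced purely for illustration; it is not part of this patch.

    # sketch_finish_reason.py -- illustrative only, not part of the patch
    from enum import Enum
    from typing import Optional


    class StopReason(Enum):
        # Stand-in for llama_stack.apis.inference.StopReason
        end_of_turn = "end_of_turn"
        out_of_tokens = "out_of_tokens"


    def map_finish_reason(finish_reason: Optional[str]) -> Optional[StopReason]:
        # Mirrors the patched logic: "stop"/"eos" end the turn,
        # "length" means the token budget ran out, and anything
        # else leaves stop_reason unset.
        if finish_reason in ("stop", "eos"):
            return StopReason.end_of_turn
        if finish_reason == "length":
            return StopReason.out_of_tokens
        return None


    assert map_finish_reason("eos") is StopReason.end_of_turn
    assert map_finish_reason("length") is StopReason.out_of_tokens
    assert map_finish_reason(None) is None

The walrus operator binds finish_reason once per chunk, so the two branches no longer re-read chunk.choices[0].finish_reason, and the redundant stop_reason-is-None guard no longer needs to be repeated inside a parenthesized or-expression.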