Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-16 05:22:40 +00:00)
chore: remove together inference adapter's custom check_model_availability
This commit is contained in:
parent d23ed26238
commit a9efeaba33

1 changed file with 0 additions and 3 deletions
@@ -66,9 +66,6 @@ class TogetherInferenceAdapter(OpenAIMixin, NeedsRequestProviderData):
     async def should_refresh_models(self) -> bool:
         return True
 
-    async def check_model_availability(self, model):
-        return model in self._model_cache
-
     async def openai_embeddings(
         self,
         model: str,
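For context, a minimal self-contained sketch of the pattern this change relies on: once the adapter drops its own check_model_availability override, calls fall through to the generic implementation inherited from OpenAIMixin. The classes and the _model_cache lookup below are illustrative assumptions only (the removed override shown in the diff checked membership in self._model_cache); the real OpenAIMixin in llama-stack may implement its default differently.

import asyncio


class OpenAIMixinSketch:
    """Hypothetical stand-in for OpenAIMixin; the real mixin may differ."""

    _model_cache: dict = {}

    async def check_model_availability(self, model: str) -> bool:
        # Generic default: consult the cached model listing.
        return model in self._model_cache


class TogetherInferenceAdapterSketch(OpenAIMixinSketch):
    """With the custom override removed, the mixin default is inherited."""

    async def should_refresh_models(self) -> bool:
        return True


async def main() -> None:
    adapter = TogetherInferenceAdapterSketch()
    adapter._model_cache = {"meta-llama/Llama-3-8b-Instruct": object()}
    print(await adapter.check_model_availability("meta-llama/Llama-3-8b-Instruct"))  # True
    print(await adapter.check_model_availability("not-a-model"))                     # False


asyncio.run(main())

In this sketch the subclass behaves exactly as it did with the override, which is why the override can be deleted without changing behavior, assuming the mixin's default performs an equivalent cache lookup.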