mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-05 10:13:05 +00:00)
kill batch inference registry
commit 1d855461d5 (parent 73d927850e)

1 changed file with 0 additions and 39 deletions
@@ -1,39 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import List
-
-from llama_stack.providers.datatypes import (
-    Api,
-    InlineProviderSpec,
-    ProviderSpec,
-)
-
-META_REFERENCE_DEPS = [
-    "accelerate",
-    "blobfile",
-    "fairscale",
-    "torch",
-    "torchvision",
-    "transformers",
-    "zmq",
-    "lm-format-enforcer",
-    "sentence-transformers",
-    "torchao==0.5.0",
-    "fbgemm-gpu-genai==1.1.2",
-]
-
-
-def available_providers() -> List[ProviderSpec]:
-    return [
-        InlineProviderSpec(
-            api=Api.inference,
-            provider_type="inline::meta-reference",
-            pip_packages=META_REFERENCE_DEPS,
-            module="llama_stack.providers.inline.batch_inference.meta_reference",
-            config_class="llama_stack.providers.inline.batch_inference.meta_reference.MetaReferenceInferenceConfig",
-        ),
-    ]
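For context on the deletion: a registry module like the one removed here exposes an available_providers() hook whose ProviderSpec entries tell the stack which providers can serve an API, which pip packages they need, and which module/config_class to load. Below is a minimal, hypothetical sketch of how such a registry's output is typically consumed, assuming only the Api and ProviderSpec types that the deleted file itself imported; build_provider_index is an illustrative name, not a function from llama-stack.

# A minimal sketch (not llama-stack code): index provider specs by API and
# provider_type, the way a registry's available_providers() output is
# typically resolved at startup.
from typing import Dict, List

from llama_stack.providers.datatypes import Api, ProviderSpec


def build_provider_index(
    specs: List[ProviderSpec],
) -> Dict[Api, Dict[str, ProviderSpec]]:
    """Group specs so an (api, provider_type) pair resolves to one spec."""
    index: Dict[Api, Dict[str, ProviderSpec]] = {}
    for spec in specs:
        # Both fields are set on the InlineProviderSpec constructed above,
        # e.g. api=Api.inference, provider_type="inline::meta-reference".
        index.setdefault(spec.api, {})[spec.provider_type] = spec
    return index

Killing this registry means the batch_inference API no longer advertises the inline meta-reference provider here; any lookup of that (api, provider_type) pair against this file's specs would now come up empty.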