Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-31 07:33:51 +00:00)
Merge branch 'refs/heads/main' into preprocessors
# Conflicts:
#	llama_stack/distribution/routers/routers.py
#	llama_stack/templates/ollama/build.yaml
#	llama_stack/templates/ollama/run-with-safety.yaml
#	llama_stack/templates/ollama/run.yaml
#	llama_stack/templates/remote-vllm/build.yaml
#	llama_stack/templates/remote-vllm/run-with-safety.yaml
#	llama_stack/templates/remote-vllm/run.yaml
#	llama_stack/templates/together/build.yaml
#	llama_stack/templates/together/run-with-safety.yaml
#	llama_stack/templates/together/run.yaml
commit 6b9f673fdb

313 changed files with 181388 additions and 7064 deletions
```diff
@@ -57,7 +57,7 @@ def get_distribution_template() -> DistributionTemplate:
     vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
-        config=FaissVectorIOConfig.sample_run_config(f"distributions/{name}"),
+        config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
     )
 
     inference_model = ModelInput(
```
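This hunk moves the default FAISS persistence path from the working-directory-relative `distributions/{name}` to `~/.llama/distributions/{name}`, anchoring it under the user's home. A minimal sketch of why that matters, using a hypothetical helper (not llama-stack code) to resolve such a path:

```python
import os

def resolve_distribution_dir(path: str) -> str:
    # Hypothetical helper, not from llama-stack: expand "~" so the FAISS
    # store lands under the user's home (e.g. ~/.llama/distributions/tgi)
    # rather than in a directory relative to wherever the server was started.
    return os.path.abspath(os.path.expanduser(path))

print(resolve_distribution_dir("~/.llama/distributions/tgi"))
# -> /home/<user>/.llama/distributions/tgi (platform-dependent)
```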
```diff
@@ -151,7 +151,7 @@ def get_distribution_template() -> DistributionTemplate:
                 "Inference model loaded into the TGI server",
             ),
             "TGI_URL": (
-                "http://127.0.0.1:8080}/v1",
+                "http://127.0.0.1:8080/v1",
                 "URL of the TGI server with the main inference model",
             ),
             "TGI_SAFETY_URL": (
```
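This hunk removes a stray `}` from the default `TGI_URL` value. As an illustrative guard only (not part of llama-stack), a default like this could be sanity-checked before it reaches clients:

```python
from urllib.parse import urlparse

def check_base_url(url: str) -> None:
    # Illustrative check, not llama-stack code: catch malformed defaults
    # such as the stray "}" removed above before they are handed to clients.
    parsed = urlparse(url)
    if parsed.scheme not in ("http", "https") or not parsed.netloc:
        raise ValueError(f"not an http(s) URL: {url}")
    if "{" in url or "}" in url:
        raise ValueError(f"unexpected brace in URL: {url}")

check_base_url("http://127.0.0.1:8080/v1")      # ok
# check_base_url("http://127.0.0.1:8080}/v1")   # would raise ValueError
```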