# llama-stack-mirror/llama_stack/examples/router-table-run.yaml
# Snapshot metadata: 2024-09-21 14:26:48 -07:00 — 42 lines, 1,013 B, YAML

---
# Llama Stack run configuration demonstrating a provider routing table:
# a single `inference` API served by two locally routed model configs.
built_at: '2024-09-18T13:41:17.656743'
image_name: local
docker_image: null
conda_env: local

# APIs exposed by this stack instance.
apis_to_serve:
  - inference
  # - memory
  - telemetry

# Providers keyed directly by API (no routing needed).
provider_map:
  telemetry:
    provider_id: meta-reference
    config: {}

# Providers selected per-request by routing key (here: the model name).
provider_routing_table:
  inference:
    - routing_key: Meta-Llama3.1-8B-Instruct
      provider_id: meta-reference
      config:
        model: Meta-Llama3.1-8B-Instruct
        quantization: null
        torch_seed: null
        max_seq_len: 4096
        max_batch_size: 1
    - routing_key: Meta-Llama3.1-8B
      provider_id: meta-reference
      config:
        model: Meta-Llama3.1-8B
        quantization: null
        torch_seed: null
        max_seq_len: 4096
        max_batch_size: 1
  # memory:
  #   - routing_key: keyvalue
  #     provider_id: remote::pgvector
  #     config:
  #       host: localhost
  #       port: 5432
  #       db: vectordb
  #       user: vectoruser
  #       password: xxxx
  #   - routing_key: vector
  #     provider_id: meta-reference
  #     config: {}