migrate router for memory wip

This commit is contained in:
Xi Yan 2024-09-20 12:19:33 -07:00
parent 7d4135d5fd
commit cda61119ce
8 changed files with 213 additions and 45 deletions

View file

@@ -3,26 +3,27 @@ image_name: local
docker_image: null
conda_env: local
apis_to_serve:
- inference
# - inference
- memory
- telemetry
provider_map:
# use builtin-router as dummy field
memory: builtin-router
inference: builtin-router
telemetry:
provider_id: meta-reference
config: {}
provider_routing_table:
inference:
- routing_key: Meta-Llama3.1-8B-Instruct
provider_id: meta-reference
config:
model: Meta-Llama3.1-8B-Instruct
quantization: null
torch_seed: null
max_seq_len: 4096
max_batch_size: 1
- routing_key: Meta-Llama3.1-8B
provider_id: remote::ollama
config:
      url: http://ollama-url-1.com
# inference:
# - routing_key: Meta-Llama3.1-8B-Instruct
# provider_id: meta-reference
# config:
# model: Meta-Llama3.1-8B-Instruct
# quantization: null
# torch_seed: null
# max_seq_len: 4096
# max_batch_size: 1
# - routing_key: Meta-Llama3.1-8B
# provider_id: remote::ollama
# config:
#     url: http://ollama-url-1.com
memory:
- routing_key: keyvalue
provider_id: remote::pgvector
@@ -31,6 +32,7 @@ provider_routing_table:
port: 5432
db: vectordb
user: vectoruser
password: xxxx
- routing_key: vector
provider_id: meta-reference
config: {}