Mirror of https://github.com/BerriAI/litellm.git
test: ensure test calls contain bearer token

commit 7412463ebb (parent b1b582ffe2)
4 changed files with 37 additions and 31 deletions
@@ -53,9 +53,9 @@ model_list:
       api_key: os.environ/AZURE_API_KEY
       api_version: 2023-07-01-preview
       model: azure/azure-embedding-model
-    model_name: azure-embedding-model
     model_info:
-      mode: "embedding"
+      mode: embedding
+    model_name: azure-embedding-model
   - litellm_params:
       model: gpt-3.5-turbo
     model_info:
@@ -80,43 +80,49 @@ model_list:
       description: this is a test openai model
       id: 9b1ef341-322c-410a-8992-903987fef439
     model_name: test_openai_models
-  - model_name: amazon-embeddings
-    litellm_params:
-      model: "bedrock/amazon.titan-embed-text-v1"
+  - litellm_params:
+      model: bedrock/amazon.titan-embed-text-v1
     model_info:
       mode: embedding
-  - model_name: "GPT-J 6B - Sagemaker Text Embedding (Internal)"
-    litellm_params:
-      model: "sagemaker/berri-benchmarking-gpt-j-6b-fp16"
+    model_name: amazon-embeddings
+  - litellm_params:
+      model: sagemaker/berri-benchmarking-gpt-j-6b-fp16
     model_info:
       mode: embedding
-  - model_name: dall-e-3
-    litellm_params:
+    model_name: GPT-J 6B - Sagemaker Text Embedding (Internal)
+  - litellm_params:
       model: dall-e-3
     model_info:
       mode: image_generation
-  - model_name: dall-e-3
-    litellm_params:
-      model: "azure/dall-e-3-test"
-      api_version: "2023-12-01-preview"
-      api_base: "os.environ/AZURE_SWEDEN_API_BASE"
-      api_key: "os.environ/AZURE_SWEDEN_API_KEY"
+    model_name: dall-e-3
+  - litellm_params:
+      api_base: os.environ/AZURE_SWEDEN_API_BASE
+      api_key: os.environ/AZURE_SWEDEN_API_KEY
+      api_version: 2023-12-01-preview
+      model: azure/dall-e-3-test
     model_info:
       mode: image_generation
-  - model_name: dall-e-2
-    litellm_params:
-      model: "azure/"
-      api_version: "2023-06-01-preview"
-      api_base: "os.environ/AZURE_API_BASE"
-      api_key: "os.environ/AZURE_API_KEY"
+    model_name: dall-e-3
+  - litellm_params:
+      api_base: os.environ/AZURE_API_BASE
+      api_key: os.environ/AZURE_API_KEY
+      api_version: 2023-06-01-preview
+      model: azure/
     model_info:
       mode: image_generation
-  - model_name: text-embedding-ada-002
-    litellm_params:
+    model_name: dall-e-2
+  - litellm_params:
+      api_base: os.environ/AZURE_API_BASE
+      api_key: os.environ/AZURE_API_KEY
+      api_version: 2023-07-01-preview
       model: azure/azure-embedding-model
-      api_base: "os.environ/AZURE_API_BASE"
-      api_key: "os.environ/AZURE_API_KEY"
-      api_version: "2023-07-01-preview"
     model_info:
-      mode: embedding
       base_model: text-embedding-ada-002
+      mode: embedding
+    model_name: text-embedding-ada-002
+  - litellm_params:
+      model: gpt-3.5-turbo
+    model_info:
+      description: this is a test openai model
+      id: 34cb2419-7c63-44ae-a189-53f1d1ce5953
+    model_name: test_openai_models
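The two config hunks above only re-serialize the existing model_list entries (keys sorted alphabetically, redundant quotes dropped) and append one new test_openai_models entry, which accounts for the +37/-31 line counts. That shape is consistent with the config being round-tripped through a YAML dumper. A minimal sketch of that round trip, assuming PyYAML and using one entry from the old config as sample data (illustrative only, not the proxy's actual save path):

import yaml

# One model_list entry shaped like the old, hand-written config.
entry = {
    "model_name": "dall-e-3",
    "litellm_params": {
        "model": "azure/dall-e-3-test",
        "api_version": "2023-12-01-preview",
        "api_base": "os.environ/AZURE_SWEDEN_API_BASE",
        "api_key": "os.environ/AZURE_SWEDEN_API_KEY",
    },
    "model_info": {"mode": "image_generation"},
}

# safe_dump sorts mapping keys and emits plain scalars without quotes,
# reproducing the reordering and de-quoting visible in the hunks above.
print(yaml.safe_dump({"model_list": [entry]}, default_flow_style=False))

The printed entry starts with litellm_params (api_base, api_key, api_version, model) and ends with model_name, matching the new side of the diff.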
@@ -32,7 +32,7 @@ from litellm.proxy.proxy_server import (
 ) # Replace with the actual module where your FastAPI router is defined
 
 # Your bearer token
-token = ""
+token = "sk-1234"
 
 headers = {"Authorization": f"Bearer {token}"}
 
@@ -31,7 +31,7 @@ from litellm.proxy.proxy_server import (
 ) # Replace with the actual module where your FastAPI router is defined
 
 # Your bearer token
-token = ""
+token = "sk-1234"
 
 headers = {"Authorization": f"Bearer {token}"}
 
@@ -33,7 +33,7 @@ from litellm.proxy.proxy_server import (
 ) # Replace with the actual module where your FastAPI router is defined
 
 # Your bearer token
-token = ""
+token = "sk-1234"
 
 headers = {"Authorization": f"Bearer {token}"}
 
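The last three hunks make the same one-line change in three test modules: the empty placeholder token becomes a real-looking key, so every test call carries a usable Authorization header. The pattern those tests rely on looks roughly like the following self-contained sketch; the app and endpoint here are stand-ins, since the real tests import the router from litellm.proxy.proxy_server:

from fastapi import Depends, FastAPI
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.testclient import TestClient

app = FastAPI()  # stand-in for the proxy app built from the imported router
bearer_scheme = HTTPBearer()


@app.get("/health")
def health(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
    # HTTPBearer rejects requests with a missing or empty token before this runs.
    return {"token_seen": credentials.credentials}


# Your bearer token -- the commit swaps the empty placeholder for "sk-1234"
token = "sk-1234"
headers = {"Authorization": f"Bearer {token}"}


def test_call_contains_bearer_token():
    client = TestClient(app)
    response = client.get("/health", headers=headers)
    assert response.status_code == 200
    assert response.json()["token_seen"] == "sk-1234"


if __name__ == "__main__":
    test_call_contains_bearer_token()
    print("bearer token forwarded correctly")

In this sketch, the old token = "" would send a bare "Bearer " header, which HTTPBearer rejects with 403 before the route is reached; a non-empty token is what lets the calls exercise the authenticated path.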