add text-embedding-004

Ishaan Jaff 2024-06-12 08:31:37 -07:00
parent f1c1dddb8b
commit 3af2d7846e
3 changed files with 55 additions and 0 deletions

View file

@@ -1357,6 +1357,26 @@
        "mode": "image_generation",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
    },
    "text-embedding-004": {
        "max_tokens": 3072,
        "max_input_tokens": 3072,
        "output_vector_size": 768,
        "input_cost_per_token": 0.00000000625,
        "output_cost_per_token": 0,
        "litellm_provider": "vertex_ai-embedding-models",
        "mode": "embedding",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
    },
    "text-multilingual-embedding-002": {
        "max_tokens": 2048,
        "max_input_tokens": 2048,
        "output_vector_size": 768,
        "input_cost_per_token": 0.00000000625,
        "output_cost_per_token": 0,
        "litellm_provider": "vertex_ai-embedding-models",
        "mode": "embedding",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
    },
    "textembedding-gecko": {
        "max_tokens": 3072,
        "max_input_tokens": 3072,

View file

@@ -810,6 +810,21 @@ def test_vertexai_embedding():
        pytest.fail(f"Error occurred: {e}")


def test_vertexai_embedding_embedding_latest():
    try:
        load_vertex_ai_credentials()
        litellm.set_verbose = True

        response = embedding(
            model="vertex_ai/text-embedding-004",
            input=["good morning from litellm", "this is another item"],
        )
        print(f"response:", response)
    except litellm.RateLimitError as e:
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.asyncio
async def test_vertexai_aembedding():
    try:
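Outside the test suite, the same call can be made directly. A minimal sketch (not part of this commit), assuming Vertex AI credentials are already configured in the environment and that litellm returns its usual OpenAI-style embedding response shape:

```python
# Illustrative usage of the newly supported model via litellm.embedding.
# Assumes Vertex AI auth is set up (e.g. GOOGLE_APPLICATION_CREDENTIALS)
# and project/location are configured for the vertex_ai provider.
import litellm

response = litellm.embedding(
    model="vertex_ai/text-embedding-004",
    input=["good morning from litellm"],
)

# Each data item should carry a 768-dimensional vector, matching the
# output_vector_size added to the pricing map in this commit.
print(len(response.data[0]["embedding"]))
```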

View file

@@ -1357,6 +1357,26 @@
        "mode": "image_generation",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
    },
    "text-embedding-004": {
        "max_tokens": 3072,
        "max_input_tokens": 3072,
        "output_vector_size": 768,
        "input_cost_per_token": 0.00000000625,
        "output_cost_per_token": 0,
        "litellm_provider": "vertex_ai-embedding-models",
        "mode": "embedding",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
    },
    "text-multilingual-embedding-002": {
        "max_tokens": 2048,
        "max_input_tokens": 2048,
        "output_vector_size": 768,
        "input_cost_per_token": 0.00000000625,
        "output_cost_per_token": 0,
        "litellm_provider": "vertex_ai-embedding-models",
        "mode": "embedding",
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models"
    },
    "textembedding-gecko": {
        "max_tokens": 3072,
        "max_input_tokens": 3072,