Mirror of https://github.com/BerriAI/litellm.git — synced 2025-04-26 19:24:27 +00:00
* fix parallel request limiter - use one cache update call
* ci/cd run again
* run ci/cd again
* use docker username password
* fix config.yml
* fix config
* fix config
* fix config.yml
* ci/cd run again
* use correct typing for batch set cache
* fix async_set_cache_pipeline
* fix only check user id tpm / rpm limits when limits set
* fix test_openai_azure_embedding_with_oidc_and_cf
* add InstanceImage type
* fix vertex image transform
* add langchain vertex test request
* add new vertex test
* update multimodal embedding tests
* add test_vertexai_multimodal_embedding_base64image_in_input
* simplify langchain mm embedding usage
* add langchain example for multimodal embeddings on vertex
* fix linting error
17 lines · 421 B · Python
"""Example: request a Vertex AI multimodal embedding through a LiteLLM proxy.

Points langchain's OpenAI-compatible embeddings client at a locally running
LiteLLM proxy (http://0.0.0.0:4000), which forwards the request to Vertex AI's
``multimodalembedding@001`` model. The query input here is a GCS image URI,
so the printed result is the image's embedding vector.
"""
from langchain_openai import OpenAIEmbeddings

# Vertex AI multimodal embedding model served behind the proxy.
embeddings_models = "multimodalembedding@001"

embeddings = OpenAIEmbeddings(
    # Reuse the constant instead of duplicating the model string (the
    # original defined `embeddings_models` but never used it).
    model=embeddings_models,
    base_url="http://0.0.0.0:4000",  # local LiteLLM proxy endpoint
    api_key="sk-1234",  # type: ignore  # proxy key — dummy value for local testing
)

# embed_query sends the GCS URI through the proxy; Vertex AI returns the
# embedding vector for the referenced image.
query_result = embeddings.embed_query(
    "gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
)

print(query_result)