mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 02:34:29 +00:00
test fix deprecated gemini-1.0-pro on vertex
parent 7021f2f244
commit 555067e207
4 changed files with 7 additions and 7 deletions
@@ -20,7 +20,7 @@ const requestOptions = {
 };

 const generativeModel = vertexAI.getGenerativeModel(
-  { model: 'gemini-1.0-pro' },
+  { model: 'gemini-1.5-pro' },
   requestOptions
 );

@@ -75,7 +75,7 @@ describe('Vertex AI Tests', () => {
 };

 const generativeModel = vertexAI.getGenerativeModel(
-  { model: 'gemini-1.0-pro' },
+  { model: 'gemini-1.5-pro' },
   requestOptions
 );

@@ -103,7 +103,7 @@ describe('Vertex AI Tests', () => {
 const vertexAI = new VertexAI({project: 'pathrise-convert-1606954137718', location: 'us-central1', apiEndpoint: "localhost:4000/vertex-ai"});
 const customHeaders = new Headers({"x-litellm-api-key": "sk-1234"});
 const requestOptions = {customHeaders: customHeaders};
-const generativeModel = vertexAI.getGenerativeModel({model: 'gemini-1.0-pro'}, requestOptions);
+const generativeModel = vertexAI.getGenerativeModel({model: 'gemini-1.5-pro'}, requestOptions);
 const request = {contents: [{role: 'user', parts: [{text: 'What is 2+2?'}]}]};

 const result = await generativeModel.generateContent(request);
@@ -103,7 +103,7 @@ async def test_basic_vertex_ai_pass_through_with_spendlog():
         api_transport="rest",
     )

-    model = GenerativeModel(model_name="gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.5-pro")
     response = model.generate_content("hi")

     print("response", response)
@@ -135,7 +135,7 @@ async def test_basic_vertex_ai_pass_through_streaming_with_spendlog():
         api_transport="rest",
     )

-    model = GenerativeModel(model_name="gemini-1.0-pro")
+    model = GenerativeModel(model_name="gemini-1.5-pro")
     response = model.generate_content("hi", stream=True)

     for chunk in response:
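For reference, the two Python hunks above each touch a single line of the pass-through tests. Below is a minimal standalone sketch of that setup, assuming a LiteLLM proxy running at localhost:4000 with its /vertex-ai pass-through route enabled; the project ID, credential handling, and the print inside the streaming loop are illustrative placeholders, not the repository's exact test code.

import vertexai
from vertexai.generative_models import GenerativeModel

# Assumption: a LiteLLM proxy is listening on localhost:4000 and exposes the
# Vertex AI pass-through route at /vertex-ai (as in the JS tests above).
vertexai.init(
    project="my-gcp-project",                  # placeholder project ID
    location="us-central1",
    api_endpoint="localhost:4000/vertex-ai",   # send SDK traffic through the proxy
    api_transport="rest",                      # REST transport, matching the context lines above
)

# Non-streaming call, as in test_basic_vertex_ai_pass_through_with_spendlog
model = GenerativeModel(model_name="gemini-1.5-pro")
response = model.generate_content("hi")
print("response", response)

# Streaming call, as in test_basic_vertex_ai_pass_through_streaming_with_spendlog
streaming_response = model.generate_content("hi", stream=True)
for chunk in streaming_response:
    print("chunk", chunk)  # placeholder loop body; the test's assertions are omitted

Pointing api_endpoint at the proxy rather than the default Google endpoint is what the spend-log tests above exercise.
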
@@ -84,7 +84,7 @@ describe('Vertex AI Tests', () => {
 };

 const generativeModel = vertexAI.getGenerativeModel(
-  { model: 'gemini-1.0-pro' },
+  { model: 'gemini-1.5-pro' },
   requestOptions
 );

@@ -140,7 +140,7 @@ describe('Vertex AI Tests', () => {
 };

 const generativeModel = vertexAI.getGenerativeModel(
-  { model: 'gemini-1.0-pro' },
+  { model: 'gemini-1.5-pro' },
   requestOptions
 );
