From ae7ee1a75d08615b8fe0a2ac0b40512cb1d41929 Mon Sep 17 00:00:00 2001
From: Roque Caballero
Date: Fri, 28 Mar 2025 17:53:29 +0100
Subject: [PATCH] chore: fix model URLs

---
 demo-01/src/main/resources/application.yaml | 2 +-
 demo-02/src/main/resources/application.yaml | 2 +-
 demo-03/src/main/resources/application.yaml | 2 +-
 demo-04/src/main/resources/application.yaml | 2 +-
 demo-05/src/main/resources/application.yaml | 2 +-
 demo-06/src/main/resources/application.yaml | 4 ++--
 demo-07/src/main/resources/application.yaml | 4 ++--
 demo-08/src/main/resources/application.yaml | 4 ++--
 8 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/demo-01/src/main/resources/application.yaml b/demo-01/src/main/resources/application.yaml
index 48b6b31..f7ba6dc 100644
--- a/demo-01/src/main/resources/application.yaml
+++ b/demo-01/src/main/resources/application.yaml
@@ -3,7 +3,7 @@ quarkus:
     openai:
       #api-key: ${OPENAI_API_KEY}
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       chat-model:
         #model-name: gpt-4o-mini
         model-name: inference-llama33-70b
diff --git a/demo-02/src/main/resources/application.yaml b/demo-02/src/main/resources/application.yaml
index 87cd28a..7169bcd 100644
--- a/demo-02/src/main/resources/application.yaml
+++ b/demo-02/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
diff --git a/demo-03/src/main/resources/application.yaml b/demo-03/src/main/resources/application.yaml
index 87cd28a..7169bcd 100644
--- a/demo-03/src/main/resources/application.yaml
+++ b/demo-03/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
diff --git a/demo-04/src/main/resources/application.yaml b/demo-04/src/main/resources/application.yaml
index 87cd28a..7169bcd 100644
--- a/demo-04/src/main/resources/application.yaml
+++ b/demo-04/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
diff --git a/demo-05/src/main/resources/application.yaml b/demo-05/src/main/resources/application.yaml
index e7f8ef6..1c48b9c 100644
--- a/demo-05/src/main/resources/application.yaml
+++ b/demo-05/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
diff --git a/demo-06/src/main/resources/application.yaml b/demo-06/src/main/resources/application.yaml
index ce71cde..e9a4947 100644
--- a/demo-06/src/main/resources/application.yaml
+++ b/demo-06/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       #https://docs.langchain4j.dev/tutorials/model-parameters/
       chat-model:
@@ -25,7 +25,7 @@ l4j:
   custom-embedding-model:
     #This is 1024 dimension model. pgvector must be configured accordingly
     model-name: inference-multilingual-e5l
-    base-url: https://inference-multilingual-e5l-maas.apps.ai.kvant.cloud/v1
+    base-url: https://inference-multilingual-e5l-maas.apps.ai-2.kvant.cloud/v1
     api-key: #PUT_YOUR_TOKEN_HERE
     log-requests: false
     log-responses: false
diff --git a/demo-07/src/main/resources/application.yaml b/demo-07/src/main/resources/application.yaml
index 6276467..f779101 100644
--- a/demo-07/src/main/resources/application.yaml
+++ b/demo-07/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
@@ -24,7 +24,7 @@ l4j:
   custom-embedding-model:
     #This is 1024 dimension model. pgvector must be configured accordingly
     model-name: inference-multilingual-e5l
-    base-url: https://inference-multilingual-e5l-maas.apps.ai.kvant.cloud/v1
+    base-url: https://inference-multilingual-e5l-maas.apps.ai-2.kvant.cloud/v1
     api-key: #PUT_YOUR_TOKEN_HERE
     log-requests: false
     log-responses: false
diff --git a/demo-08/src/main/resources/application.yaml b/demo-08/src/main/resources/application.yaml
index 6276467..f779101 100644
--- a/demo-08/src/main/resources/application.yaml
+++ b/demo-08/src/main/resources/application.yaml
@@ -2,7 +2,7 @@ quarkus:
   langchain4j:
     openai:
       api-key: #PUT_YOUR_TOKEN_HERE
-      base-url: https://inference-llama33-70b-maas.apps.ai.kvant.cloud/v1/
+      base-url: https://inference-llama33-70b-maas.apps.ai-2.kvant.cloud/v1/
       timeout: 60s
       chat-model:
         model-name: inference-llama33-70b
@@ -24,7 +24,7 @@ l4j:
   custom-embedding-model:
     #This is 1024 dimension model. pgvector must be configured accordingly
     model-name: inference-multilingual-e5l
-    base-url: https://inference-multilingual-e5l-maas.apps.ai.kvant.cloud/v1
+    base-url: https://inference-multilingual-e5l-maas.apps.ai-2.kvant.cloud/v1
     api-key: #PUT_YOUR_TOKEN_HERE
     log-requests: false
     log-responses: false