Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 09:53:45 +00:00)
# What does this PR do?

Completes #3732 by removing runtime URL transformations and requiring users to provide full URLs in configuration. All providers now use `base_url` consistently and respect the exact URL provided, without appending paths such as `/v1` or `/openai/v1` at runtime.

BREAKING CHANGE: users must update their configs to include the full URL path (e.g., `http://localhost:11434/v1` instead of `http://localhost:11434`).

Closes #3732

## Test Plan

Existing tests should continue to pass, since the default URLs have been updated to include the full paths.

Adds a unit test that enforces URL standardization across remote inference providers, verifying that each one uses a `base_url` field typed `HttpUrl | None` (a sketch of this check appears below).

Signed-off-by: Charlie Doern <cdoern@redhat.com>
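A minimal sketch of the kind of standardization test the test plan describes; the import path and the provider list here are illustrative assumptions, not the test as merged:

```python
# Sketch of a URL-standardization check; the import path below follows the
# usual llama-stack layout but is an assumption, and the list of provider
# config classes is illustrative rather than exhaustive.
import typing

from pydantic import HttpUrl

from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig


def test_remote_inference_configs_use_base_url():
    for config_cls in [OllamaImplConfig]:  # extend with the other remote providers
        field = config_cls.model_fields.get("base_url")
        assert field is not None, f"{config_cls.__name__} is missing 'base_url'"
        # The field annotation should be HttpUrl | None.
        args = typing.get_args(field.annotation)
        assert HttpUrl in args and type(None) in args
```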
27 lines · 841 B · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any

from pydantic import Field, HttpUrl, SecretStr

from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig

DEFAULT_OLLAMA_URL = "http://localhost:11434/v1"


class OllamaImplConfig(RemoteInferenceProviderConfig):
    auth_credential: SecretStr | None = Field(default=None, exclude=True)
    base_url: HttpUrl | None = Field(default=HttpUrl(DEFAULT_OLLAMA_URL))

    @classmethod
    def sample_run_config(
        cls, base_url: str = "${env.OLLAMA_URL:=http://localhost:11434/v1}", **kwargs
    ) -> dict[str, Any]:
        return {
            "base_url": base_url,
        }
```
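For illustration, a short usage sketch of the config above. It assumes `RemoteInferenceProviderConfig` declares no required fields of its own, so the class can be constructed with defaults; that may not hold exactly:

```python
# Hypothetical usage sketch; assumes RemoteInferenceProviderConfig has no
# required fields, so OllamaImplConfig() works with defaults alone.
cfg = OllamaImplConfig()
print(cfg.base_url)  # expected: http://localhost:11434/v1

# Because base_url is an HttpUrl, a malformed value fails fast at
# config-load time instead of surfacing later as a request error.
cfg = OllamaImplConfig(base_url="http://my-ollama-host:11434/v1")

# sample_run_config emits the env-var template used in generated run configs;
# OLLAMA_URL overrides the default, which now includes the /v1 path.
print(OllamaImplConfig.sample_run_config())
# expected: {'base_url': '${env.OLLAMA_URL:=http://localhost:11434/v1}'}
```

Typing the field as `HttpUrl` rather than a plain string means an invalid URL is rejected once, when the config is loaded, rather than at request time.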