llama-stack-mirror/llama_stack/providers/adapters/inference/vllm/config.py
Yuan Tang 925e1afb5b
Add vLLM provider
Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
2024-10-18 22:02:59 -04:00

23 lines
No EOL
638 B
Python

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Optional
from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field
# TODO: Any other engine configs
@json_schema_type
class VLLMImplConfig(BaseModel):
    """Configuration for the vLLM inference adapter.

    Carries the connection settings needed to reach an already-running
    vLLM model serving endpoint; both fields are optional and default to
    unset so they can be supplied later via provider configuration.
    """

    # Base URL of the vLLM server; None until configured.
    url: Optional[str] = Field(
        None,
        description="The URL for the vLLM model serving endpoint",
    )
    # Token sent along with requests — presumably for endpoint
    # authentication; confirm against the adapter that consumes this.
    api_token: Optional[str] = Field(
        None,
        description="The API token",
    )