fixed comments

Hardik Shah 2025-07-10 10:40:45 -07:00
parent b9269a94b9
commit d5034ed759
4 changed files with 3 additions and 4 deletions

@@ -16,7 +16,7 @@ Remote vLLM inference provider for connecting to vLLM servers.
 ## Sample Configuration

 ```yaml
-url: ${env.VLLM_URL:=http://localhost:8000/v1}
+url: ${env.VLLM_URL}
 max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
 api_token: ${env.VLLM_API_TOKEN:=fake}
 tls_verify: ${env.VLLM_TLS_VERIFY:=true}
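
The only change in this docs hunk is dropping the `:=http://localhost:8000/v1` fallback, so `VLLM_URL` must now be set explicitly instead of silently pointing at a local server. A minimal sketch of how the `${env.VAR:=default}` substitution behaves, using a hypothetical resolver (illustrative only; llama-stack's actual implementation may differ):

```python
import os
import re

# Hypothetical resolver mirroring the ${env.VAR:=default} syntax used in
# these configs; illustrative only, not llama-stack's real implementation.
_ENV_PATTERN = re.compile(r"\$\{env\.(?P<name>\w+)(?::=(?P<default>[^}]*))?\}")

def resolve(value: str) -> str:
    def _sub(match: re.Match) -> str:
        name = match.group("name")
        default = match.group("default")
        env_value = os.environ.get(name)
        if env_value is not None:
            return env_value
        if default is not None:  # the ":=" form supplies a fallback
            return default
        raise ValueError(f"environment variable {name} is not set")
    return _ENV_PATTERN.sub(_sub, value)

# After this commit, an unset VLLM_URL fails loudly instead of defaulting:
#   resolve("${env.VLLM_URL}")               -> ValueError if VLLM_URL unset
#   resolve("${env.VLLM_MAX_TOKENS:=4096}")  -> "4096" when unset
```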


@@ -96,7 +96,6 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv

     def _get_client(self) -> Fireworks:
         fireworks_api_key = self._get_api_key()
-        print(f">>>>>> fireworks_api_key: {fireworks_api_key} <<<<<")
         return Fireworks(api_key=fireworks_api_key)

     def _get_openai_client(self) -> AsyncOpenAI:
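
The deleted line here was leftover debug output that printed the raw Fireworks API key to stdout, where it could end up in server logs; removing it is the right call. If key handling ever needs tracing again, logging a masked form is safer. A minimal sketch (this helper is illustrative, not part of the adapter):

```python
def mask_secret(secret: str, visible: int = 4) -> str:
    """Mask all but the last few characters, e.g. '************abcd'."""
    if len(secret) <= visible:
        return "*" * len(secret)
    return "*" * (len(secret) - visible) + secret[-visible:]

# e.g. logger.debug("fireworks_api_key: %s", mask_secret(fireworks_api_key))
```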


@@ -46,7 +46,7 @@ class VLLMInferenceAdapterConfig(BaseModel):
     @classmethod
     def sample_run_config(
         cls,
-        url: str = "${env.VLLM_URL:=http://localhost:8000/v1}",
+        url: str = "${env.VLLM_URL}",
         **kwargs,
     ):
         return {
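
This is the source of the docs hunk above: `sample_run_config` renders the sample configuration, so its `url` default has to change in lockstep with the documentation. A hedged usage sketch (the import path is assumed from the class name in the hunk header, and only the `url` key is grounded in the diff; the rest of the returned dict is truncated above):

```python
# Import path assumed from the hunk's class name; verify against the repo.
from llama_stack.providers.remote.inference.vllm.config import (
    VLLMInferenceAdapterConfig,
)

sample = VLLMInferenceAdapterConfig.sample_run_config()
assert sample["url"] == "${env.VLLM_URL}"
```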


@@ -26,7 +26,7 @@ providers:
   - provider_id: ${env.ENABLE_VLLM:=__disabled__}
     provider_type: remote::vllm
     config:
-      url: ${env.VLLM_URL:=http://localhost:8000/v1}
+      url: ${env.VLLM_URL}
       max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
       api_token: ${env.VLLM_API_TOKEN:=fake}
       tls_verify: ${env.VLLM_TLS_VERIFY:=true}
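
In this run config the provider id itself is apparently gated on `ENABLE_VLLM` (it resolves to the `__disabled__` sentinel unless that variable is set), and with this commit `VLLM_URL` no longer has a fallback of its own, so enabling remote vLLM now requires exporting both variables. An illustrative pre-flight check under those assumptions, not llama-stack's own validation:

```python
import os

# Mirrors the config's gating: the provider id defaults to the
# "__disabled__" sentinel, and the URL now has no built-in default.
if os.environ.get("ENABLE_VLLM", "__disabled__") != "__disabled__":
    url = os.environ.get("VLLM_URL")
    if not url:
        raise SystemExit("ENABLE_VLLM is set but VLLM_URL is not")
    print(f"remote::vllm enabled at {url}")
else:
    print("remote::vllm disabled")
```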