Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Add pyright to ci/cd + Fix remaining type-checking errors (#6082)
* fix: fix type-checking errors
* fix: fix additional type-checking errors
* fix: additional type-checking error fixes
* fix: fix additional type-checking errors
* fix: additional type-check fixes
* fix: fix all type-checking errors + add pyright to ci/cd
* fix: fix incorrect import
* ci(config.yml): use mypy on ci/cd
* fix: fix type-checking errors in utils.py
* fix: fix all type-checking errors on main.py
* fix: fix mypy linting errors
* fix(anthropic/cost_calculator.py): fix linting errors
* fix: fix mypy linting errors
* fix: fix linting errors
parent f7ce1173f3
commit fac3b2ee42
65 changed files with 619 additions and 522 deletions
```diff
@@ -118,13 +118,13 @@ async def create_fine_tuning_job(
         version,
     )
 
+    data = fine_tuning_request.model_dump(exclude_none=True)
     try:
         if premium_user is not True:
             raise ValueError(
                 f"Only premium users can use this endpoint + {CommonProxyErrors.not_premium_user.value}"
             )
         # Convert Pydantic model to dict
-        data = fine_tuning_request.model_dump(exclude_none=True)
 
         verbose_proxy_logger.debug(
             "Request received by LiteLLM:\n{}".format(json.dumps(data, indent=4)),
```
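The line this hunk relocates calls Pydantic's `model_dump(exclude_none=True)`, which serializes the request model to a plain dict and drops any field left as None, so only user-supplied values are forwarded. Hoisting the assignment above the `try:` also keeps `data` bound for any later reference (for example in an exception handler), the sort of possibly-unbound warning pyright raises. A small illustration with a hypothetical model, not the proxy's actual schema:

```python
from typing import Optional

from pydantic import BaseModel


class FineTuningJobCreate(BaseModel):
    # Hypothetical fields, for illustration only.
    model: str
    training_file: str
    hyperparameters: Optional[dict] = None
    suffix: Optional[str] = None


req = FineTuningJobCreate(model="gpt-4o-mini", training_file="file-abc123")
print(req.model_dump(exclude_none=True))
# -> {'model': 'gpt-4o-mini', 'training_file': 'file-abc123'}
```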
```diff
@@ -146,7 +146,8 @@ async def create_fine_tuning_job(
         )
 
         # add llm_provider_config to data
-        data.update(llm_provider_config)
+        if llm_provider_config is not None:
+            data.update(llm_provider_config)
 
         response = await litellm.acreate_fine_tuning_job(**data)
```
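This hunk, and the two like it below, apply the same fix: `llm_provider_config` may be None, and handing a possibly-None value to `dict.update` is exactly the kind of argument-type error pyright flags, so the call is wrapped in an `is not None` check that narrows the type. A minimal sketch of the pattern, with illustrative names rather than the proxy's real signatures:

```python
from typing import Any, Optional


def merge_provider_config(
    data: dict[str, Any], llm_provider_config: Optional[dict[str, Any]]
) -> dict[str, Any]:
    # Without the guard a checker sees the argument as "dict[str, Any] | None",
    # which dict.update does not accept; the check narrows it to a plain dict.
    if llm_provider_config is not None:
        data.update(llm_provider_config)
    return data


print(merge_provider_config({"model": "gpt-4o-mini"}, None))
# -> {'model': 'gpt-4o-mini'}
print(merge_provider_config({"model": "gpt-4o-mini"}, {"api_base": "https://example.com"}))
# -> {'model': 'gpt-4o-mini', 'api_base': 'https://example.com'}
```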
```diff
@@ -262,7 +263,8 @@ async def list_fine_tuning_jobs(
             custom_llm_provider=custom_llm_provider
         )
 
-        data.update(llm_provider_config)
+        if llm_provider_config is not None:
+            data.update(llm_provider_config)
 
         response = await litellm.alist_fine_tuning_jobs(
             **data,
```
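As in the other endpoints, the merged dict is then forwarded with `**data`, so every key becomes a keyword argument of the underlying litellm call. A toy stand-in (not litellm's actual function) to make the unpacking visible:

```python
import asyncio


async def alist_fine_tuning_jobs_stub(**kwargs):
    # Stand-in for litellm.alist_fine_tuning_jobs: it simply echoes the keyword
    # arguments it receives, to show the effect of `**data`.
    return {"received": kwargs}


data = {"custom_llm_provider": "openai", "limit": 10}
print(asyncio.run(alist_fine_tuning_jobs_stub(**data)))
# -> {'received': {'custom_llm_provider': 'openai', 'limit': 10}}
```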
```diff
@@ -378,7 +380,8 @@ async def retrieve_fine_tuning_job(
             custom_llm_provider=custom_llm_provider
         )
 
-        data.update(llm_provider_config)
+        if llm_provider_config is not None:
+            data.update(llm_provider_config)
 
         response = await litellm.acancel_fine_tuning_job(
             **data,
```
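A one-line alternative that would also satisfy the checker is falling back to an empty dict, though the explicit guard used throughout this commit reads more clearly and skips the call entirely when there is no provider config. A sketch of that alternative, using the same illustrative names as above:

```python
from typing import Any, Optional


def merge_provider_config_alt(
    data: dict[str, Any], llm_provider_config: Optional[dict[str, Any]]
) -> dict[str, Any]:
    # `llm_provider_config or {}` can never be None, so dict.update type-checks;
    # note this also treats an empty provider config the same as a missing one.
    data.update(llm_provider_config or {})
    return data
```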