fix: fix linting errors

Krrish Dholakia 2024-07-11 13:36:55 -07:00
parent 6e9f048618
commit 389a51e05d
4 changed files with 97 additions and 134 deletions

View file

@@ -1,12 +1,12 @@
repos:
- repo: local
  hooks:
  # - id: mypy
  #   name: mypy
  #   entry: python3 -m mypy --ignore-missing-imports
  #   language: system
  #   types: [python]
  #   files: ^litellm/
  - id: mypy
    name: mypy
    entry: python3 -m mypy --ignore-missing-imports
    language: system
    types: [python]
    files: ^litellm/
  - id: isort
    name: isort
    entry: isort

View file

@@ -789,6 +789,7 @@ from .utils import (
    get_api_base,
    get_first_chars_messages,
    ModelResponse,
    EmbeddingResponse,
    ImageResponse,
    get_provider_fields,
)
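
If this hunk is the package-level re-export list (an assumption based on the `from .utils import (...)` block), the practical effect is that `EmbeddingResponse` becomes importable from the package root alongside the existing response types. Illustrative usage, not part of the diff:

# Illustrative only: consuming the re-exported response types from the package root.
from litellm import EmbeddingResponse, ImageResponse, ModelResponse

def is_embedding(obj) -> bool:
    return isinstance(obj, EmbeddingResponse)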

View file

@@ -3,92 +3,96 @@ Deprecated. We now do together ai calls via the openai client.
Reference: https://docs.together.ai/docs/openai-api-compatibility
"""
# import os, types
# import json
# from enum import Enum
# import requests # type: ignore
# import time
# from typing import Callable, Optional
# import litellm
# import httpx # type: ignore
# from litellm.utils import ModelResponse, Usage
# from .prompt_templates.factory import prompt_factory, custom_prompt
import json
import os
import time
import types
from enum import Enum
from typing import Callable, Optional
import httpx # type: ignore
import requests # type: ignore
import litellm
from litellm.utils import ModelResponse, Usage
from .prompt_templates.factory import custom_prompt, prompt_factory
# class TogetherAIError(Exception):
#     def __init__(self, status_code, message):
#         self.status_code = status_code
#         self.message = message
#         self.request = httpx.Request(
#             method="POST", url="https://api.together.xyz/inference"
#         )
#         self.response = httpx.Response(status_code=status_code, request=self.request)
#         super().__init__(
#             self.message
#         )  # Call the base class constructor with the parameters it needs
class TogetherAIError(Exception):
    def __init__(self, status_code, message):
        self.status_code = status_code
        self.message = message
        self.request = httpx.Request(
            method="POST", url="https://api.together.xyz/inference"
        )
        self.response = httpx.Response(status_code=status_code, request=self.request)
        super().__init__(
            self.message
        )  # Call the base class constructor with the parameters it needs
# class TogetherAIConfig:
#     """
#     Reference: https://docs.together.ai/reference/inference
class TogetherAIConfig:
    """
    Reference: https://docs.together.ai/reference/inference
#     The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters:
    The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters:
#     - `max_tokens` (int32, required): The maximum number of tokens to generate.
    - `max_tokens` (int32, required): The maximum number of tokens to generate.
#     - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines.
    - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines.
#     - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output.
    - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output.
#     - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text.
    - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text.
#     - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.
    - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.
#     - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.
    - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.
#     - `logprobs` (int32, optional): This parameter is not described in the prompt.
#     """
    - `logprobs` (int32, optional): This parameter is not described in the prompt.
    """
#     max_tokens: Optional[int] = None
#     stop: Optional[str] = None
#     temperature: Optional[int] = None
#     top_p: Optional[float] = None
#     top_k: Optional[int] = None
#     repetition_penalty: Optional[float] = None
#     logprobs: Optional[int] = None
    max_tokens: Optional[int] = None
    stop: Optional[str] = None
    temperature: Optional[int] = None
    top_p: Optional[float] = None
    top_k: Optional[int] = None
    repetition_penalty: Optional[float] = None
    logprobs: Optional[int] = None
#     def __init__(
#         self,
#         max_tokens: Optional[int] = None,
#         stop: Optional[str] = None,
#         temperature: Optional[int] = None,
#         top_p: Optional[float] = None,
#         top_k: Optional[int] = None,
#         repetition_penalty: Optional[float] = None,
#         logprobs: Optional[int] = None,
#     ) -> None:
#         locals_ = locals()
#         for key, value in locals_.items():
#             if key != "self" and value is not None:
#                 setattr(self.__class__, key, value)
    def __init__(
        self,
        max_tokens: Optional[int] = None,
        stop: Optional[str] = None,
        temperature: Optional[int] = None,
        top_p: Optional[float] = None,
        top_k: Optional[int] = None,
        repetition_penalty: Optional[float] = None,
        logprobs: Optional[int] = None,
    ) -> None:
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)
#     @classmethod
#     def get_config(cls):
#         return {
#             k: v
#             for k, v in cls.__dict__.items()
#             if not k.startswith("__")
#             and not isinstance(
#                 v,
#                 (
#                     types.FunctionType,
#                     types.BuiltinFunctionType,
#                     classmethod,
#                     staticmethod,
#                 ),
#             )
#             and v is not None
#         }
    @classmethod
    def get_config(cls):
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }
# def validate_environment(api_key):
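
The TogetherAIConfig docstring above documents the provider parameters. As a point of reference, a minimal sketch of how a config class like this is commonly consumed, with get_config() supplying defaults for parameters the caller did not pass; the sample values and the merge loop below are illustrative, not code from this commit:

# Illustrative only: class-level defaults fill in whatever the caller omitted.
config = TogetherAIConfig(max_tokens=256, temperature=0.7)
optional_params = {"top_p": 0.9}
for key, value in TogetherAIConfig.get_config().items():
    if key not in optional_params:
        optional_params[key] = value
# optional_params now also carries max_tokens=256 and temperature=0.7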

View file

@@ -1934,51 +1934,7 @@ def completion(
"""
Deprecated. We now do together ai calls via the openai client - https://docs.together.ai/docs/openai-api-compatibility
"""
custom_llm_provider = "together_ai"
together_ai_key = (
api_key
or litellm.togetherai_api_key
or get_secret("TOGETHER_AI_TOKEN")
or get_secret("TOGETHERAI_API_KEY")
or litellm.api_key
)
api_base = (
api_base
or litellm.api_base
or get_secret("TOGETHERAI_API_BASE")
or "https://api.together.xyz/inference"
)
custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
model_response = together_ai.completion(
model=model,
messages=messages,
api_base=api_base,
model_response=model_response,
print_verbose=print_verbose,
optional_params=optional_params,
litellm_params=litellm_params,
logger_fn=logger_fn,
encoding=encoding,
api_key=together_ai_key,
logging_obj=logging,
custom_prompt_dict=custom_prompt_dict,
)
if (
"stream_tokens" in optional_params
and optional_params["stream_tokens"] == True
):
# don't try to access stream object,
response = CustomStreamWrapper(
model_response,
model,
custom_llm_provider="together_ai",
logging_obj=logging,
)
return response
response = model_response
pass
elif custom_llm_provider == "palm":
palm_api_key = api_key or get_secret("PALM_API_KEY") or litellm.api_key
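
With the dedicated together_ai branch above reduced to a stub, Together AI requests are served through the OpenAI-compatible client path that the docstring references. A minimal caller-side sketch, assuming litellm is installed, TOGETHERAI_API_KEY is set in the environment, and an illustrative model name:

import litellm

# The "together_ai/" prefix selects the provider; the model id is only an example.
response = litellm.completion(
    model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)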
@@ -2461,10 +2417,10 @@ def completion(
            ## LOGGING
            generator = ollama.get_ollama_response(
                api_base,
                model,
                prompt,
                optional_params,
                api_base=api_base,
                model=model,
                prompt=prompt,
                optional_params=optional_params,
                logging_obj=logging,
                acompletion=acompletion,
                model_response=model_response,
@@ -2490,11 +2446,11 @@ def completion(
            )
            ## LOGGING
            generator = ollama_chat.get_ollama_response(
                api_base,
                api_key,
                model,
                messages,
                optional_params,
                api_base=api_base,
                api_key=api_key,
                model=model,
                messages=messages,
                optional_params=optional_params,
                logging_obj=logging,
                acompletion=acompletion,
                model_response=model_response,
@@ -3465,7 +3421,7 @@ def embedding(
                or api_base
                or get_secret("OLLAMA_API_BASE")
                or "http://localhost:11434"
            )
            )  # type: ignore
            if isinstance(input, str):
                input = [input]
            if not all(isinstance(item, str) for item in input):
@@ -3475,9 +3431,11 @@
                llm_provider="ollama",  # type: ignore
            )
            ollama_embeddings_fn = (
                ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
                ollama.ollama_aembeddings
                if aembedding is True
                else ollama.ollama_embeddings
            )
            response = ollama_embeddings_fn(
            response = ollama_embeddings_fn(  # type: ignore
                api_base=api_base,
                model=model,
                prompts=input,