diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 974c01688..3907a3f34 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 84f884b59..bf977f9fd 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/utils.py b/litellm/utils.py
index bfd2437f7..12510e01a 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -8,6 +8,7 @@ import tiktoken
 import uuid
 import aiohttp
 from tokenizers import Tokenizer
+import pkg_resources
 encoding = tiktoken.get_encoding("cl100k_base")
 import importlib.metadata
 from .integrations.traceloop import TraceloopLogger
@@ -723,7 +724,8 @@ def token_counter(model="", text=None, messages: Optional[List] = None):
     # anthropic
     elif model in litellm.anthropic_models:
         # Read the JSON file
-        with open('../llms/tokenizers/anthropic_tokenizer.json', 'r') as f:
+        filename = pkg_resources.resource_filename(__name__, 'llms/tokenizers/anthropic_tokenizer.json')
+        with open(filename, 'r') as f:
             json_data = json.load(f)
             # Decode the JSON data from utf-8
             json_data_decoded = json.dumps(json_data, ensure_ascii=False)
diff --git a/pyproject.toml b/pyproject.toml
index fac1e2b84..5af5ffcec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.735"
+version = "0.1.736"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"