forked from phoenix/litellm-mirror
remove print
This commit is contained in:
parent cb025a7f26
commit feb42c91a6
1 changed file with 5 additions and 6 deletions
@@ -2048,7 +2048,6 @@ def register_model(model_cost: Union[str, dict]):
         litellm.model_cost.setdefault(key, {}).update(value)
         verbose_logger.debug(f"{key} added to model cost map")
         # add new model names to provider lists
-        print(f"provider: {value.get('litellm_provider')}")
         if value.get("litellm_provider") == "openai":
             if key not in litellm.open_ai_chat_completion_models:
                 litellm.open_ai_chat_completion_models.append(key)
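For context, this hunk is the code path exercised by `litellm.register_model`: each entry is merged into the cost map and, depending on `litellm_provider`, appended to the matching provider list. A minimal usage sketch, assuming the usual cost-map keys (`litellm_provider`, `mode`, `input_cost_per_token`, `output_cost_per_token`); the model name and prices here are made up:

```python
import litellm

# Hypothetical entry; key names follow litellm's model cost map schema,
# the prices are invented for illustration.
litellm.register_model(
    {
        "my-org/custom-gpt": {
            "litellm_provider": "openai",
            "mode": "chat",
            "input_cost_per_token": 0.00001,
            "output_cost_per_token": 0.00002,
        }
    }
)

# After the loop in the hunk above runs, the entry is merged into
# litellm.model_cost and, because litellm_provider == "openai", the key is
# appended to the chat model list (debug-logged instead of printed).
assert "my-org/custom-gpt" in litellm.open_ai_chat_completion_models
```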
@@ -8127,7 +8126,7 @@ class CustomStreamWrapper:
 
         if chunk.startswith(self.complete_response):
             # Remove last_sent_chunk only if it appears at the start of the new chunk
-            chunk = chunk[len(self.complete_response) :]
+            chunk = chunk[len(self.complete_response):]
 
         self.complete_response += chunk
         return chunk
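The prefix check in this hunk guards against streams where a chunk re-sends everything emitted so far; only the slice spacing changes here. A standalone sketch of the same prefix-stripping technique, with illustrative names (`PrefixDedup` is not part of litellm):

```python
class PrefixDedup:
    """Strips the already-emitted prefix from chunks that repeat it."""

    def __init__(self) -> None:
        self.complete_response = ""

    def dedupe(self, chunk: str) -> str:
        if chunk.startswith(self.complete_response):
            # Drop the part of the chunk that was already sent downstream.
            chunk = chunk[len(self.complete_response):]
        self.complete_response += chunk
        return chunk

d = PrefixDedup()
print(d.dedupe("Hello"))         # -> 'Hello'
print(d.dedupe("Hello, world"))  # -> ', world'
```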
@@ -10125,7 +10124,7 @@ def mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
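The mock streamer slices the canned response into 3-character pieces. A quick illustration of that stride-3 slicing, using plain strings in place of `Delta`:

```python
mock_response = "Hello, world!"
# Same stride-3 slicing as mock_completion_streaming_obj above.
for i in range(0, len(mock_response), 3):
    print(repr(mock_response[i: i + 3]))
# 'Hel' 'lo,' ' wo' 'rld' '!'
```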
@@ -10134,7 +10133,7 @@ def mock_completion_streaming_obj(
                 _streaming_choice = litellm.utils.StreamingChoices(
                     index=j,
                     delta=litellm.utils.Delta(
-                        role="assistant", content=mock_response[i : i + 3]
+                        role="assistant", content=mock_response[i: i + 3]
                     ),
                 )
                 _all_choices.append(_streaming_choice)
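When `n` completions are requested, each piece is fanned out into one choice per index. A rough sketch of that fan-out, with plain dicts standing in for `litellm.utils.StreamingChoices` and `Delta`:

```python
# Plain-dict stand-ins mirroring the n-choice fan-out in the hunk above.
n = 2
piece = "Hel"
_all_choices = [
    {"index": j, "delta": {"role": "assistant", "content": piece}}
    for j in range(n)
]
print(_all_choices)
# [{'index': 0, 'delta': {...}}, {'index': 1, 'delta': {...}}]
```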
@@ -10146,7 +10145,7 @@ async def async_mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
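The async variant mirrors the sync generator chunk for chunk. A minimal runnable analogue, assuming plain string pieces instead of `ModelResponse` objects:

```python
import asyncio

# Simplified analogue of async_mock_completion_streaming_obj: an async
# generator yielding 3-character pieces of the canned response.
async def mock_stream(mock_response: str):
    for i in range(0, len(mock_response), 3):
        yield mock_response[i: i + 3]

async def main():
    async for piece in mock_stream("Hello, world!"):
        print(repr(piece))

asyncio.run(main())
```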
@@ -10155,7 +10154,7 @@ async def async_mock_completion_streaming_obj(
                 _streaming_choice = litellm.utils.StreamingChoices(
                     index=j,
                     delta=litellm.utils.Delta(
-                        role="assistant", content=mock_response[i : i + 3]
+                        role="assistant", content=mock_response[i: i + 3]
                     ),
                 )
                 _all_choices.append(_streaming_choice)