Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 02:34:29 +00:00)
(Improvements) use /openai/ pass through with OpenAI Ruby for Assistants API (#8884)

* add ruby assistants testing
* _join_url_paths
* run ruby tests on ci/cd
* TestBaseOpenAIPassThroughHandler
* _join_url_paths
* fix _join_url_paths
* Install Ruby and Bundler
* Install Ruby and Bundler
parent 378e3d9e4d
commit 51a6a219cd

6 changed files with 219 additions and 58 deletions
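In practice this means an OpenAI SDK client can point its base URL at the proxy's /openai pass-through route and use the Assistants API directly; the proxy now inserts the /v1 segment and the OpenAI-Beta header itself. A minimal sketch in Python (the base URL and key below are the test values used by the Ruby spec in this commit, not production settings):

from openai import OpenAI

# Assumed from this commit's test setup: a local LiteLLM proxy on port 4000
# with master key "sk-1234"; swap in your own deployment values.
client = OpenAI(
    api_key="sk-1234",
    base_url="http://0.0.0.0:4000/openai",
)

# Routed through the proxy to https://api.openai.com/v1/assistants, with the
# "OpenAI-Beta: assistants=v2" header injected when the caller omits it.
assistant = client.beta.assistants.create(
    name="Math Tutor",
    instructions="You are a personal math tutor. Write and run code to answer math questions.",
    tools=[{"type": "code_interpreter"}],
    model="gpt-4o",
)
print(assistant.id)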
@@ -1982,6 +1982,37 @@ jobs:
       - run:
           name: Wait for app to be ready
           command: dockerize -wait http://localhost:4000 -timeout 5m
+      # Add Ruby installation and testing before the existing Node.js and Python tests
+      - run:
+          name: Install Ruby and Bundler
+          command: |
+            # Import GPG keys first
+            gpg --keyserver hkp://keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB || {
+              curl -sSL https://rvm.io/mpapis.asc | gpg --import -
+              curl -sSL https://rvm.io/pkuczynski.asc | gpg --import -
+            }
+
+            # Install Ruby version manager (RVM)
+            curl -sSL https://get.rvm.io | bash -s stable
+
+            # Source RVM from the correct location
+            source $HOME/.rvm/scripts/rvm
+
+            # Install Ruby 3.2.2
+            rvm install 3.2.2
+            rvm use 3.2.2 --default
+
+            # Install latest Bundler
+            gem install bundler
+
+      - run:
+          name: Run Ruby tests
+          command: |
+            source $HOME/.rvm/scripts/rvm
+            cd tests/pass_through_tests/ruby_passthrough_tests
+            bundle install
+            bundle exec rspec
+          no_output_timeout: 30m
       # New steps to run Node.js test
       - run:
           name: Install Node.js
@@ -398,7 +398,7 @@ async def azure_proxy_route(
     )
     # Add or update query parameters
     azure_api_key = passthrough_endpoint_router.get_credentials(
-        custom_llm_provider="azure",
+        custom_llm_provider=litellm.LlmProviders.AZURE.value,
         region_name=None,
     )
     if azure_api_key is None:
@@ -413,6 +413,7 @@ async def azure_proxy_route(
         user_api_key_dict=user_api_key_dict,
         base_target_url=base_target_url,
         api_key=azure_api_key,
+        custom_llm_provider=litellm.LlmProviders.AZURE,
     )
@@ -435,7 +436,7 @@ async def openai_proxy_route(
     base_target_url = "https://api.openai.com/"
     # Add or update query parameters
    openai_api_key = passthrough_endpoint_router.get_credentials(
-        custom_llm_provider="openai",
+        custom_llm_provider=litellm.LlmProviders.OPENAI.value,
        region_name=None,
    )
    if openai_api_key is None:
@@ -450,6 +451,7 @@ async def openai_proxy_route(
         user_api_key_dict=user_api_key_dict,
         base_target_url=base_target_url,
         api_key=openai_api_key,
+        custom_llm_provider=litellm.LlmProviders.OPENAI,
     )
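The two route changes above are the same refactor: the hard-coded "azure"/"openai" strings become `litellm.LlmProviders` enum members. Assuming `LlmProviders` is a string-valued enum (which the diff implies, since `.value` is passed where a string was expected), `get_credentials` sees identical input and behavior is unchanged:

import litellm

# Assumption: LlmProviders is a str enum whose .value matches the old literals.
assert litellm.LlmProviders.AZURE.value == "azure"
assert litellm.LlmProviders.OPENAI.value == "openai"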
@@ -462,24 +464,19 @@ class BaseOpenAIPassThroughHandler:
         user_api_key_dict: UserAPIKeyAuth,
         base_target_url: str,
         api_key: str,
+        custom_llm_provider: litellm.LlmProviders,
     ):
         encoded_endpoint = httpx.URL(endpoint).path

         # Ensure endpoint starts with '/' for proper URL construction
         if not encoded_endpoint.startswith("/"):
             encoded_endpoint = "/" + encoded_endpoint

-        # Ensure base_target_url is properly formatted for OpenAI
-        base_target_url = (
-            BaseOpenAIPassThroughHandler._append_v1_to_openai_passthrough_url(
-                base_target_url
-            )
-        )
-
         # Construct the full target URL by properly joining the base URL and endpoint path
         base_url = httpx.URL(base_target_url)
         updated_url = BaseOpenAIPassThroughHandler._join_url_paths(
-            base_url, encoded_endpoint
+            base_url=base_url,
+            path=encoded_endpoint,
+            custom_llm_provider=custom_llm_provider,
         )

         ## check for streaming
@@ -505,21 +502,15 @@ class BaseOpenAIPassThroughHandler:

         return received_value

-    @staticmethod
-    def _append_v1_to_openai_passthrough_url(base_url: str) -> str:
-        """
-        Appends the /v1 path to the OpenAI base URL if it's the OpenAI API URL
-        """
-        if base_url.rstrip("/") == "https://api.openai.com":
-            return "https://api.openai.com/v1"
-        return base_url
-
     @staticmethod
     def _append_openai_beta_header(headers: dict, request: Request) -> dict:
         """
         Appends the OpenAI-Beta header to the headers if the request is an OpenAI Assistants API request
         """
-        if RouteChecks._is_assistants_api_request(request) is True:
+        if (
+            RouteChecks._is_assistants_api_request(request) is True
+            and "OpenAI-Beta" not in headers
+        ):
             headers["OpenAI-Beta"] = "assistants=v2"
         return headers
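The reworked guard only injects the header when the caller has not already set one. A minimal stand-alone sketch of that behavior (a plain bool stands in for `RouteChecks._is_assistants_api_request`, so this is illustrative, not the litellm API):

def append_openai_beta_header(headers: dict, is_assistants_request: bool) -> dict:
    # Only add the header for Assistants API calls, and never clobber a
    # value the caller already sent.
    if is_assistants_request and "OpenAI-Beta" not in headers:
        headers["OpenAI-Beta"] = "assistants=v2"
    return headers

print(append_openai_beta_header({}, True))
# {'OpenAI-Beta': 'assistants=v2'}
print(append_openai_beta_header({"OpenAI-Beta": "assistants=v1"}, True))
# {'OpenAI-Beta': 'assistants=v1'}  -- the caller's value wins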
@@ -535,17 +526,31 @@ class BaseOpenAIPassThroughHandler:
         )

     @staticmethod
-    def _join_url_paths(base_url: httpx.URL, path: str) -> httpx.URL:
+    def _join_url_paths(
+        base_url: httpx.URL, path: str, custom_llm_provider: litellm.LlmProviders
+    ) -> str:
         """
         Properly joins a base URL with a path, preserving any existing path in the base URL.
         """
         # Join paths correctly by removing trailing/leading slashes as needed
         if not base_url.path or base_url.path == "/":
             # If base URL has no path, just use the new path
-            return base_url.copy_with(path=path)
+            joined_path_str = str(base_url.copy_with(path=path))
+        else:
+            # Otherwise, combine the paths
+            base_path = base_url.path.rstrip("/")
+            clean_path = path.lstrip("/")
+            full_path = f"{base_path}/{clean_path}"
+            joined_path_str = str(base_url.copy_with(path=full_path))

-        # Join paths correctly by removing trailing/leading slashes as needed
-        base_path = base_url.path.rstrip("/")
-        clean_path = path.lstrip("/")
-        full_path = f"{base_path}/{clean_path}"
+        # Apply OpenAI-specific path handling for both branches
+        if (
+            custom_llm_provider == litellm.LlmProviders.OPENAI
+            and "/v1/" not in joined_path_str
+        ):
+            # Insert v1 after api.openai.com for OpenAI requests
+            joined_path_str = joined_path_str.replace(
+                "api.openai.com/", "api.openai.com/v1/"
+            )

-        return base_url.copy_with(path=full_path)
+        return joined_path_str
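The upshot of the new `_join_url_paths`: /v1 is no longer bolted onto the base URL up front; it is inserted after joining, and only when the joined OpenAI URL lacks it. A trimmed-down sketch of the same logic outside the class (a plain "openai" string stands in for the enum member):

import httpx

def join_url_paths(base_url: httpx.URL, path: str, provider: str) -> str:
    # Same joining rules as the handler above, condensed for illustration.
    if not base_url.path or base_url.path == "/":
        joined = str(base_url.copy_with(path=path))
    else:
        full_path = f"{base_url.path.rstrip('/')}/{path.lstrip('/')}"
        joined = str(base_url.copy_with(path=full_path))
    # OpenAI-specific: ensure exactly one /v1/ segment.
    if provider == "openai" and "/v1/" not in joined:
        joined = joined.replace("api.openai.com/", "api.openai.com/v1/")
    return joined

print(join_url_paths(httpx.URL("https://api.openai.com/"), "/assistants", "openai"))
# https://api.openai.com/v1/assistants
print(join_url_paths(httpx.URL("https://api.openai.com/v1"), "/assistants", "openai"))
# https://api.openai.com/v1/assistants  -- /v1 is not duplicated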
@@ -12,6 +12,7 @@ sys.path.insert(
     0, os.path.abspath("../../../..")
 )  # Adds the parent directory to the system path

+import litellm
 from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (
     BaseOpenAIPassThroughHandler,
     RouteChecks,
@@ -21,60 +22,42 @@ from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import (

 class TestBaseOpenAIPassThroughHandler:

-    def test_append_v1_to_openai_passthrough_url(self):
-        print("\nTesting _append_v1_to_openai_passthrough_url method...")
-
-        # Test with OpenAI API URL
-        result1 = BaseOpenAIPassThroughHandler._append_v1_to_openai_passthrough_url(
-            "https://api.openai.com"
-        )
-        print(f"OpenAI URL: 'https://api.openai.com' → '{result1}'")
-        assert result1 == "https://api.openai.com/v1"
-
-        # Test with OpenAI API URL with trailing slash
-        result2 = BaseOpenAIPassThroughHandler._append_v1_to_openai_passthrough_url(
-            "https://api.openai.com/"
-        )
-        print(
-            f"OpenAI URL with trailing slash: 'https://api.openai.com/' → '{result2}'"
-        )
-        assert result2 == "https://api.openai.com/v1"
-
-        # Test with non-OpenAI URL
-        result3 = BaseOpenAIPassThroughHandler._append_v1_to_openai_passthrough_url(
-            "https://api.anthropic.com"
-        )
-        print(f"Non-OpenAI URL: 'https://api.anthropic.com' → '{result3}'")
-        assert result3 == "https://api.anthropic.com"
-
     def test_join_url_paths(self):
         print("\nTesting _join_url_paths method...")

         # Test joining base URL with no path and a path
         base_url = httpx.URL("https://api.example.com")
         path = "/v1/chat/completions"
-        result = BaseOpenAIPassThroughHandler._join_url_paths(base_url, path)
+        result = BaseOpenAIPassThroughHandler._join_url_paths(
+            base_url, path, litellm.LlmProviders.OPENAI.value
+        )
         print(f"Base URL with no path: '{base_url}' + '{path}' → '{result}'")
         assert str(result) == "https://api.example.com/v1/chat/completions"

         # Test joining base URL with path and another path
         base_url = httpx.URL("https://api.example.com/v1")
         path = "/chat/completions"
-        result = BaseOpenAIPassThroughHandler._join_url_paths(base_url, path)
+        result = BaseOpenAIPassThroughHandler._join_url_paths(
+            base_url, path, litellm.LlmProviders.OPENAI.value
+        )
         print(f"Base URL with path: '{base_url}' + '{path}' → '{result}'")
         assert str(result) == "https://api.example.com/v1/chat/completions"

         # Test with path not starting with slash
         base_url = httpx.URL("https://api.example.com/v1")
         path = "chat/completions"
-        result = BaseOpenAIPassThroughHandler._join_url_paths(base_url, path)
+        result = BaseOpenAIPassThroughHandler._join_url_paths(
+            base_url, path, litellm.LlmProviders.OPENAI.value
+        )
         print(f"Path without leading slash: '{base_url}' + '{path}' → '{result}'")
         assert str(result) == "https://api.example.com/v1/chat/completions"

         # Test with base URL having trailing slash
         base_url = httpx.URL("https://api.example.com/v1/")
         path = "/chat/completions"
-        result = BaseOpenAIPassThroughHandler._join_url_paths(base_url, path)
+        result = BaseOpenAIPassThroughHandler._join_url_paths(
+            base_url, path, litellm.LlmProviders.OPENAI.value
+        )
         print(f"Base URL with trailing slash: '{base_url}' + '{path}' → '{result}'")
         assert str(result) == "https://api.example.com/v1/chat/completions"
|
@ -170,6 +153,7 @@ class TestBaseOpenAIPassThroughHandler:
|
|||
user_api_key_dict=mock_user_api_key_dict,
|
||||
base_target_url="https://api.openai.com",
|
||||
api_key="test_api_key",
|
||||
custom_llm_provider=litellm.LlmProviders.OPENAI.value,
|
||||
)
|
||||
|
||||
# Verify the result
|
||||
|
4  tests/pass_through_tests/ruby_passthrough_tests/Gemfile  (new file)
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+
+gem 'rspec'
+gem 'ruby-openai'
42  tests/pass_through_tests/ruby_passthrough_tests/Gemfile.lock  (new file)
@@ -0,0 +1,42 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    base64 (0.2.0)
+    diff-lcs (1.6.0)
+    event_stream_parser (1.0.0)
+    faraday (2.8.1)
+      base64
+      faraday-net_http (>= 2.0, < 3.1)
+      ruby2_keywords (>= 0.0.4)
+    faraday-multipart (1.1.0)
+      multipart-post (~> 2.0)
+    faraday-net_http (3.0.2)
+    multipart-post (2.4.1)
+    rspec (3.13.0)
+      rspec-core (~> 3.13.0)
+      rspec-expectations (~> 3.13.0)
+      rspec-mocks (~> 3.13.0)
+    rspec-core (3.13.3)
+      rspec-support (~> 3.13.0)
+    rspec-expectations (3.13.3)
+      diff-lcs (>= 1.2.0, < 2.0)
+      rspec-support (~> 3.13.0)
+    rspec-mocks (3.13.2)
+      diff-lcs (>= 1.2.0, < 2.0)
+      rspec-support (~> 3.13.0)
+    rspec-support (3.13.2)
+    ruby-openai (7.4.0)
+      event_stream_parser (>= 0.3.0, < 2.0.0)
+      faraday (>= 1)
+      faraday-multipart (>= 1)
+    ruby2_keywords (0.0.5)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  rspec
+  ruby-openai
+
+BUNDLED WITH
+   2.6.5
@@ -0,0 +1,95 @@
+require 'openai'
+require 'rspec'
+
+RSpec.describe 'OpenAI Assistants Passthrough' do
+  let(:client) do
+    OpenAI::Client.new(
+      access_token: "sk-1234",
+      uri_base: "http://0.0.0.0:4000/openai"
+    )
+  end
+
+
+  it 'performs basic assistant operations' do
+    assistant = client.assistants.create(
+      parameters: {
+        name: "Math Tutor",
+        instructions: "You are a personal math tutor. Write and run code to answer math questions.",
+        tools: [{ type: "code_interpreter" }],
+        model: "gpt-4o"
+      }
+    )
+    expect(assistant).to include('id')
+    expect(assistant['name']).to eq("Math Tutor")
+
+    assistants_list = client.assistants.list
+    expect(assistants_list['data']).to be_an(Array)
+    expect(assistants_list['data']).to include(include('id' => assistant['id']))
+
+    retrieved_assistant = client.assistants.retrieve(id: assistant['id'])
+    expect(retrieved_assistant).to eq(assistant)
+
+    deleted_assistant = client.assistants.delete(id: assistant['id'])
+    expect(deleted_assistant['deleted']).to be true
+    expect(deleted_assistant['id']).to eq(assistant['id'])
+  end
+
+  it 'performs streaming assistant operations' do
+    puts "\n=== Starting Streaming Assistant Test ==="
+
+    assistant = client.assistants.create(
+      parameters: {
+        name: "Math Tutor",
+        instructions: "You are a personal math tutor. Write and run code to answer math questions.",
+        tools: [{ type: "code_interpreter" }],
+        model: "gpt-4o"
+      }
+    )
+    puts "Created assistant: #{assistant['id']}"
+    expect(assistant).to include('id')
+
+    thread = client.threads.create
+    puts "Created thread: #{thread['id']}"
+    expect(thread).to include('id')
+
+    message = client.messages.create(
+      thread_id: thread['id'],
+      parameters: {
+        role: "user",
+        content: "I need to solve the equation `3x + 11 = 14`. Can you help me?"
+      }
+    )
+    puts "Created message: #{message['id']}"
+    puts "User question: #{message['content']}"
+    expect(message).to include('id')
+    expect(message['role']).to eq('user')
+
+    puts "\nStarting streaming response:"
+    puts "------------------------"
+    run = client.runs.create(
+      thread_id: thread['id'],
+      parameters: {
+        assistant_id: assistant['id'],
+        max_prompt_tokens: 256,
+        max_completion_tokens: 16,
+        stream: proc do |chunk, _bytesize|
+          puts "Received chunk: #{chunk.inspect}" # Debug: print the raw chunk
+          if chunk["object"] == "thread.message.delta"
+            content = chunk.dig("delta", "content")
+            puts "Content: #{content.inspect}" # Debug: print the content structure
+            if content && content[0] && content[0]["text"]
+              print content[0]["text"]["value"]
+              $stdout.flush # Ensure output is printed immediately
+            end
+          end
+        end
+      }
+    )
+    puts "\n------------------------"
+    puts "Run completed: #{run['id']}"
+    expect(run).not_to be_nil
+  ensure
+    client.assistants.delete(id: assistant['id']) if assistant && assistant['id']
+    client.threads.delete(id: thread['id']) if thread && thread['id']
+  end
+end