chore: unpublish /inference/chat-completion (#3609)

# What does this PR do?

BREAKING CHANGE: removes the /inference/chat-completion route and updates the relevant
documentation and notebooks to use the OpenAI-compatible `client.chat.completions.create` instead.
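
A minimal before/after sketch of the migration (the client setup and `MODEL_NAME` below are assumptions for illustration, mirroring the notebook examples in this diff):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed: locally running Llama Stack server
MODEL_NAME = "meta-llama/Llama-3.2-3B-Instruct"              # assumed: any model registered with the server

# Before (removed by this PR): client.inference.chat_completion(..., model_id=MODEL_NAME)
# After (OpenAI-compatible route):
response = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a friendly assistant."},
        {"role": "user", "content": "Write a two-sentence poem about llama."},
    ],
    model=MODEL_NAME,
)
print(response.choices[0].message.content)
```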

## Test Plan

🤷
Matthew Farrellee 2025-09-30 14:00:42 -04:00 committed by GitHub
parent 62e302613f
commit cb33f45c11
23 changed files with 1448 additions and 2137 deletions


@@ -102,15 +102,15 @@
 }
 ],
 "source": [
-"response = client.inference.chat_completion(\n",
+"response = client.chat.completions.create(\n",
 "    messages=[\n",
 "        {\"role\": \"system\", \"content\": \"You are a friendly assistant.\"},\n",
 "        {\"role\": \"user\", \"content\": \"Write a two-sentence poem about llama.\"}\n",
 "    ],\n",
-"    model_id=MODEL_NAME,\n",
+"    model=MODEL_NAME,\n",
 ")\n",
 "\n",
-"print(response.completion_message.content)"
+"print(response.choices[0].message.content)"
 ]
 },
 {
@@ -141,14 +141,14 @@
 }
 ],
 "source": [
-"response = client.inference.chat_completion(\n",
+"response = client.chat.completions.create(\n",
 "    messages=[\n",
 "        {\"role\": \"system\", \"content\": \"You are shakespeare.\"},\n",
 "        {\"role\": \"user\", \"content\": \"Write a two-sentence poem about llama.\"}\n",
 "    ],\n",
-"    model_id=MODEL_NAME, # Changed from model to model_id\n",
+"    model=MODEL_NAME,\n",
 ")\n",
-"print(response.completion_message.content)"
+"print(response.choices[0].message.content)"
 ]
 },
 {
@@ -218,11 +218,11 @@
 "            break\n",
 "\n",
 "        message = {\"role\": \"user\", \"content\": user_input}\n",
-"        response = client.inference.chat_completion(\n",
+"        response = client.chat.completions.create(\n",
 "            messages=[message],\n",
-"            model_id=MODEL_NAME\n",
+"            model=MODEL_NAME\n",
 "        )\n",
-"        cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
+"        cprint(f'> Response: {response.choices[0].message.content}', 'cyan')\n",
 "\n",
 "# Run the chat loop in a Jupyter Notebook cell using await\n",
 "await chat_loop()\n",
@@ -288,16 +288,16 @@
 "        user_message = {\"role\": \"user\", \"content\": user_input}\n",
 "        conversation_history.append(user_message)\n",
 "\n",
-"        response = client.inference.chat_completion(\n",
+"        response = client.chat.completions.create(\n",
 "            messages=conversation_history,\n",
-"            model_id=MODEL_NAME,\n",
+"            model=MODEL_NAME,\n",
 "        )\n",
-"        cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
+"        cprint(f'> Response: {response.choices[0].message.content}', 'cyan')\n",
 "\n",
 "        # Append the assistant message with all required fields\n",
 "        assistant_message = {\n",
 "            \"role\": \"user\",\n",
-"            \"content\": response.completion_message.content,\n",
+"            \"content\": response.choices[0].message.content,\n",
 "            # Add any additional required fields here if necessary\n",
 "        }\n",
 "        conversation_history.append(assistant_message)\n",
@@ -349,14 +349,14 @@
 "    }\n",
 "    cprint(f'User> {message[\"content\"]}', 'green')\n",
 "\n",
-"    response = client.inference.chat_completion(\n",
+"    response = client.chat.completions.create(\n",
 "        messages=[message],\n",
-"        model_id=MODEL_NAME,\n",
+"        model=MODEL_NAME,\n",
 "        stream=stream,\n",
 "    )\n",
 "\n",
 "    if not stream:\n",
-"        cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
+"        cprint(f'> Response: {response.choices[0].message.content}', 'cyan')\n",
 "    else:\n",
 "        for log in EventLogger().log(response):\n",
 "            log.print()\n",