Francisco Arceo
e7d21e1ee3
feat: Add support for Conversations in Responses API (#3743)
...
# What does this PR do?
This PR adds support for Conversations in Responses.
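In short, a response can now be attached to a conversation by passing the conversation ID to `responses.create`. A minimal sketch, distilled from the test script below (assumes a Llama Stack server running locally on port 8321):

```python
from openai import OpenAI

# Point the OpenAI client at a locally running Llama Stack server.
client = OpenAI(base_url="http://localhost:8321/v1/", api_key="none")

conversation = client.conversations.create()
response = client.responses.create(
    model="gpt-4.1",
    input=[{"role": "user", "content": "Hello!"}],
    conversation=conversation.id,  # new: ties the response to the conversation
)
```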
## Test Plan
- Unit tests
- Integration tests

<details>
<summary>Manual testing with this script (click to expand)</summary>

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/", api_key="none")


def test_conversation_create():
    print("Testing conversation create...")
    conversation = client.conversations.create(
        metadata={"topic": "demo"},
        items=[{"type": "message", "role": "user", "content": "Hello!"}],
    )
    print(f"Created: {conversation}")
    return conversation


def test_conversation_retrieve(conv_id):
    print(f"Testing conversation retrieve for {conv_id}...")
    retrieved = client.conversations.retrieve(conv_id)
    print(f"Retrieved: {retrieved}")
    return retrieved


def test_conversation_update(conv_id):
    print(f"Testing conversation update for {conv_id}...")
    updated = client.conversations.update(
        conv_id,
        metadata={"topic": "project-x"},
    )
    print(f"Updated: {updated}")
    return updated


def test_conversation_delete(conv_id):
    print(f"Testing conversation delete for {conv_id}...")
    deleted = client.conversations.delete(conv_id)
    print(f"Deleted: {deleted}")
    return deleted


def test_conversation_items_create(conv_id):
    print(f"Testing conversation items create for {conv_id}...")
    items = client.conversations.items.create(
        conv_id,
        items=[
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Hello!"}],
            },
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "How are you?"}],
            },
        ],
    )
    print(f"Items created: {items}")
    return items


def test_conversation_items_list(conv_id):
    print(f"Testing conversation items list for {conv_id}...")
    items = client.conversations.items.list(conv_id, limit=10)
    print(f"Items list: {items}")
    return items


def test_conversation_item_retrieve(conv_id, item_id):
    print(f"Testing conversation item retrieve for {conv_id}/{item_id}...")
    item = client.conversations.items.retrieve(conversation_id=conv_id, item_id=item_id)
    print(f"Item retrieved: {item}")
    return item


def test_conversation_item_delete(conv_id, item_id):
    print(f"Testing conversation item delete for {conv_id}/{item_id}...")
    deleted = client.conversations.items.delete(conversation_id=conv_id, item_id=item_id)
    print(f"Item deleted: {deleted}")
    return deleted


def test_conversation_responses_create():
    print("\nTesting conversation create for a responses example...")
    conversation = client.conversations.create()
    print(f"Created: {conversation}")
    response = client.responses.create(
        model="gpt-4.1",
        input=[{"role": "user", "content": "What are the 5 Ds of dodgeball?"}],
        conversation=conversation.id,
    )
    print(f"Created response: {response} for conversation {conversation.id}")
    return response, conversation


def test_conversations_responses_create_followup(
    conversation,
    content="Repeat what you just said but add 'this is my second time saying this'",
):
    print(f"Using: {conversation.id}")
    response = client.responses.create(
        model="gpt-4.1",
        input=[{"role": "user", "content": content}],
        conversation=conversation.id,
    )
    print(f"Created response: {response} for conversation {conversation.id}")
    conv_items = client.conversations.items.list(conversation.id)
    print(f"\nRetrieving list of items for conversation {conversation.id}:")
    print(conv_items.model_dump_json(indent=2))


def test_response_with_fake_conv_id():
    fake_conv_id = "conv_zzzzzzzzz5dc81908289d62779d2ac510a2b0b602ef00a44"
    print(f"Using {fake_conv_id}")
    try:
        response = client.responses.create(
            model="gpt-4.1",
            input=[{"role": "user", "content": "say hello"}],
            conversation=fake_conv_id,
        )
        print(f"Created response: {response} for conversation {fake_conv_id}")
    except Exception as e:
        print(f"failed to create response for conversation {fake_conv_id} with error {e}")


def main():
    print("Testing OpenAI Conversations API...")

    # Create conversation
    conversation = test_conversation_create()
    conv_id = conversation.id

    # Retrieve conversation
    test_conversation_retrieve(conv_id)

    # Update conversation
    test_conversation_update(conv_id)

    # Create items
    items = test_conversation_items_create(conv_id)

    # List items
    items_list = test_conversation_items_list(conv_id)

    # Retrieve, then delete, a specific item
    if items_list.data:
        item_id = items_list.data[0].id
        test_conversation_item_retrieve(conv_id, item_id)
        test_conversation_item_delete(conv_id, item_id)

    # Delete conversation
    test_conversation_delete(conv_id)

    response, conversation2 = test_conversation_responses_create()

    print("\ntesting response retrieval")
    test_conversation_retrieve(conversation2.id)

    print("\ntesting responses follow up")
    test_conversations_responses_create_followup(conversation2)

    print("\ntesting responses follow up x2!")
    test_conversations_responses_create_followup(
        conversation2,
        content="Repeat what you just said but add 'this is my third time saying this'",
    )

    test_response_with_fake_conv_id()
    print("All tests completed!")


if __name__ == "__main__":
    main()
```
</details>
---------
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
2025-10-10 11:57:40 -07:00
Chacksu
fffdab4f5c
fix: Dell distribution missing kvstore (#3113)
...
# What does this PR do?
- Added a kvstore config to the ChromaDB provider config for the Dell
distribution, mirroring the [starter
config](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/distributions/starter/run.yaml#L110-L112)
- Fixed an
[error](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/inference/_generated/_async_client.py#L3424-L3425)
raised while fetching endpoint information by passing `hf-inference` as the
provider to the `AsyncInferenceClient` (TGI client); see the sketch below.
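For reference, a minimal sketch of the client-side fix (assumptions: the `model` URL is a placeholder taken from the test plan below, and `huggingface_hub` is recent enough to accept the `provider` argument; the actual change lives in the TGI inference provider):

```python
from huggingface_hub import AsyncInferenceClient

# Sketch only: naming the provider explicitly keeps huggingface_hub from
# erroring out while resolving endpoint info for a plain TGI URL.
client = AsyncInferenceClient(
    model="http://0.0.0.0:8181",  # placeholder TGI endpoint (DEH_URL below)
    provider="hf-inference",
)
```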
## Test Plan
```bash
export INFERENCE_PORT=8181
export DEH_URL=http://0.0.0.0:$INFERENCE_PORT
export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
export CHROMADB_HOST=localhost
export CHROMADB_PORT=8000
export CHROMA_URL=http://$CHROMADB_HOST:$CHROMADB_PORT
export CUDA_VISIBLE_DEVICES=0
export LLAMA_STACK_PORT=8321
export HF_TOKEN=[redacted]
# TGI Server
docker run --rm -it \
--pull always \
--network host \
-v $HOME/.cache/huggingface:/data \
-e HF_TOKEN=$HF_TOKEN \
-e PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True \
-p $INFERENCE_PORT:$INFERENCE_PORT \
--gpus all \
ghcr.io/huggingface/text-generation-inference:latest \
--dtype float16 \
--usage-stats off \
--sharded false \
--cuda-memory-fraction 0.8 \
--model-id meta-llama/Llama-3.2-3B-Instruct \
--port $INFERENCE_PORT \
--hostname 0.0.0.0
# ChromaDB
docker run --rm -it \
--name chromadb \
--net=host -p 8000:8000 \
-v ~/chroma:/chroma/chroma \
-e IS_PERSISTENT=TRUE \
-e ANONYMIZED_TELEMETRY=FALSE \
chromadb/chroma:latest
# Llama Stack
llama stack run dell \
--port $LLAMA_STACK_PORT \
--env INFERENCE_MODEL=$INFERENCE_MODEL \
--env DEH_URL=$DEH_URL \
--env CHROMA_URL=$CHROMA_URL
```
---------
Co-authored-by: Connor Hack <connorhack@fb.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
2025-08-13 06:18:25 -07:00