Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-03 19:57:35 +00:00
Fix pre-commit after rebasing
Commit bb2eb33fc3 (parent 96bd6c1836)
7 changed files with 15 additions and 8722 deletions
@@ -14,4 +14,4 @@ Agents

 APIs for creating and interacting with agentic systems.

 This section contains documentation for all available providers for the **agents** API.
@@ -4992,7 +4992,7 @@
       "properties": {
         "model": {
           "type": "string",
-          "description": "The identifier of the reranking model to use."
+          "description": "The identifier of the reranking model to use. The model must be a reranking model registered with Llama Stack and available via the /models endpoint."
         },
         "query": {
           "oneOf": [
@@ -3657,7 +3657,8 @@ components:
         model:
           type: string
           description: >-
-            The identifier of the reranking model to use.
+            The identifier of the reranking model to use. The model must be a reranking
+            model registered with Llama Stack and available via the /models endpoint.
         query:
           oneOf:
             - type: string
docs/static/llama-stack-spec.html (vendored, 4995 lines changed): file diff suppressed because it is too large.
docs/static/llama-stack-spec.yaml (vendored, 3725 lines changed): file diff suppressed because it is too large.
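Both spec hunks expand the same description: callers should confirm that the chosen reranking model is registered and visible via the /models endpoint before using it. A minimal sketch of that check with the llama-stack-client Python SDK follows; the base URL, the model identifier, and the error handling are illustrative assumptions, not part of this commit.

# Sketch: confirm a reranking model is registered before using it.
# Assumes a Llama Stack server at localhost:8321; "my-reranker" is a
# hypothetical identifier used only for illustration.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# client.models.list() returns the models exposed by the /models endpoint.
registered = {m.identifier for m in client.models.list()}
rerank_model = "my-reranker"  # hypothetical identifier

if rerank_model not in registered:
    raise RuntimeError(f"{rerank_model} is not available via the /models endpoint")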
@@ -201,7 +201,6 @@ class InferenceRouter(Inference):
             max_num_results=max_num_results,
         )

-
     async def openai_completion(
         self,
         model: str,
@@ -181,7 +181,14 @@ def model_providers(llama_stack_client):

 @pytest.fixture(autouse=True)
 def skip_if_no_model(request):
-    model_fixtures = ["text_model_id", "vision_model_id", "embedding_model_id", "judge_model_id", "shield_id", "rerank_model_id"]
+    model_fixtures = [
+        "text_model_id",
+        "vision_model_id",
+        "embedding_model_id",
+        "judge_model_id",
+        "shield_id",
+        "rerank_model_id",
+    ]
     test_func = request.node.function

     actual_params = inspect.signature(test_func).parameters.keys()
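The reformatted model_fixtures list feeds an autouse fixture that skips a test when one of the model fixtures it requests has no value; the hunk's context lines show it inspecting the test function's signature. Below is a rough, self-contained sketch of that pattern, assuming a skip-on-None rule and pytest's request.getfixturevalue; the repository's actual conftest may differ.

import inspect

import pytest

# Fixture names taken from the hunk above.
MODEL_FIXTURES = [
    "text_model_id",
    "vision_model_id",
    "embedding_model_id",
    "judge_model_id",
    "shield_id",
    "rerank_model_id",
]


@pytest.fixture(autouse=True)
def skip_if_no_model(request):
    # Only consider model fixtures that the test function actually declares.
    test_func = request.node.function
    actual_params = inspect.signature(test_func).parameters.keys()
    for name in MODEL_FIXTURES:
        # Assumption: an unset model fixture resolves to None and the test is skipped.
        if name in actual_params and request.getfixturevalue(name) is None:
            pytest.skip(f"{name} is empty, skipping test")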