Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-26 09:15:40 +00:00
chore: support default model in moderations API (#3890)
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 2s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Vector IO Integration Tests / test-matrix (push) Failing after 5s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 0s
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 5s
Python Package Build Test / build (3.12) (push) Failing after 1s
Python Package Build Test / build (3.13) (push) Failing after 2s
Test Llama Stack Build / build-single-provider (push) Failing after 3s
Test Llama Stack Build / generate-matrix (push) Successful in 5s
Test Llama Stack Build / build-custom-container-distribution (push) Failing after 4s
Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 3s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 7s
Test External API and Providers / test-external (venv) (push) Failing after 4s
API Conformance Tests / check-schema-compatibility (push) Successful in 12s
Unit Tests / unit-tests (3.13) (push) Failing after 4s
Test Llama Stack Build / build (push) Failing after 3s
Unit Tests / unit-tests (3.12) (push) Failing after 5s
UI Tests / ui-tests (22) (push) Successful in 41s
Pre-commit / pre-commit (push) Successful in 1m33s
# What does this PR do?

https://platform.openai.com/docs/api-reference/moderations supports an optional `model` parameter. This PR adds support for calling the moderations API with `model=None` when a default shield id is provided via the safety config.

## Test Plan

Added tests.

Manual test:

```
> SAFETY_MODEL='together/meta-llama/Llama-Guard-4-12B' uv run llama stack run starter
> curl http://localhost:8321/v1/moderations \
    -H "Content-Type: application/json" \
    -d '{ "input": [ "hello" ] }'
```
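For reference, a minimal Python sketch equivalent to the curl call above, assuming the same locally running stack on port 8321. The use of `requests` and the omission of the `model` field simply mirror the manual test; error handling is elided.

```python
# Minimal sketch: call the moderations endpoint without a "model" field,
# relying on the default shield configured via the safety config.
# Assumes a Llama Stack server is running locally on port 8321.
import requests

resp = requests.post(
    "http://localhost:8321/v1/moderations",
    json={"input": ["hello"]},  # no "model": the server falls back to the default shield
    timeout=30,
)
resp.raise_for_status()
print(resp.json())
```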
Parent: d12e5f0999
Commit: 9916cb3b17
23 changed files with 189 additions and 36 deletions
docs/static/deprecated-llama-stack-spec.html (vendored): 5 changes

```diff
@@ -8185,13 +8185,12 @@
        },
        "model": {
          "type": "string",
-         "description": "The content moderation model you would like to use."
+         "description": "(Optional) The content moderation model you would like to use."
        }
      },
      "additionalProperties": false,
      "required": [
-       "input",
-       "model"
+       "input"
      ],
      "title": "RunModerationRequest"
    },
```
docs/static/deprecated-llama-stack-spec.yaml (vendored): 3 changes

```diff
@@ -6104,11 +6104,10 @@ components:
        model:
          type: string
          description: >-
-           The content moderation model you would like to use.
+           (Optional) The content moderation model you would like to use.
      additionalProperties: false
      required:
        - input
-       - model
      title: RunModerationRequest
    ModerationObject:
      type: object
```
docs/static/llama-stack-spec.html (vendored): 5 changes

```diff
@@ -6919,13 +6919,12 @@
        },
        "model": {
          "type": "string",
-         "description": "The content moderation model you would like to use."
+         "description": "(Optional) The content moderation model you would like to use."
        }
      },
      "additionalProperties": false,
      "required": [
-       "input",
-       "model"
+       "input"
      ],
      "title": "RunModerationRequest"
    },
```
docs/static/llama-stack-spec.yaml (vendored): 3 changes

```diff
@@ -5230,11 +5230,10 @@ components:
        model:
          type: string
          description: >-
-           The content moderation model you would like to use.
+           (Optional) The content moderation model you would like to use.
      additionalProperties: false
      required:
        - input
-       - model
      title: RunModerationRequest
    ModerationObject:
      type: object
```
docs/static/stainless-llama-stack-spec.html (vendored): 5 changes

```diff
@@ -8591,13 +8591,12 @@
        },
        "model": {
          "type": "string",
-         "description": "The content moderation model you would like to use."
+         "description": "(Optional) The content moderation model you would like to use."
        }
      },
      "additionalProperties": false,
      "required": [
-       "input",
-       "model"
+       "input"
      ],
      "title": "RunModerationRequest"
    },
```
docs/static/stainless-llama-stack-spec.yaml (vendored): 3 changes

```diff
@@ -6443,11 +6443,10 @@ components:
        model:
          type: string
          description: >-
-           The content moderation model you would like to use.
+           (Optional) The content moderation model you would like to use.
      additionalProperties: false
      required:
        - input
-       - model
      title: RunModerationRequest
    ModerationObject:
      type: object
```
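The spec changes above only mark `model` as optional in `RunModerationRequest`; the runtime behavior described in the PR body (fall back to a default shield when `model` is omitted) could be sketched as below. All names here, including `resolve_moderation_model` and the `default_shield_id` config field, are illustrative assumptions rather than the actual llama-stack implementation.

```python
# Illustrative sketch only: how a server might resolve the moderation model
# when the request omits "model". Names are assumptions, not llama-stack code.
def resolve_moderation_model(requested_model: str | None, safety_config) -> str:
    if requested_model is not None:
        return requested_model
    # Hypothetical config field holding the default shield id from the safety config.
    default_shield = getattr(safety_config, "default_shield_id", None)
    if default_shield is None:
        raise ValueError("No 'model' given and no default shield configured")
    return default_shield
```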