diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9b8b9a8df..56e35aa6e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -75,19 +75,19 @@ repos: # - id: markdown-link-check # args: ['--quiet'] -# - repo: local -# hooks: -# - id: distro-codegen -# name: Distribution Template Codegen -# additional_dependencies: -# - rich -# - pydantic -# entry: python -m llama_stack.scripts.distro_codegen -# language: python -# pass_filenames: false -# require_serial: true -# files: ^llama_stack/templates/.*$ -# stages: [manual] +- repo: local + hooks: + - id: distro-codegen + name: Distribution Template Codegen + additional_dependencies: + - rich + - pydantic + - uv==0.6.0 + entry: uv run python -m llama_stack.scripts.distro_codegen + language: python + pass_filenames: false + require_serial: true + files: ^llama_stack/templates/.*$|^llama_stack/providers/.*/inference/.*/models\.py$ ci: autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6dc08b5c0..83a717273 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -139,6 +139,16 @@ $ make html $ uv run sphinx-autobuild source build/html ``` +### Update API Documentation + +If you modify or add new API endpoints, update the API documentation accordingly. You can do this by running the following commands: + +```bash +$ uv sync --extra dev +$ ./docs/openapi_generator/run_openapi_generator.sh +``` + +The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing. ## License By contributing to Llama, you agree that your contributions will be licensed diff --git a/distributions/dependencies.json b/distributions/dependencies.json index c1450d97e..345a29f33 100644 --- a/distributions/dependencies.json +++ b/distributions/dependencies.json @@ -21,6 +21,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -54,6 +55,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -88,6 +90,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -122,6 +125,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -157,6 +161,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -192,6 +197,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -228,6 +234,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -269,6 +276,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -306,6 +314,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -340,6 +349,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -373,6 +383,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -403,6 +414,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -438,6 +450,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -471,6 +484,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", @@ -505,6 +519,7 @@ "pandas", "pillow", "psycopg2-binary", + "pymongo", "pypdf", "redis", "requests", diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html index 2b6e1d11c..40c167685 100644 --- a/docs/_static/llama-stack-spec.html
+++ b/docs/_static/llama-stack-spec.html @@ -40,286 +40,6 @@ } ], "paths": { - "/v1/eval/tasks/{task_id}/evaluations": { - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - } - }, - "tags": [ - "Eval" - ], - "description": "", - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeprecatedEvaluateRowsRequest" - } - } - }, - "required": true - }, - "deprecated": true - } - }, - "/v1/eval-tasks/{eval_task_id}": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "oneOf": [ - { - "$ref": "#/components/schemas/Benchmark" - }, - { - "type": "null" - } - ] - } - } - } - } - }, - "tags": [ - "Benchmarks" - ], - "description": "", - "parameters": [ - { - "name": "eval_task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "deprecated": true - } - }, - "/v1/eval/tasks/{task_id}/jobs/{job_id}": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "oneOf": [ - { - "$ref": "#/components/schemas/JobStatus" - }, - { - "type": "null" - } - ] - } - } - } - } - }, - "tags": [ - "Eval" - ], - "description": "", - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "deprecated": true - }, - "delete": { - "responses": { - "200": { - "description": "OK" - } - }, - "tags": [ - "Eval" - ], - "description": "", - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "deprecated": true - } - }, - "/v1/eval/tasks/{task_id}/jobs/{job_id}/result": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - } - }, - "tags": [ - "Eval" - ], - "description": "", - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "deprecated": true - } - }, - "/v1/eval-tasks": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListBenchmarksResponse" - } - } - } - } - }, - "tags": [ - "Benchmarks" - ], - "description": "", - "parameters": [], - "deprecated": true - }, - "post": { - "responses": { - "200": { - "description": "OK" - } - }, - "tags": [ - "Benchmarks" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeprecatedRegisterEvalTaskRequest" - } - } - }, - "required": true - }, - "deprecated": true - } - }, - "/v1/eval/tasks/{task_id}/jobs": { - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Job" - } - } - } - } - }, - "tags": [ - "Eval" - 
], - "description": "", - "parameters": [ - { - "name": "task_id", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/DeprecatedRunEvalRequest" - } - } - }, - "required": true - }, - "deprecated": true - } - }, "/v1/datasetio/rows": { "get": { "responses": { @@ -678,6 +398,65 @@ } } }, + "/v1/files": { + "get": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBucketResponse" + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "List all buckets.", + "parameters": [ + { + "name": "bucket", + "in": "query", + "required": true, + "schema": { + "type": "string" + } + } + ] + }, + "post": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FileUploadResponse" + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "Create a new upload session for a file identified by a bucket and key.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateUploadSessionRequest" + } + } + }, + "required": true + } + } + }, "/v1/agents/{agent_id}": { "delete": { "responses": { @@ -779,6 +558,84 @@ ] } }, + "/v1/files/{bucket}/{key}": { + "get": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FileResponse" + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "Get a file info identified by a bucket and key.", + "parameters": [ + { + "name": "bucket", + "in": "path", + "description": "Bucket name (valid chars: a-zA-Z0-9_-)", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "key", + "in": "path", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)", + "required": true, + "schema": { + "type": "string" + } + } + ] + }, + "delete": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FileResponse" + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "Delete a file identified by a bucket and key.", + "parameters": [ + { + "name": "bucket", + "in": "path", + "description": "Bucket name (valid chars: a-zA-Z0-9_-)", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "key", + "in": "path", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)", + "required": true, + "schema": { + "type": "string" + } + } + ] + } + }, "/v1/inference/embeddings": { "post": { "responses": { @@ -1216,7 +1073,7 @@ } }, "/v1/telemetry/spans/{span_id}/tree": { - "get": { + "post": { "responses": { "200": { "description": "OK", @@ -1241,27 +1098,18 @@ "schema": { "type": "string" } - }, - { - "name": "attributes_to_return", - "in": "query", - "required": false, - "schema": { - "type": "array", - "items": { - "type": "string" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetSpanTreeRequest" } } }, - { - "name": "max_depth", - "in": "query", - "required": false, - "schema": { - "type": "integer" - } - } - ] + "required": true + } } }, "/v1/tools/{tool_name}": { @@ -1470,6 +1318,91 @@ "parameters": [] } }, + 
"/v1/files/session:{upload_id}": { + "get": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/FileUploadResponse" + }, + { + "type": "null" + } + ] + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "Returns information about an existsing upload session", + "parameters": [ + { + "name": "upload_id", + "in": "path", + "description": "ID of the upload session", + "required": true, + "schema": { + "type": "string" + } + } + ] + }, + "post": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "oneOf": [ + { + "$ref": "#/components/schemas/FileResponse" + }, + { + "type": "null" + } + ] + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "Upload file content to an existing upload session. On the server, request body will have the raw bytes that are uploaded.", + "parameters": [ + { + "name": "upload_id", + "in": "path", + "description": "ID of the upload session", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + } + } + }, "/v1/vector-dbs/{vector_db_id}": { "get": { "responses": { @@ -1826,6 +1759,37 @@ } } }, + "/v1/files/{bucket}": { + "get": { + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListFileResponse" + } + } + } + } + }, + "tags": [ + "Files (Coming Soon)" + ], + "description": "List all files in a bucket.", + "parameters": [ + { + "name": "bucket", + "in": "path", + "description": "Bucket name (valid chars: a-zA-Z0-9_-)", + "required": true, + "schema": { + "type": "string" + } + } + ] + } + }, "/v1/models": { "get": { "responses": { @@ -2290,7 +2254,7 @@ } }, "/v1/telemetry/spans": { - "get": { + "post": { "responses": { "200": { "description": "OK", @@ -2307,42 +2271,21 @@ "Telemetry" ], "description": "", - "parameters": [ - { - "name": "attribute_filters", - "in": "query", - "required": true, - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/QueryCondition" + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QuerySpansRequest" } } }, - { - "name": "attributes_to_return", - "in": "query", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - }, - { - "name": "max_depth", - "in": "query", - "required": false, - "schema": { - "type": "integer" - } - } - ] + "required": true + } } }, "/v1/telemetry/traces": { - "get": { + "post": { "responses": { "200": { "description": "OK", @@ -2359,46 +2302,17 @@ "Telemetry" ], "description": "", - "parameters": [ - { - "name": "attribute_filters", - "in": "query", - "required": false, - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/QueryCondition" + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueryTracesRequest" } } }, - { - "name": "limit", - "in": "query", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "offset", - "in": "query", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "order_by", - "in": "query", - "required": false, - "schema": { - "type": "array", - "items": { - 
"type": "string" - } - } - } - ] + "required": true + } } }, "/v1/eval/benchmarks/{benchmark_id}/jobs": { @@ -2645,227 +2559,86 @@ "jsonSchemaDialect": "https://json-schema.org/draft/2020-12/schema", "components": { "schemas": { - "AgentCandidate": { + "AppendRowsRequest": { "type": "object", "properties": { - "type": { - "type": "string", - "const": "agent", - "default": "agent" - }, - "config": { - "$ref": "#/components/schemas/AgentConfig" - } - }, - "additionalProperties": false, - "required": [ - "type", - "config" - ], - "title": "AgentCandidate" - }, - "AgentConfig": { - "type": "object", - "properties": { - "sampling_params": { - "$ref": "#/components/schemas/SamplingParams" - }, - "input_shields": { - "type": "array", - "items": { - "type": "string" - } - }, - "output_shields": { - "type": "array", - "items": { - "type": "string" - } - }, - "toolgroups": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AgentTool" - } - }, - "client_tools": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolDef" - } - }, - "tool_choice": { - "type": "string", - "enum": [ - "auto", - "required", - "none" - ], - "title": "ToolChoice", - "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model.", - "deprecated": true - }, - "tool_prompt_format": { - "type": "string", - "enum": [ - "json", - "function_tag", - "python_list" - ], - "title": "ToolPromptFormat", - "description": "Prompt format for calling custom / zero shot tools.", - "deprecated": true - }, - "tool_config": { - "$ref": "#/components/schemas/ToolConfig" - }, - "max_infer_iters": { - "type": "integer", - "default": 10 - }, - "model": { + "dataset_id": { "type": "string" }, - "instructions": { - "type": "string" - }, - "enable_session_persistence": { - "type": "boolean", - "default": false - }, - "response_format": { - "$ref": "#/components/schemas/ResponseFormat" - } - }, - "additionalProperties": false, - "required": [ - "model", - "instructions" - ], - "title": "AgentConfig" - }, - "AgentTool": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "args": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } + "rows": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] } - }, - "additionalProperties": false, - "required": [ - "name", - "args" - ], - "title": "AgentToolGroupWithArgs" + } } - ] - }, - "AggregationFunctionType": { - "type": "string", - "enum": [ - "average", - "median", - "categorical_count", - "accuracy" + }, + "additionalProperties": false, + "required": [ + "dataset_id", + "rows" ], - "title": "AggregationFunctionType" + "title": "AppendRowsRequest" }, - "BasicScoringFnParams": { + "CompletionMessage": { "type": "object", "properties": { - "type": { + "role": { "type": "string", - "const": "basic", - "default": "basic" + "const": "assistant", + "default": "assistant", + "description": "Must be \"assistant\" to identify this as the model's response" }, - "aggregation_functions": { + "content": { + "$ref": 
"#/components/schemas/InterleavedContent", + "description": "The content of the model's response" + }, + "stop_reason": { + "type": "string", + "enum": [ + "end_of_turn", + "end_of_message", + "out_of_tokens" + ], + "description": "Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: The model finished generating the entire response. - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response. - `StopReason.out_of_tokens`: The model ran out of token budget." + }, + "tool_calls": { "type": "array", "items": { - "$ref": "#/components/schemas/AggregationFunctionType" - } + "$ref": "#/components/schemas/ToolCall" + }, + "description": "List of tool calls. Each tool call is a ToolCall object." } }, "additionalProperties": false, "required": [ - "type" + "role", + "content", + "stop_reason" ], - "title": "BasicScoringFnParams" - }, - "BenchmarkConfig": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "benchmark", - "default": "benchmark" - }, - "eval_candidate": { - "$ref": "#/components/schemas/EvalCandidate" - }, - "scoring_params": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ScoringFnParams" - } - }, - "num_examples": { - "type": "integer" - } - }, - "additionalProperties": false, - "required": [ - "type", - "eval_candidate", - "scoring_params" - ], - "title": "BenchmarkConfig" - }, - "EvalCandidate": { - "oneOf": [ - { - "$ref": "#/components/schemas/ModelCandidate" - }, - { - "$ref": "#/components/schemas/AgentCandidate" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "model": "#/components/schemas/ModelCandidate", - "agent": "#/components/schemas/AgentCandidate" - } - } + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." }, "GrammarResponseFormat": { "type": "object", @@ -3037,92 +2810,30 @@ "title": "JsonSchemaResponseFormat", "description": "Configuration for JSON schema-guided response generation." 
}, - "LLMAsJudgeScoringFnParams": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "llm_as_judge", - "default": "llm_as_judge" + "Message": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserMessage" }, - "judge_model": { - "type": "string" - }, - "prompt_template": { - "type": "string" - }, - "judge_score_regexes": { - "type": "array", - "items": { - "type": "string" - } - }, - "aggregation_functions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AggregationFunctionType" - } - } - }, - "additionalProperties": false, - "required": [ - "type", - "judge_model" - ], - "title": "LLMAsJudgeScoringFnParams" - }, - "ModelCandidate": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "model", - "default": "model" - }, - "model": { - "type": "string" - }, - "sampling_params": { - "$ref": "#/components/schemas/SamplingParams" - }, - "system_message": { + { "$ref": "#/components/schemas/SystemMessage" - } - }, - "additionalProperties": false, - "required": [ - "type", - "model", - "sampling_params" - ], - "title": "ModelCandidate" - }, - "RegexParserScoringFnParams": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "regex_parser", - "default": "regex_parser" }, - "parsing_regexes": { - "type": "array", - "items": { - "type": "string" - } + { + "$ref": "#/components/schemas/ToolResponseMessage" }, - "aggregation_functions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AggregationFunctionType" - } + { + "$ref": "#/components/schemas/CompletionMessage" } - }, - "additionalProperties": false, - "required": [ - "type" ], - "title": "RegexParserScoringFnParams" + "discriminator": { + "propertyName": "role", + "mapping": { + "user": "#/components/schemas/UserMessage", + "system": "#/components/schemas/SystemMessage", + "tool": "#/components/schemas/ToolResponseMessage", + "assistant": "#/components/schemas/CompletionMessage" + } + } }, "ResponseFormat": { "oneOf": [ @@ -3183,27 +2894,6 @@ } } }, - "ScoringFnParams": { - "oneOf": [ - { - "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" - }, - { - "$ref": "#/components/schemas/RegexParserScoringFnParams" - }, - { - "$ref": "#/components/schemas/BasicScoringFnParams" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", - "regex_parser": "#/components/schemas/RegexParserScoringFnParams", - "basic": "#/components/schemas/BasicScoringFnParams" - } - } - }, "SystemMessage": { "type": "object", "properties": { @@ -3248,635 +2938,6 @@ "title": "TextContentItem", "description": "A text content item" }, - "ToolConfig": { - "type": "object", - "properties": { - "tool_choice": { - "oneOf": [ - { - "type": "string", - "enum": [ - "auto", - "required", - "none" - ], - "title": "ToolChoice", - "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." - }, - { - "type": "string" - } - ], - "default": "auto", - "description": "(Optional) Whether tool use is automatic, required, or none. Can also specify a tool name to use a specific tool. Defaults to ToolChoice.auto." - }, - "tool_prompt_format": { - "type": "string", - "enum": [ - "json", - "function_tag", - "python_list" - ], - "description": "(Optional) Instructs the model how to format tool calls. 
By default, Llama Stack will attempt to use a format that is best adapted to the model. - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python syntax -- a list of function calls." - }, - "system_message_behavior": { - "type": "string", - "enum": [ - "append", - "replace" - ], - "description": "(Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: Replaces the default system prompt with the provided system message. The system message can include the string '{{function_definitions}}' to indicate where the function definitions should be inserted.", - "default": "append" - } - }, - "additionalProperties": false, - "title": "ToolConfig", - "description": "Configuration for tool use." - }, - "ToolDef": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolParameter" - } - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "additionalProperties": false, - "required": [ - "name" - ], - "title": "ToolDef" - }, - "ToolParameter": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "parameter_type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean", - "default": true - }, - "default": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "additionalProperties": false, - "required": [ - "name", - "parameter_type", - "description", - "required" - ], - "title": "ToolParameter" - }, - "TopKSamplingStrategy": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "top_k", - "default": "top_k" - }, - "top_k": { - "type": "integer" - } - }, - "additionalProperties": false, - "required": [ - "type", - "top_k" - ], - "title": "TopKSamplingStrategy" - }, - "TopPSamplingStrategy": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "top_p", - "default": "top_p" - }, - "temperature": { - "type": "number" - }, - "top_p": { - "type": "number", - "default": 0.95 - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "TopPSamplingStrategy" - }, - "URL": { - "type": "object", - "properties": { - "uri": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "uri" - ], - "title": "URL" - }, - "DeprecatedEvaluateRowsRequest": { - "type": "object", - "properties": { - "input_rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - } - }, - "task_config": { - "$ref": "#/components/schemas/BenchmarkConfig" - } - }, - 
"additionalProperties": false, - "required": [ - "input_rows", - "scoring_functions", - "task_config" - ], - "title": "DeprecatedEvaluateRowsRequest" - }, - "EvaluateResponse": { - "type": "object", - "properties": { - "generations": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "scores": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ScoringResult" - } - } - }, - "additionalProperties": false, - "required": [ - "generations", - "scores" - ], - "title": "EvaluateResponse" - }, - "ScoringResult": { - "type": "object", - "properties": { - "score_rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "aggregated_results": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "additionalProperties": false, - "required": [ - "score_rows", - "aggregated_results" - ], - "title": "ScoringResult" - }, - "Benchmark": { - "type": "object", - "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { - "type": "string", - "const": "benchmark", - "default": "benchmark" - }, - "dataset_id": { - "type": "string" - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - } - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_resource_id", - "provider_id", - "type", - "dataset_id", - "scoring_functions", - "metadata" - ], - "title": "Benchmark" - }, - "JobStatus": { - "type": "string", - "enum": [ - "completed", - "in_progress", - "failed", - "scheduled" - ], - "title": "JobStatus" - }, - "ListBenchmarksResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Benchmark" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListBenchmarksResponse" - }, - "DeprecatedRegisterEvalTaskRequest": { - "type": "object", - "properties": { - "eval_task_id": { - "type": "string" - }, - "dataset_id": { - "type": "string" - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - } - }, - "provider_benchmark_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "additionalProperties": false, - "required": [ - "eval_task_id", - "dataset_id", - "scoring_functions" - ], - "title": "DeprecatedRegisterEvalTaskRequest" - }, - 
"DeprecatedRunEvalRequest": { - "type": "object", - "properties": { - "task_config": { - "$ref": "#/components/schemas/BenchmarkConfig" - } - }, - "additionalProperties": false, - "required": [ - "task_config" - ], - "title": "DeprecatedRunEvalRequest" - }, - "Job": { - "type": "object", - "properties": { - "job_id": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "job_id" - ], - "title": "Job" - }, - "AppendRowsRequest": { - "type": "object", - "properties": { - "dataset_id": { - "type": "string" - }, - "rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - } - }, - "additionalProperties": false, - "required": [ - "dataset_id", - "rows" - ], - "title": "AppendRowsRequest" - }, - "CompletionMessage": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "assistant", - "default": "assistant", - "description": "Must be \"assistant\" to identify this as the model's response" - }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The content of the model's response" - }, - "stop_reason": { - "type": "string", - "enum": [ - "end_of_turn", - "end_of_message", - "out_of_tokens" - ], - "description": "Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: The model finished generating the entire response. - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response. - `StopReason.out_of_tokens`: The model ran out of token budget." - }, - "tool_calls": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolCall" - }, - "description": "List of tool calls. Each tool call is a ToolCall object." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content", - "stop_reason" - ], - "title": "CompletionMessage", - "description": "A message containing the model's (assistant) response in a chat conversation." - }, - "Message": { - "oneOf": [ - { - "$ref": "#/components/schemas/UserMessage" - }, - { - "$ref": "#/components/schemas/SystemMessage" - }, - { - "$ref": "#/components/schemas/ToolResponseMessage" - }, - { - "$ref": "#/components/schemas/CompletionMessage" - } - ], - "discriminator": { - "propertyName": "role", - "mapping": { - "user": "#/components/schemas/UserMessage", - "system": "#/components/schemas/SystemMessage", - "tool": "#/components/schemas/ToolResponseMessage", - "assistant": "#/components/schemas/CompletionMessage" - } - } - }, "ToolCall": { "type": "object", "properties": { @@ -4099,6 +3160,60 @@ "title": "ToolResponseMessage", "description": "A message representing the result of a tool invocation." 
}, + "TopKSamplingStrategy": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "top_k", + "default": "top_k" + }, + "top_k": { + "type": "integer" + } + }, + "additionalProperties": false, + "required": [ + "type", + "top_k" + ], + "title": "TopKSamplingStrategy" + }, + "TopPSamplingStrategy": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "top_p", + "default": "top_p" + }, + "temperature": { + "type": "number" + }, + "top_p": { + "type": "number", + "default": 0.95 + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "TopPSamplingStrategy" + }, + "URL": { + "type": "object", + "properties": { + "uri": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "uri" + ], + "title": "URL" + }, "UserMessage": { "type": "object", "properties": { @@ -4422,6 +3537,51 @@ ], "title": "CancelTrainingJobRequest" }, + "ToolConfig": { + "type": "object", + "properties": { + "tool_choice": { + "oneOf": [ + { + "type": "string", + "enum": [ + "auto", + "required", + "none" + ], + "title": "ToolChoice", + "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." + }, + { + "type": "string" + } + ], + "default": "auto", + "description": "(Optional) Whether tool use is automatic, required, or none. Can also specify a tool name to use a specific tool. Defaults to ToolChoice.auto." + }, + "tool_prompt_format": { + "type": "string", + "enum": [ + "json", + "function_tag", + "python_list" + ], + "description": "(Optional) Instructs the model how to format tool calls. By default, Llama Stack will attempt to use a format that is best adapted to the model. - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python syntax -- a list of function calls." + }, + "system_message_behavior": { + "type": "string", + "enum": [ + "append", + "replace" + ], + "description": "(Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: Replaces the default system prompt with the provided system message. The system message can include the string '{{function_definitions}}' to indicate where the function definitions should be inserted.", + "default": "append" + } + }, + "additionalProperties": false, + "title": "ToolConfig", + "description": "Configuration for tool use." + }, "ChatCompletionRequest": { "type": "object", "properties": { @@ -4730,6 +3890,227 @@ "title": "CompletionResponseStreamChunk", "description": "A chunk of a streamed completion response." 
}, + "AgentConfig": { + "type": "object", + "properties": { + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "input_shields": { + "type": "array", + "items": { + "type": "string" + } + }, + "output_shields": { + "type": "array", + "items": { + "type": "string" + } + }, + "toolgroups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AgentTool" + } + }, + "client_tools": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolDef" + } + }, + "tool_choice": { + "type": "string", + "enum": [ + "auto", + "required", + "none" + ], + "title": "ToolChoice", + "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model.", + "deprecated": true + }, + "tool_prompt_format": { + "type": "string", + "enum": [ + "json", + "function_tag", + "python_list" + ], + "title": "ToolPromptFormat", + "description": "Prompt format for calling custom / zero shot tools.", + "deprecated": true + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + }, + "max_infer_iters": { + "type": "integer", + "default": 10 + }, + "model": { + "type": "string" + }, + "instructions": { + "type": "string" + }, + "enable_session_persistence": { + "type": "boolean", + "default": false + }, + "response_format": { + "$ref": "#/components/schemas/ResponseFormat" + } + }, + "additionalProperties": false, + "required": [ + "model", + "instructions" + ], + "title": "AgentConfig" + }, + "AgentTool": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "additionalProperties": false, + "required": [ + "name", + "args" + ], + "title": "AgentToolGroupWithArgs" + } + ] + }, + "ToolDef": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolParameter" + } + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "additionalProperties": false, + "required": [ + "name" + ], + "title": "ToolDef" + }, + "ToolParameter": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parameter_type": { + "type": "string" + }, + "description": { + "type": "string" + }, + "required": { + "type": "boolean", + "default": true + }, + "default": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "additionalProperties": false, + "required": [ + "name", + "parameter_type", + "description", + "required" + ], + "title": "ToolParameter" + }, "CreateAgentRequest": { "type": "object", "properties": { @@ -5441,6 +4822,105 @@ ], "title": "AgentTurnResponseTurnStartPayload" }, + "CreateUploadSessionRequest": { + "type": "object", + "properties": { + "bucket": { + "type": "string", + "description": "Bucket under which the file is stored (valid chars: 
a-zA-Z0-9_-)" + }, + "key": { + "type": "string", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)" + }, + "mime_type": { + "type": "string", + "description": "MIME type of the file" + }, + "size": { + "type": "integer", + "description": "File size in bytes" + } + }, + "additionalProperties": false, + "required": [ + "bucket", + "key", + "mime_type", + "size" + ], + "title": "CreateUploadSessionRequest" + }, + "FileUploadResponse": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "ID of the upload session" + }, + "url": { + "type": "string", + "description": "Upload URL for the file or file parts" + }, + "offset": { + "type": "integer", + "description": "Upload content offset" + }, + "size": { + "type": "integer", + "description": "Upload content size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "url", + "offset", + "size" + ], + "title": "FileUploadResponse", + "description": "Response after initiating a file upload session." + }, + "FileResponse": { + "type": "object", + "properties": { + "bucket": { + "type": "string", + "description": "Bucket under which the file is stored (valid chars: a-zA-Z0-9_-)" + }, + "key": { + "type": "string", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)" + }, + "mime_type": { + "type": "string", + "description": "MIME type of the file" + }, + "url": { + "type": "string", + "description": "Upload URL for the file contents" + }, + "bytes": { + "type": "integer", + "description": "Size of the file in bytes" + }, + "created_at": { + "type": "integer", + "description": "Timestamp of when the file was created" + } + }, + "additionalProperties": false, + "required": [ + "bucket", + "key", + "mime_type", + "url", + "bytes", + "created_at" + ], + "title": "FileResponse", + "description": "Response representing a file entry." + }, "EmbeddingsRequest": { "type": "object", "properties": { @@ -5484,6 +4964,204 @@ "title": "EmbeddingsResponse", "description": "Response containing generated embeddings." 
}, + "AgentCandidate": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "agent", + "default": "agent" + }, + "config": { + "$ref": "#/components/schemas/AgentConfig" + } + }, + "additionalProperties": false, + "required": [ + "type", + "config" + ], + "title": "AgentCandidate" + }, + "AggregationFunctionType": { + "type": "string", + "enum": [ + "average", + "median", + "categorical_count", + "accuracy" + ], + "title": "AggregationFunctionType" + }, + "BasicScoringFnParams": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "basic", + "default": "basic" + }, + "aggregation_functions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + } + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "BasicScoringFnParams" + }, + "BenchmarkConfig": { + "type": "object", + "properties": { + "eval_candidate": { + "$ref": "#/components/schemas/EvalCandidate" + }, + "scoring_params": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ScoringFnParams" + } + }, + "num_examples": { + "type": "integer" + } + }, + "additionalProperties": false, + "required": [ + "eval_candidate", + "scoring_params" + ], + "title": "BenchmarkConfig" + }, + "EvalCandidate": { + "oneOf": [ + { + "$ref": "#/components/schemas/ModelCandidate" + }, + { + "$ref": "#/components/schemas/AgentCandidate" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "model": "#/components/schemas/ModelCandidate", + "agent": "#/components/schemas/AgentCandidate" + } + } + }, + "LLMAsJudgeScoringFnParams": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "llm_as_judge", + "default": "llm_as_judge" + }, + "judge_model": { + "type": "string" + }, + "prompt_template": { + "type": "string" + }, + "judge_score_regexes": { + "type": "array", + "items": { + "type": "string" + } + }, + "aggregation_functions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + } + } + }, + "additionalProperties": false, + "required": [ + "type", + "judge_model" + ], + "title": "LLMAsJudgeScoringFnParams" + }, + "ModelCandidate": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "model", + "default": "model" + }, + "model": { + "type": "string" + }, + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "system_message": { + "$ref": "#/components/schemas/SystemMessage" + } + }, + "additionalProperties": false, + "required": [ + "type", + "model", + "sampling_params" + ], + "title": "ModelCandidate" + }, + "RegexParserScoringFnParams": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "regex_parser", + "default": "regex_parser" + }, + "parsing_regexes": { + "type": "array", + "items": { + "type": "string" + } + }, + "aggregation_functions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + } + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "RegexParserScoringFnParams" + }, + "ScoringFnParams": { + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + 
"regex_parser": "#/components/schemas/RegexParserScoringFnParams", + "basic": "#/components/schemas/BasicScoringFnParams" + } + } + }, "EvaluateRowsRequest": { "type": "object", "properties": { @@ -5533,6 +5211,115 @@ ], "title": "EvaluateRowsRequest" }, + "EvaluateResponse": { + "type": "object", + "properties": { + "generations": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "scores": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + } + } + }, + "additionalProperties": false, + "required": [ + "generations", + "scores" + ], + "title": "EvaluateResponse" + }, + "ScoringResult": { + "type": "object", + "properties": { + "score_rows": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "aggregated_results": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "additionalProperties": false, + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult" + }, "Session": { "type": "object", "properties": { @@ -5598,6 +5385,70 @@ ], "title": "AgentStepResponse" }, + "Benchmark": { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "provider_resource_id": { + "type": "string" + }, + "provider_id": { + "type": "string" + }, + "type": { + "type": "string", + "const": "benchmark", + "default": "benchmark" + }, + "dataset_id": { + "type": "string" + }, + "scoring_functions": { + "type": "array", + "items": { + "type": "string" + } + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "additionalProperties": false, + "required": [ + "identifier", + "provider_resource_id", + "provider_id", + "type", + "dataset_id", + "scoring_functions", + "metadata" + ], + "title": "Benchmark" + }, "AgentTurnInputType": { "type": "object", "properties": { @@ -6146,6 +5997,22 @@ ], "title": "Span" }, + "GetSpanTreeRequest": { + "type": "object", + "properties": { + "attributes_to_return": { + "type": "array", + "items": { + "type": "string" + } + }, + "max_depth": { + "type": "integer" + } + }, + "additionalProperties": false, + "title": "GetSpanTreeRequest" + }, "SpanStatus": { "type": "string", "enum": [ @@ -6417,6 +6284,16 @@ "title": "PostTrainingJobArtifactsResponse", "description": "Artifacts of a finetuning job." 
}, + "JobStatus": { + "type": "string", + "enum": [ + "completed", + "in_progress", + "failed", + "scheduled" + ], + "title": "JobStatus" + }, "PostTrainingJobStatusResponse": { "type": "object", "properties": { @@ -6756,6 +6633,53 @@ ], "title": "ToolInvocationResult" }, + "BucketResponse": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "name" + ], + "title": "BucketResponse" + }, + "ListBucketResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BucketResponse" + }, + "description": "List of BucketResponse entries" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListBucketResponse", + "description": "Response representing a list of buckets." + }, + "ListBenchmarksResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Benchmark" + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListBenchmarksResponse" + }, "ListDatasetsResponse": { "type": "object", "properties": { @@ -6772,6 +6696,24 @@ ], "title": "ListDatasetsResponse" }, + "ListFileResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FileResponse" + }, + "description": "List of FileResponse entries" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListFileResponse", + "description": "Response representing a list of file entries." + }, "ListModelsResponse": { "type": "object", "properties": { @@ -7688,6 +7630,32 @@ ], "title": "QueryConditionOp" }, + "QuerySpansRequest": { + "type": "object", + "properties": { + "attribute_filters": { + "type": "array", + "items": { + "$ref": "#/components/schemas/QueryCondition" + } + }, + "attributes_to_return": { + "type": "array", + "items": { + "type": "string" + } + }, + "max_depth": { + "type": "integer" + } + }, + "additionalProperties": false, + "required": [ + "attribute_filters", + "attributes_to_return" + ], + "title": "QuerySpansRequest" + }, "QuerySpansResponse": { "type": "object", "properties": { @@ -7704,6 +7672,31 @@ ], "title": "QuerySpansResponse" }, + "QueryTracesRequest": { + "type": "object", + "properties": { + "attribute_filters": { + "type": "array", + "items": { + "$ref": "#/components/schemas/QueryCondition" + } + }, + "limit": { + "type": "integer" + }, + "offset": { + "type": "integer" + }, + "order_by": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "title": "QueryTracesRequest" + }, "QueryTracesResponse": { "type": "object", "properties": { @@ -8035,6 +8028,19 @@ ], "title": "RunEvalRequest" }, + "Job": { + "type": "object", + "properties": { + "job_id": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "job_id" + ], + "title": "Job" + }, "RunShieldRequest": { "type": "object", "properties": { @@ -8543,6 +8549,9 @@ { "name": "Eval" }, + { + "name": "Files (Coming Soon)" + }, { "name": "Inference", "description": "This API provides the raw interface to the underlying models. 
Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.", @@ -8598,6 +8607,7 @@ "DatasetIO", "Datasets", "Eval", + "Files (Coming Soon)", "Inference", "Inspect", "Models", diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml index 99300fedf..c5043665b 100644 --- a/docs/_static/llama-stack-spec.yaml +++ b/docs/_static/llama-stack-spec.yaml @@ -10,175 +10,6 @@ info: servers: - url: http://any-hosted-llama-stack.com paths: - /v1/eval/tasks/{task_id}/evaluations: - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - tags: - - Eval - description: '' - parameters: - - name: task_id - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/DeprecatedEvaluateRowsRequest' - required: true - deprecated: true - /v1/eval-tasks/{eval_task_id}: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/Benchmark' - - type: 'null' - tags: - - Benchmarks - description: '' - parameters: - - name: eval_task_id - in: path - required: true - schema: - type: string - deprecated: true - /v1/eval/tasks/{task_id}/jobs/{job_id}: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/JobStatus' - - type: 'null' - tags: - - Eval - description: '' - parameters: - - name: task_id - in: path - required: true - schema: - type: string - - name: job_id - in: path - required: true - schema: - type: string - deprecated: true - delete: - responses: - '200': - description: OK - tags: - - Eval - description: '' - parameters: - - name: task_id - in: path - required: true - schema: - type: string - - name: job_id - in: path - required: true - schema: - type: string - deprecated: true - /v1/eval/tasks/{task_id}/jobs/{job_id}/result: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - tags: - - Eval - description: '' - parameters: - - name: task_id - in: path - required: true - schema: - type: string - - name: job_id - in: path - required: true - schema: - type: string - deprecated: true - /v1/eval-tasks: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListBenchmarksResponse' - tags: - - Benchmarks - description: '' - parameters: [] - deprecated: true - post: - responses: - '200': - description: OK - tags: - - Benchmarks - description: '' - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/DeprecatedRegisterEvalTaskRequest' - required: true - deprecated: true - /v1/eval/tasks/{task_id}/jobs: - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Job' - tags: - - Eval - description: '' - parameters: - - name: task_id - in: path - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/DeprecatedRunEvalRequest' - required: true - deprecated: true /v1/datasetio/rows: get: responses: @@ -406,6 +237,43 @@ paths: schema: $ref: '#/components/schemas/CreateAgentTurnRequest' required: true + /v1/files: + get: + 
responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListBucketResponse' + tags: + - Files (Coming Soon) + description: List all buckets. + parameters: + - name: bucket + in: query + required: true + schema: + type: string + post: + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FileUploadResponse' + tags: + - Files (Coming Soon) + description: >- + Create a new upload session for a file identified by a bucket and key. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateUploadSessionRequest' + required: true /v1/agents/{agent_id}: delete: responses: @@ -468,6 +336,59 @@ paths: required: true schema: type: string + /v1/files/{bucket}/{key}: + get: + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FileResponse' + tags: + - Files (Coming Soon) + description: >- + Get a file info identified by a bucket and key. + parameters: + - name: bucket + in: path + description: 'Bucket name (valid chars: a-zA-Z0-9_-)' + required: true + schema: + type: string + - name: key + in: path + description: >- + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + required: true + schema: + type: string + delete: + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/FileResponse' + tags: + - Files (Coming Soon) + description: >- + Delete a file identified by a bucket and key. + parameters: + - name: bucket + in: path + description: 'Bucket name (valid chars: a-zA-Z0-9_-)' + required: true + schema: + type: string + - name: key + in: path + description: >- + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + required: true + schema: + type: string /v1/inference/embeddings: post: responses: @@ -726,7 +647,7 @@ paths: schema: type: string /v1/telemetry/spans/{span_id}/tree: - get: + post: responses: '200': description: OK @@ -743,18 +664,12 @@ paths: required: true schema: type: string - - name: attributes_to_return - in: query - required: false - schema: - type: array - items: - type: string - - name: max_depth - in: query - required: false - schema: - type: integer + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/GetSpanTreeRequest' + required: true /v1/tools/{tool_name}: get: responses: @@ -875,6 +790,57 @@ paths: - PostTraining (Coming Soon) description: '' parameters: [] + /v1/files/session:{upload_id}: + get: + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/FileUploadResponse' + - type: 'null' + tags: + - Files (Coming Soon) + description: >- + Returns information about an existing upload session + parameters: + - name: upload_id + in: path + description: ID of the upload session + required: true + schema: + type: string + post: + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/FileResponse' + - type: 'null' + tags: + - Files (Coming Soon) + description: >- + Upload file content to an existing upload session. On the server, request + body will have the raw bytes that are uploaded. 
+ parameters: + - name: upload_id + in: path + description: ID of the upload session + required: true + schema: + type: string + requestBody: + content: + application/octet-stream: + schema: + type: string + format: binary + required: true /v1/vector-dbs/{vector_db_id}: get: responses: @@ -1091,6 +1057,25 @@ paths: schema: $ref: '#/components/schemas/RegisterDatasetRequest' required: true + /v1/files/{bucket}: + get: + responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/ListFileResponse' + tags: + - Files (Coming Soon) + description: List all files in a bucket. + parameters: + - name: bucket + in: path + description: 'Bucket name (valid chars: a-zA-Z0-9_-)' + required: true + schema: + type: string /v1/models: get: responses: @@ -1379,7 +1364,7 @@ paths: $ref: '#/components/schemas/QueryChunksRequest' required: true /v1/telemetry/spans: - get: + post: responses: '200': description: OK @@ -1390,28 +1375,15 @@ paths: tags: - Telemetry description: '' - parameters: - - name: attribute_filters - in: query - required: true - schema: - type: array - items: - $ref: '#/components/schemas/QueryCondition' - - name: attributes_to_return - in: query - required: true - schema: - type: array - items: - type: string - - name: max_depth - in: query - required: false - schema: - type: integer + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpansRequest' + required: true /v1/telemetry/traces: - get: + post: responses: '200': description: OK @@ -1422,31 +1394,13 @@ paths: tags: - Telemetry description: '' - parameters: - - name: attribute_filters - in: query - required: false - schema: - type: array - items: - $ref: '#/components/schemas/QueryCondition' - - name: limit - in: query - required: false - schema: - type: integer - - name: offset - in: query - required: false - schema: - type: integer - - name: order_by - in: query - required: false - schema: - type: array - items: - type: string + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryTracesRequest' + required: true /v1/eval/benchmarks/{benchmark_id}/jobs: post: responses: @@ -1598,157 +1552,67 @@ jsonSchemaDialect: >- https://json-schema.org/draft/2020-12/schema components: schemas: - AgentCandidate: + AppendRowsRequest: type: object properties: - type: + dataset_id: type: string - const: agent - default: agent - config: - $ref: '#/components/schemas/AgentConfig' + rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object additionalProperties: false required: - - type - - config - title: AgentCandidate - AgentConfig: + - dataset_id + - rows + title: AppendRowsRequest + CompletionMessage: type: object properties: - sampling_params: - $ref: '#/components/schemas/SamplingParams' - input_shields: - type: array - items: - type: string - output_shields: - type: array - items: - type: string - toolgroups: - type: array - items: - $ref: '#/components/schemas/AgentTool' - client_tools: - type: array - items: - $ref: '#/components/schemas/ToolDef' - tool_choice: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + $ref: '#/components/schemas/InterleavedContent' + description: The content of the model's response + stop_reason: type: string enum: - - auto - - required - - 
none - title: ToolChoice + - end_of_turn + - end_of_message + - out_of_tokens description: >- - Whether tool use is required or automatic. This is a hint to the model - which may not be followed. It depends on the Instruction Following capabilities - of the model. - deprecated: true - tool_prompt_format: - type: string - enum: - - json - - function_tag - - python_list - title: ToolPromptFormat - description: >- - Prompt format for calling custom / zero shot tools. - deprecated: true - tool_config: - $ref: '#/components/schemas/ToolConfig' - max_infer_iters: - type: integer - default: 10 - model: - type: string - instructions: - type: string - enable_session_persistence: - type: boolean - default: false - response_format: - $ref: '#/components/schemas/ResponseFormat' - additionalProperties: false - required: - - model - - instructions - title: AgentConfig - AgentTool: - oneOf: - - type: string - - type: object - properties: - name: - type: string - args: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - name - - args - title: AgentToolGroupWithArgs - AggregationFunctionType: - type: string - enum: - - average - - median - - categorical_count - - accuracy - title: AggregationFunctionType - BasicScoringFnParams: - type: object - properties: - type: - type: string - const: basic - default: basic - aggregation_functions: + Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: + The model finished generating the entire response. - `StopReason.end_of_message`: + The model finished generating but generated a partial response -- usually, + a tool call. The user may call the tool and continue the conversation + with the tool's response. - `StopReason.out_of_tokens`: The model ran + out of token budget. + tool_calls: type: array items: - $ref: '#/components/schemas/AggregationFunctionType' + $ref: '#/components/schemas/ToolCall' + description: >- + List of tool calls. Each tool call is a ToolCall object. additionalProperties: false required: - - type - title: BasicScoringFnParams - BenchmarkConfig: - type: object - properties: - type: - type: string - const: benchmark - default: benchmark - eval_candidate: - $ref: '#/components/schemas/EvalCandidate' - scoring_params: - type: object - additionalProperties: - $ref: '#/components/schemas/ScoringFnParams' - num_examples: - type: integer - additionalProperties: false - required: - - type - - eval_candidate - - scoring_params - title: BenchmarkConfig - EvalCandidate: - oneOf: - - $ref: '#/components/schemas/ModelCandidate' - - $ref: '#/components/schemas/AgentCandidate' - discriminator: - propertyName: type - mapping: - model: '#/components/schemas/ModelCandidate' - agent: '#/components/schemas/AgentCandidate' + - role + - content + - stop_reason + title: CompletionMessage + description: >- + A message containing the model's (assistant) response in a chat conversation. GrammarResponseFormat: type: object properties: @@ -1863,68 +1727,19 @@ components: title: JsonSchemaResponseFormat description: >- Configuration for JSON schema-guided response generation. 
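
Since `ResponseFormat` is what callers pass to steer generation, a short sketch may help. This assumes the Python client and an already-registered model; the keyword names (`model_id`, `response_format`) follow the SDK reference later in this diff but should be treated as illustrative rather than definitive:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed address

# Constrain the completion to JSON matching a schema
# (JsonSchemaResponseFormat, per the spec above)
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Name a city and its population."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "type": "object",
            "properties": {
                "city": {"type": "string"},
                "population": {"type": "integer"},
            },
            "required": ["city", "population"],
        },
    },
)
print(response.completion_message.content)
```
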
- LLMAsJudgeScoringFnParams: - type: object - properties: - type: - type: string - const: llm_as_judge - default: llm_as_judge - judge_model: - type: string - prompt_template: - type: string - judge_score_regexes: - type: array - items: - type: string - aggregation_functions: - type: array - items: - $ref: '#/components/schemas/AggregationFunctionType' - additionalProperties: false - required: - - type - - judge_model - title: LLMAsJudgeScoringFnParams - ModelCandidate: - type: object - properties: - type: - type: string - const: model - default: model - model: - type: string - sampling_params: - $ref: '#/components/schemas/SamplingParams' - system_message: - $ref: '#/components/schemas/SystemMessage' - additionalProperties: false - required: - - type - - model - - sampling_params - title: ModelCandidate - RegexParserScoringFnParams: - type: object - properties: - type: - type: string - const: regex_parser - default: regex_parser - parsing_regexes: - type: array - items: - type: string - aggregation_functions: - type: array - items: - $ref: '#/components/schemas/AggregationFunctionType' - additionalProperties: false - required: - - type - title: RegexParserScoringFnParams + Message: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/SystemMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + - $ref: '#/components/schemas/CompletionMessage' + discriminator: + propertyName: role + mapping: + user: '#/components/schemas/UserMessage' + system: '#/components/schemas/SystemMessage' + tool: '#/components/schemas/ToolResponseMessage' + assistant: '#/components/schemas/CompletionMessage' ResponseFormat: oneOf: - $ref: '#/components/schemas/JsonSchemaResponseFormat' @@ -1960,17 +1775,6 @@ components: greedy: '#/components/schemas/GreedySamplingStrategy' top_p: '#/components/schemas/TopPSamplingStrategy' top_k: '#/components/schemas/TopKSamplingStrategy' - ScoringFnParams: - oneOf: - - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' - - $ref: '#/components/schemas/RegexParserScoringFnParams' - - $ref: '#/components/schemas/BasicScoringFnParams' - discriminator: - propertyName: type - mapping: - llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' - regex_parser: '#/components/schemas/RegexParserScoringFnParams' - basic: '#/components/schemas/BasicScoringFnParams' SystemMessage: type: object properties: @@ -2011,407 +1815,6 @@ components: - text title: TextContentItem description: A text content item - ToolConfig: - type: object - properties: - tool_choice: - oneOf: - - type: string - enum: - - auto - - required - - none - title: ToolChoice - description: >- - Whether tool use is required or automatic. This is a hint to the model - which may not be followed. It depends on the Instruction Following - capabilities of the model. - - type: string - default: auto - description: >- - (Optional) Whether tool use is automatic, required, or none. Can also - specify a tool name to use a specific tool. Defaults to ToolChoice.auto. - tool_prompt_format: - type: string - enum: - - json - - function_tag - - python_list - description: >- - (Optional) Instructs the model how to format tool calls. By default, Llama - Stack will attempt to use a format that is best adapted to the model. - - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python - syntax -- a list of function calls. 
- system_message_behavior: - type: string - enum: - - append - - replace - description: >- - (Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: - Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: - Replaces the default system prompt with the provided system message. The - system message can include the string '{{function_definitions}}' to indicate - where the function definitions should be inserted. - default: append - additionalProperties: false - title: ToolConfig - description: Configuration for tool use. - ToolDef: - type: object - properties: - name: - type: string - description: - type: string - parameters: - type: array - items: - $ref: '#/components/schemas/ToolParameter' - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - name - title: ToolDef - ToolParameter: - type: object - properties: - name: - type: string - parameter_type: - type: string - description: - type: string - required: - type: boolean - default: true - default: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - name - - parameter_type - - description - - required - title: ToolParameter - TopKSamplingStrategy: - type: object - properties: - type: - type: string - const: top_k - default: top_k - top_k: - type: integer - additionalProperties: false - required: - - type - - top_k - title: TopKSamplingStrategy - TopPSamplingStrategy: - type: object - properties: - type: - type: string - const: top_p - default: top_p - temperature: - type: number - top_p: - type: number - default: 0.95 - additionalProperties: false - required: - - type - title: TopPSamplingStrategy - URL: - type: object - properties: - uri: - type: string - additionalProperties: false - required: - - uri - title: URL - DeprecatedEvaluateRowsRequest: - type: object - properties: - input_rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - scoring_functions: - type: array - items: - type: string - task_config: - $ref: '#/components/schemas/BenchmarkConfig' - additionalProperties: false - required: - - input_rows - - scoring_functions - - task_config - title: DeprecatedEvaluateRowsRequest - EvaluateResponse: - type: object - properties: - generations: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - scores: - type: object - additionalProperties: - $ref: '#/components/schemas/ScoringResult' - additionalProperties: false - required: - - generations - - scores - title: EvaluateResponse - ScoringResult: - type: object - properties: - score_rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - aggregated_results: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - score_rows - - aggregated_results - title: ScoringResult - Benchmark: - type: object - properties: - identifier: - type: string - 
provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - const: benchmark - default: benchmark - dataset_id: - type: string - scoring_functions: - type: array - items: - type: string - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - identifier - - provider_resource_id - - provider_id - - type - - dataset_id - - scoring_functions - - metadata - title: Benchmark - JobStatus: - type: string - enum: - - completed - - in_progress - - failed - - scheduled - title: JobStatus - ListBenchmarksResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Benchmark' - additionalProperties: false - required: - - data - title: ListBenchmarksResponse - DeprecatedRegisterEvalTaskRequest: - type: object - properties: - eval_task_id: - type: string - dataset_id: - type: string - scoring_functions: - type: array - items: - type: string - provider_benchmark_id: - type: string - provider_id: - type: string - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - eval_task_id - - dataset_id - - scoring_functions - title: DeprecatedRegisterEvalTaskRequest - DeprecatedRunEvalRequest: - type: object - properties: - task_config: - $ref: '#/components/schemas/BenchmarkConfig' - additionalProperties: false - required: - - task_config - title: DeprecatedRunEvalRequest - Job: - type: object - properties: - job_id: - type: string - additionalProperties: false - required: - - job_id - title: Job - AppendRowsRequest: - type: object - properties: - dataset_id: - type: string - rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - dataset_id - - rows - title: AppendRowsRequest - CompletionMessage: - type: object - properties: - role: - type: string - const: assistant - default: assistant - description: >- - Must be "assistant" to identify this as the model's response - content: - $ref: '#/components/schemas/InterleavedContent' - description: The content of the model's response - stop_reason: - type: string - enum: - - end_of_turn - - end_of_message - - out_of_tokens - description: >- - Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: - The model finished generating the entire response. - `StopReason.end_of_message`: - The model finished generating but generated a partial response -- usually, - a tool call. The user may call the tool and continue the conversation - with the tool's response. - `StopReason.out_of_tokens`: The model ran - out of token budget. - tool_calls: - type: array - items: - $ref: '#/components/schemas/ToolCall' - description: >- - List of tool calls. Each tool call is a ToolCall object. - additionalProperties: false - required: - - role - - content - - stop_reason - title: CompletionMessage - description: >- - A message containing the model's (assistant) response in a chat conversation. 
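
`AppendRowsRequest` is the payload behind `client.datasetio.append_rows` (see the SDK reference later in this diff). A small sketch, assuming a dataset has already been registered; the dataset id and row fields here are hypothetical:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed address

# Rows are free-form mappings of scalars, arrays, or nested objects,
# matching the AppendRowsRequest schema above.
client.datasetio.append_rows(
    dataset_id="my-eval-dataset",  # hypothetical, must be registered first
    rows=[
        {"input_query": "What is 2 + 2?", "expected_answer": "4"},
        {"input_query": "Capital of France?", "expected_answer": "Paris"},
    ],
)
```
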
- Message: - oneOf: - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/SystemMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - - $ref: '#/components/schemas/CompletionMessage' - discriminator: - propertyName: role - mapping: - user: '#/components/schemas/UserMessage' - system: '#/components/schemas/SystemMessage' - tool: '#/components/schemas/ToolResponseMessage' - assistant: '#/components/schemas/CompletionMessage' ToolCall: type: object properties: @@ -2539,6 +1942,45 @@ components: title: ToolResponseMessage description: >- A message representing the result of a tool invocation. + TopKSamplingStrategy: + type: object + properties: + type: + type: string + const: top_k + default: top_k + top_k: + type: integer + additionalProperties: false + required: + - type + - top_k + title: TopKSamplingStrategy + TopPSamplingStrategy: + type: object + properties: + type: + type: string + const: top_p + default: top_p + temperature: + type: number + top_p: + type: number + default: 0.95 + additionalProperties: false + required: + - type + title: TopPSamplingStrategy + URL: + type: object + properties: + uri: + type: string + additionalProperties: false + required: + - uri + title: URL UserMessage: type: object properties: @@ -2778,6 +2220,54 @@ components: required: - job_uuid title: CancelTrainingJobRequest + ToolConfig: + type: object + properties: + tool_choice: + oneOf: + - type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following + capabilities of the model. + - type: string + default: auto + description: >- + (Optional) Whether tool use is automatic, required, or none. Can also + specify a tool name to use a specific tool. Defaults to ToolChoice.auto. + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + description: >- + (Optional) Instructs the model how to format tool calls. By default, Llama + Stack will attempt to use a format that is best adapted to the model. + - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. + - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python + syntax -- a list of function calls. + system_message_behavior: + type: string + enum: + - append + - replace + description: >- + (Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: + Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: + Replaces the default system prompt with the provided system message. The + system message can include the string '{{function_definitions}}' to indicate + where the function definitions should be inserted. + default: append + additionalProperties: false + title: ToolConfig + description: Configuration for tool use. ChatCompletionRequest: type: object properties: @@ -3041,6 +2531,142 @@ components: title: CompletionResponseStreamChunk description: >- A chunk of a streamed completion response. 
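
The sampling-strategy and streaming schemas above translate into client calls along these lines. A hedged sketch: the `sampling_params` shape mirrors `TopPSamplingStrategy` from the spec, but the per-chunk field layout (`event.delta`) is assumed from the client library and may differ:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed address

# Stream a chat completion using a top-p sampling strategy
for chunk in client.inference.chat_completion(
    model_id="meta-llama/Llama-3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    sampling_params={
        "strategy": {"type": "top_p", "top_p": 0.95, "temperature": 0.7},
    },
    stream=True,  # yields CompletionResponseStreamChunk-shaped events
):
    print(chunk.event.delta, end="")  # assumed chunk layout
```
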
+ AgentConfig: + type: object + properties: + sampling_params: + $ref: '#/components/schemas/SamplingParams' + input_shields: + type: array + items: + type: string + output_shields: + type: array + items: + type: string + toolgroups: + type: array + items: + $ref: '#/components/schemas/AgentTool' + client_tools: + type: array + items: + $ref: '#/components/schemas/ToolDef' + tool_choice: + type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following capabilities + of the model. + deprecated: true + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + title: ToolPromptFormat + description: >- + Prompt format for calling custom / zero shot tools. + deprecated: true + tool_config: + $ref: '#/components/schemas/ToolConfig' + max_infer_iters: + type: integer + default: 10 + model: + type: string + instructions: + type: string + enable_session_persistence: + type: boolean + default: false + response_format: + $ref: '#/components/schemas/ResponseFormat' + additionalProperties: false + required: + - model + - instructions + title: AgentConfig + AgentTool: + oneOf: + - type: string + - type: object + properties: + name: + type: string + args: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - name + - args + title: AgentToolGroupWithArgs + ToolDef: + type: object + properties: + name: + type: string + description: + type: string + parameters: + type: array + items: + $ref: '#/components/schemas/ToolParameter' + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - name + title: ToolDef + ToolParameter: + type: object + properties: + name: + type: string + parameter_type: + type: string + description: + type: string + required: + type: boolean + default: true + default: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - name + - parameter_type + - description + - required + title: ToolParameter CreateAgentRequest: type: object properties: @@ -3508,6 +3134,87 @@ components: - event_type - turn_id title: AgentTurnResponseTurnStartPayload + CreateUploadSessionRequest: + type: object + properties: + bucket: + type: string + description: >- + Bucket under which the file is stored (valid chars: a-zA-Z0-9_-) + key: + type: string + description: >- + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) 
+ mime_type: + type: string + description: MIME type of the file + size: + type: integer + description: File size in bytes + additionalProperties: false + required: + - bucket + - key + - mime_type + - size + title: CreateUploadSessionRequest + FileUploadResponse: + type: object + properties: + id: + type: string + description: ID of the upload session + url: + type: string + description: Upload URL for the file or file parts + offset: + type: integer + description: Upload content offset + size: + type: integer + description: Upload content size + additionalProperties: false + required: + - id + - url + - offset + - size + title: FileUploadResponse + description: >- + Response after initiating a file upload session. + FileResponse: + type: object + properties: + bucket: + type: string + description: >- + Bucket under which the file is stored (valid chars: a-zA-Z0-9_-) + key: + type: string + description: >- + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + mime_type: + type: string + description: MIME type of the file + url: + type: string + description: Upload URL for the file contents + bytes: + type: integer + description: Size of the file in bytes + created_at: + type: integer + description: Timestamp of when the file was created + additionalProperties: false + required: + - bucket + - key + - mime_type + - url + - bytes + - created_at + title: FileResponse + description: Response representing a file entry. EmbeddingsRequest: type: object properties: @@ -3548,6 +3255,141 @@ components: title: EmbeddingsResponse description: >- Response containing generated embeddings. + AgentCandidate: + type: object + properties: + type: + type: string + const: agent + default: agent + config: + $ref: '#/components/schemas/AgentConfig' + additionalProperties: false + required: + - type + - config + title: AgentCandidate + AggregationFunctionType: + type: string + enum: + - average + - median + - categorical_count + - accuracy + title: AggregationFunctionType + BasicScoringFnParams: + type: object + properties: + type: + type: string + const: basic + default: basic + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + additionalProperties: false + required: + - type + title: BasicScoringFnParams + BenchmarkConfig: + type: object + properties: + eval_candidate: + $ref: '#/components/schemas/EvalCandidate' + scoring_params: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringFnParams' + num_examples: + type: integer + additionalProperties: false + required: + - eval_candidate + - scoring_params + title: BenchmarkConfig + EvalCandidate: + oneOf: + - $ref: '#/components/schemas/ModelCandidate' + - $ref: '#/components/schemas/AgentCandidate' + discriminator: + propertyName: type + mapping: + model: '#/components/schemas/ModelCandidate' + agent: '#/components/schemas/AgentCandidate' + LLMAsJudgeScoringFnParams: + type: object + properties: + type: + type: string + const: llm_as_judge + default: llm_as_judge + judge_model: + type: string + prompt_template: + type: string + judge_score_regexes: + type: array + items: + type: string + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + additionalProperties: false + required: + - type + - judge_model + title: LLMAsJudgeScoringFnParams + ModelCandidate: + type: object + properties: + type: + type: string + const: model + default: model + model: + type: string + sampling_params: + $ref: 
'#/components/schemas/SamplingParams' + system_message: + $ref: '#/components/schemas/SystemMessage' + additionalProperties: false + required: + - type + - model + - sampling_params + title: ModelCandidate + RegexParserScoringFnParams: + type: object + properties: + type: + type: string + const: regex_parser + default: regex_parser + parsing_regexes: + type: array + items: + type: string + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + additionalProperties: false + required: + - type + title: RegexParserScoringFnParams + ScoringFnParams: + oneOf: + - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' + - $ref: '#/components/schemas/RegexParserScoringFnParams' + - $ref: '#/components/schemas/BasicScoringFnParams' + discriminator: + propertyName: type + mapping: + llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' + regex_parser: '#/components/schemas/RegexParserScoringFnParams' + basic: '#/components/schemas/BasicScoringFnParams' EvaluateRowsRequest: type: object properties: @@ -3575,6 +3417,60 @@ components: - scoring_functions - task_config title: EvaluateRowsRequest + EvaluateResponse: + type: object + properties: + generations: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + scores: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringResult' + additionalProperties: false + required: + - generations + - scores + title: EvaluateResponse + ScoringResult: + type: object + properties: + score_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + aggregated_results: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - score_rows + - aggregated_results + title: ScoringResult Session: type: object properties: @@ -3618,6 +3514,45 @@ components: required: - step title: AgentStepResponse + Benchmark: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + const: benchmark + default: benchmark + dataset_id: + type: string + scoring_functions: + type: array + items: + type: string + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - identifier + - provider_resource_id + - provider_id + - type + - dataset_id + - scoring_functions + - metadata + title: Benchmark AgentTurnInputType: type: object properties: @@ -3961,6 +3896,17 @@ components: - name - start_time title: Span + GetSpanTreeRequest: + type: object + properties: + attributes_to_return: + type: array + items: + type: string + max_depth: + type: integer + additionalProperties: false + title: GetSpanTreeRequest SpanStatus: type: string enum: @@ -4134,6 +4080,14 @@ components: - checkpoints title: PostTrainingJobArtifactsResponse description: Artifacts of a finetuning job. 
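
`PostTrainingJobArtifactsResponse` pairs with the job-status polling flow. A hedged sketch, where the `job_uuid` value is a placeholder and the `status`/`checkpoints` field names are assumed from the schemas in this spec:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed address

JOB_UUID = "job-1234"  # placeholder for a previously submitted fine-tuning job

# Poll the job, then fetch its checkpoints once it completes
status = client.post_training.job.status(job_uuid=JOB_UUID)
if status is not None and status.status == "completed":
    artifacts = client.post_training.job.artifacts(job_uuid=JOB_UUID)
    for checkpoint in artifacts.checkpoints:
        print(checkpoint)
```
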
+ JobStatus: + type: string + enum: + - completed + - in_progress + - failed + - scheduled + title: JobStatus PostTrainingJobStatusResponse: type: object properties: @@ -4339,6 +4293,40 @@ components: required: - content title: ToolInvocationResult + BucketResponse: + type: object + properties: + name: + type: string + additionalProperties: false + required: + - name + title: BucketResponse + ListBucketResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/BucketResponse' + description: List of FileResponse entries + additionalProperties: false + required: + - data + title: ListBucketResponse + description: >- + Response representing a list of file entries. + ListBenchmarksResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Benchmark' + additionalProperties: false + required: + - data + title: ListBenchmarksResponse ListDatasetsResponse: type: object properties: @@ -4350,6 +4338,20 @@ components: required: - data title: ListDatasetsResponse + ListFileResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/FileResponse' + description: List of FileResponse entries + additionalProperties: false + required: + - data + title: ListFileResponse + description: >- + Response representing a list of file entries. ListModelsResponse: type: object properties: @@ -4947,6 +4949,24 @@ components: - gt - lt title: QueryConditionOp + QuerySpansRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + attributes_to_return: + type: array + items: + type: string + max_depth: + type: integer + additionalProperties: false + required: + - attribute_filters + - attributes_to_return + title: QuerySpansRequest QuerySpansResponse: type: object properties: @@ -4958,6 +4978,23 @@ components: required: - data title: QuerySpansResponse + QueryTracesRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + limit: + type: integer + offset: + type: integer + order_by: + type: array + items: + type: string + additionalProperties: false + title: QueryTracesRequest QueryTracesResponse: type: object properties: @@ -5151,6 +5188,15 @@ components: required: - task_config title: RunEvalRequest + Job: + type: object + properties: + job_id: + type: string + additionalProperties: false + required: + - job_id + title: Job RunShieldRequest: type: object properties: @@ -5467,6 +5513,7 @@ tags: - name: DatasetIO - name: Datasets - name: Eval + - name: Files (Coming Soon) - name: Inference description: >- This API provides the raw interface to the underlying models. 
Two kinds of models @@ -5501,6 +5548,7 @@ x-tagGroups: - DatasetIO - Datasets - Eval + - Files (Coming Soon) - Inference - Inspect - Models diff --git a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb index 8eecf84ab..f3f41b18a 100644 --- a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb +++ b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb @@ -1017,14 +1017,14 @@ " \"content\": SYSTEM_PROMPT_TEMPLATE.format(subject=subset),\n", "}\n", "\n", - "client.eval_tasks.register(\n", - " eval_task_id=\"meta-reference::mmmu\",\n", + "client.benchmarks.register(\n", + " benchmark_id=\"meta-reference::mmmu\",\n", " dataset_id=f\"mmmu-{subset}-{split}\",\n", " scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n", ")\n", "\n", - "response = client.eval.evaluate_rows(\n", - " task_id=\"meta-reference::mmmu\",\n", + "response = client.eval.evaluate_rows_alpha(\n", + " benchmark_id=\"meta-reference::mmmu\",\n", " input_rows=eval_rows,\n", " scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n", " task_config={\n", @@ -1196,14 +1196,14 @@ " provider_id=\"together\",\n", ")\n", "\n", - "client.eval_tasks.register(\n", - " eval_task_id=\"meta-reference::simpleqa\",\n", + "client.benchmarks.register(\n", + " benchmark_id=\"meta-reference::simpleqa\",\n", " dataset_id=simpleqa_dataset_id,\n", " scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n", ")\n", "\n", - "response = client.eval.evaluate_rows(\n", - " task_id=\"meta-reference::simpleqa\",\n", + "response = client.eval.evaluate_rows_alpha(\n", + " benchmark_id=\"meta-reference::simpleqa\",\n", " input_rows=eval_rows.rows,\n", " scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n", " task_config={\n", @@ -1351,8 +1351,8 @@ " \"enable_session_persistence\": False,\n", "}\n", "\n", - "response = client.eval.evaluate_rows(\n", - " task_id=\"meta-reference::simpleqa\",\n", + "response = client.eval.evaluate_rows_alpha(\n", + " benchmark_id=\"meta-reference::simpleqa\",\n", " input_rows=eval_rows.rows,\n", " scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n", " task_config={\n", diff --git a/docs/openapi_generator/README.md b/docs/openapi_generator/README.md index e98cfaf1b..70df1da21 100644 --- a/docs/openapi_generator/README.md +++ b/docs/openapi_generator/README.md @@ -3,7 +3,7 @@ The RFC Specification (OpenAPI format) is generated from the set of API endpoint Please install the following packages before running the script: ``` -pip install python-openapi json-strong-typing fire PyYAML llama-models +pip install fire PyYAML llama-models ``` Then simply run `sh run_openapi_generator.sh` diff --git a/docs/openapi_generator/pyopenapi/generator.py b/docs/openapi_generator/pyopenapi/generator.py index 60cd7a242..4220cfc05 100644 --- a/docs/openapi_generator/pyopenapi/generator.py +++ b/docs/openapi_generator/pyopenapi/generator.py @@ -477,6 +477,7 @@ class Generator: "SyntheticDataGeneration", "PostTraining", "BatchInference", + "Files", ]: op.defining_class.__name__ = f"{op.defining_class.__name__} (Coming Soon)" print(op.defining_class.__name__) @@ -520,8 +521,30 @@ class Generator: # parameters passed anywhere parameters = path_parameters + query_parameters - # data passed in payload - if op.request_params: + webmethod = getattr(op.func_ref, "__webmethod__", None) + raw_bytes_request_body = False + if webmethod: + raw_bytes_request_body = getattr(webmethod, "raw_bytes_request_body", False) + + # data passed in request body as raw bytes cannot have request 
parameters + if raw_bytes_request_body and op.request_params: + raise ValueError("Cannot have both raw bytes request body and request parameters") + + # data passed in request body as raw bytes + if raw_bytes_request_body: + requestBody = RequestBody( + content={ + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary", + } + } + }, + required=True, + ) + # data passed in payload as JSON and mapped to request parameters + elif op.request_params: builder = ContentBuilder(self.schema_builder) first = next(iter(op.request_params)) request_name, request_type = first diff --git a/docs/openapi_generator/pyopenapi/operations.py b/docs/openapi_generator/pyopenapi/operations.py index 88a403182..5c78b9124 100644 --- a/docs/openapi_generator/pyopenapi/operations.py +++ b/docs/openapi_generator/pyopenapi/operations.py @@ -150,7 +150,14 @@ def _get_endpoint_functions( print(f"Processing {colored(func_name, 'white')}...") operation_name = func_name - if operation_name.startswith("get_") or operation_name.endswith("/get"): + + if webmethod.method == "GET": + prefix = "get" + elif webmethod.method == "DELETE": + prefix = "delete" + elif webmethod.method == "POST": + prefix = "post" + elif operation_name.startswith("get_") or operation_name.endswith("/get"): prefix = "get" elif ( operation_name.startswith("delete_") @@ -160,13 +167,8 @@ def _get_endpoint_functions( ): prefix = "delete" else: - if webmethod.method == "GET": - prefix = "get" - elif webmethod.method == "DELETE": - prefix = "delete" - else: - # by default everything else is a POST - prefix = "post" + # by default everything else is a POST + prefix = "post" yield prefix, operation_name, func_name, func_ref diff --git a/docs/openapi_generator/pyopenapi/specification.py b/docs/openapi_generator/pyopenapi/specification.py index 9e5363b4a..d3e5a1f19 100644 --- a/docs/openapi_generator/pyopenapi/specification.py +++ b/docs/openapi_generator/pyopenapi/specification.py @@ -78,7 +78,7 @@ class MediaType: @dataclass class RequestBody: - content: Dict[str, MediaType] + content: Dict[str, MediaType | Dict[str, Any]] description: Optional[str] = None required: Optional[bool] = None diff --git a/docs/requirements.txt b/docs/requirements.txt index b288ea1aa..4276c1800 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -sphinx +sphinx==8.1.3 myst-parser linkify -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme diff --git a/docs/source/building_applications/rag.md b/docs/source/building_applications/rag.md index 5287a2367..e6d628193 100644 --- a/docs/source/building_applications/rag.md +++ b/docs/source/building_applications/rag.md @@ -122,3 +122,20 @@ response = agent.create_turn( session_id=session_id, ) ``` + +### Unregistering Vector DBs + +If you need to clean up and unregister vector databases, you can do so as follows: + +```python +# Unregister a specified vector database +vector_db_id = "my_vector_db_id" +print(f"Unregistering vector database: {vector_db_id}") +client.vector_dbs.unregister(vector_db_id=vector_db_id) + + +# Unregister all vector databases +for vector_db_id in client.vector_dbs.list(): + print(f"Unregistering vector database: {vector_db_id.identifier}") + client.vector_dbs.unregister(vector_db_id=vector_db_id.identifier) +``` diff --git a/docs/source/building_applications/tools.md b/docs/source/building_applications/tools.md index c0f6230db..323374673 100644 --- a/docs/source/building_applications/tools.md +++ 
b/docs/source/building_applications/tools.md @@ -60,6 +60,11 @@ Features: - Disabled dangerous system operations - Configurable execution timeouts +> ⚠️ Important: The code interpreter tool can operate in a controlled enviroment locally or on Podman containers. To ensure proper functionality in containerised environments: +> - The container requires privileged access (e.g., --privileged). +> - Users without sufficient permissions may encounter permission errors. (`bwrap: Can't mount devpts on /newroot/dev/pts: Permission denied`) +> - 🔒 Security Warning: Privileged mode grants elevated access and bypasses security restrictions. Use only in local, isolated, or controlled environments. + #### WolframAlpha The WolframAlpha tool provides access to computational knowledge through the WolframAlpha API. @@ -103,7 +108,7 @@ Features: MCP tools are special tools that can interact with llama stack over model context protocol. These tools are dynamically discovered from an MCP endpoint and can be used to extend the agent's capabilities. -Refer to https://github.com/modelcontextprotocol/server for available MCP servers. +Refer to [https://github.com/modelcontextprotocol/servers](https://github.com/modelcontextprotocol/servers) for available MCP servers. ```python # Register MCP tools @@ -191,3 +196,36 @@ all_tools = client.tools.list_tools() # List tools in a specific group group_tools = client.tools.list_tools(toolgroup_id="search_tools") ``` + +## Simple Example: Using an Agent with the Code-Interpreter Tool + +```python +from llama_stack_client.lib.agents.agent import Agent +from llama_stack_client.types.agent_create_params import AgentConfig + +# Configure the AI agent with necessary parameters +agent_config = AgentConfig( + name="code-interpreter", + description="A code interpreter agent for executing Python code snippets", + instructions=""" + You are a highly reliable, concise, and precise assistant. + Always show the generated code, never generate your own code, and never anticipate results. + """, + model="meta-llama/Llama-3.2-3B-Instruct", + toolgroups=["builtin::code_interpreter"], + max_infer_iters=5, + enable_session_persistence=False, +) + +# Instantiate the AI agent with the given configuration +agent = Agent(client, agent_config) + +# Start a session +session_id = agent.create_session("tool_session") + +# Send a query to the AI agent for code execution +response = agent.create_turn( + messages=[{"role": "user", "content": "Run this code: print(3 ** 4 - 5 * 2)"}], + session_id=session_id, +) +``` diff --git a/docs/source/concepts/index.md b/docs/source/concepts/index.md index 403e47c48..df46e0134 100644 --- a/docs/source/concepts/index.md +++ b/docs/source/concepts/index.md @@ -13,6 +13,7 @@ A Llama Stack API is described as a collection of REST endpoints. We currently s - **DatasetIO**: interface with datasets and data loaders - **Scoring**: evaluate outputs of the system - **Eval**: generate outputs (via Inference or Agents) and perform scoring +- **VectorIO**: perform operations on vector stores, such as adding documents, searching, and deleting documents - **Telemetry**: collect telemetry data from the system We are working on adding a few more APIs to complete the application lifecycle. These will include: @@ -41,6 +42,7 @@ Some of these APIs are associated with a set of **Resources**. Here is the mappi - **Safety** is associated with `Shield` resources. - **Tool Runtime** is associated with `ToolGroup` resources. - **DatasetIO** is associated with `Dataset` resources. 
+- **VectorIO** is associated with `VectorDB` resources. - **Scoring** is associated with `ScoringFunction` resources. - **Eval** is associated with `Model` and `Benchmark` resources. diff --git a/docs/source/conf.py b/docs/source/conf.py index 193d661fd..a876333db 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -93,9 +93,10 @@ html_theme_options = { html_static_path = ["../_static"] # html_logo = "../_static/llama-stack-logo.png" -html_style = "../_static/css/my_theme.css" +# html_style = "../_static/css/my_theme.css" def setup(app): + app.add_css_file("css/my_theme.css") def dockerhub_role(name, rawtext, text, lineno, inliner, options={}, content=[]): url = f"https://hub.docker.com/r/llamastack/{text}" node = nodes.reference(rawtext, text, refuri=url, **options) diff --git a/docs/source/contributing/new_api_provider.md b/docs/source/contributing/new_api_provider.md index 06c44ab0d..dbbdb9646 100644 --- a/docs/source/contributing/new_api_provider.md +++ b/docs/source/contributing/new_api_provider.md @@ -6,7 +6,7 @@ This guide will walk you through the process of adding a new API provider to Lla - Begin by reviewing the [core concepts](../concepts/index.md) of Llama Stack and choose the API your provider belongs to (Inference, Safety, VectorIO, etc.) - Determine the provider type ({repopath}`Remote::llama_stack/providers/remote` or {repopath}`Inline::llama_stack/providers/inline`). Remote providers make requests to external services, while inline providers execute implementation locally. - Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`. Specify pip dependencies necessary. -- Update any distribution {repopath}`Templates::llama_stack/templates/` build.yaml and run.yaml files if they should include your provider by default. Run {repopath}`llama_stack/scripts/distro_codegen.py` if necessary. +- Update any distribution {repopath}`Templates::llama_stack/templates/` build.yaml and run.yaml files if they should include your provider by default. Run {repopath}`llama_stack/scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation. 
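
+To make the import constraint concrete, here is a hedged sketch of a template module that stays `distro_codegen`-safe; the provider name and config class are hypothetical:
+
+```python
+# Hypothetical llama_stack/templates/acme/acme.py
+
+# OK at module import time: lightweight Config / model-alias definitions only.
+from llama_stack.providers.remote.inference.acme.config import AcmeConfig  # hypothetical
+
+# NOT OK at module import time: this drags in the provider's runtime
+# dependencies and will break distro_codegen for every template.
+# from llama_stack.providers.remote.inference.acme.acme import AcmeInferenceAdapter
+
+
+def get_distribution_template():
+    # Reference the provider by its type string; only touch AcmeConfig for
+    # sample run configuration, never the adapter implementation.
+    ...
+```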
Here are some example PRs to help you get started: diff --git a/docs/source/distributions/configuration.md b/docs/source/distributions/configuration.md index d12f584f7..0f766dcd5 100644 --- a/docs/source/distributions/configuration.md +++ b/docs/source/distributions/configuration.md @@ -10,7 +10,7 @@ conda_env: ollama apis: - agents - inference -- memory +- vector_io - safety - telemetry providers: @@ -19,7 +19,7 @@ providers: provider_type: remote::ollama config: url: ${env.OLLAMA_URL:http://localhost:11434} - memory: + vector_io: - provider_id: faiss provider_type: inline::faiss config: diff --git a/docs/source/distributions/self_hosted_distro/bedrock.md b/docs/source/distributions/self_hosted_distro/bedrock.md index 64c9f8c19..14f004926 100644 --- a/docs/source/distributions/self_hosted_distro/bedrock.md +++ b/docs/source/distributions/self_hosted_distro/bedrock.md @@ -61,7 +61,8 @@ docker run \ --port $LLAMA_STACK_PORT \ --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ - --env AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN + --env AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \ + --env AWS_DEFAULT_REGION=$AWS_DEFAULT_REGION ``` ### Via Conda @@ -72,5 +73,6 @@ llama stack run ./run.yaml \ --port $LLAMA_STACK_PORT \ --env AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ --env AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ - --env AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN + --env AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \ + --env AWS_DEFAULT_REGION=$AWS_DEFAULT_REGION ``` diff --git a/docs/source/distributions/self_hosted_distro/fireworks.md b/docs/source/distributions/self_hosted_distro/fireworks.md index f77d9f656..7951e148e 100644 --- a/docs/source/distributions/self_hosted_distro/fireworks.md +++ b/docs/source/distributions/self_hosted_distro/fireworks.md @@ -47,6 +47,7 @@ The following models are available by default: - `meta-llama/Llama-3.3-70B-Instruct (accounts/fireworks/models/llama-v3p3-70b-instruct)` - `meta-llama/Llama-Guard-3-8B (accounts/fireworks/models/llama-guard-3-8b)` - `meta-llama/Llama-Guard-3-11B-Vision (accounts/fireworks/models/llama-guard-3-11b-vision)` +- `nomic-ai/nomic-embed-text-v1.5 (nomic-ai/nomic-embed-text-v1.5)` ### Prerequisite: API Keys diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md index a3a45f9a8..2fa796e81 100644 --- a/docs/source/distributions/self_hosted_distro/ollama.md +++ b/docs/source/distributions/self_hosted_distro/ollama.md @@ -130,7 +130,7 @@ llama stack run ./run-with-safety.yaml \ ### (Optional) Update Model Serving Configuration ```{note} -Please check the [model_aliases](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models. +Please check the [model_entries](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models. 
``` To serve a new model with `ollama` diff --git a/docs/source/distributions/self_hosted_distro/together.md b/docs/source/distributions/self_hosted_distro/together.md index 8e36c1eb0..936ae58f5 100644 --- a/docs/source/distributions/self_hosted_distro/together.md +++ b/docs/source/distributions/self_hosted_distro/together.md @@ -46,6 +46,8 @@ The following models are available by default: - `meta-llama/Llama-3.3-70B-Instruct` - `meta-llama/Llama-Guard-3-8B` - `meta-llama/Llama-Guard-3-11B-Vision` +- `togethercomputer/m2-bert-80M-8k-retrieval` +- `togethercomputer/m2-bert-80M-32k-retrieval` ### Prerequisite: API Keys diff --git a/docs/source/getting_started/index.md b/docs/source/getting_started/index.md index b28b9afa3..554f4354a 100644 --- a/docs/source/getting_started/index.md +++ b/docs/source/getting_started/index.md @@ -214,10 +214,16 @@ documents = [ for i, url in enumerate(urls) ] +vector_providers = [ + provider for provider in client.providers.list() if provider.api == "vector_io" +] +provider_id = vector_providers[0].provider_id # Use the first available vector provider + # Register a vector database vector_db_id = f"test-vector-db-{uuid.uuid4().hex}" client.vector_dbs.register( vector_db_id=vector_db_id, + provider_id=provider_id, embedding_model="all-MiniLM-L6-v2", embedding_dimension=384, ) diff --git a/docs/source/references/python_sdk_reference/index.md b/docs/source/references/python_sdk_reference/index.md index 9d1130422..b1a9396fe 100644 --- a/docs/source/references/python_sdk_reference/index.md +++ b/docs/source/references/python_sdk_reference/index.md @@ -42,10 +42,10 @@ from llama_stack_client.types import ( Methods: -- client.toolgroups.list() -> ToolgroupListResponse -- client.toolgroups.get(toolgroup_id) -> ToolGroup -- client.toolgroups.register(\*\*params) -> None -- client.toolgroups.unregister(toolgroup_id) -> None +- client.toolgroups.list() -> ToolgroupListResponse +- client.toolgroups.get(toolgroup_id) -> ToolGroup +- client.toolgroups.register(\*\*params) -> None +- client.toolgroups.unregister(toolgroup_id) -> None ## Tools @@ -57,8 +57,8 @@ from llama_stack_client.types import ListToolsResponse, Tool, ToolListResponse Methods: -- client.tools.list(\*\*params) -> ToolListResponse -- client.tools.get(tool_name) -> Tool +- client.tools.list(\*\*params) -> ToolListResponse +- client.tools.get(tool_name) -> Tool ## ToolRuntime @@ -70,15 +70,15 @@ from llama_stack_client.types import ToolDef, ToolInvocationResult Methods: -- client.tool_runtime.invoke_tool(\*\*params) -> ToolInvocationResult -- client.tool_runtime.list_tools(\*\*params) -> JSONLDecoder[ToolDef] +- client.tool_runtime.invoke_tool(\*\*params) -> ToolInvocationResult +- client.tool_runtime.list_tools(\*\*params) -> JSONLDecoder[ToolDef] ### RagTool Methods: -- client.tool_runtime.rag_tool.insert(\*\*params) -> None -- client.tool_runtime.rag_tool.query(\*\*params) -> QueryResult +- client.tool_runtime.rag_tool.insert(\*\*params) -> None +- client.tool_runtime.rag_tool.query(\*\*params) -> QueryResult ## Agents @@ -97,8 +97,8 @@ from llama_stack_client.types import ( Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.delete(agent_id) -> None +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.delete(agent_id) -> None ### Session @@ -110,9 +110,9 @@ from llama_stack_client.types.agents import Session, SessionCreateResponse Methods: -- client.agents.session.create(agent_id, \*\*params) -> SessionCreateResponse -- 
client.agents.session.retrieve(session_id, \*, agent_id, \*\*params) -> Session -- client.agents.session.delete(session_id, \*, agent_id) -> None +- client.agents.session.create(agent_id, \*\*params) -> SessionCreateResponse +- client.agents.session.retrieve(session_id, \*, agent_id, \*\*params) -> Session +- client.agents.session.delete(session_id, \*, agent_id) -> None ### Steps @@ -124,7 +124,7 @@ from llama_stack_client.types.agents import StepRetrieveResponse Methods: -- client.agents.steps.retrieve(step_id, \*, agent_id, session_id, turn_id) -> StepRetrieveResponse +- client.agents.steps.retrieve(step_id, \*, agent_id, session_id, turn_id) -> StepRetrieveResponse ### Turn @@ -136,8 +136,8 @@ from llama_stack_client.types.agents import Turn, TurnCreateResponse Methods: -- client.agents.turn.create(session_id, \*, agent_id, \*\*params) -> TurnCreateResponse -- client.agents.turn.retrieve(turn_id, \*, agent_id, session_id) -> Turn +- client.agents.turn.create(session_id, \*, agent_id, \*\*params) -> TurnCreateResponse +- client.agents.turn.retrieve(turn_id, \*, agent_id, session_id) -> Turn ## BatchInference @@ -149,8 +149,8 @@ from llama_stack_client.types import BatchInferenceChatCompletionResponse Methods: -- client.batch_inference.chat_completion(\*\*params) -> BatchInferenceChatCompletionResponse -- client.batch_inference.completion(\*\*params) -> BatchCompletion +- client.batch_inference.chat_completion(\*\*params) -> BatchInferenceChatCompletionResponse +- client.batch_inference.completion(\*\*params) -> BatchCompletion ## Datasets @@ -166,10 +166,10 @@ from llama_stack_client.types import ( Methods: -- client.datasets.retrieve(dataset_id) -> Optional[DatasetRetrieveResponse] -- client.datasets.list() -> DatasetListResponse -- client.datasets.register(\*\*params) -> None -- client.datasets.unregister(dataset_id) -> None +- client.datasets.retrieve(dataset_id) -> Optional[DatasetRetrieveResponse] +- client.datasets.list() -> DatasetListResponse +- client.datasets.register(\*\*params) -> None +- client.datasets.unregister(dataset_id) -> None ## Eval @@ -181,8 +181,8 @@ from llama_stack_client.types import EvaluateResponse, Job Methods: -- client.eval.evaluate_rows(benchmark_id, \*\*params) -> EvaluateResponse -- client.eval.run_eval(benchmark_id, \*\*params) -> Job +- client.eval.evaluate_rows(benchmark_id, \*\*params) -> EvaluateResponse +- client.eval.run_eval(benchmark_id, \*\*params) -> Job ### Jobs @@ -194,9 +194,9 @@ from llama_stack_client.types.eval import JobStatusResponse Methods: -- client.eval.jobs.retrieve(job_id, \*, benchmark_id) -> EvaluateResponse -- client.eval.jobs.cancel(job_id, \*, benchmark_id) -> None -- client.eval.jobs.status(job_id, \*, benchmark_id) -> Optional[JobStatusResponse] +- client.eval.jobs.retrieve(job_id, \*, benchmark_id) -> EvaluateResponse +- client.eval.jobs.cancel(job_id, \*, benchmark_id) -> None +- client.eval.jobs.status(job_id, \*, benchmark_id) -> Optional[JobStatusResponse] ## Inspect @@ -208,8 +208,8 @@ from llama_stack_client.types import HealthInfo, ProviderInfo, RouteInfo, Versio Methods: -- client.inspect.health() -> HealthInfo -- client.inspect.version() -> VersionInfo +- client.inspect.health() -> HealthInfo +- client.inspect.version() -> VersionInfo ## Inference @@ -227,9 +227,9 @@ from llama_stack_client.types import ( Methods: -- client.inference.chat_completion(\*\*params) -> InferenceChatCompletionResponse -- client.inference.completion(\*\*params) -> InferenceCompletionResponse -- client.inference.embeddings(\*\*params) 
-> EmbeddingsResponse +- client.inference.chat_completion(\*\*params) -> InferenceChatCompletionResponse +- client.inference.completion(\*\*params) -> InferenceCompletionResponse +- client.inference.embeddings(\*\*params) -> EmbeddingsResponse ## VectorIo @@ -241,8 +241,8 @@ from llama_stack_client.types import QueryChunksResponse Methods: -- client.vector_io.insert(\*\*params) -> None -- client.vector_io.query(\*\*params) -> QueryChunksResponse +- client.vector_io.insert(\*\*params) -> None +- client.vector_io.query(\*\*params) -> QueryChunksResponse ## VectorDBs @@ -259,10 +259,10 @@ from llama_stack_client.types import ( Methods: -- client.vector_dbs.retrieve(vector_db_id) -> Optional[VectorDBRetrieveResponse] -- client.vector_dbs.list() -> VectorDBListResponse -- client.vector_dbs.register(\*\*params) -> VectorDBRegisterResponse -- client.vector_dbs.unregister(vector_db_id) -> None +- client.vector_dbs.retrieve(vector_db_id) -> Optional[VectorDBRetrieveResponse] +- client.vector_dbs.list() -> VectorDBListResponse +- client.vector_dbs.register(\*\*params) -> VectorDBRegisterResponse +- client.vector_dbs.unregister(vector_db_id) -> None ## Models @@ -274,10 +274,10 @@ from llama_stack_client.types import ListModelsResponse, Model, ModelListRespons Methods: -- client.models.retrieve(model_id) -> Optional[Model] -- client.models.list() -> ModelListResponse -- client.models.register(\*\*params) -> Model -- client.models.unregister(model_id) -> None +- client.models.retrieve(model_id) -> Optional[Model] +- client.models.list() -> ModelListResponse +- client.models.register(\*\*params) -> Model +- client.models.unregister(model_id) -> None ## PostTraining @@ -289,8 +289,8 @@ from llama_stack_client.types import ListPostTrainingJobsResponse, PostTrainingJ Methods: -- client.post_training.preference_optimize(\*\*params) -> PostTrainingJob -- client.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob +- client.post_training.preference_optimize(\*\*params) -> PostTrainingJob +- client.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob ### Job @@ -306,10 +306,10 @@ from llama_stack_client.types.post_training import ( Methods: -- client.post_training.job.list() -> JobListResponse -- client.post_training.job.artifacts(\*\*params) -> Optional[JobArtifactsResponse] -- client.post_training.job.cancel(\*\*params) -> None -- client.post_training.job.status(\*\*params) -> Optional[JobStatusResponse] +- client.post_training.job.list() -> JobListResponse +- client.post_training.job.artifacts(\*\*params) -> Optional[JobArtifactsResponse] +- client.post_training.job.cancel(\*\*params) -> None +- client.post_training.job.status(\*\*params) -> Optional[JobStatusResponse] ## Providers @@ -321,7 +321,7 @@ from llama_stack_client.types import ListProvidersResponse, ProviderListResponse Methods: -- client.providers.list() -> ProviderListResponse +- client.providers.list() -> ProviderListResponse ## Routes @@ -333,7 +333,7 @@ from llama_stack_client.types import ListRoutesResponse, RouteListResponse Methods: -- client.routes.list() -> RouteListResponse +- client.routes.list() -> RouteListResponse ## Safety @@ -345,7 +345,7 @@ from llama_stack_client.types import RunShieldResponse Methods: -- client.safety.run_shield(\*\*params) -> RunShieldResponse +- client.safety.run_shield(\*\*params) -> RunShieldResponse ## Shields @@ -357,9 +357,9 @@ from llama_stack_client.types import ListShieldsResponse, Shield, ShieldListResp Methods: -- client.shields.retrieve(identifier) -> Optional[Shield] -- 
client.shields.list() -> ShieldListResponse -- client.shields.register(\*\*params) -> Shield +- client.shields.retrieve(identifier) -> Optional[Shield] +- client.shields.list() -> ShieldListResponse +- client.shields.register(\*\*params) -> Shield ## SyntheticDataGeneration @@ -371,7 +371,7 @@ from llama_stack_client.types import SyntheticDataGenerationResponse Methods: -- client.synthetic_data_generation.generate(\*\*params) -> SyntheticDataGenerationResponse +- client.synthetic_data_generation.generate(\*\*params) -> SyntheticDataGenerationResponse ## Telemetry @@ -391,13 +391,13 @@ from llama_stack_client.types import ( Methods: -- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse -- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse -- client.telemetry.get_trace(trace_id) -> Trace -- client.telemetry.log_event(\*\*params) -> None -- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse -- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse -- client.telemetry.save_spans_to_dataset(\*\*params) -> None +- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse +- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse +- client.telemetry.get_trace(trace_id) -> Trace +- client.telemetry.log_event(\*\*params) -> None +- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse +- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse +- client.telemetry.save_spans_to_dataset(\*\*params) -> None ## Datasetio @@ -409,8 +409,8 @@ from llama_stack_client.types import PaginatedRowsResult Methods: -- client.datasetio.append_rows(\*\*params) -> None -- client.datasetio.get_rows_paginated(\*\*params) -> PaginatedRowsResult +- client.datasetio.append_rows(\*\*params) -> None +- client.datasetio.get_rows_paginated(\*\*params) -> PaginatedRowsResult ## Scoring @@ -422,8 +422,8 @@ from llama_stack_client.types import ScoringScoreResponse, ScoringScoreBatchResp Methods: -- client.scoring.score(\*\*params) -> ScoringScoreResponse -- client.scoring.score_batch(\*\*params) -> ScoringScoreBatchResponse +- client.scoring.score(\*\*params) -> ScoringScoreResponse +- client.scoring.score_batch(\*\*params) -> ScoringScoreBatchResponse ## ScoringFunctions @@ -439,9 +439,9 @@ from llama_stack_client.types import ( Methods: -- client.scoring_functions.retrieve(scoring_fn_id) -> Optional[ScoringFn] -- client.scoring_functions.list() -> ScoringFunctionListResponse -- client.scoring_functions.register(\*\*params) -> None +- client.scoring_functions.retrieve(scoring_fn_id) -> Optional[ScoringFn] +- client.scoring_functions.list() -> ScoringFunctionListResponse +- client.scoring_functions.register(\*\*params) -> None ## Benchmarks @@ -457,6 +457,6 @@ from llama_stack_client.types import ( Methods: -- client.benchmarks.retrieve(benchmark_id) -> Optional[Benchmark] -- client.benchmarks.list() -> BenchmarkListResponse -- client.benchmarks.register(\*\*params) -> None +- client.benchmarks.retrieve(benchmark_id) -> Optional[Benchmark] +- client.benchmarks.list() -> BenchmarkListResponse +- client.benchmarks.register(\*\*params) -> None diff --git a/llama_stack/apis/benchmarks/benchmarks.py b/llama_stack/apis/benchmarks/benchmarks.py index 91b1ca927..39ba355e9 100644 --- a/llama_stack/apis/benchmarks/benchmarks.py +++ b/llama_stack/apis/benchmarks/benchmarks.py @@ -64,23 +64,3 @@ class Benchmarks(Protocol): provider_id: Optional[str] = None, 
metadata: Optional[Dict[str, Any]] = None, ) -> None: ... - - @webmethod(route="/eval-tasks", method="GET") - async def DEPRECATED_list_eval_tasks(self) -> ListBenchmarksResponse: ... - - @webmethod(route="/eval-tasks/{eval_task_id}", method="GET") - async def DEPRECATED_get_eval_task( - self, - eval_task_id: str, - ) -> Optional[Benchmark]: ... - - @webmethod(route="/eval-tasks", method="POST") - async def DEPRECATED_register_eval_task( - self, - eval_task_id: str, - dataset_id: str, - scoring_functions: List[str], - provider_benchmark_id: Optional[str] = None, - provider_id: Optional[str] = None, - metadata: Optional[Dict[str, Any]] = None, - ) -> None: ... diff --git a/llama_stack/apis/eval/eval.py b/llama_stack/apis/eval/eval.py index e2ff4458e..a7b2e7670 100644 --- a/llama_stack/apis/eval/eval.py +++ b/llama_stack/apis/eval/eval.py @@ -39,7 +39,6 @@ EvalCandidate = register_schema( @json_schema_type class BenchmarkConfig(BaseModel): - type: Literal["benchmark"] = "benchmark" eval_candidate: EvalCandidate scoring_params: Dict[str, ScoringFnParams] = Field( description="Map between scoring function id and parameters for each scoring function you want to run", @@ -84,28 +83,3 @@ class Eval(Protocol): @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", method="GET") async def job_result(self, benchmark_id: str, job_id: str) -> EvaluateResponse: ... - - @webmethod(route="/eval/tasks/{task_id}/jobs", method="POST") - async def DEPRECATED_run_eval( - self, - task_id: str, - task_config: BenchmarkConfig, - ) -> Job: ... - - @webmethod(route="/eval/tasks/{task_id}/evaluations", method="POST") - async def DEPRECATED_evaluate_rows( - self, - task_id: str, - input_rows: List[Dict[str, Any]], - scoring_functions: List[str], - task_config: BenchmarkConfig, - ) -> EvaluateResponse: ... - - @webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}", method="GET") - async def DEPRECATED_job_status(self, task_id: str, job_id: str) -> Optional[JobStatus]: ... - - @webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}", method="DELETE") - async def DEPRECATED_job_cancel(self, task_id: str, job_id: str) -> None: ... - - @webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}/result", method="GET") - async def DEPRECATED_job_result(self, task_id: str, job_id: str) -> EvaluateResponse: ... diff --git a/llama_stack/apis/files/__init__.py b/llama_stack/apis/files/__init__.py new file mode 100644 index 000000000..269baf177 --- /dev/null +++ b/llama_stack/apis/files/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from .files import * # noqa: F401 F403 diff --git a/llama_stack/apis/files/files.py b/llama_stack/apis/files/files.py new file mode 100644 index 000000000..f17fadc8c --- /dev/null +++ b/llama_stack/apis/files/files.py @@ -0,0 +1,174 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from typing import List, Optional, Protocol, runtime_checkable + +from pydantic import BaseModel + +from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol +from llama_stack.schema_utils import json_schema_type, webmethod + + +@json_schema_type +class FileUploadResponse(BaseModel): + """ + Response after initiating a file upload session. 
+ + :param id: ID of the upload session + :param url: Upload URL for the file or file parts + :param offset: Upload content offset + :param size: Upload content size + """ + + id: str + url: str + offset: int + size: int + + +@json_schema_type +class BucketResponse(BaseModel): + name: str + + +@json_schema_type +class ListBucketResponse(BaseModel): + """ + Response representing a list of file entries. + + :param data: List of FileResponse entries + """ + + data: List[BucketResponse] + + +@json_schema_type +class FileResponse(BaseModel): + """ + Response representing a file entry. + + :param bucket: Bucket under which the file is stored (valid chars: a-zA-Z0-9_-) + :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + :param mime_type: MIME type of the file + :param url: Upload URL for the file contents + :param bytes: Size of the file in bytes + :param created_at: Timestamp of when the file was created + """ + + bucket: str + key: str + mime_type: str + url: str + bytes: int + created_at: int + + +@json_schema_type +class ListFileResponse(BaseModel): + """ + Response representing a list of file entries. + + :param data: List of FileResponse entries + """ + + data: List[FileResponse] + + +@runtime_checkable +@trace_protocol +class Files(Protocol): + @webmethod(route="/files", method="POST") + async def create_upload_session( + self, + bucket: str, + key: str, + mime_type: str, + size: int, + ) -> FileUploadResponse: + """ + Create a new upload session for a file identified by a bucket and key. + + :param bucket: Bucket under which the file is stored (valid chars: a-zA-Z0-9_-) + :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + :param mime_type: MIME type of the file + :param size: File size in bytes + """ + ... + + @webmethod(route="/files/session:{upload_id}", method="POST", raw_bytes_request_body=True) + async def upload_content_to_session( + self, + upload_id: str, + ) -> Optional[FileResponse]: + """ + Upload file content to an existing upload session. + On the server, request body will have the raw bytes that are uploaded. + + :param upload_id: ID of the upload session + """ + ... + + @webmethod(route="/files/session:{upload_id}", method="GET") + async def get_upload_session_info( + self, + upload_id: str, + ) -> Optional[FileUploadResponse]: + """ + Returns information about an existing upload session. + + :param upload_id: ID of the upload session + """ + ... + + @webmethod(route="/files", method="GET") + async def list_all_buckets( + self, + bucket: str, + ) -> ListBucketResponse: + """ + List all buckets. + """ + ... + + @webmethod(route="/files/{bucket}", method="GET") + async def list_files_in_bucket( + self, + bucket: str, + ) -> ListFileResponse: + """ + List all files in a bucket. + + :param bucket: Bucket name (valid chars: a-zA-Z0-9_-) + """ + ... + + @webmethod(route="/files/{bucket}/{key:path}", method="GET") + async def get_file( + self, + bucket: str, + key: str, + ) -> FileResponse: + """ + Get file info identified by a bucket and key. + + :param bucket: Bucket name (valid chars: a-zA-Z0-9_-) + :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + """ + ... + + @webmethod(route="/files/{bucket}/{key:path}", method="DELETE") + async def delete_file( + self, + bucket: str, + key: str, + ) -> FileResponse: + """ + Delete a file identified by a bucket and key. + + :param bucket: Bucket name (valid chars: a-zA-Z0-9_-) + :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)
+ """ + ... diff --git a/llama_stack/apis/telemetry/telemetry.py b/llama_stack/apis/telemetry/telemetry.py index d010a7e3b..fe75677e7 100644 --- a/llama_stack/apis/telemetry/telemetry.py +++ b/llama_stack/apis/telemetry/telemetry.py @@ -216,7 +216,7 @@ class Telemetry(Protocol): @webmethod(route="/telemetry/events", method="POST") async def log_event(self, event: Event, ttl_seconds: int = DEFAULT_TTL_DAYS * 86400) -> None: ... - @webmethod(route="/telemetry/traces", method="GET") + @webmethod(route="/telemetry/traces", method="POST") async def query_traces( self, attribute_filters: Optional[List[QueryCondition]] = None, @@ -231,7 +231,7 @@ class Telemetry(Protocol): @webmethod(route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}", method="GET") async def get_span(self, trace_id: str, span_id: str) -> Span: ... - @webmethod(route="/telemetry/spans/{span_id:path}/tree", method="GET") + @webmethod(route="/telemetry/spans/{span_id:path}/tree", method="POST") async def get_span_tree( self, span_id: str, @@ -239,7 +239,7 @@ class Telemetry(Protocol): max_depth: Optional[int] = None, ) -> QuerySpanTreeResponse: ... - @webmethod(route="/telemetry/spans", method="GET") + @webmethod(route="/telemetry/spans", method="POST") async def query_spans( self, attribute_filters: List[QueryCondition], diff --git a/llama_stack/cli/model/list.py b/llama_stack/cli/model/list.py index e6bf2216a..2f62cb9ce 100644 --- a/llama_stack/cli/model/list.py +++ b/llama_stack/cli/model/list.py @@ -5,12 +5,44 @@ # the root directory of this source tree. import argparse +import os +import time +from pathlib import Path from llama_stack.cli.subcommand import Subcommand from llama_stack.cli.table import print_table +from llama_stack.distribution.utils.config_dirs import DEFAULT_CHECKPOINT_DIR from llama_stack.models.llama.sku_list import all_registered_models +def _get_model_size(model_dir): + return sum(f.stat().st_size for f in Path(model_dir).rglob("*") if f.is_file()) + + +def _run_model_list_downloaded_cmd() -> None: + headers = ["Model", "Size", "Modified Time"] + + rows = [] + for model in os.listdir(DEFAULT_CHECKPOINT_DIR): + abs_path = os.path.join(DEFAULT_CHECKPOINT_DIR, model) + space_usage = _get_model_size(abs_path) + model_size = f"{space_usage / (1024**3):.2f} GB" + modified_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(os.path.getmtime(abs_path))) + rows.append( + [ + model, + model_size, + modified_time, + ] + ) + + print_table( + rows, + headers, + separate_rows=True, + ) + + class ModelList(Subcommand): """List available llama models""" @@ -31,10 +63,18 @@ class ModelList(Subcommand): action="store_true", help="Show all models (not just defaults)", ) + self.parser.add_argument( + "--downloaded", + action="store_true", + help="List the downloaded models", + ) def _run_model_list_cmd(self, args: argparse.Namespace) -> None: from .safety_models import prompt_guard_model_sku + if args.downloaded: + return _run_model_list_downloaded_cmd() + headers = [ "Model Descriptor(ID)", "Hugging Face Repo", diff --git a/llama_stack/distribution/build_venv.sh b/llama_stack/distribution/build_venv.sh index 0b0bffcfd..b47cfcb83 100755 --- a/llama_stack/distribution/build_venv.sh +++ b/llama_stack/distribution/build_venv.sh @@ -15,6 +15,7 @@ TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-} # This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out # Reference: https://github.com/astral-sh/uv/pull/1694 UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500} 
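Taken together, the new `Files` protocol above defines a three-step upload flow: create a session, stream raw bytes to it, then address the stored object by bucket and key. A minimal client-side sketch of that flow, assuming a stack server at `http://localhost:8321` (the endpoint paths come straight from the `@webmethod` routes above; the base URL, bucket, key, and sizes are illustrative assumptions, not part of this patch):

```python
import httpx

BASE = "http://localhost:8321"  # assumed local server URL

# 1. Create an upload session for a 1 KiB PDF stored under docs/reports/q1.pdf.
session = httpx.post(
    f"{BASE}/files",
    json={"bucket": "docs", "key": "reports/q1.pdf",
          "mime_type": "application/pdf", "size": 1024},
).json()

# 2. Send the raw bytes to the session endpoint; raw_bytes_request_body=True
#    above means the body is the file content itself, not JSON. (The response
#    also carries a `url` field that a real client would likely prefer.)
httpx.post(f"{BASE}/files/session:{session['id']}", content=b"\x00" * 1024)

# 3. The file is now addressable by bucket and key.
info = httpx.get(f"{BASE}/files/docs/reports/q1.pdf").json()
print(info["bytes"], info["created_at"])
```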
+UV_SYSTEM_PYTHON=${UV_SYSTEM_PYTHON:-} if [ -n "$LLAMA_STACK_DIR" ]; then echo "Using llama-stack-dir=$LLAMA_STACK_DIR" @@ -73,11 +74,16 @@ run() { local env_name="$1" local pip_dependencies="$2" local special_pip_deps="$3" + + if [ -n "$UV_SYSTEM_PYTHON" ]; then + echo "Installing dependencies in system Python environment" + else + echo "Using virtual environment $env_name" + uv venv "$env_name" + # shellcheck source=/dev/null + source "$env_name/bin/activate" + fi - echo "Using virtual environment $env_name" - uv venv "$env_name" - # shellcheck source=/dev/null - source "$env_name/bin/activate" if [ -n "$TEST_PYPI_VERSION" ]; then # these packages are damaged in test-pypi, so install them first uv pip install fastapi libcst diff --git a/llama_stack/distribution/common.sh b/llama_stack/distribution/common.sh index 171023389..15220048b 100755 --- a/llama_stack/distribution/common.sh +++ b/llama_stack/distribution/common.sh @@ -10,12 +10,12 @@ cleanup() { set +x echo "Cleaning up..." conda deactivate - conda env remove --name $envname -y + conda env remove --name "$envname" -y } handle_int() { - if [ -n $ENVNAME ]; then - cleanup $ENVNAME + if [ -n "$ENVNAME" ]; then + cleanup "$ENVNAME" fi exit 1 } @@ -23,8 +23,8 @@ handle_int() { handle_exit() { if [ $? -ne 0 ]; then echo -e "\033[1;31mABORTING.\033[0m" - if [ -n $ENVNAME ]; then - cleanup $ENVNAME + if [ -n "$ENVNAME" ]; then + cleanup "$ENVNAME" fi fi } @@ -33,10 +33,14 @@ setup_cleanup_handlers() { trap handle_int INT trap handle_exit EXIT - __conda_setup="$('conda' 'shell.bash' 'hook' 2>/dev/null)" - eval "$__conda_setup" - - conda deactivate + if is_command_available conda; then + __conda_setup="$('conda' 'shell.bash' 'hook' 2>/dev/null)" + eval "$__conda_setup" + conda deactivate + else + echo "conda is not available" + exit 1 + fi } # check if a command is present diff --git a/llama_stack/distribution/routers/routers.py b/llama_stack/distribution/routers/routers.py index 9d12c8a40..016ca4984 100644 --- a/llama_stack/distribution/routers/routers.py +++ b/llama_stack/distribution/routers/routers.py @@ -411,48 +411,6 @@ class EvalRouter(Eval): job_id, ) - async def DEPRECATED_run_eval( - self, - task_id: str, - task_config: BenchmarkConfig, - ) -> Job: - return await self.run_eval(benchmark_id=task_id, task_config=task_config) - - async def DEPRECATED_evaluate_rows( - self, - task_id: str, - input_rows: List[Dict[str, Any]], - scoring_functions: List[str], - task_config: BenchmarkConfig, - ) -> EvaluateResponse: - return await self.evaluate_rows( - benchmark_id=task_id, - input_rows=input_rows, - scoring_functions=scoring_functions, - task_config=task_config, - ) - - async def DEPRECATED_job_status( - self, - task_id: str, - job_id: str, - ) -> Optional[JobStatus]: - return await self.job_status(benchmark_id=task_id, job_id=job_id) - - async def DEPRECATED_job_cancel( - self, - task_id: str, - job_id: str, - ) -> None: - return await self.job_cancel(benchmark_id=task_id, job_id=job_id) - - async def DEPRECATED_job_result( - self, - task_id: str, - job_id: str, - ) -> EvaluateResponse: - return await self.job_result(benchmark_id=task_id, job_id=job_id) - class ToolRuntimeRouter(ToolRuntime): class RagToolImpl(RAGToolRuntime): diff --git a/llama_stack/distribution/routers/routing_tables.py b/llama_stack/distribution/routers/routing_tables.py index 2cddc3970..c2434e517 100644 --- a/llama_stack/distribution/routers/routing_tables.py +++ b/llama_stack/distribution/routers/routing_tables.py @@ -468,35 +468,6 @@ class 
BenchmarksRoutingTable(CommonRoutingTableImpl, Benchmarks): ) await self.register_object(benchmark) - async def DEPRECATED_list_eval_tasks(self) -> ListBenchmarksResponse: - logger.warning("DEPRECATED: Use /eval/benchmarks instead") - return await self.list_benchmarks() - - async def DEPRECATED_get_eval_task( - self, - eval_task_id: str, - ) -> Optional[Benchmark]: - logger.warning("DEPRECATED: Use /eval/benchmarks instead") - return await self.get_benchmark(eval_task_id) - - async def DEPRECATED_register_eval_task( - self, - eval_task_id: str, - dataset_id: str, - scoring_functions: List[str], - provider_benchmark_id: Optional[str] = None, - provider_id: Optional[str] = None, - metadata: Optional[Dict[str, Any]] = None, - ) -> None: - logger.warning("DEPRECATED: Use /eval/benchmarks instead") - return await self.register_benchmark( - benchmark_id=eval_task_id, - dataset_id=dataset_id, - scoring_functions=scoring_functions, - metadata=metadata, - provider_benchmark_id=provider_benchmark_id, - ) - class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups): async def list_tools(self, toolgroup_id: Optional[str] = None) -> ListToolsResponse: diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py index 0d234d506..8fd95498f 100644 --- a/llama_stack/distribution/server/server.py +++ b/llama_stack/distribution/server/server.py @@ -481,6 +481,8 @@ def main(): def extract_path_params(route: str) -> List[str]: segments = route.split("/") params = [seg[1:-1] for seg in segments if seg.startswith("{") and seg.endswith("}")] + # to handle path params like {param:path} + params = [param.split(":")[0] for param in params] return params diff --git a/llama_stack/distribution/stack.py b/llama_stack/distribution/stack.py index 9335dc3a9..1328c88ef 100644 --- a/llama_stack/distribution/stack.py +++ b/llama_stack/distribution/stack.py @@ -19,6 +19,7 @@ from llama_stack.apis.benchmarks import Benchmarks from llama_stack.apis.datasetio import DatasetIO from llama_stack.apis.datasets import Datasets from llama_stack.apis.eval import Eval +from llama_stack.apis.files import Files from llama_stack.apis.inference import Inference from llama_stack.apis.inspect import Inspect from llama_stack.apis.models import Models @@ -63,6 +64,7 @@ class LlamaStack( ToolGroups, ToolRuntime, RAGToolRuntime, + Files, ): pass diff --git a/llama_stack/distribution/utils/exec.py b/llama_stack/distribution/utils/exec.py index 63af35f84..4a3a95826 100644 --- a/llama_stack/distribution/utils/exec.py +++ b/llama_stack/distribution/utils/exec.py @@ -34,6 +34,7 @@ def _run_with_pty_unix(command): original_sigint = signal.getsignal(signal.SIGINT) ctrl_c_pressed = False + process = None def sigint_handler(signum, frame): nonlocal ctrl_c_pressed @@ -98,7 +99,7 @@ def _run_with_pty_unix(command): signal.signal(signal.SIGINT, original_sigint) os.close(master) - if process.poll() is None: + if process and process.poll() is None: process.terminate() process.wait() diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py index 0f77b7347..18d408a31 100644 --- a/llama_stack/providers/inline/eval/meta_reference/eval.py +++ b/llama_stack/providers/inline/eval/meta_reference/eval.py @@ -234,45 +234,3 @@ class MetaReferenceEvalImpl( raise ValueError(f"Job is not completed, Status: {status.value}") return self.jobs[job_id] - - async def DEPRECATED_run_eval( - self, - task_id: str, - task_config: BenchmarkConfig, - ) -> Job: - return 
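The `extract_path_params` fix above pairs with the `{key:path}` routes introduced by the Files API: without stripping the `:path` converter, the extracted name would be `key:path` and would never match the handler's `key` argument. A quick standalone check of the fixed behavior (the function body is copied from the patch; the asserts are illustrative):

```python
from typing import List


def extract_path_params(route: str) -> List[str]:
    segments = route.split("/")
    params = [seg[1:-1] for seg in segments if seg.startswith("{") and seg.endswith("}")]
    # to handle path params like {param:path}
    params = [param.split(":")[0] for param in params]
    return params


assert extract_path_params("/files/{bucket}/{key:path}") == ["bucket", "key"]
assert extract_path_params("/telemetry/traces/{trace_id:path}/spans/{span_id:path}") == ["trace_id", "span_id"]
```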
await self.run_eval(benchmark_id=task_id, task_config=task_config) - - async def DEPRECATED_evaluate_rows( - self, - task_id: str, - input_rows: List[Dict[str, Any]], - scoring_functions: List[str], - task_config: BenchmarkConfig, - ) -> EvaluateResponse: - return await self.evaluate_rows( - benchmark_id=task_id, - input_rows=input_rows, - scoring_functions=scoring_functions, - task_config=task_config, - ) - - async def DEPRECATED_job_status( - self, - task_id: str, - job_id: str, - ) -> Optional[JobStatus]: - return await self.job_status(benchmark_id=task_id, job_id=job_id) - - async def DEPRECATED_job_cancel( - self, - task_id: str, - job_id: str, - ) -> None: - return await self.job_cancel(benchmark_id=task_id, job_id=job_id) - - async def DEPRECATED_job_result( - self, - task_id: str, - job_id: str, - ) -> EvaluateResponse: - return await self.job_result(benchmark_id=task_id, job_id=job_id) diff --git a/llama_stack/providers/inline/inference/meta_reference/inference.py b/llama_stack/providers/inline/inference/meta_reference/inference.py index c79f97def..763d9664d 100644 --- a/llama_stack/providers/inline/inference/meta_reference/inference.py +++ b/llama_stack/providers/inline/inference/meta_reference/inference.py @@ -46,7 +46,7 @@ from llama_stack.providers.utils.inference.embedding_mixin import ( ) from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, + build_hf_repo_model_entry, ) from llama_stack.providers.utils.inference.prompt_adapter import ( augment_content_with_response_format_prompt, @@ -116,7 +116,7 @@ class MetaReferenceInferenceImpl( self.model_registry_helper = ModelRegistryHelper( [ - build_model_alias( + build_hf_repo_model_entry( llama_model.descriptor(), llama_model.core_model_id.value, ) diff --git a/llama_stack/providers/inline/inference/vllm/vllm.py b/llama_stack/providers/inline/inference/vllm/vllm.py index 5536ea3a5..5b0df91e7 100644 --- a/llama_stack/providers/inline/inference/vllm/vllm.py +++ b/llama_stack/providers/inline/inference/vllm/vllm.py @@ -9,7 +9,6 @@ import os import uuid from typing import AsyncGenerator, List, Optional -from llama_models.llama3.api.chat_format import ChatFormat from llama_models.llama3.api.tokenizer import Tokenizer from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.async_llm_engine import AsyncLLMEngine @@ -62,7 +61,6 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate): def __init__(self, config: VLLMConfig): self.config = config self.engine = None - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self): log.info("Initializing vLLM inference provider.") @@ -177,7 +175,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate): log.info("Sampling params: %s", sampling_params) request_id = _random_uuid() - prompt = await chat_completion_request_to_prompt(request, self.config.model, self.formatter) + prompt = await chat_completion_request_to_prompt(request, self.config.model) vllm_sampling_params = self._sampling_params(request.sampling_params) results_generator = self.engine.generate(prompt, vllm_sampling_params, request_id) if stream: @@ -201,11 +199,13 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate): response = OpenAICompatCompletionResponse( choices=[choice], ) - return process_chat_completion_response(response, self.formatter, request) + return process_chat_completion_response(response, request) async def _stream_chat_completion( self, request: ChatCompletionRequest, results_generator: AsyncGenerator ) -> 
AsyncGenerator: + tokenizer = Tokenizer.get_instance() + async def _generate_and_convert_to_openai_compat(): cur = [] async for chunk in results_generator: @@ -216,7 +216,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate): output = chunk.outputs[-1] new_tokens = output.token_ids[len(cur) :] - text = self.formatter.tokenizer.decode(new_tokens) + text = tokenizer.decode(new_tokens) cur.extend(new_tokens) choice = OpenAICompatCompletionChoice( finish_reason=output.finish_reason, @@ -227,7 +227,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate): ) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def embeddings(self, model_id: str, contents: List[InterleavedContent]) -> EmbeddingsResponse: diff --git a/llama_stack/providers/inline/post_training/torchtune/post_training.py b/llama_stack/providers/inline/post_training/torchtune/post_training.py index c77d9305f..b837362d7 100644 --- a/llama_stack/providers/inline/post_training/torchtune/post_training.py +++ b/llama_stack/providers/inline/post_training/torchtune/post_training.py @@ -40,8 +40,7 @@ class TorchtunePostTrainingImpl: self.datasets_api = datasets # TODO: assume sync job, will need jobs API for async scheduling - self.jobs_status = {} - self.jobs_list = [] + self.jobs = {} self.checkpoints_dict = {} async def supervised_fine_tune( @@ -54,9 +53,8 @@ class TorchtunePostTrainingImpl: checkpoint_dir: Optional[str], algorithm_config: Optional[AlgorithmConfig], ) -> PostTrainingJob: - for job in self.jobs_list: - if job_uuid == job.job_uuid: - raise ValueError(f"Job {job_uuid} already exists") + if job_uuid in self.jobs: + raise ValueError(f"Job {job_uuid} already exists") post_training_job = PostTrainingJob(job_uuid=job_uuid) @@ -65,8 +63,8 @@ class TorchtunePostTrainingImpl: status=JobStatus.scheduled, scheduled_at=datetime.now(), ) + self.jobs[job_uuid] = job_status_response - self.jobs_list.append(post_training_job) if isinstance(algorithm_config, LoraFinetuningConfig): try: recipe = LoraFinetuningSingleDevice( @@ -100,8 +98,6 @@ class TorchtunePostTrainingImpl: else: raise NotImplementedError() - self.jobs_status[job_uuid] = job_status_response - return post_training_job async def preference_optimize( @@ -115,13 +111,11 @@ class TorchtunePostTrainingImpl: ) -> PostTrainingJob: ... 
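With `self.formatter` gone, the vLLM streaming path above fetches the tokenizer once per stream and decodes only the delta between chunks; decoding the full `token_ids` on every chunk would re-emit text that was already streamed. A minimal sketch of that incremental-decode pattern in isolation (the decode callable is a toy stand-in, since `Tokenizer.get_instance()` needs model files):

```python
from typing import Callable, List


def make_delta_decoder(decode: Callable[[List[int]], str]) -> Callable[[List[int]], str]:
    """Given the cumulative token ids for a stream, decode only the unseen suffix."""
    cur: List[int] = []

    def next_text(token_ids: List[int]) -> str:
        new_tokens = token_ids[len(cur):]  # tokens appended since the last chunk
        cur.extend(new_tokens)
        return decode(new_tokens)

    return next_text


decoder = make_delta_decoder(lambda ids: "".join(chr(i) for i in ids))  # toy decode
assert decoder([72, 105]) == "Hi"
assert decoder([72, 105, 33]) == "!"  # only the new token is decoded
```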
async def get_training_jobs(self) -> ListPostTrainingJobsResponse: - return ListPostTrainingJobsResponse(data=self.jobs_list) + return ListPostTrainingJobsResponse(data=[PostTrainingJob(job_uuid=uuid_) for uuid_ in self.jobs]) @webmethod(route="/post-training/job/status") async def get_training_job_status(self, job_uuid: str) -> Optional[PostTrainingJobStatusResponse]: - if job_uuid in self.jobs_status: - return self.jobs_status[job_uuid] - return None + return self.jobs.get(job_uuid, None) @webmethod(route="/post-training/job/cancel") async def cancel_training_job(self, job_uuid: str) -> None: diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py index 4ab59fec4..67de380c0 100644 --- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py +++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py @@ -64,8 +64,6 @@ from torchtune.models.llama3._tokenizer import Llama3Tokenizer class LoraFinetuningSingleDevice: - # This recipe only supports GPU training - # This recipe doesn't include several training efficiency settings from the original torchtune repo, including # - compile # - activation offloading @@ -93,7 +91,7 @@ class LoraFinetuningSingleDevice: if not isinstance(algorithm_config, LoraFinetuningConfig): raise ValueError("You need to specify LoraFinetuningConfig for LoRA finetuning") self.algorithm_config = algorithm_config - self._device = torchtune_utils.get_device(device="cuda") + self._device = torchtune_utils.get_device() self._dtype = training.get_dtype(training_config.dtype, device=self._device) self.model_id = model @@ -231,6 +229,13 @@ class LoraFinetuningSingleDevice: # Used to ignore labels for loss computation self.ignore_labels_cache = torch.full((self._batch_size, 1), self._loss_fn.ignore_index, device=self._device) + def _log_memory_stats(self): + # torchtune raises: "Logging memory stats is not supported on CPU devices"; do nothing + if self._device.type == "cpu": + return + memory_stats = training.get_memory_stats(device=self._device) + training.log_memory_stats(memory_stats) + async def _setup_model( self, enable_activation_checkpointing: bool, @@ -293,8 +298,7 @@ class LoraFinetuningSingleDevice: # activation offloading self.activations_handling_ctx = training.get_act_offloading_ctx_manager(model, enable_activation_offloading) - memory_stats = training.get_memory_stats(device=self._device) - training.log_memory_stats(memory_stats) + self._log_memory_stats() return model @@ -506,8 +510,7 @@ class LoraFinetuningSingleDevice: "tokens_per_second_per_gpu": num_tokens / time_per_step, } - memory_stats = training.get_memory_stats(device=self._device) - log_dict.update(memory_stats) + self._log_memory_stats() if self._clip_grad_norm is not None: log_dict.update({"grad_norm": grad_norm}) diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py index a53c5cfa7..074f1ff46 100644 --- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py +++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_405b_simpleqa.py @@ -5,7 +5,11 @@ # the root directory of this source tree.
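The torchtune post-training refactor earlier above collapses the parallel `jobs_list` / `jobs_status` structures into a single `self.jobs` dict keyed by job UUID, turning the duplicate-job check from a linear scan into a membership test and leaving one source of truth for status lookups. A schematic of the consolidated bookkeeping (not the provider class itself; the status value type is simplified):

```python
class JobStore:
    def __init__(self) -> None:
        self.jobs: dict[str, object] = {}  # job_uuid -> status response

    def schedule(self, job_uuid: str, status: object) -> None:
        if job_uuid in self.jobs:  # O(1) membership test instead of a list scan
            raise ValueError(f"Job {job_uuid} already exists")
        self.jobs[job_uuid] = status

    def status(self, job_uuid: str) -> object | None:
        return self.jobs.get(job_uuid, None)  # single source of truth
```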
from llama_stack.apis.common.type_system import NumberType -from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams, ScoringFn +from llama_stack.apis.scoring_functions import ( + AggregationFunctionType, + LLMAsJudgeScoringFnParams, + ScoringFn, +) GRADER_TEMPLATE = """ Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. @@ -87,5 +91,6 @@ llm_as_judge_405b_simpleqa = ScoringFn( judge_model="meta-llama/Llama-3.1-405B-Instruct", prompt_template=GRADER_TEMPLATE, judge_score_regexes=[r"(A|B|C)"], + aggregation_functions=[AggregationFunctionType.categorical_count.value], ), ) diff --git a/llama_stack/providers/inline/tool_runtime/rag/__init__.py b/llama_stack/providers/inline/tool_runtime/rag/__init__.py index 542872091..15118c9df 100644 --- a/llama_stack/providers/inline/tool_runtime/rag/__init__.py +++ b/llama_stack/providers/inline/tool_runtime/rag/__init__.py @@ -9,10 +9,11 @@ from typing import Any, Dict from llama_stack.providers.datatypes import Api from .config import RagToolRuntimeConfig -from .memory import MemoryToolRuntimeImpl async def get_provider_impl(config: RagToolRuntimeConfig, deps: Dict[str, Any]): + from .memory import MemoryToolRuntimeImpl + impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference]) await impl.initialize() return impl diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py index 6c787bc29..17865c93e 100644 --- a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py +++ b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py @@ -4,9 +4,11 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. +import hashlib import logging import sqlite3 import struct +import uuid from typing import Any, Dict, List, Optional import numpy as np @@ -52,14 +54,14 @@ class SQLiteVecIndex(EmbeddingIndex): # Create the table to store chunk metadata. cur.execute(f""" CREATE TABLE IF NOT EXISTS {self.metadata_table} ( - id INTEGER PRIMARY KEY, + id TEXT PRIMARY KEY, chunk TEXT ); """) # Create the virtual table for embeddings. cur.execute(f""" CREATE VIRTUAL TABLE IF NOT EXISTS {self.vector_table} - USING vec0(embedding FLOAT[{self.dimension}]); + USING vec0(embedding FLOAT[{self.dimension}], id TEXT); """) self.connection.commit() @@ -69,9 +71,9 @@ class SQLiteVecIndex(EmbeddingIndex): cur.execute(f"DROP TABLE IF EXISTS {self.vector_table};") self.connection.commit() - async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray): + async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray, batch_size: int = 500): """ - Add new chunks along with their embeddings. + Add new chunks along with their embeddings using batch inserts. For each chunk, we insert its JSON into the metadata table and then insert its embedding (serialized to raw bytes) into the virtual table using the assigned rowid. If any insert fails, the transaction is rolled back to maintain consistency. @@ -80,21 +82,35 @@ class SQLiteVecIndex(EmbeddingIndex): try: # Start transaction cur.execute("BEGIN TRANSACTION") - for chunk, emb in zip(chunks, embeddings, strict=False): - # Serialize and insert the chunk metadata. 
- chunk_json = chunk.model_dump_json() - cur.execute(f"INSERT INTO {self.metadata_table} (chunk) VALUES (?)", (chunk_json,)) - row_id = cur.lastrowid - # Ensure the embedding is a list of floats. - emb_list = emb.tolist() if isinstance(emb, np.ndarray) else list(emb) - emb_blob = serialize_vector(emb_list) - cur.execute(f"INSERT INTO {self.vector_table} (rowid, embedding) VALUES (?, ?)", (row_id, emb_blob)) - # Commit transaction if all inserts succeed + for i in range(0, len(chunks), batch_size): + batch_chunks = chunks[i : i + batch_size] + batch_embeddings = embeddings[i : i + batch_size] + # Prepare metadata inserts + metadata_data = [ + (generate_chunk_id(chunk.metadata["document_id"], chunk.content), chunk.model_dump_json()) + for chunk in batch_chunks + ] + # Insert metadata (ON CONFLICT to avoid duplicates) + cur.executemany( + f""" + INSERT INTO {self.metadata_table} (id, chunk) + VALUES (?, ?) + ON CONFLICT(id) DO UPDATE SET chunk = excluded.chunk; + """, + metadata_data, + ) + # Prepare embeddings inserts + embedding_data = [ + (generate_chunk_id(chunk.metadata["document_id"], chunk.content), serialize_vector(emb.tolist())) + for chunk, emb in zip(batch_chunks, batch_embeddings, strict=True) + ] + # Insert embeddings in batch + cur.executemany(f"INSERT INTO {self.vector_table} (id, embedding) VALUES (?, ?);", embedding_data) self.connection.commit() except sqlite3.Error as e: self.connection.rollback() # Rollback on failure - print(f"Error inserting into {self.vector_table} - error: {e}") # Log error (Consider using logging module) + logger.error(f"Error inserting into {self.vector_table}: {e}") finally: cur.close() # Ensure cursor is closed @@ -110,7 +126,7 @@ class SQLiteVecIndex(EmbeddingIndex): query_sql = f""" SELECT m.id, m.chunk, v.distance FROM {self.vector_table} AS v - JOIN {self.metadata_table} AS m ON m.id = v.rowid + JOIN {self.metadata_table} AS m ON m.id = v.id WHERE v.embedding MATCH ? AND k = ? ORDER BY v.distance; """ @@ -204,7 +220,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate): if vector_db_id not in self.cache: raise ValueError(f"Vector DB {vector_db_id} not found. Found: {list(self.cache.keys())}") # The VectorDBWithIndex helper is expected to compute embeddings via the inference_api - # and then call our index’s add_chunks. + # and then call our index's add_chunks. 
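The batched insert above keys both the metadata table and the vec0 virtual table on `generate_chunk_id(document_id, content)` (the helper added at the bottom of this file), so re-ingesting a document updates rows in place via the `ON CONFLICT` clause instead of accumulating duplicates. The ID is a deterministic UUID derived from an MD5 of `document_id:chunk_text`; the function body below is copied from the patch, with asserts added to illustrate the idempotence property:

```python
import hashlib
import uuid


def generate_chunk_id(document_id: str, chunk_text: str) -> str:
    """Derive a stable UUID: the same (document, text) pair always maps to the same id."""
    hash_input = f"{document_id}:{chunk_text}".encode("utf-8")
    return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))


# Identical chunks map to the same row, so the upsert overwrites rather than duplicates;
# a different document id yields a different row.
assert generate_chunk_id("doc-1", "hello world") == generate_chunk_id("doc-1", "hello world")
assert generate_chunk_id("doc-1", "hello world") != generate_chunk_id("doc-2", "hello world")
```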
await self.cache[vector_db_id].insert_chunks(chunks) async def query_chunks( @@ -213,3 +229,9 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate): if vector_db_id not in self.cache: raise ValueError(f"Vector DB {vector_db_id} not found") return await self.cache[vector_db_id].query_chunks(query, params) + + +def generate_chunk_id(document_id: str, chunk_text: str) -> str: + """Generate a unique chunk ID using a hash of document ID and chunk text.""" + hash_input = f"{document_id}:{chunk_text}".encode("utf-8") + return str(uuid.UUID(hashlib.md5(hash_input).hexdigest())) diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py index e72140ccf..346a2bd73 100644 --- a/llama_stack/providers/registry/inference.py +++ b/llama_stack/providers/registry/inference.py @@ -215,4 +215,14 @@ def available_providers() -> List[ProviderSpec]: config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", ), ), + remote_provider_spec( + api=Api.inference, + adapter=AdapterSpec( + adapter_type="passthrough", + pip_packages=[], + module="llama_stack.providers.remote.inference.passthrough", + config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", + provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", + ), + ), ] diff --git a/llama_stack/providers/remote/inference/bedrock/bedrock.py b/llama_stack/providers/remote/inference/bedrock/bedrock.py index e896f0597..9c5a291db 100644 --- a/llama_stack/providers/remote/inference/bedrock/bedrock.py +++ b/llama_stack/providers/remote/inference/bedrock/bedrock.py @@ -8,8 +8,6 @@ import json from typing import AsyncGenerator, AsyncIterator, Dict, List, Optional, Union from botocore.client import BaseClient -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( @@ -27,12 +25,10 @@ from llama_stack.apis.inference import ( ToolDefinition, ToolPromptFormat, ) -from llama_stack.models.llama.datatypes import CoreModelId from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig from llama_stack.providers.utils.bedrock.client import create_bedrock_client from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.openai_compat import ( OpenAICompatCompletionChoice, @@ -47,29 +43,15 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( interleaved_content_as_str, ) -MODEL_ALIASES = [ - build_model_alias( - "meta.llama3-1-8b-instruct-v1:0", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "meta.llama3-1-70b-instruct-v1:0", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "meta.llama3-1-405b-instruct-v1:0", - CoreModelId.llama3_1_405b_instruct.value, - ), -] +from .models import MODEL_ENTRIES class BedrockInferenceAdapter(ModelRegistryHelper, Inference): def __init__(self, config: BedrockConfig) -> None: - ModelRegistryHelper.__init__(self, MODEL_ALIASES) + ModelRegistryHelper.__init__(self, MODEL_ENTRIES) self._config = config self._client = create_bedrock_client(config) - self.formatter = ChatFormat(Tokenizer.get_instance()) @property def client(self) -> BaseClient: @@ -134,7 +116,7 @@ class BedrockInferenceAdapter(ModelRegistryHelper, Inference): ) response = 
OpenAICompatCompletionResponse(choices=[choice]) - return process_chat_completion_response(response, self.formatter, request) + return process_chat_completion_response(response, request) async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator: params = await self._get_params_for_chat_completion(request) @@ -152,7 +134,7 @@ class BedrockInferenceAdapter(ModelRegistryHelper, Inference): yield OpenAICompatCompletionResponse(choices=[choice]) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def _get_params_for_chat_completion(self, request: ChatCompletionRequest) -> Dict: @@ -166,7 +148,7 @@ class BedrockInferenceAdapter(ModelRegistryHelper, Inference): if sampling_params.repetition_penalty > 0: options["repetition_penalty"] = sampling_params.repetition_penalty - prompt = await chat_completion_request_to_prompt(request, self.get_llama_model(request.model), self.formatter) + prompt = await chat_completion_request_to_prompt(request, self.get_llama_model(request.model)) return { "modelId": bedrock_model, "body": json.dumps( diff --git a/llama_stack/providers/remote/inference/bedrock/models.py b/llama_stack/providers/remote/inference/bedrock/models.py new file mode 100644 index 000000000..c5079799f --- /dev/null +++ b/llama_stack/providers/remote/inference/bedrock/models.py @@ -0,0 +1,25 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + build_hf_repo_model_entry, +) + +MODEL_ENTRIES = [ + build_hf_repo_model_entry( + "meta.llama3-1-8b-instruct-v1:0", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "meta.llama3-1-70b-instruct-v1:0", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "meta.llama3-1-405b-instruct-v1:0", + CoreModelId.llama3_1_405b_instruct.value, + ), +] diff --git a/llama_stack/providers/remote/inference/cerebras/cerebras.py b/llama_stack/providers/remote/inference/cerebras/cerebras.py index 1ce267e8d..0a27d81d7 100644 --- a/llama_stack/providers/remote/inference/cerebras/cerebras.py +++ b/llama_stack/providers/remote/inference/cerebras/cerebras.py @@ -7,8 +7,6 @@ from typing import AsyncGenerator, List, Optional, Union from cerebras.cloud.sdk import AsyncCerebras -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( @@ -26,10 +24,9 @@ from llama_stack.apis.inference import ( ToolDefinition, ToolPromptFormat, ) -from llama_stack.models.llama.datatypes import CoreModelId, TopKSamplingStrategy +from llama_stack.models.llama.datatypes import TopKSamplingStrategy from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, @@ -44,27 +41,16 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( ) from .config import CerebrasImplConfig - -model_aliases = [ - build_model_alias( - "llama3.1-8b", - 
CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "llama-3.3-70b", - CoreModelId.llama3_3_70b_instruct.value, - ), -] +from .models import model_entries class CerebrasInferenceAdapter(ModelRegistryHelper, Inference): def __init__(self, config: CerebrasImplConfig) -> None: ModelRegistryHelper.__init__( self, - model_aliases=model_aliases, + model_entries=model_entries, ) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) self.client = AsyncCerebras( base_url=self.config.base_url, @@ -107,14 +93,14 @@ class CerebrasInferenceAdapter(ModelRegistryHelper, Inference): r = await self.client.completions.create(**params) - return process_completion_response(r, self.formatter) + return process_completion_response(r) async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: params = await self._get_params(request) stream = await self.client.completions.create(**params) - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk async def chat_completion( @@ -154,14 +140,14 @@ class CerebrasInferenceAdapter(ModelRegistryHelper, Inference): r = await self.client.completions.create(**params) - return process_chat_completion_response(r, self.formatter, request) + return process_chat_completion_response(r, request) async def _stream_chat_completion(self, request: CompletionRequest) -> AsyncGenerator: params = await self._get_params(request) stream = await self.client.completions.create(**params) - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict: @@ -170,11 +156,9 @@ class CerebrasInferenceAdapter(ModelRegistryHelper, Inference): prompt = "" if isinstance(request, ChatCompletionRequest): - prompt = await chat_completion_request_to_prompt( - request, self.get_llama_model(request.model), self.formatter - ) + prompt = await chat_completion_request_to_prompt(request, self.get_llama_model(request.model)) elif isinstance(request, CompletionRequest): - prompt = await completion_request_to_prompt(request, self.formatter) + prompt = await completion_request_to_prompt(request) else: raise ValueError(f"Unknown request type {type(request)}") diff --git a/llama_stack/providers/remote/inference/cerebras/models.py b/llama_stack/providers/remote/inference/cerebras/models.py new file mode 100644 index 000000000..a48864d49 --- /dev/null +++ b/llama_stack/providers/remote/inference/cerebras/models.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + build_hf_repo_model_entry, +) + +model_entries = [ + build_hf_repo_model_entry( + "llama3.1-8b", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "llama-3.3-70b", + CoreModelId.llama3_3_70b_instruct.value, + ), +] diff --git a/llama_stack/providers/remote/inference/databricks/databricks.py b/llama_stack/providers/remote/inference/databricks/databricks.py index 3d306e61f..de13638f5 100644 --- a/llama_stack/providers/remote/inference/databricks/databricks.py +++ b/llama_stack/providers/remote/inference/databricks/databricks.py @@ -6,8 +6,6 @@ from typing import AsyncGenerator, List, Optional -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from openai import OpenAI from llama_stack.apis.common.content_types import InterleavedContent @@ -27,7 +25,7 @@ from llama_stack.apis.inference import ( from llama_stack.models.llama.datatypes import CoreModelId from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, + build_hf_repo_model_entry, ) from llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, @@ -40,12 +38,12 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( from .config import DatabricksImplConfig -model_aliases = [ - build_model_alias( +model_entries = [ + build_hf_repo_model_entry( "databricks-meta-llama-3-1-70b-instruct", CoreModelId.llama3_1_70b_instruct.value, ), - build_model_alias( + build_hf_repo_model_entry( "databricks-meta-llama-3-1-405b-instruct", CoreModelId.llama3_1_405b_instruct.value, ), @@ -54,12 +52,8 @@ model_aliases = [ class DatabricksInferenceAdapter(ModelRegistryHelper, Inference): def __init__(self, config: DatabricksImplConfig) -> None: - ModelRegistryHelper.__init__( - self, - model_aliases=model_aliases, - ) + ModelRegistryHelper.__init__(self, model_entries=model_entries) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self) -> None: return @@ -112,7 +106,7 @@ class DatabricksInferenceAdapter(ModelRegistryHelper, Inference): ) -> ChatCompletionResponse: params = self._get_params(request) r = client.completions.create(**params) - return process_chat_completion_response(r, self.formatter, request) + return process_chat_completion_response(r, request) async def _stream_chat_completion(self, request: ChatCompletionRequest, client: OpenAI) -> AsyncGenerator: params = self._get_params(request) @@ -123,13 +117,13 @@ class DatabricksInferenceAdapter(ModelRegistryHelper, Inference): yield chunk stream = _to_async_generator() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk def _get_params(self, request: ChatCompletionRequest) -> dict: return { "model": request.model, - "prompt": chat_completion_request_to_prompt(request, self.get_llama_model(request.model), self.formatter), + "prompt": chat_completion_request_to_prompt(request, self.get_llama_model(request.model)), "stream": request.stream, **get_sampling_options(request.sampling_params), } diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py index acf37b248..3f455da3c 100644 --- 
a/llama_stack/providers/remote/inference/fireworks/fireworks.py +++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py @@ -7,8 +7,6 @@ from typing import AsyncGenerator, List, Optional, Union from fireworks.client import Fireworks -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( @@ -29,10 +27,8 @@ from llama_stack.apis.inference import ( ToolPromptFormat, ) from llama_stack.distribution.request_headers import NeedsRequestProviderData -from llama_stack.models.llama.datatypes import CoreModelId from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.openai_compat import ( convert_message_to_openai_dict, @@ -51,56 +47,13 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( ) from .config import FireworksImplConfig - -MODEL_ALIASES = [ - build_model_alias( - "accounts/fireworks/models/llama-v3p1-8b-instruct", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p1-70b-instruct", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p1-405b-instruct", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p2-1b-instruct", - CoreModelId.llama3_2_1b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p2-3b-instruct", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p2-11b-vision-instruct", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p2-90b-vision-instruct", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-v3p3-70b-instruct", - CoreModelId.llama3_3_70b_instruct.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-guard-3-8b", - CoreModelId.llama_guard_3_8b.value, - ), - build_model_alias( - "accounts/fireworks/models/llama-guard-3-11b-vision", - CoreModelId.llama_guard_3_11b_vision.value, - ), -] +from .models import MODEL_ENTRIES class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProviderData): def __init__(self, config: FireworksImplConfig) -> None: - ModelRegistryHelper.__init__(self, MODEL_ALIASES) + ModelRegistryHelper.__init__(self, MODEL_ENTRIES) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self) -> None: pass @@ -149,7 +102,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: params = await self._get_params(request) r = await self._get_client().completion.acreate(**params) - return process_completion_response(r, self.formatter) + return process_completion_response(r) async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -161,7 +114,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv yield chunk stream = _to_async_generator() - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk def _build_options( @@ -230,7 
+183,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv r = await self._get_client().chat.completions.acreate(**params) else: r = await self._get_client().completion.acreate(**params) - return process_chat_completion_response(r, self.formatter, request) + return process_chat_completion_response(r, request) async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -244,7 +197,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv yield chunk stream = _to_async_generator() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict: @@ -258,11 +211,11 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv ] else: input_dict["prompt"] = await chat_completion_request_to_prompt( - request, self.get_llama_model(request.model), self.formatter + request, self.get_llama_model(request.model) ) else: assert not media_present, "Fireworks does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt(request, self.formatter) + input_dict["prompt"] = await completion_request_to_prompt(request) # Fireworks always prepends with BOS if "prompt" in input_dict: @@ -284,8 +237,8 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv model = await self.model_store.get_model(model_id) kwargs = {} - if model.metadata.get("embedding_dimensions"): - kwargs["dimensions"] = model.metadata.get("embedding_dimensions") + if model.metadata.get("embedding_dimension"): + kwargs["dimensions"] = model.metadata.get("embedding_dimension") assert all(not content_has_media(content) for content in contents), ( "Fireworks does not support media for embeddings" ) diff --git a/llama_stack/providers/remote/inference/fireworks/models.py b/llama_stack/providers/remote/inference/fireworks/models.py new file mode 100644 index 000000000..c90f632ff --- /dev/null +++ b/llama_stack/providers/remote/inference/fireworks/models.py @@ -0,0 +1,63 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
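The embeddings fix above is a one-word key mismatch with real consequences: registry entries (see the nomic entry in the Fireworks `MODEL_ENTRIES` below) store `embedding_dimension`, so the old lookup of `embedding_dimensions` always returned `None` and the `dimensions` kwarg was silently dropped. The corrected lookup in isolation, using the metadata as declared in this patch:

```python
# Metadata as declared on the nomic-embed-text entry in this patch.
metadata = {"embedding_dimension": 768, "context_length": 8192}

kwargs = {}
if metadata.get("embedding_dimension"):  # old code queried "embedding_dimensions"
    kwargs["dimensions"] = metadata.get("embedding_dimension")

assert kwargs == {"dimensions": 768}
```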
+ +from llama_stack.apis.models.models import ModelType +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + ProviderModelEntry, + build_hf_repo_model_entry, +) + +MODEL_ENTRIES = [ + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p1-8b-instruct", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p1-70b-instruct", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p1-405b-instruct", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p2-1b-instruct", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p2-3b-instruct", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p2-11b-vision-instruct", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p2-90b-vision-instruct", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-v3p3-70b-instruct", + CoreModelId.llama3_3_70b_instruct.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-guard-3-8b", + CoreModelId.llama_guard_3_8b.value, + ), + build_hf_repo_model_entry( + "accounts/fireworks/models/llama-guard-3-11b-vision", + CoreModelId.llama_guard_3_11b_vision.value, + ), + ProviderModelEntry( + provider_model_id="nomic-ai/nomic-embed-text-v1.5", + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 768, + "context_length": 8192, + }, + ), +] diff --git a/llama_stack/providers/remote/inference/groq/groq.py b/llama_stack/providers/remote/inference/groq/groq.py index 441b6af5c..c75e92dfe 100644 --- a/llama_stack/providers/remote/inference/groq/groq.py +++ b/llama_stack/providers/remote/inference/groq/groq.py @@ -31,8 +31,8 @@ from llama_stack.models.llama.sku_list import CoreModelId from llama_stack.providers.remote.inference.groq.config import GroqConfig from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, - build_model_alias_with_just_provider_model_id, + build_hf_repo_model_entry, + build_model_entry, ) from .groq_utils import ( @@ -41,20 +41,20 @@ from .groq_utils import ( convert_chat_completion_response_stream, ) -_MODEL_ALIASES = [ - build_model_alias( +_MODEL_ENTRIES = [ + build_hf_repo_model_entry( "llama3-8b-8192", CoreModelId.llama3_1_8b_instruct.value, ), - build_model_alias_with_just_provider_model_id( + build_model_entry( "llama-3.1-8b-instant", CoreModelId.llama3_1_8b_instruct.value, ), - build_model_alias( + build_hf_repo_model_entry( "llama3-70b-8192", CoreModelId.llama3_70b_instruct.value, ), - build_model_alias( + build_hf_repo_model_entry( "llama-3.3-70b-versatile", CoreModelId.llama3_3_70b_instruct.value, ), @@ -62,7 +62,7 @@ _MODEL_ALIASES = [ # Preview models aren't recommended for production use, but we include this one # to pass the test fixture # TODO(aidand): Replace this with a stable model once Groq supports it - build_model_alias( + build_hf_repo_model_entry( "llama-3.2-3b-preview", CoreModelId.llama3_2_3b_instruct.value, ), @@ -73,7 +73,7 @@ class GroqInferenceAdapter(Inference, ModelRegistryHelper, NeedsRequestProviderD _config: GroqConfig def __init__(self, config: GroqConfig): - 
ModelRegistryHelper.__init__(self, model_aliases=_MODEL_ALIASES) + ModelRegistryHelper.__init__(self, model_entries=_MODEL_ENTRIES) self._config = config def completion( diff --git a/llama_stack/providers/remote/inference/nvidia/models.py b/llama_stack/providers/remote/inference/nvidia/models.py new file mode 100644 index 000000000..c432861ee --- /dev/null +++ b/llama_stack/providers/remote/inference/nvidia/models.py @@ -0,0 +1,51 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + build_hf_repo_model_entry, +) + +_MODEL_ENTRIES = [ + build_hf_repo_model_entry( + "meta/llama3-8b-instruct", + CoreModelId.llama3_8b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama3-70b-instruct", + CoreModelId.llama3_70b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.1-8b-instruct", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.1-70b-instruct", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.1-405b-instruct", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.2-1b-instruct", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.2-3b-instruct", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.2-11b-vision-instruct", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "meta/llama-3.2-90b-vision-instruct", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + # TODO(mf): how do we handle Nemotron models? + # "Llama3.1-Nemotron-51B-Instruct" -> "meta/llama-3.1-nemotron-51b-instruct", +] diff --git a/llama_stack/providers/remote/inference/nvidia/nvidia.py b/llama_stack/providers/remote/inference/nvidia/nvidia.py index 8e67333af..824389577 100644 --- a/llama_stack/providers/remote/inference/nvidia/nvidia.py +++ b/llama_stack/providers/remote/inference/nvidia/nvidia.py @@ -26,19 +26,14 @@ from llama_stack.apis.inference import ( ToolChoice, ToolConfig, ) -from llama_stack.models.llama.datatypes import ( - CoreModelId, - SamplingParams, - ToolDefinition, - ToolPromptFormat, -) +from llama_stack.models.llama.datatypes import SamplingParams, ToolDefinition, ToolPromptFormat from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.prompt_adapter import content_has_media from . 
import NVIDIAConfig +from .models import _MODEL_ENTRIES from .openai_utils import ( convert_chat_completion_request, convert_completion_request, @@ -51,52 +46,11 @@ from .utils import _is_nvidia_hosted, check_health logger = logging.getLogger(__name__) -_MODEL_ALIASES = [ - build_model_alias( - "meta/llama3-8b-instruct", - CoreModelId.llama3_8b_instruct.value, - ), - build_model_alias( - "meta/llama3-70b-instruct", - CoreModelId.llama3_70b_instruct.value, - ), - build_model_alias( - "meta/llama-3.1-8b-instruct", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "meta/llama-3.1-70b-instruct", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "meta/llama-3.1-405b-instruct", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias( - "meta/llama-3.2-1b-instruct", - CoreModelId.llama3_2_1b_instruct.value, - ), - build_model_alias( - "meta/llama-3.2-3b-instruct", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias( - "meta/llama-3.2-11b-vision-instruct", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), - build_model_alias( - "meta/llama-3.2-90b-vision-instruct", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - # TODO(mf): how do we handle Nemotron models? - # "Llama3.1-Nemotron-51B-Instruct" -> "meta/llama-3.1-nemotron-51b-instruct", -] - class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper): def __init__(self, config: NVIDIAConfig) -> None: # TODO(mf): filter by available models - ModelRegistryHelper.__init__(self, model_aliases=_MODEL_ALIASES) + ModelRegistryHelper.__init__(self, model_entries=_MODEL_ENTRIES) logger.info(f"Initializing NVIDIAInferenceAdapter({config.url})...") diff --git a/llama_stack/providers/remote/inference/ollama/models.py b/llama_stack/providers/remote/inference/ollama/models.py new file mode 100644 index 000000000..be556762c --- /dev/null +++ b/llama_stack/providers/remote/inference/ollama/models.py @@ -0,0 +1,103 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
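+
+# Most model families appear twice in this list: build_hf_repo_model_entry(...)
+# registers the explicit fp16 tag (and, as the helper name suggests, a
+# HuggingFace repo alias), while build_model_entry(...) registers the default
+# quantized tag (e.g. "llama3.1:8b") under the same canonical Llama SKU.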
+ +from llama_stack.apis.models.models import ModelType +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + ProviderModelEntry, + build_hf_repo_model_entry, + build_model_entry, +) + +model_entries = [ + build_hf_repo_model_entry( + "llama3.1:8b-instruct-fp16", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_model_entry( + "llama3.1:8b", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.1:70b-instruct-fp16", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_model_entry( + "llama3.1:70b", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.1:405b-instruct-fp16", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_model_entry( + "llama3.1:405b", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.2:1b-instruct-fp16", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_model_entry( + "llama3.2:1b", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.2:3b-instruct-fp16", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_model_entry( + "llama3.2:3b", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.2-vision:11b-instruct-fp16", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_model_entry( + "llama3.2-vision:latest", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.2-vision:90b-instruct-fp16", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + build_model_entry( + "llama3.2-vision:90b", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "llama3.3:70b", + CoreModelId.llama3_3_70b_instruct.value, + ), + # The Llama Guard models don't have their full fp16 versions + # so we are going to alias their default version to the canonical SKU + build_hf_repo_model_entry( + "llama-guard3:8b", + CoreModelId.llama_guard_3_8b.value, + ), + build_hf_repo_model_entry( + "llama-guard3:1b", + CoreModelId.llama_guard_3_1b.value, + ), + ProviderModelEntry( + provider_model_id="all-minilm:latest", + aliases=["all-minilm"], + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 384, + "context_length": 512, + }, + ), + ProviderModelEntry( + provider_model_id="nomic-embed-text", + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 768, + "context_length": 8192, + }, + ), +] diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py index f524c0734..1dbcbc294 100644 --- a/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/llama_stack/providers/remote/inference/ollama/ollama.py @@ -8,8 +8,6 @@ import logging from typing import AsyncGenerator, List, Optional, Union import httpx -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from ollama import AsyncClient from llama_stack.apis.common.content_types import ( @@ -33,12 +31,9 @@ from llama_stack.apis.inference import ( ToolPromptFormat, ) from llama_stack.apis.models import Model, ModelType -from llama_stack.models.llama.datatypes import CoreModelId from llama_stack.providers.datatypes import ModelsProtocolPrivate from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, - build_model_alias_with_just_provider_model_id, ) from 
llama_stack.providers.utils.inference.openai_compat import ( OpenAICompatCompletionChoice, @@ -58,87 +53,15 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( request_has_media, ) -log = logging.getLogger(__name__) +from .models import model_entries -model_aliases = [ - build_model_alias( - "llama3.1:8b-instruct-fp16", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.1:8b", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "llama3.1:70b-instruct-fp16", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.1:70b", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "llama3.1:405b-instruct-fp16", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.1:405b", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias( - "llama3.2:1b-instruct-fp16", - CoreModelId.llama3_2_1b_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.2:1b", - CoreModelId.llama3_2_1b_instruct.value, - ), - build_model_alias( - "llama3.2:3b-instruct-fp16", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.2:3b", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias( - "llama3.2-vision:11b-instruct-fp16", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.2-vision:latest", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), - build_model_alias( - "llama3.2-vision:90b-instruct-fp16", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - build_model_alias_with_just_provider_model_id( - "llama3.2-vision:90b", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - build_model_alias( - "llama3.3:70b", - CoreModelId.llama3_3_70b_instruct.value, - ), - # The Llama Guard models don't have their full fp16 versions - # so we are going to alias their default version to the canonical SKU - build_model_alias( - "llama-guard3:8b", - CoreModelId.llama_guard_3_8b.value, - ), - build_model_alias( - "llama-guard3:1b", - CoreModelId.llama_guard_3_1b.value, - ), -] +log = logging.getLogger(__name__) class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): def __init__(self, url: str) -> None: - self.register_helper = ModelRegistryHelper(model_aliases) + self.register_helper = ModelRegistryHelper(model_entries) self.url = url - self.formatter = ChatFormat(Tokenizer.get_instance()) @property def client(self) -> AsyncClient: @@ -197,7 +120,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): ) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk async def _nonstream_completion(self, request: CompletionRequest) -> AsyncGenerator: @@ -212,7 +135,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): choices=[choice], ) - return process_completion_response(response, self.formatter) + return process_completion_response(response) async def chat_completion( self, @@ -262,11 +185,10 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): input_dict["prompt"] = await chat_completion_request_to_prompt( request, self.register_helper.get_llama_model(request.model), - self.formatter, ) else: assert not media_present, "Ollama does not support media for Completion 
requests" - input_dict["prompt"] = await completion_request_to_prompt(request, self.formatter) + input_dict["prompt"] = await completion_request_to_prompt(request) input_dict["raw"] = True if fmt := request.response_format: @@ -304,7 +226,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): response = OpenAICompatCompletionResponse( choices=[choice], ) - return process_chat_completion_response(response, self.formatter, request) + return process_chat_completion_response(response, request) async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -330,7 +252,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): ) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def embeddings( @@ -352,22 +274,17 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate): return EmbeddingsResponse(embeddings=embeddings) async def register_model(self, model: Model) -> Model: - async def check_model_availability(model_id: str): - response = await self.client.ps() - available_models = [m["model"] for m in response["models"]] - if model_id not in available_models: - raise ValueError( - f"Model '{model_id}' is not available in Ollama. Available models: {', '.join(available_models)}" - ) - if model.model_type == ModelType.embedding: - await check_model_availability(model.provider_resource_id) - return model + response = await self.client.list() + else: + response = await self.client.ps() + available_models = [m["model"] for m in response["models"]] + if model.provider_resource_id not in available_models: + raise ValueError( + f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}" + ) - model = await self.register_helper.register_model(model) - await check_model_availability(model.provider_resource_id) - - return model + return await self.register_helper.register_model(model) async def convert_message_to_openai_dict_for_ollama(message: Message) -> List[dict]: diff --git a/llama_stack/providers/remote/inference/passthrough/__init__.py b/llama_stack/providers/remote/inference/passthrough/__init__.py new file mode 100644 index 000000000..69dd4c461 --- /dev/null +++ b/llama_stack/providers/remote/inference/passthrough/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from pydantic import BaseModel + +from .config import PassthroughImplConfig + + +class PassthroughProviderDataValidator(BaseModel): + url: str + api_key: str + + +async def get_adapter_impl(config: PassthroughImplConfig, _deps): + from .passthrough import PassthroughInferenceAdapter + + assert isinstance(config, PassthroughImplConfig), f"Unexpected config type: {type(config)}" + impl = PassthroughInferenceAdapter(config) + await impl.initialize() + return impl diff --git a/llama_stack/providers/remote/inference/passthrough/config.py b/llama_stack/providers/remote/inference/passthrough/config.py new file mode 100644 index 000000000..46325e428 --- /dev/null +++ b/llama_stack/providers/remote/inference/passthrough/config.py @@ -0,0 +1,31 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, Field, SecretStr
+
+from llama_stack.schema_utils import json_schema_type
+
+
+@json_schema_type
+class PassthroughImplConfig(BaseModel):
+    url: str = Field(
+        default=None,
+        description="The URL for the passthrough endpoint",
+    )
+
+    api_key: Optional[SecretStr] = Field(
+        default=None,
+        description="API Key for the passthrough endpoint",
+    )
+
+    @classmethod
+    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+        return {
+            "url": "${env.PASSTHROUGH_URL}",
+            "api_key": "${env.PASSTHROUGH_API_KEY}",
+        }
diff --git a/llama_stack/providers/remote/inference/passthrough/passthrough.py b/llama_stack/providers/remote/inference/passthrough/passthrough.py
new file mode 100644
index 000000000..a34c34f69
--- /dev/null
+++ b/llama_stack/providers/remote/inference/passthrough/passthrough.py
@@ -0,0 +1,148 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import AsyncGenerator, List, Optional
+
+from llama_stack_client import LlamaStackClient
+
+from llama_stack.apis.common.content_types import InterleavedContent
+from llama_stack.apis.inference import (
+    EmbeddingsResponse,
+    Inference,
+    LogProbConfig,
+    Message,
+    ResponseFormat,
+    SamplingParams,
+    ToolChoice,
+    ToolConfig,
+    ToolDefinition,
+    ToolPromptFormat,
+)
+from llama_stack.apis.models import Model
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
+
+from .config import PassthroughImplConfig
+
+
+class PassthroughInferenceAdapter(Inference):
+    def __init__(self, config: PassthroughImplConfig) -> None:
+        ModelRegistryHelper.__init__(self, [])
+        self.config = config
+
+    async def initialize(self) -> None:
+        pass
+
+    async def shutdown(self) -> None:
+        pass
+
+    async def unregister_model(self, model_id: str) -> None:
+        pass
+
+    async def register_model(self, model: Model) -> Model:
+        return model
+
+    def _get_client(self) -> LlamaStackClient:
+        passthrough_url = None
+        passthrough_api_key = None
+        provider_data = None
+
+        if self.config.url is not None:
+            passthrough_url = self.config.url
+        else:
+            provider_data = self.get_request_provider_data()
+            if provider_data is None or not provider_data.passthrough_url:
+                raise ValueError(
+                    'Pass url of the passthrough endpoint in the header X-LlamaStack-Provider-Data as { "passthrough_url": }'
+                )
+            passthrough_url = provider_data.passthrough_url
+
+        if self.config.api_key is not None:
+            passthrough_api_key = self.config.api_key.get_secret_value()
+        else:
+            provider_data = self.get_request_provider_data()
+            if provider_data is None or not provider_data.passthrough_api_key:
+                raise ValueError(
+                    'Pass API Key for the passthrough endpoint in the header X-LlamaStack-Provider-Data as { "passthrough_api_key": }'
+                )
+            passthrough_api_key = provider_data.passthrough_api_key
+
+        return LlamaStackClient(
+            base_url=passthrough_url,
+            api_key=passthrough_api_key,
+            provider_data=provider_data,
+        )
+
+    async def completion(
+        self,
+        model_id: str,
+        content: InterleavedContent,
+        sampling_params: Optional[SamplingParams] = SamplingParams(),
+        response_format: Optional[ResponseFormat] = None,
+        stream: Optional[bool] = False,
+        logprobs: Optional[LogProbConfig] = None,
+    )
-> AsyncGenerator: + client = self._get_client() + model = await self.model_store.get_model(model_id) + + params = { + "model_id": model.provider_resource_id, + "content": content, + "sampling_params": sampling_params, + "response_format": response_format, + "stream": stream, + "logprobs": logprobs, + } + + params = {key: value for key, value in params.items() if value is not None} + + # only pass through the not None params + return client.inference.completion(**params) + + async def chat_completion( + self, + model_id: str, + messages: List[Message], + sampling_params: Optional[SamplingParams] = SamplingParams(), + tools: Optional[List[ToolDefinition]] = None, + tool_choice: Optional[ToolChoice] = ToolChoice.auto, + tool_prompt_format: Optional[ToolPromptFormat] = None, + response_format: Optional[ResponseFormat] = None, + stream: Optional[bool] = False, + logprobs: Optional[LogProbConfig] = None, + tool_config: Optional[ToolConfig] = None, + ) -> AsyncGenerator: + client = self._get_client() + model = await self.model_store.get_model(model_id) + + params = { + "model_id": model.provider_resource_id, + "messages": messages, + "sampling_params": sampling_params, + "tools": tools, + "tool_choice": tool_choice, + "tool_prompt_format": tool_prompt_format, + "response_format": response_format, + "stream": stream, + "logprobs": logprobs, + } + + params = {key: value for key, value in params.items() if value is not None} + + # only pass through the not None params + return client.inference.chat_completion(**params) + + async def embeddings( + self, + model_id: str, + contents: List[InterleavedContent], + ) -> EmbeddingsResponse: + client = self._get_client() + model = await self.model_store.get_model(model_id) + + return client.inference.embeddings( + model_id=model.provider_resource_id, + contents=contents, + ) diff --git a/llama_stack/providers/remote/inference/runpod/runpod.py b/llama_stack/providers/remote/inference/runpod/runpod.py index 1abb17336..09122a8e6 100644 --- a/llama_stack/providers/remote/inference/runpod/runpod.py +++ b/llama_stack/providers/remote/inference/runpod/runpod.py @@ -5,8 +5,6 @@ # the root directory of this source tree. 
from typing import AsyncGenerator -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from openai import OpenAI from llama_stack.apis.inference import * # noqa: F403 @@ -45,7 +43,6 @@ class RunpodInferenceAdapter(ModelRegistryHelper, Inference): def __init__(self, config: RunpodImplConfig) -> None: ModelRegistryHelper.__init__(self, stack_to_provider_models_map=RUNPOD_SUPPORTED_MODELS) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self) -> None: return @@ -56,7 +53,7 @@ class RunpodInferenceAdapter(ModelRegistryHelper, Inference): async def completion( self, model: str, - content: InterleavedTextMedia, + content: InterleavedContent, sampling_params: Optional[SamplingParams] = SamplingParams(), response_format: Optional[ResponseFormat] = None, stream: Optional[bool] = False, @@ -97,7 +94,7 @@ class RunpodInferenceAdapter(ModelRegistryHelper, Inference): ) -> ChatCompletionResponse: params = self._get_params(request) r = client.completions.create(**params) - return process_chat_completion_response(r, self.formatter, request) + return process_chat_completion_response(r, request) async def _stream_chat_completion(self, request: ChatCompletionRequest, client: OpenAI) -> AsyncGenerator: params = self._get_params(request) @@ -108,13 +105,13 @@ class RunpodInferenceAdapter(ModelRegistryHelper, Inference): yield chunk stream = _to_async_generator() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk def _get_params(self, request: ChatCompletionRequest) -> dict: return { "model": self.map_to_provider_model(request.model), - "prompt": chat_completion_request_to_prompt(request, self.formatter), + "prompt": chat_completion_request_to_prompt(request), "stream": request.stream, **get_sampling_options(request.sampling_params), } @@ -122,6 +119,6 @@ class RunpodInferenceAdapter(ModelRegistryHelper, Inference): async def embeddings( self, model: str, - contents: List[InterleavedTextMedia], + contents: List[InterleavedContent], ) -> EmbeddingsResponse: raise NotImplementedError() diff --git a/llama_stack/providers/remote/inference/sambanova/__init__.py b/llama_stack/providers/remote/inference/sambanova/__init__.py index ccf4bf1cb..3e682e69c 100644 --- a/llama_stack/providers/remote/inference/sambanova/__init__.py +++ b/llama_stack/providers/remote/inference/sambanova/__init__.py @@ -7,7 +7,6 @@ from pydantic import BaseModel from .config import SambaNovaImplConfig -from .sambanova import SambaNovaInferenceAdapter class SambaNovaProviderDataValidator(BaseModel): @@ -15,6 +14,8 @@ class SambaNovaProviderDataValidator(BaseModel): async def get_adapter_impl(config: SambaNovaImplConfig, _deps): + from .sambanova import SambaNovaInferenceAdapter + assert isinstance(config, SambaNovaImplConfig), f"Unexpected config type: {type(config)}" impl = SambaNovaInferenceAdapter(config) await impl.initialize() diff --git a/llama_stack/providers/remote/inference/sambanova/models.py b/llama_stack/providers/remote/inference/sambanova/models.py new file mode 100644 index 000000000..2231be22d --- /dev/null +++ b/llama_stack/providers/remote/inference/sambanova/models.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + build_hf_repo_model_entry, +) + +MODEL_ENTRIES = [ + build_hf_repo_model_entry( + "Meta-Llama-3.1-8B-Instruct", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-3.1-70B-Instruct", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-3.1-405B-Instruct", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-3.2-1B-Instruct", + CoreModelId.llama3_2_1b_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-3.2-3B-Instruct", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-3.3-70B-Instruct", + CoreModelId.llama3_3_70b_instruct.value, + ), + build_hf_repo_model_entry( + "Llama-3.2-11B-Vision-Instruct", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "Llama-3.2-90B-Vision-Instruct", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "Meta-Llama-Guard-3-8B", + CoreModelId.llama_guard_3_8b.value, + ), +] diff --git a/llama_stack/providers/remote/inference/sambanova/sambanova.py b/llama_stack/providers/remote/inference/sambanova/sambanova.py index b906e0dcb..c05284d7d 100644 --- a/llama_stack/providers/remote/inference/sambanova/sambanova.py +++ b/llama_stack/providers/remote/inference/sambanova/sambanova.py @@ -7,8 +7,6 @@ import json from typing import AsyncGenerator -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from openai import OpenAI from llama_stack.apis.common.content_types import ( @@ -18,14 +16,12 @@ from llama_stack.apis.common.content_types import ( ) from llama_stack.apis.inference import * # noqa: F403 from llama_stack.models.llama.datatypes import ( - CoreModelId, GreedySamplingStrategy, TopKSamplingStrategy, TopPSamplingStrategy, ) from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.openai_compat import ( process_chat_completion_stream_response, @@ -35,56 +31,13 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( ) from .config import SambaNovaImplConfig - -MODEL_ALIASES = [ - build_model_alias( - "Meta-Llama-3.1-8B-Instruct", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "Meta-Llama-3.1-70B-Instruct", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "Meta-Llama-3.1-405B-Instruct", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias( - "Meta-Llama-3.2-1B-Instruct", - CoreModelId.llama3_2_1b_instruct.value, - ), - build_model_alias( - "Meta-Llama-3.2-3B-Instruct", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias( - "Meta-Llama-3.3-70B-Instruct", - CoreModelId.llama3_3_70b_instruct.value, - ), - build_model_alias( - "Llama-3.2-11B-Vision-Instruct", - CoreModelId.llama3_2_11b_vision_instruct.value, - ), - build_model_alias( - "Llama-3.2-90B-Vision-Instruct", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - build_model_alias( - "Meta-Llama-Guard-3-8B", - CoreModelId.llama_guard_3_8b.value, - ), -] +from .models import MODEL_ENTRIES class SambaNovaInferenceAdapter(ModelRegistryHelper, Inference): def __init__(self, config: 
SambaNovaImplConfig) -> None: - ModelRegistryHelper.__init__( - self, - model_aliases=MODEL_ALIASES, - ) - + ModelRegistryHelper.__init__(self, model_entries=MODEL_ENTRIES) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self) -> None: return @@ -160,7 +113,7 @@ class SambaNovaInferenceAdapter(ModelRegistryHelper, Inference): yield chunk stream = _to_async_generator() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def embeddings( diff --git a/llama_stack/providers/remote/inference/tgi/__init__.py b/llama_stack/providers/remote/inference/tgi/__init__.py index 451650323..834e51324 100644 --- a/llama_stack/providers/remote/inference/tgi/__init__.py +++ b/llama_stack/providers/remote/inference/tgi/__init__.py @@ -7,13 +7,14 @@ from typing import Union from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig -from .tgi import InferenceAPIAdapter, InferenceEndpointAdapter, TGIAdapter async def get_adapter_impl( config: Union[InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig], _deps, ): + from .tgi import InferenceAPIAdapter, InferenceEndpointAdapter, TGIAdapter + if isinstance(config, TGIImplConfig): impl = TGIAdapter() elif isinstance(config, InferenceAPIImplConfig): diff --git a/llama_stack/providers/remote/inference/tgi/tgi.py b/llama_stack/providers/remote/inference/tgi/tgi.py index 1909e01f8..1a50e3b61 100644 --- a/llama_stack/providers/remote/inference/tgi/tgi.py +++ b/llama_stack/providers/remote/inference/tgi/tgi.py @@ -9,8 +9,6 @@ import logging from typing import AsyncGenerator, List, Optional from huggingface_hub import AsyncInferenceClient, HfApi -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( @@ -34,7 +32,7 @@ from llama_stack.models.llama.sku_list import all_registered_models from llama_stack.providers.datatypes import ModelsProtocolPrivate from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, + build_hf_repo_model_entry, ) from llama_stack.providers.utils.inference.openai_compat import ( OpenAICompatCompletionChoice, @@ -55,9 +53,9 @@ from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImpl log = logging.getLogger(__name__) -def build_model_aliases(): +def build_hf_repo_model_entries(): return [ - build_model_alias( + build_hf_repo_model_entry( model.huggingface_repo, model.descriptor(), ) @@ -72,8 +70,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): model_id: str def __init__(self) -> None: - self.formatter = ChatFormat(Tokenizer.get_instance()) - self.register_helper = ModelRegistryHelper(build_model_aliases()) + self.register_helper = ModelRegistryHelper(build_hf_repo_model_entries()) self.huggingface_repo_to_llama_model_id = { model.huggingface_repo: model.descriptor() for model in all_registered_models() if model.huggingface_repo } @@ -149,7 +146,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): return options async def _get_params_for_completion(self, request: CompletionRequest) -> dict: - prompt, input_tokens = await completion_request_to_prompt_model_input_info(request, self.formatter) + prompt, input_tokens = await 
completion_request_to_prompt_model_input_info(request) return dict( prompt=prompt, @@ -177,7 +174,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): ) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk async def _nonstream_completion(self, request: CompletionRequest) -> AsyncGenerator: @@ -193,7 +190,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): choices=[choice], ) - return process_completion_response(response, self.formatter) + return process_completion_response(response) async def chat_completion( self, @@ -236,7 +233,7 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): response = OpenAICompatCompletionResponse( choices=[choice], ) - return process_chat_completion_response(response, self.formatter, request) + return process_chat_completion_response(response, request) async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -252,12 +249,12 @@ class _HfAdapter(Inference, ModelsProtocolPrivate): ) stream = _generate_and_convert_to_openai_compat() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def _get_params(self, request: ChatCompletionRequest) -> dict: prompt, input_tokens = await chat_completion_request_to_model_input_info( - request, self.register_helper.get_llama_model(request.model), self.formatter + request, self.register_helper.get_llama_model(request.model) ) return dict( prompt=prompt, diff --git a/llama_stack/providers/remote/inference/together/models.py b/llama_stack/providers/remote/inference/together/models.py new file mode 100644 index 000000000..63d3d94b5 --- /dev/null +++ b/llama_stack/providers/remote/inference/together/models.py @@ -0,0 +1,67 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
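+
+# The embedding entries carry "embedding_dimension" and "context_length" in
+# metadata; inference adapters read metadata["embedding_dimension"] when
+# issuing embeddings requests.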
+ +from llama_stack.apis.models.models import ModelType +from llama_stack.models.llama.datatypes import CoreModelId +from llama_stack.providers.utils.inference.model_registry import ( + ProviderModelEntry, + build_hf_repo_model_entry, +) + +MODEL_ENTRIES = [ + build_hf_repo_model_entry( + "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", + CoreModelId.llama3_1_8b_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + CoreModelId.llama3_1_70b_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + CoreModelId.llama3_1_405b_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Llama-3.2-3B-Instruct-Turbo", + CoreModelId.llama3_2_3b_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo", + CoreModelId.llama3_2_11b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + CoreModelId.llama3_2_90b_vision_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Llama-3.3-70B-Instruct-Turbo", + CoreModelId.llama3_3_70b_instruct.value, + ), + build_hf_repo_model_entry( + "meta-llama/Meta-Llama-Guard-3-8B", + CoreModelId.llama_guard_3_8b.value, + ), + build_hf_repo_model_entry( + "meta-llama/Llama-Guard-3-11B-Vision-Turbo", + CoreModelId.llama_guard_3_11b_vision.value, + ), + ProviderModelEntry( + provider_model_id="togethercomputer/m2-bert-80M-8k-retrieval", + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 768, + "context_length": 8192, + }, + ), + ProviderModelEntry( + provider_model_id="togethercomputer/m2-bert-80M-32k-retrieval", + model_type=ModelType.embedding, + metadata={ + "embedding_dimension": 768, + "context_length": 32768, + }, + ), +] diff --git a/llama_stack/providers/remote/inference/together/together.py b/llama_stack/providers/remote/inference/together/together.py index 054501da8..8afd3e85b 100644 --- a/llama_stack/providers/remote/inference/together/together.py +++ b/llama_stack/providers/remote/inference/together/together.py @@ -6,8 +6,6 @@ from typing import AsyncGenerator, List, Optional, Union -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from together import Together from llama_stack.apis.common.content_types import InterleavedContent @@ -28,10 +26,8 @@ from llama_stack.apis.inference import ( ToolPromptFormat, ) from llama_stack.distribution.request_headers import NeedsRequestProviderData -from llama_stack.models.llama.datatypes import CoreModelId from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, ) from llama_stack.providers.utils.inference.openai_compat import ( convert_message_to_openai_dict, @@ -50,52 +46,13 @@ from llama_stack.providers.utils.inference.prompt_adapter import ( ) from .config import TogetherImplConfig - -MODEL_ALIASES = [ - build_model_alias( - "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", - CoreModelId.llama3_1_8b_instruct.value, - ), - build_model_alias( - "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", - CoreModelId.llama3_1_70b_instruct.value, - ), - build_model_alias( - "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - CoreModelId.llama3_1_405b_instruct.value, - ), - build_model_alias( - "meta-llama/Llama-3.2-3B-Instruct-Turbo", - CoreModelId.llama3_2_3b_instruct.value, - ), - build_model_alias( - "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo", - CoreModelId.llama3_2_11b_vision_instruct.value, 
- ), - build_model_alias( - "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", - CoreModelId.llama3_2_90b_vision_instruct.value, - ), - build_model_alias( - "meta-llama/Llama-3.3-70B-Instruct-Turbo", - CoreModelId.llama3_3_70b_instruct.value, - ), - build_model_alias( - "meta-llama/Meta-Llama-Guard-3-8B", - CoreModelId.llama_guard_3_8b.value, - ), - build_model_alias( - "meta-llama/Llama-Guard-3-11B-Vision-Turbo", - CoreModelId.llama_guard_3_11b_vision.value, - ), -] +from .models import MODEL_ENTRIES class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProviderData): def __init__(self, config: TogetherImplConfig) -> None: - ModelRegistryHelper.__init__(self, MODEL_ALIASES) + ModelRegistryHelper.__init__(self, MODEL_ENTRIES) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) async def initialize(self) -> None: pass @@ -142,7 +99,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse: params = await self._get_params(request) r = self._get_client().completions.create(**params) - return process_completion_response(r, self.formatter) + return process_completion_response(r) async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -154,7 +111,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi yield chunk stream = _to_async_generator() - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk def _build_options( @@ -220,7 +177,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi r = self._get_client().chat.completions.create(**params) else: r = self._get_client().completions.create(**params) - return process_chat_completion_response(r, self.formatter, request) + return process_chat_completion_response(r, request) async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -235,7 +192,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi yield chunk stream = _to_async_generator() - async for chunk in process_chat_completion_stream_response(stream, self.formatter, request): + async for chunk in process_chat_completion_stream_response(stream, request): yield chunk async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict: @@ -246,11 +203,11 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages] else: input_dict["prompt"] = await chat_completion_request_to_prompt( - request, self.get_llama_model(request.model), self.formatter + request, self.get_llama_model(request.model) ) else: assert not media_present, "Together does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt(request, self.formatter) + input_dict["prompt"] = await completion_request_to_prompt(request) return { "model": request.model, diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py index b22284302..b37b0d305 100644 --- a/llama_stack/providers/remote/inference/vllm/vllm.py +++ b/llama_stack/providers/remote/inference/vllm/vllm.py @@ -8,8 +8,6 @@ import logging 
from typing import AsyncGenerator, List, Optional, Union from llama_models.datatypes import StopReason, ToolCall -from llama_models.llama3.api.chat_format import ChatFormat -from llama_models.llama3.api.tokenizer import Tokenizer from openai import OpenAI from llama_stack.apis.common.content_types import InterleavedContent, TextDelta, ToolCallDelta, ToolCallParseStatus @@ -40,7 +38,7 @@ from llama_stack.models.llama.sku_list import all_registered_models from llama_stack.providers.datatypes import ModelsProtocolPrivate from llama_stack.providers.utils.inference.model_registry import ( ModelRegistryHelper, - build_model_alias, + build_hf_repo_model_entry, ) from llama_stack.providers.utils.inference.openai_compat import ( OpenAICompatCompletionResponse, @@ -64,9 +62,9 @@ from .config import VLLMInferenceAdapterConfig log = logging.getLogger(__name__) -def build_model_aliases(): +def build_hf_repo_model_entries(): return [ - build_model_alias( + build_hf_repo_model_entry( model.huggingface_repo, model.descriptor(), ) @@ -150,19 +148,36 @@ async def _process_vllm_chat_completion_stream_response( async for chunk in stream: choice = chunk.choices[0] if choice.finish_reason: - yield ChatCompletionResponseStreamChunk( - event=ChatCompletionResponseEvent( - event_type=event_type, - delta=ToolCallDelta( - tool_call=ToolCall( - call_id=tool_call_buf.call_id, - tool_name=tool_call_buf.tool_name, - arguments=json.loads(tool_call_buf.arguments), + args_str = tool_call_buf.arguments + args = None + try: + args = {} if not args_str else json.loads(args_str) + except Exception as e: + log.warning(f"Failed to parse tool call buffer arguments: {args_str} \nError: {e}") + if args is not None: + yield ChatCompletionResponseStreamChunk( + event=ChatCompletionResponseEvent( + event_type=event_type, + delta=ToolCallDelta( + tool_call=ToolCall( + call_id=tool_call_buf.call_id, + tool_name=tool_call_buf.tool_name, + arguments=args, + ), + parse_status=ToolCallParseStatus.succeeded, ), - parse_status=ToolCallParseStatus.succeeded, - ), + ) + ) + else: + yield ChatCompletionResponseStreamChunk( + event=ChatCompletionResponseEvent( + event_type=ChatCompletionResponseEventType.progress, + delta=ToolCallDelta( + tool_call=str(tool_call_buf), + parse_status=ToolCallParseStatus.failed, + ), + ) ) - ) yield ChatCompletionResponseStreamChunk( event=ChatCompletionResponseEvent( event_type=ChatCompletionResponseEventType.complete, @@ -189,9 +204,8 @@ async def _process_vllm_chat_completion_stream_response( class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate): def __init__(self, config: VLLMInferenceAdapterConfig) -> None: - self.register_helper = ModelRegistryHelper(build_model_aliases()) + self.register_helper = ModelRegistryHelper(build_hf_repo_model_entries()) self.config = config - self.formatter = ChatFormat(Tokenizer.get_instance()) self.client = None async def initialize(self) -> None: @@ -286,14 +300,14 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate): if len(request.tools) > 0: res = _process_vllm_chat_completion_stream_response(stream) else: - res = process_chat_completion_stream_response(stream, self.formatter, request) + res = process_chat_completion_stream_response(stream, request) async for chunk in res: yield chunk async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: params = await self._get_params(request) r = self.client.completions.create(**params) - return process_completion_response(r, self.formatter) + return process_completion_response(r) async 
def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: params = await self._get_params(request) @@ -305,7 +319,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate): yield chunk stream = _to_async_generator() - async for chunk in process_completion_stream_response(stream, self.formatter): + async for chunk in process_completion_stream_response(stream): yield chunk async def register_model(self, model: Model) -> Model: @@ -332,10 +346,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate): input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages] else: assert not request_has_media(request), "vLLM does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt( - request, - self.formatter, - ) + input_dict["prompt"] = await completion_request_to_prompt(request) if fmt := request.response_format: if fmt.type == ResponseFormatType.json_schema.value: @@ -364,8 +375,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate): kwargs = {} assert model.model_type == ModelType.embedding - assert model.metadata.get("embedding_dimensions") - kwargs["dimensions"] = model.metadata.get("embedding_dimensions") + assert model.metadata.get("embedding_dimension") + kwargs["dimensions"] = model.metadata.get("embedding_dimension") assert all(not content_has_media(content) for content in contents), "VLLM does not support media for embeddings" response = self.client.embeddings.create( model=model.provider_resource_id, diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py b/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py index 2ddf7b4fe..fb1f558e5 100644 --- a/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py +++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py @@ -7,7 +7,6 @@ from pydantic import BaseModel from .config import ModelContextProtocolConfig -from .model_context_protocol import ModelContextProtocolToolRuntimeImpl class ModelContextProtocolToolProviderDataValidator(BaseModel): @@ -15,6 +14,8 @@ class ModelContextProtocolToolProviderDataValidator(BaseModel): async def get_adapter_impl(config: ModelContextProtocolConfig, _deps): + from .model_context_protocol import ModelContextProtocolToolRuntimeImpl + impl = ModelContextProtocolToolRuntimeImpl(config) await impl.initialize() return impl diff --git a/llama_stack/providers/tests/README.md b/llama_stack/providers/tests/README.md index e4e94a3fd..f2c527f6d 100644 --- a/llama_stack/providers/tests/README.md +++ b/llama_stack/providers/tests/README.md @@ -12,6 +12,20 @@ We use `pytest` and all of its dynamism to enable the features needed. Specifica - We use `pytest_collection_modifyitems` to filter tests based on the test config (if specified). +## Pre-requisites + +Your development environment should have been configured as per the instructions in the +[CONTRIBUTING.md](../../../CONTRIBUTING.md) file. In particular, make sure to install the test extra +dependencies. Below is the full configuration: + + +```bash +$ cd llama-stack +$ uv sync --extra dev --extra test +$ uv pip install -e . +$ source .venv/bin/activate +``` + ## Common options All tests support a `--providers` option which can be a string of the form `api1=provider_fixture1,api2=provider_fixture2`. 
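For example, a concrete invocation (the test file and key below are illustrative) might look like:

```bash
pytest -s -v llama_stack/providers/tests/inference/test_text_inference.py \
  --providers inference=fireworks \
  --env FIREWORKS_API_KEY=<...>
```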
So, when testing safety (which needs inference and safety APIs) you can use `--providers inference=together,safety=meta_reference` to use these fixtures in concert.
@@ -50,6 +64,9 @@ pytest -s -v llama_stack/providers/tests/inference/test_text_inference.py \
   --env FIREWORKS_API_KEY=<...>
 ```

+> [!TIP]
+> If you’re using `uv`, you can isolate test executions by prefixing all commands with `uv run`, e.g. `uv run pytest ...`.
+
 ## Agents

 The Agents API composes three other APIs underneath:
@@ -87,3 +104,6 @@ pytest llama_stack/providers/tests/ --config=ci_test_config.yaml

 Currently, we support test config on inference, agents and memory api tests.
 Example format of test config can be found in ci_test_config.yaml.
+
+## Test Data
+We encourage providers to use our test data for internal development testing, to keep it easy and consistent with the tests we provide. Each test case may define its own data format; please refer to our test source code for details on how these fields are used in the tests.
diff --git a/llama_stack/providers/tests/inference/fixtures.py b/llama_stack/providers/tests/inference/fixtures.py
index 2a782befc..b553b6b02 100644
--- a/llama_stack/providers/tests/inference/fixtures.py
+++ b/llama_stack/providers/tests/inference/fixtures.py
@@ -20,7 +20,7 @@ from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig
 from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
 from llama_stack.providers.remote.inference.groq import GroqConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
-from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
+from llama_stack.providers.remote.inference.ollama import DEFAULT_OLLAMA_URL, OllamaImplConfig
 from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
 from llama_stack.providers.remote.inference.tgi import TGIImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
@@ -83,17 +83,13 @@ def inference_cerebras() -> ProviderFixture:

 @pytest.fixture(scope="session")
-def inference_ollama(inference_model) -> ProviderFixture:
-    inference_model = [inference_model] if isinstance(inference_model, str) else inference_model
-    if inference_model and "Llama3.1-8B-Instruct" in inference_model:
-        pytest.skip("Ollama only supports Llama3.2-3B-Instruct for testing")
-
+def inference_ollama() -> ProviderFixture:
     return ProviderFixture(
         providers=[
             Provider(
                 provider_id="ollama",
                 provider_type="remote::ollama",
-                config=OllamaImplConfig(host="localhost", port=os.getenv("OLLAMA_PORT", 11434)).model_dump(),
+                config=OllamaImplConfig(url=os.getenv("OLLAMA_URL", DEFAULT_OLLAMA_URL)).model_dump(),
             )
         ],
     )
diff --git a/llama_stack/providers/tests/inference/test_model_registration.py b/llama_stack/providers/tests/inference/test_model_registration.py
index 7c41b07ef..4a5c6a259 100644
--- a/llama_stack/providers/tests/inference/test_model_registration.py
+++ b/llama_stack/providers/tests/inference/test_model_registration.py
@@ -4,8 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from unittest.mock import AsyncMock, patch - import pytest # How to run this test: @@ -15,6 +13,9 @@ import pytest class TestModelRegistration: + def provider_supports_custom_names(self, provider) -> bool: + return "remote::ollama" not in provider.__provider_spec__.provider_type + @pytest.mark.asyncio async def test_register_unsupported_model(self, inference_stack, inference_model): inference_impl, models_impl = inference_stack @@ -47,7 +48,12 @@ class TestModelRegistration: ) @pytest.mark.asyncio - async def test_register_with_llama_model(self, inference_stack): + async def test_register_with_llama_model(self, inference_stack, inference_model): + inference_impl, models_impl = inference_stack + provider = inference_impl.routing_table.get_provider_impl(inference_model) + if not self.provider_supports_custom_names(provider): + pytest.skip("Provider does not support custom model names") + _, models_impl = inference_stack _ = await models_impl.register_model( @@ -67,22 +73,6 @@ class TestModelRegistration: provider_model_id="custom-model", ) - @pytest.mark.asyncio - async def test_initialize_model_during_registering(self, inference_stack): - _, models_impl = inference_stack - - with patch( - "llama_stack.providers.inline.inference.meta_reference.inference.MetaReferenceInferenceImpl.load_model", - new_callable=AsyncMock, - ) as mock_load_model: - _ = await models_impl.register_model( - model_id="Llama3.1-8B-Instruct", - metadata={ - "llama_model": "meta-llama/Llama-3.1-8B-Instruct", - }, - ) - mock_load_model.assert_called_once() - @pytest.mark.asyncio async def test_register_with_invalid_llama_model(self, inference_stack): _, models_impl = inference_stack diff --git a/llama_stack/providers/tests/inference/test_text_inference.py b/llama_stack/providers/tests/inference/test_text_inference.py index f25b95004..1a384cc91 100644 --- a/llama_stack/providers/tests/inference/test_text_inference.py +++ b/llama_stack/providers/tests/inference/test_text_inference.py @@ -6,7 +6,7 @@ import pytest -from pydantic import BaseModel, ValidationError +from pydantic import BaseModel, TypeAdapter, ValidationError from llama_stack.apis.common.content_types import ToolCallParseStatus from llama_stack.apis.inference import ( @@ -17,6 +17,7 @@ from llama_stack.apis.inference import ( CompletionResponseStreamChunk, JsonSchemaResponseFormat, LogProbConfig, + Message, SystemMessage, ToolChoice, UserMessage, @@ -30,6 +31,7 @@ from llama_stack.models.llama.datatypes import ( ToolParamDefinition, ToolPromptFormat, ) +from llama_stack.providers.tests.test_cases.test_case import TestCase from .utils import group_chunks @@ -178,8 +180,9 @@ class TestInference: else: # no token, no logprobs assert not chunk.logprobs, "Logprobs should be empty" + @pytest.mark.parametrize("test_case", ["completion-01"]) @pytest.mark.asyncio(loop_scope="session") - async def test_completion_structured_output(self, inference_model, inference_stack): + async def test_completion_structured_output(self, inference_model, inference_stack, test_case): inference_impl, _ = inference_stack class Output(BaseModel): @@ -187,7 +190,9 @@ class TestInference: year_born: str year_retired: str - user_input = "Michael Jordan was born in 1963. He played basketball for the Chicago Bulls. He retired in 2003." 
+ tc = TestCase(test_case) + + user_input = tc["user_input"] response = await inference_impl.completion( model_id=inference_model, content=user_input, @@ -203,9 +208,10 @@ class TestInference: assert isinstance(response.content, str) answer = Output.model_validate_json(response.content) - assert answer.name == "Michael Jordan" - assert answer.year_born == "1963" - assert answer.year_retired == "2003" + expected = tc["expected"] + assert answer.name == expected["name"] + assert answer.year_born == expected["year_born"] + assert answer.year_retired == expected["year_retired"] @pytest.mark.asyncio(loop_scope="session") async def test_chat_completion_non_streaming( @@ -224,8 +230,9 @@ class TestInference: assert isinstance(response.completion_message.content, str) assert len(response.completion_message.content) > 0 + @pytest.mark.parametrize("test_case", ["chat_completion-01"]) @pytest.mark.asyncio(loop_scope="session") - async def test_structured_output(self, inference_model, inference_stack, common_params): + async def test_structured_output(self, inference_model, inference_stack, common_params, test_case): inference_impl, _ = inference_stack class AnswerFormat(BaseModel): @@ -234,20 +241,12 @@ class TestInference: year_of_birth: int num_seasons_in_nba: int + tc = TestCase(test_case) + messages = [TypeAdapter(Message).validate_python(m) for m in tc["messages"]] + response = await inference_impl.chat_completion( model_id=inference_model, - messages=[ - # we include context about Michael Jordan in the prompt so that the test is - # focused on the funtionality of the model and not on the information embedded - # in the model. Llama 3.2 3B Instruct tends to think MJ played for 14 seasons. - SystemMessage( - content=( - "You are a helpful assistant.\n\n" - "Michael Jordan was born in 1963. He played basketball for the Chicago Bulls for 15 seasons." 
-                )
-            ),
-            UserMessage(content="Please give me information about Michael Jordan."),
-        ],
+            messages=messages,
             stream=False,
             response_format=JsonSchemaResponseFormat(
                 json_schema=AnswerFormat.model_json_schema(),
@@ -260,10 +259,11 @@ class TestInference:
         assert isinstance(response.completion_message.content, str)
         answer = AnswerFormat.model_validate_json(response.completion_message.content)
-        assert answer.first_name == "Michael"
-        assert answer.last_name == "Jordan"
-        assert answer.year_of_birth == 1963
-        assert answer.num_seasons_in_nba == 15
+        expected = tc["expected"]
+        assert answer.first_name == expected["first_name"]
+        assert answer.last_name == expected["last_name"]
+        assert answer.year_of_birth == expected["year_of_birth"]
+        assert answer.num_seasons_in_nba == expected["num_seasons_in_nba"]

         response = await inference_impl.chat_completion(
             model_id=inference_model,
diff --git a/llama_stack/providers/tests/safety/conftest.py b/llama_stack/providers/tests/safety/conftest.py
index 67ca23cb6..3e8fcaeaa 100644
--- a/llama_stack/providers/tests/safety/conftest.py
+++ b/llama_stack/providers/tests/safety/conftest.py
@@ -83,7 +83,6 @@ def pytest_generate_tests(metafunc):
     if "safety_shield" in metafunc.fixturenames:
         shield_id = metafunc.config.getoption("--safety-shield")
         if shield_id:
-            assert shield_id.startswith("meta-llama/")
             params = [pytest.param(shield_id, id="")]
         else:
             params = SAFETY_SHIELD_PARAMS
diff --git a/llama_stack/providers/tests/test_cases/__init__.py b/llama_stack/providers/tests/test_cases/__init__.py
new file mode 100644
index 000000000..756f351d8
--- /dev/null
+++ b/llama_stack/providers/tests/test_cases/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
diff --git a/llama_stack/providers/tests/test_cases/chat_completion.json b/llama_stack/providers/tests/test_cases/chat_completion.json
new file mode 100644
index 000000000..cb8854e15
--- /dev/null
+++ b/llama_stack/providers/tests/test_cases/chat_completion.json
@@ -0,0 +1,24 @@
+{
+    "01": {
+        "name": "structured output",
+        "data": {
+            "notes": "We include context about Michael Jordan in the prompt so that the test is focused on the functionality of the model and not on the information embedded in the model. Llama 3.2 3B Instruct tends to think MJ played for 14 seasons.",
+            "messages": [
+                {
+                    "role": "system",
+                    "content": "You are a helpful assistant. Michael Jordan was born in 1963. He played basketball for the Chicago Bulls for 15 seasons."
+                },
+                {
+                    "role": "user",
+                    "content": "Please give me information about Michael Jordan."
+                }
+            ],
+            "expected": {
+                "first_name": "Michael",
+                "last_name": "Jordan",
+                "year_of_birth": 1963,
+                "num_seasons_in_nba": 15
+            }
+        }
+    }
+}
diff --git a/llama_stack/providers/tests/test_cases/completion.json b/llama_stack/providers/tests/test_cases/completion.json
new file mode 100644
index 000000000..1e968e45e
--- /dev/null
+++ b/llama_stack/providers/tests/test_cases/completion.json
@@ -0,0 +1,13 @@
+{
+    "01": {
+        "name": "structured output",
+        "data": {
+            "user_input": "Michael Jordan was born in 1963. He played basketball for the Chicago Bulls.
diff --git a/llama_stack/providers/tests/test_cases/completion.json b/llama_stack/providers/tests/test_cases/completion.json
new file mode 100644
index 000000000..1e968e45e
--- /dev/null
+++ b/llama_stack/providers/tests/test_cases/completion.json
@@ -0,0 +1,13 @@
+{
+    "01": {
+        "name": "structured output",
+        "data": {
+            "user_input": "Michael Jordan was born in 1963. He played basketball for the Chicago Bulls. He retired in 2003.",
+            "expected": {
+                "name": "Michael Jordan",
+                "year_born": "1963",
+                "year_retired": "2003"
+            }
+        }
+    }
+}
diff --git a/llama_stack/providers/tests/test_cases/test_case.py b/llama_stack/providers/tests/test_cases/test_case.py
new file mode 100644
index 000000000..7bd9b4d56
--- /dev/null
+++ b/llama_stack/providers/tests/test_cases/test_case.py
@@ -0,0 +1,32 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import json
+import pathlib
+
+
+class TestCase:
+    _apis = ["chat_completion", "completion"]
+    _jsonblob = {}
+
+    def __init__(self, name):
+        # loading all test cases
+        if self._jsonblob == {}:
+            for api in self._apis:
+                with open(pathlib.Path(__file__).parent / f"{api}.json", "r") as f:
+                    TestCase._jsonblob.update({f"{api}-{k}": v for k, v in json.load(f).items()})
+
+        # loading this test case
+        tc = self._jsonblob.get(name)
+        if tc is None:
+            raise ValueError(f"Test case {name} not found")
+
+        # these are the only fields we need
+        self.name = tc.get("name")
+        self.data = tc.get("data")
+
+    def __getitem__(self, key):
+        return self.data[key]
diff --git a/llama_stack/providers/tests/vector_io/test_sqlite_vec.py b/llama_stack/providers/tests/vector_io/test_sqlite_vec.py
new file mode 100644
index 000000000..47d044cc3
--- /dev/null
+++ b/llama_stack/providers/tests/vector_io/test_sqlite_vec.py
@@ -0,0 +1,160 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import asyncio
+import sqlite3
+
+import numpy as np
+import pytest
+import sqlite_vec
+
+from llama_stack.apis.vector_dbs import VectorDB
+from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
+from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
+    SQLiteVecIndex,
+    SQLiteVecVectorIOAdapter,
+    generate_chunk_id,
+)
+
+# How to run this test:
+#
+# pytest llama_stack/providers/tests/vector_io/test_sqlite_vec.py \
+#     -v -s --tb=short --disable-warnings --asyncio-mode=auto
+
+SQLITE_VEC_PROVIDER = "sqlite_vec"
+EMBEDDING_DIMENSION = 384
+EMBEDDING_MODEL = "all-MiniLM-L6-v2"
+
+
+@pytest.fixture(scope="session")
+def loop():
+    return asyncio.new_event_loop()
+
+
+@pytest.fixture(scope="session", autouse=True)
+def sqlite_connection(loop):
+    conn = sqlite3.connect(":memory:")
+    try:
+        conn.enable_load_extension(True)
+        sqlite_vec.load(conn)
+        yield conn
+    finally:
+        conn.close()
+
+
+@pytest.fixture(scope="session", autouse=True)
+async def sqlite_vec_index(sqlite_connection):
+    return await SQLiteVecIndex.create(dimension=EMBEDDING_DIMENSION, connection=sqlite_connection, bank_id="test_bank")
+
+
+@pytest.fixture(scope="session")
+def sample_chunks():
+    """Generates chunks that force multiple batches for a single document to expose ID conflicts."""
+    n, k = 10, 3
+    sample = [
+        Chunk(content=f"Sentence {i} from document {j}", metadata={"document_id": f"document-{j}"})
+        for j in range(k)
+        for i in range(n)
+    ]
+    return sample
+
+
+@pytest.fixture(scope="session")
+def sample_embeddings(sample_chunks):
+    np.random.seed(42)
+    return np.array([np.random.rand(EMBEDDING_DIMENSION).astype(np.float32) for _ in sample_chunks])
+
+
+@pytest.mark.asyncio
+async def test_add_chunks(sqlite_vec_index, sample_chunks, sample_embeddings):
+    await
sqlite_vec_index.add_chunks(sample_chunks, sample_embeddings, batch_size=2) + cur = sqlite_vec_index.connection.cursor() + cur.execute(f"SELECT COUNT(*) FROM {sqlite_vec_index.metadata_table}") + count = cur.fetchone()[0] + assert count == len(sample_chunks) + + +@pytest.mark.asyncio +async def test_query_chunks(sqlite_vec_index, sample_chunks, sample_embeddings): + await sqlite_vec_index.add_chunks(sample_chunks, sample_embeddings) + query_embedding = np.random.rand(EMBEDDING_DIMENSION).astype(np.float32) + response = await sqlite_vec_index.query(query_embedding, k=2, score_threshold=0.0) + assert isinstance(response, QueryChunksResponse) + assert len(response.chunks) == 2 + + +@pytest.mark.asyncio +async def test_chunk_id_conflict(sqlite_vec_index, sample_chunks): + """Test that chunk IDs do not conflict across batches when inserting chunks.""" + # Reduce batch size to force multiple batches for same document + # since there are 10 chunks per document and batch size is 2 + batch_size = 2 + sample_embeddings = np.random.rand(len(sample_chunks), EMBEDDING_DIMENSION).astype(np.float32) + + await sqlite_vec_index.add_chunks(sample_chunks, sample_embeddings, batch_size=batch_size) + + cur = sqlite_vec_index.connection.cursor() + + # Retrieve all chunk IDs to check for duplicates + cur.execute(f"SELECT id FROM {sqlite_vec_index.metadata_table}") + chunk_ids = [row[0] for row in cur.fetchall()] + cur.close() + + # Ensure all chunk IDs are unique + assert len(chunk_ids) == len(set(chunk_ids)), "Duplicate chunk IDs detected across batches!" + + +@pytest.fixture(scope="session") +async def sqlite_vec_adapter(sqlite_connection): + config = type("Config", (object,), {"db_path": ":memory:"}) # Mock config with in-memory database + adapter = SQLiteVecVectorIOAdapter(config=config, inference_api=None) + await adapter.initialize() + yield adapter + await adapter.shutdown() + + +@pytest.mark.asyncio +async def test_register_vector_db(sqlite_vec_adapter): + vector_db = VectorDB( + identifier="test_db", + embedding_model=EMBEDDING_MODEL, + embedding_dimension=EMBEDDING_DIMENSION, + metadata={}, + provider_id=SQLITE_VEC_PROVIDER, + ) + await sqlite_vec_adapter.register_vector_db(vector_db) + vector_dbs = await sqlite_vec_adapter.list_vector_dbs() + assert any(db.identifier == "test_db" for db in vector_dbs) + + +@pytest.mark.asyncio +async def test_unregister_vector_db(sqlite_vec_adapter): + vector_db = VectorDB( + identifier="test_db", + embedding_model=EMBEDDING_MODEL, + embedding_dimension=EMBEDDING_DIMENSION, + metadata={}, + provider_id=SQLITE_VEC_PROVIDER, + ) + await sqlite_vec_adapter.register_vector_db(vector_db) + await sqlite_vec_adapter.unregister_vector_db("test_db") + vector_dbs = await sqlite_vec_adapter.list_vector_dbs() + assert not any(db.identifier == "test_db" for db in vector_dbs) + + +def test_generate_chunk_id(): + chunks = [ + Chunk(content="test", metadata={"document_id": "doc-1"}), + Chunk(content="test ", metadata={"document_id": "doc-1"}), + Chunk(content="test 3", metadata={"document_id": "doc-1"}), + ] + + chunk_ids = sorted([generate_chunk_id(chunk.metadata["document_id"], chunk.content) for chunk in chunks]) + assert chunk_ids == [ + "177a1368-f6a8-0c50-6e92-18677f2c3de3", + "bc744db3-1b25-0a9c-cdff-b6ba3df73c36", + "f68df25d-d9aa-ab4d-5684-64a233add20d", + ] diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py index c5f6cd6b5..0882019e3 100644 --- 
a/llama_stack/providers/utils/inference/model_registry.py
+++ b/llama_stack/providers/utils/inference/model_registry.py
@@ -4,8 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from collections import namedtuple
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel, Field

 from llama_stack.apis.models.models import ModelType
 from llama_stack.models.llama.sku_list import all_registered_models
@@ -14,7 +15,15 @@ from llama_stack.providers.utils.inference import (
     ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR,
 )

-ModelAlias = namedtuple("ModelAlias", ["provider_model_id", "aliases", "llama_model"])
+
+# TODO: this class is more confusing than useful right now. We need to make it
+# closer to the Model class.
+class ProviderModelEntry(BaseModel):
+    provider_model_id: str
+    aliases: List[str] = Field(default_factory=list)
+    llama_model: Optional[str] = None
+    model_type: ModelType = ModelType.llm
+    metadata: Dict[str, Any] = Field(default_factory=dict)


 def get_huggingface_repo(model_descriptor: str) -> Optional[str]:
@@ -24,8 +33,8 @@ def get_huggingface_repo(model_descriptor: str) -> Optional[str]:
     return None


-def build_model_alias(provider_model_id: str, model_descriptor: str) -> ModelAlias:
-    return ModelAlias(
+def build_hf_repo_model_entry(provider_model_id: str, model_descriptor: str) -> ProviderModelEntry:
+    return ProviderModelEntry(
         provider_model_id=provider_model_id,
         aliases=[
             get_huggingface_repo(model_descriptor),
@@ -34,26 +43,29 @@ def build_model_alias(provider_model_id: str, model_descriptor: str) -> ModelAli
     )


-def build_model_alias_with_just_provider_model_id(provider_model_id: str, model_descriptor: str) -> ModelAlias:
-    return ModelAlias(
+def build_model_entry(provider_model_id: str, model_descriptor: str) -> ProviderModelEntry:
+    return ProviderModelEntry(
         provider_model_id=provider_model_id,
         aliases=[],
         llama_model=model_descriptor,
+        model_type=ModelType.llm,
     )


 class ModelRegistryHelper(ModelsProtocolPrivate):
-    def __init__(self, model_aliases: List[ModelAlias]):
+    def __init__(self, model_entries: List[ProviderModelEntry]):
         self.alias_to_provider_id_map = {}
         self.provider_id_to_llama_model_map = {}
-        for alias_obj in model_aliases:
-            for alias in alias_obj.aliases:
-                self.alias_to_provider_id_map[alias] = alias_obj.provider_model_id
+        for entry in model_entries:
+            for alias in entry.aliases:
+                self.alias_to_provider_id_map[alias] = entry.provider_model_id
+
             # also add a mapping from provider model id to itself for easy lookup
-            self.alias_to_provider_id_map[alias_obj.provider_model_id] = alias_obj.provider_model_id
-            # ensure we can go from llama model to provider model id
-            self.alias_to_provider_id_map[alias_obj.llama_model] = alias_obj.provider_model_id
-            self.provider_id_to_llama_model_map[alias_obj.provider_model_id] = alias_obj.llama_model
+            self.alias_to_provider_id_map[entry.provider_model_id] = entry.provider_model_id
+
+            if entry.llama_model:
+                self.alias_to_provider_id_map[entry.llama_model] = entry.provider_model_id
+                self.provider_id_to_llama_model_map[entry.provider_model_id] = entry.llama_model

     def get_provider_model_id(self, identifier: str) -> Optional[str]:
         return self.alias_to_provider_id_map.get(identifier, None)
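`ProviderModelEntry` replaces the old `ModelAlias` namedtuple, and `ModelRegistryHelper` resolves any alias, whether a Hugging Face repo, a Llama descriptor, or the provider model id itself, to the provider's model id. A hedged sketch of that wiring; the provider model id below is a placeholder, not taken from this change:

```python
# Sketch only: registering one entry and resolving aliases through the helper.
from llama_stack.providers.utils.inference.model_registry import (
    ModelRegistryHelper,
    build_hf_repo_model_entry,
)

helper = ModelRegistryHelper(
    model_entries=[
        build_hf_repo_model_entry(
            provider_model_id="provider/llama-3.1-8b",  # placeholder id
            model_descriptor="Llama3.1-8B-Instruct",
        )
    ]
)

# The provider model id always maps to itself; the HF-repo alias maps to it too.
assert helper.get_provider_model_id("provider/llama-3.1-8b") == "provider/llama-3.1-8b"
```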
diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py
index def7e8f37..946d27763 100644
--- a/llama_stack/providers/utils/inference/openai_compat.py
+++ b/llama_stack/providers/utils/inference/openai_compat.py
@@ -7,7 +7,6 @@
 import json
 import logging
 from typing import AsyncGenerator, Dict, List, Optional, Union

-from llama_models.llama3.api.chat_format import ChatFormat
 from openai.types.chat import ChatCompletionMessageToolCall
 from pydantic import BaseModel

@@ -40,6 +39,7 @@ from llama_stack.models.llama.datatypes import (
 )
 from llama_stack.providers.utils.inference.prompt_adapter import (
     convert_image_content_to_url,
+    decode_assistant_message,
 )

 logger = logging.getLogger(__name__)
@@ -149,7 +149,7 @@ def convert_openai_completion_logprobs_stream(text: str, logprobs: Optional[Unio
     return None


-def process_completion_response(response: OpenAICompatCompletionResponse, formatter: ChatFormat) -> CompletionResponse:
+def process_completion_response(response: OpenAICompatCompletionResponse) -> CompletionResponse:
     choice = response.choices[0]
     # drop suffix if present and return stop reason as end of turn
     if choice.text.endswith("<|eot_id|>"):
@@ -174,16 +174,13 @@ def process_completion_response(response: OpenAICompatCompletionResponse, format

 def process_chat_completion_response(
     response: OpenAICompatCompletionResponse,
-    formatter: ChatFormat,
     request: ChatCompletionRequest,
 ) -> ChatCompletionResponse:
     choice = response.choices[0]

     # TODO: This does not work well with tool calls for vLLM remote provider
     #   Ref: https://github.com/meta-llama/llama-stack/issues/1058
-    raw_message = formatter.decode_assistant_message_from_content(
-        text_from_choice(choice), get_stop_reason(choice.finish_reason)
-    )
+    raw_message = decode_assistant_message(text_from_choice(choice), get_stop_reason(choice.finish_reason))

     # NOTE: If we do not set tools in chat-completion request, we should not
     # expect the ToolCall in the response.
Instead, we should return the raw @@ -217,7 +214,7 @@ def process_chat_completion_response( async def process_completion_stream_response( - stream: AsyncGenerator[OpenAICompatCompletionResponse, None], formatter: ChatFormat + stream: AsyncGenerator[OpenAICompatCompletionResponse, None], ) -> AsyncGenerator: stop_reason = None @@ -254,7 +251,6 @@ async def process_completion_stream_response( async def process_chat_completion_stream_response( stream: AsyncGenerator[OpenAICompatCompletionResponse, None], - formatter: ChatFormat, request: ChatCompletionRequest, ) -> AsyncGenerator: yield ChatCompletionResponseStreamChunk( @@ -333,7 +329,7 @@ async def process_chat_completion_stream_response( ) # parse tool calls and report errors - message = formatter.decode_assistant_message_from_content(buffer, stop_reason) + message = decode_assistant_message(buffer, stop_reason) parsed_tool_calls = len(message.tool_calls) > 0 if ipython and not parsed_tool_calls: diff --git a/llama_stack/providers/utils/inference/prompt_adapter.py b/llama_stack/providers/utils/inference/prompt_adapter.py index 2782c661f..10fe442e8 100644 --- a/llama_stack/providers/utils/inference/prompt_adapter.py +++ b/llama_stack/providers/utils/inference/prompt_adapter.py @@ -13,7 +13,9 @@ import re from typing import List, Optional, Tuple, Union import httpx +from llama_models.datatypes import StopReason from llama_models.llama3.api.chat_format import ChatFormat +from llama_models.llama3.api.tokenizer import Tokenizer from PIL import Image as PIL_Image from llama_stack.apis.common.content_types import ( @@ -66,6 +68,11 @@ class CompletionRequestWithRawContent(CompletionRequest): content: RawContent +def decode_assistant_message(content: str, stop_reason: StopReason) -> RawMessage: + formatter = ChatFormat(Tokenizer.get_instance()) + return formatter.decode_assistant_message_from_content(content, stop_reason) + + def interleaved_content_as_str(content: InterleavedContent, sep: str = " ") -> str: def _process(c) -> str: if isinstance(c, str): @@ -207,20 +214,22 @@ async def convert_image_content_to_url( return base64.b64encode(content).decode("utf-8") -async def completion_request_to_prompt(request: CompletionRequest, formatter: ChatFormat) -> str: +async def completion_request_to_prompt(request: CompletionRequest) -> str: content = augment_content_with_response_format_prompt(request.response_format, request.content) request.content = content request = await convert_request_to_raw(request) + + formatter = ChatFormat(tokenizer=Tokenizer.get_instance()) model_input = formatter.encode_content(request.content) return formatter.tokenizer.decode(model_input.tokens) -async def completion_request_to_prompt_model_input_info( - request: CompletionRequest, formatter: ChatFormat -) -> Tuple[str, int]: +async def completion_request_to_prompt_model_input_info(request: CompletionRequest) -> Tuple[str, int]: content = augment_content_with_response_format_prompt(request.response_format, request.content) request.content = content request = await convert_request_to_raw(request) + + formatter = ChatFormat(tokenizer=Tokenizer.get_instance()) model_input = formatter.encode_content(request.content) return (formatter.tokenizer.decode(model_input.tokens), len(model_input.tokens)) @@ -237,22 +246,24 @@ def augment_content_with_response_format_prompt(response_format, content): return content -async def chat_completion_request_to_prompt( - request: ChatCompletionRequest, llama_model: str, formatter: ChatFormat -) -> str: +async def 
chat_completion_request_to_prompt(request: ChatCompletionRequest, llama_model: str) -> str:
     messages = chat_completion_request_to_messages(request, llama_model)
     request.messages = messages
     request = await convert_request_to_raw(request)
+
+    formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
     model_input = formatter.encode_dialog_prompt(request.messages)
     return formatter.tokenizer.decode(model_input.tokens)


 async def chat_completion_request_to_model_input_info(
-    request: ChatCompletionRequest, llama_model: str, formatter: ChatFormat
+    request: ChatCompletionRequest, llama_model: str
 ) -> Tuple[str, int]:
     messages = chat_completion_request_to_messages(request, llama_model)
     request.messages = messages
     request = await convert_request_to_raw(request)
+
+    formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
     model_input = formatter.encode_dialog_prompt(request.messages)
     return (
         formatter.tokenizer.decode(model_input.tokens),
diff --git a/llama_stack/providers/utils/kvstore/config.py b/llama_stack/providers/utils/kvstore/config.py
index 85327c131..b9403df32 100644
--- a/llama_stack/providers/utils/kvstore/config.py
+++ b/llama_stack/providers/utils/kvstore/config.py
@@ -18,6 +18,7 @@ class KVStoreType(Enum):
     redis = "redis"
     sqlite = "sqlite"
    postgres = "postgres"
+    mongodb = "mongodb"


 class CommonConfig(BaseModel):
@@ -101,7 +102,30 @@ class PostgresKVStoreConfig(CommonConfig):
         return v


+class MongoDBKVStoreConfig(CommonConfig):
+    type: Literal[KVStoreType.mongodb.value] = KVStoreType.mongodb.value
+    host: str = "localhost"
+    port: int = 27017
+    db: str = "llamastack"
+    user: Optional[str] = None
+    password: Optional[str] = None
+    collection_name: str = "llamastack_kvstore"
+
+    @classmethod
+    def sample_run_config(cls, collection_name: str = "llamastack_kvstore"):
+        return {
+            "type": "mongodb",
+            "namespace": None,
+            "host": "${env.MONGODB_HOST:localhost}",
+            "port": "${env.MONGODB_PORT:27017}",
+            "db": "${env.MONGODB_DB}",
+            "user": "${env.MONGODB_USER}",
+            "password": "${env.MONGODB_PASSWORD}",
+            "collection_name": "${env.MONGODB_COLLECTION_NAME:" + collection_name + "}",
+        }
+
+
 KVStoreConfig = Annotated[
-    Union[RedisKVStoreConfig, SqliteKVStoreConfig, PostgresKVStoreConfig],
+    Union[RedisKVStoreConfig, SqliteKVStoreConfig, PostgresKVStoreConfig, MongoDBKVStoreConfig],
     Field(discriminator="type", default=KVStoreType.sqlite.value),
 ]
diff --git a/llama_stack/providers/utils/kvstore/kvstore.py b/llama_stack/providers/utils/kvstore/kvstore.py
index 32b4e40dd..6bc175260 100644
--- a/llama_stack/providers/utils/kvstore/kvstore.py
+++ b/llama_stack/providers/utils/kvstore/kvstore.py
@@ -11,7 +11,7 @@ from .config import KVStoreConfig, KVStoreType


 def kvstore_dependencies():
-    return ["aiosqlite", "psycopg2-binary", "redis"]
+    return ["aiosqlite", "psycopg2-binary", "redis", "pymongo"]


 class InmemoryKVStoreImpl(KVStore):
@@ -44,6 +44,10 @@ async def kvstore_impl(config: KVStoreConfig) -> KVStore:
         from .postgres import PostgresKVStoreImpl

         impl = PostgresKVStoreImpl(config)
+    elif config.type == KVStoreType.mongodb.value:
+        from .mongodb import MongoDBKVStoreImpl
+
+        impl = MongoDBKVStoreImpl(config)
     else:
         raise ValueError(f"Unknown kvstore type {config.type}")
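With the new `mongodb` variant in the `KVStoreConfig` union, `kvstore_impl` dispatches on the `type` discriminator. A minimal sketch of selecting the backend; it assumes `kvstore_impl` initializes the store before returning it, as it does for the existing sqlite/postgres/redis branches:

```python
# Sketch only: routing a MongoDB config through the kvstore factory.
from llama_stack.providers.utils.kvstore.config import MongoDBKVStoreConfig
from llama_stack.providers.utils.kvstore.kvstore import kvstore_impl


async def demo() -> None:
    config = MongoDBKVStoreConfig(db="llamastack", collection_name="llamastack_kvstore")
    store = await kvstore_impl(config)  # config.type == "mongodb" selects MongoDBKVStoreImpl
    await store.set("registry:model:foo", "bar")
    assert await store.get("registry:model:foo") == "bar"
```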
diff --git a/llama_stack/providers/utils/kvstore/mongodb/__init__.py b/llama_stack/providers/utils/kvstore/mongodb/__init__.py
new file mode 100644
index 000000000..a42fee76f
--- /dev/null
+++ b/llama_stack/providers/utils/kvstore/mongodb/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .mongodb import MongoDBKVStoreImpl
+
+__all__ = ["MongoDBKVStoreImpl"]
diff --git a/llama_stack/providers/utils/kvstore/mongodb/mongodb.py b/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
new file mode 100644
index 000000000..965b4e213
--- /dev/null
+++ b/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
@@ -0,0 +1,66 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import logging
+from datetime import datetime
+from typing import List, Optional
+
+from pymongo import MongoClient
+
+from llama_stack.providers.utils.kvstore import KVStore, MongoDBKVStoreConfig
+
+log = logging.getLogger(__name__)
+
+
+class MongoDBKVStoreImpl(KVStore):
+    def __init__(self, config: MongoDBKVStoreConfig):
+        self.config = config
+        self.conn = None
+        self.collection = None
+
+    async def initialize(self) -> None:
+        try:
+            conn_creds = {
+                "host": self.config.host,
+                "port": self.config.port,
+                "username": self.config.user,
+                "password": self.config.password,
+            }
+            conn_creds = {k: v for k, v in conn_creds.items() if v is not None}
+            self.conn = MongoClient(**conn_creds)
+            self.collection = self.conn[self.config.db][self.config.collection_name]
+        except Exception as e:
+            log.exception("Could not connect to MongoDB database server")
+            raise RuntimeError("Could not connect to MongoDB database server") from e
+
+    def _namespaced_key(self, key: str) -> str:
+        if not self.config.namespace:
+            return key
+        return f"{self.config.namespace}:{key}"
+
+    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None:
+        key = self._namespaced_key(key)
+        update_query = {"$set": {"value": value, "expiration": expiration}}
+        self.collection.update_one({"key": key}, update_query, upsert=True)
+
+    async def get(self, key: str) -> Optional[str]:
+        key = self._namespaced_key(key)
+        query = {"key": key}
+        result = self.collection.find_one(query, {"value": 1, "_id": 0})
+        return result["value"] if result else None
+
+    async def delete(self, key: str) -> None:
+        key = self._namespaced_key(key)
+        self.collection.delete_one({"key": key})
+
+    async def range(self, start_key: str, end_key: str) -> List[str]:
+        start_key = self._namespaced_key(start_key)
+        end_key = self._namespaced_key(end_key)
+        query = {
+            "key": {"$gte": start_key, "$lt": end_key},
+        }
+        cursor = self.collection.find(query, {"value": 1, "_id": 0}).sort("key", 1)
+        return [doc["value"] for doc in cursor]
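The implementation prefixes every key with the optional namespace and treats `range()` as a half-open `[start_key, end_key)` scan sorted by key. A behavior sketch with hypothetical values; the `"\xff"` end sentinel is a common prefix-scan convention, not an API from this change:

```python
# Hypothetical usage of the store above; with namespace "agents", keys are
# persisted as "agents:<key>" and range() filters on the namespaced bounds.
from llama_stack.providers.utils.kvstore.config import MongoDBKVStoreConfig
from llama_stack.providers.utils.kvstore.mongodb import MongoDBKVStoreImpl


async def demo() -> None:
    store = MongoDBKVStoreImpl(MongoDBKVStoreConfig(namespace="agents"))
    await store.initialize()
    await store.set("session:001", "alpha")
    await store.set("session:002", "beta")
    # Half-open scan: returns values for keys in [start, end), sorted by key.
    assert await store.range("session:", "session:\xff") == ["alpha", "beta"]
```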
diff --git a/llama_stack/schema_utils.py b/llama_stack/schema_utils.py
index 56b9e5e4c..581404844 100644
--- a/llama_stack/schema_utils.py
+++ b/llama_stack/schema_utils.py
@@ -19,6 +19,7 @@ class WebMethod:
     request_examples: Optional[List[Any]] = None
     response_examples: Optional[List[Any]] = None
     method: Optional[str] = None
+    raw_bytes_request_body: Optional[bool] = False


 def webmethod(
@@ -27,6 +28,7 @@ def webmethod(
     public: Optional[bool] = False,
     request_examples: Optional[List[Any]] = None,
     response_examples: Optional[List[Any]] = None,
+    raw_bytes_request_body: Optional[bool] = False,
 ) -> Callable[[T], T]:
     """
     Decorator that supplies additional metadata to an endpoint operation function.
@@ -44,6 +46,7 @@ def webmethod(
            public=public or False,
            request_examples=request_examples,
            response_examples=response_examples,
+           raw_bytes_request_body=raw_bytes_request_body,
        )
        return cls
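`raw_bytes_request_body` rides along on the `WebMethod` metadata so the server and OpenAPI generator can treat the request body as an opaque byte stream. A hypothetical endpoint using the flag; the route, method keyword, and signature are invented for illustration, and only the flag itself comes from this change:

```python
# Hypothetical protocol method; assumes webmethod() also accepts the existing
# route/method keywords that WebMethod carries.
from typing import Protocol

from llama_stack.schema_utils import webmethod


class Files(Protocol):
    @webmethod(route="/files/upload", method="POST", raw_bytes_request_body=True)
    async def upload_content(self, request_body: bytes) -> None:
        """Receive the request body as raw bytes instead of parsed JSON."""
        ...
```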
diff --git a/llama_stack/scripts/distro_codegen.py b/llama_stack/scripts/distro_codegen.py
index 825a039ef..1c44b4625 100644
--- a/llama_stack/scripts/distro_codegen.py
+++ b/llama_stack/scripts/distro_codegen.py
@@ -23,6 +23,22 @@ from llama_stack.distribution.build import (
 REPO_ROOT = Path(__file__).parent.parent.parent


+class ChangedPathTracker:
+    """Track a list of paths we may have changed."""
+
+    def __init__(self):
+        self._changed_paths = []
+
+    def add_paths(self, *paths):
+        for path in paths:
+            path = str(path)
+            if path not in self._changed_paths:
+                self._changed_paths.append(path)
+
+    def changed_paths(self):
+        return self._changed_paths
+
+
 def find_template_dirs(templates_dir: Path) -> Iterator[Path]:
     """Find immediate subdirectories in the templates folder."""
     if not templates_dir.exists():
@@ -31,7 +47,7 @@ def find_template_dirs(templates_dir: Path) -> Iterator[Path]:
     return sorted(d for d in templates_dir.iterdir() if d.is_dir() and d.name != "__pycache__")


-def process_template(template_dir: Path, progress) -> None:
+def process_template(template_dir: Path, progress, change_tracker: ChangedPathTracker) -> None:
     """Process a single template directory."""
     progress.print(f"Processing {template_dir.name}")

@@ -44,9 +60,12 @@ def process_template(template_dir: Path, progress) -> None:
         if template_func := getattr(module, "get_distribution_template", None):
             template = template_func()

+            yaml_output_dir = REPO_ROOT / "llama_stack" / "templates" / template.name
+            doc_output_dir = REPO_ROOT / "docs/source/distributions" / f"{template.distro_type}_distro"
+            change_tracker.add_paths(yaml_output_dir, doc_output_dir)
             template.save_distribution(
-                yaml_output_dir=REPO_ROOT / "llama_stack" / "templates" / template.name,
-                doc_output_dir=REPO_ROOT / "docs/source/distributions" / f"{template.distro_type}_distro",
+                yaml_output_dir=yaml_output_dir,
+                doc_output_dir=doc_output_dir,
             )
         else:
             progress.print(f"[yellow]Warning: {template_dir.name} has no get_distribution_template function")
@@ -56,14 +75,19 @@ def process_template(template_dir: Path, progress) -> None:
         raise e


-def check_for_changes() -> bool:
+def check_for_changes(change_tracker: ChangedPathTracker) -> bool:
     """Check if there are any uncommitted changes."""
-    result = subprocess.run(
-        ["git", "diff", "--exit-code"],
-        cwd=REPO_ROOT,
-        capture_output=True,
-    )
-    return result.returncode != 0
+    has_changes = False
+    for path in change_tracker.changed_paths():
+        result = subprocess.run(
+            ["git", "diff", "--exit-code", path],
+            cwd=REPO_ROOT,
+            capture_output=True,
+        )
+        if result.returncode != 0:
+            print(f"Change detected in '{path}'.", file=sys.stderr)
+            has_changes = True
+    return has_changes


 def collect_template_dependencies(template_dir: Path) -> tuple[str, list[str]]:
@@ -83,7 +107,7 @@ def collect_template_dependencies(template_dir: Path) -> tuple[str, list[str]]:
     return None, []


-def generate_dependencies_file():
+def generate_dependencies_file(change_tracker: ChangedPathTracker):
     templates_dir = REPO_ROOT / "llama_stack" / "templates"
     distribution_deps = {}

@@ -93,12 +117,14 @@ def generate_dependencies_file():
         distribution_deps[name] = deps

     deps_file = REPO_ROOT / "distributions" / "dependencies.json"
+    change_tracker.add_paths(deps_file)
     with open(deps_file, "w") as f:
         f.write(json.dumps(distribution_deps, indent=2) + "\n")


 def main():
     templates_dir = REPO_ROOT / "llama_stack" / "templates"
+    change_tracker = ChangedPathTracker()

     with Progress(
         SpinnerColumn(),
@@ -108,7 +134,7 @@ def main():
         task = progress.add_task("Processing distribution templates...", total=len(template_dirs))

         # Create a partial function with the progress bar
-        process_func = partial(process_template, progress=progress)
+        process_func = partial(process_template, progress=progress, change_tracker=change_tracker)

         # Process templates in parallel
         with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -116,9 +142,9 @@ def main():
             list(executor.map(process_func, template_dirs))
             progress.update(task, advance=len(template_dirs))

-    generate_dependencies_file()
+    generate_dependencies_file(change_tracker)

-    if check_for_changes():
+    if check_for_changes(change_tracker):
         print(
             "Distribution template changes detected. Please commit the changes.",
             file=sys.stderr,
diff --git a/llama_stack/scripts/run_client_sdk_tests.py b/llama_stack/scripts/run_client_sdk_tests.py
new file mode 100644
index 000000000..1e2ef1ac8
--- /dev/null
+++ b/llama_stack/scripts/run_client_sdk_tests.py
@@ -0,0 +1,66 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import argparse
+import os
+from pathlib import Path
+
+import pytest
+
+"""
+Script for running the client-sdk tests against AsyncLlamaStackAsLibraryClient with the given templates
+
+Assuming directory structure:
+- llama-stack
+  - llama_stack
+    - scripts
+  - tests
+    - client-sdk
+
+Example command:
+
+cd llama-stack
+export TOGETHER_API_KEY=<..>
+export FIREWORKS_API_KEY=<..>
+python llama_stack/scripts/run_client_sdk_tests.py --templates together fireworks --report
+"""
+
+REPO_ROOT = Path(__file__).parent.parent.parent
+CLIENT_SDK_TESTS_RELATIVE_PATH = "tests/client-sdk/"
+
+
+def main(parser: argparse.ArgumentParser):
+    args = parser.parse_args()
+    templates_dir = REPO_ROOT / "llama_stack" / "templates"
+    user_specified_templates = [templates_dir / t for t in args.templates] if args.templates else []
+    for d in templates_dir.iterdir():
+        if d.is_dir() and d.name != "__pycache__":
+            template_configs = list(d.rglob("run.yaml"))
+            if len(template_configs) == 0:
+                continue
+            config = template_configs[0]
+            if user_specified_templates:
+                if not any(config.parent == t for t in user_specified_templates):
+                    continue
+            os.environ["LLAMA_STACK_CONFIG"] = str(config)
+            pytest_args = ["--report"] if args.report else []
+            pytest.main(
+                [
+                    *pytest_args,
+                    "-s",
+                    "-v",
+                    str(REPO_ROOT / CLIENT_SDK_TESTS_RELATIVE_PATH),
+                ]
+            )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        prog="llama_test",
+    )
+    parser.add_argument("--templates", nargs="+")
+    parser.add_argument("--report", action="store_true")
+    main(parser)
diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py
index 0b294824d..628e78612 100644
--- a/llama_stack/templates/bedrock/bedrock.py
+++ b/llama_stack/templates/bedrock/bedrock.py
@@ -10,7 +10,7 @@ from llama_stack.apis.models import ModelInput
 from llama_stack.distribution.datatypes import Provider, ToolGroupInput
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
-from llama_stack.providers.remote.inference.bedrock.bedrock import MODEL_ALIASES
+from llama_stack.providers.remote.inference.bedrock.models import
MODEL_ENTRIES from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -47,7 +47,7 @@ def get_distribution_template() -> DistributionTemplate: provider_model_id=m.provider_model_id, provider_id="bedrock", ) - for m in MODEL_ALIASES + for m in MODEL_ENTRIES ] default_tool_groups = [ ToolGroupInput( diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py index 4f6d0c8f3..c467579ac 100644 --- a/llama_stack/templates/cerebras/cerebras.py +++ b/llama_stack/templates/cerebras/cerebras.py @@ -14,7 +14,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import ( ) from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig -from llama_stack.providers.remote.inference.cerebras.cerebras import model_aliases +from llama_stack.providers.remote.inference.cerebras.models import model_entries from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -55,7 +55,7 @@ def get_distribution_template() -> DistributionTemplate: provider_model_id=m.provider_model_id, provider_id="cerebras", ) - for m in model_aliases + for m in model_entries ] embedding_model = ModelInput( model_id="all-MiniLM-L6-v2", diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py index a6809fef6..06b851551 100644 --- a/llama_stack/templates/fireworks/fireworks.py +++ b/llama_stack/templates/fireworks/fireworks.py @@ -19,7 +19,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import ( ) from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig -from llama_stack.providers.remote.inference.fireworks.fireworks import MODEL_ALIASES +from llama_stack.providers.remote.inference.fireworks.models import MODEL_ENTRIES from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -63,11 +63,13 @@ def get_distribution_template() -> DistributionTemplate: core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()} default_models = [ ModelInput( - model_id=core_model_to_hf_repo[m.llama_model], + model_id=core_model_to_hf_repo[m.llama_model] if m.llama_model else m.provider_model_id, provider_model_id=m.provider_model_id, provider_id="fireworks", + metadata=m.metadata, + model_type=m.model_type, ) - for m in MODEL_ALIASES + for m in MODEL_ENTRIES ] embedding_model = ModelInput( model_id="all-MiniLM-L6-v2", diff --git a/llama_stack/templates/fireworks/run-with-safety.yaml b/llama_stack/templates/fireworks/run-with-safety.yaml index 8f95e9d59..6f622c7d9 100644 --- a/llama_stack/templates/fireworks/run-with-safety.yaml +++ b/llama_stack/templates/fireworks/run-with-safety.yaml @@ -149,6 +149,13 @@ models: provider_id: fireworks provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision model_type: llm +- metadata: + embedding_dimension: 768 + context_length: 8192 + model_id: nomic-ai/nomic-embed-text-v1.5 + provider_id: fireworks + provider_model_id: nomic-ai/nomic-embed-text-v1.5 + model_type: embedding - metadata: embedding_dimension: 384 model_id: all-MiniLM-L6-v2 diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml index 64229a5d8..e6d21d10d 100644 --- a/llama_stack/templates/fireworks/run.yaml +++ b/llama_stack/templates/fireworks/run.yaml @@ 
-143,6 +143,13 @@ models: provider_id: fireworks provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision model_type: llm +- metadata: + embedding_dimension: 768 + context_length: 8192 + model_id: nomic-ai/nomic-embed-text-v1.5 + provider_id: fireworks + provider_model_id: nomic-ai/nomic-embed-text-v1.5 + model_type: embedding - metadata: embedding_dimension: 384 model_id: all-MiniLM-L6-v2 diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py index b6f85949b..035784641 100644 --- a/llama_stack/templates/nvidia/nvidia.py +++ b/llama_stack/templates/nvidia/nvidia.py @@ -10,7 +10,7 @@ from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput from llama_stack.models.llama.sku_list import all_registered_models from llama_stack.providers.remote.safety.nvidia import NVIDIASafetyConfig from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig -from llama_stack.providers.remote.inference.nvidia.nvidia import _MODEL_ALIASES +from llama_stack.providers.remote.inference.nvidia.models import _MODEL_ENTRIES from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -59,7 +59,7 @@ def get_distribution_template() -> DistributionTemplate: provider_model_id=m.provider_model_id, provider_id="nvidia", ) - for m in _MODEL_ALIASES + for m in _MODEL_ENTRIES ] default_tool_groups = [ ToolGroupInput( diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml index 48960c5ba..0fee6808c 100644 --- a/llama_stack/templates/ollama/build.yaml +++ b/llama_stack/templates/ollama/build.yaml @@ -6,7 +6,6 @@ distribution_spec: - remote::ollama vector_io: - inline::faiss - - inline::sqlite_vec - remote::chromadb - remote::pgvector safety: diff --git a/llama_stack/templates/ollama/doc_template.md b/llama_stack/templates/ollama/doc_template.md index 29efe39c3..1d95e4b65 100644 --- a/llama_stack/templates/ollama/doc_template.md +++ b/llama_stack/templates/ollama/doc_template.md @@ -119,7 +119,7 @@ llama stack run ./run-with-safety.yaml \ ### (Optional) Update Model Serving Configuration ```{note} -Please check the [model_aliases](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models. +Please check the [model_entries](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models. 
 ```

 To serve a new model with `ollama`
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index f3383cd5a..31119e040 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -71,7 +71,8 @@ def get_distribution_template() -> DistributionTemplate:
     )
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
-        provider_id="sentence-transformers",
+        provider_id="ollama",
+        provider_model_id="all-minilm:latest",
         model_type=ModelType.embedding,
         metadata={
             "embedding_dimension": 384,
diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml
index 9d5bfc7a0..7cf527c04 100644
--- a/llama_stack/templates/ollama/run-with-safety.yaml
+++ b/llama_stack/templates/ollama/run-with-safety.yaml
@@ -20,6 +20,13 @@ providers:
       provider_type: inline::sentence-transformers
       config: {}
   vector_io:
+  - provider_id: faiss
+    provider_type: inline::faiss
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/faiss_store.db
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -103,7 +110,8 @@ models:
 - metadata:
     embedding_dimension: 384
   model_id: all-MiniLM-L6-v2
-  provider_id: sentence-transformers
+  provider_id: ollama
+  provider_model_id: all-minilm:latest
   model_type: embedding
 shields:
 - shield_id: ${env.SAFETY_MODEL}
diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml
index 9ac1f3267..ab292c5e0 100644
--- a/llama_stack/templates/ollama/run.yaml
+++ b/llama_stack/templates/ollama/run.yaml
@@ -34,7 +34,6 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/sqlite_vec.db
-      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/sqlite_vec.db
   safety:
   - provider_id: llama-guard
     provider_type: inline::llama-guard
@@ -104,7 +103,8 @@ models:
 - metadata:
     embedding_dimension: 384
   model_id: all-MiniLM-L6-v2
-  provider_id: sentence-transformers
+  provider_id: ollama
+  provider_model_id: all-minilm:latest
   model_type: embedding
 shields: []
 vector_dbs: []
diff --git a/llama_stack/templates/passthrough/build.yaml b/llama_stack/templates/passthrough/build.yaml
new file mode 100644
index 000000000..5fed5286e
--- /dev/null
+++ b/llama_stack/templates/passthrough/build.yaml
@@ -0,0 +1,32 @@
+version: '2'
+distribution_spec:
+  description: Use for running LLM inference with an endpoint compatible with the Llama Stack API
+  providers:
+    inference:
+    - remote::passthrough
+    vector_io:
+    - inline::faiss
+    - remote::chromadb
+    - remote::pgvector
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+    eval:
+    - inline::meta-reference
+    datasetio:
+    - remote::huggingface
+    - inline::localfs
+    scoring:
+    - inline::basic
+    - inline::llm-as-judge
+    - inline::braintrust
+    tool_runtime:
+    - remote::brave-search
+    - remote::tavily-search
+    - inline::code-interpreter
+    - inline::rag-runtime
+    - remote::model-context-protocol
+image_type: conda
diff --git a/llama_stack/templates/passthrough/run.yaml b/llama_stack/templates/passthrough/run.yaml
new file mode 100644
index 000000000..2548faa5d
--- /dev/null
+++ b/llama_stack/templates/passthrough/run.yaml
@@ -0,0 +1,120 @@
+version: '2'
+image_name: passthrough
+apis:
+- agents
+- datasetio
+- eval
+- inference
+- safety
+- scoring
+- telemetry
+- tool_runtime
+- vector_io
+providers:
+  inference:
+  - provider_id: passthrough
provider_type: remote::passthrough + config: + url: ${env.PASSTHROUGH_URL} + api_key: ${env.PASSTHROUGH_API_KEY} + - provider_id: sentence-transformers + provider_type: inline::sentence-transformers + config: {} + vector_io: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + service_name: ${env.OTEL_SERVICE_NAME:llama-stack} + sinks: ${env.TELEMETRY_SINKS:console,sqlite} + sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/passthrough/trace_store.db} + eval: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} + datasetio: + - provider_id: huggingface + provider_type: remote::huggingface + config: {} + - provider_id: localfs + provider_type: inline::localfs + config: {} + scoring: + - provider_id: basic + provider_type: inline::basic + config: {} + - provider_id: llm-as-judge + provider_type: inline::llm-as-judge + config: {} + - provider_id: braintrust + provider_type: inline::braintrust + config: + openai_api_key: ${env.OPENAI_API_KEY:} + tool_runtime: + - provider_id: brave-search + provider_type: remote::brave-search + config: + api_key: ${env.BRAVE_SEARCH_API_KEY:} + max_results: 3 + - provider_id: tavily-search + provider_type: remote::tavily-search + config: + api_key: ${env.TAVILY_SEARCH_API_KEY:} + max_results: 3 + - provider_id: code-interpreter + provider_type: inline::code-interpreter + config: {} + - provider_id: rag-runtime + provider_type: inline::rag-runtime + config: {} + - provider_id: model-context-protocol + provider_type: remote::model-context-protocol + config: {} +metadata_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-llama}/registry.db +models: +- metadata: {} + model_id: meta-llama/Llama-3.1-8B-Instruct + provider_id: passthrough + provider_model_id: llama3.1-8b-instruct + model_type: llm +- metadata: {} + model_id: meta-llama/Llama-3.2-11B-Vision-Instruct + provider_id: passthrough + provider_model_id: llama3.2-11b-vision-instruct + model_type: llm +shields: +- shield_id: meta-llama/Llama-Guard-3-8B +vector_dbs: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] +tool_groups: +- toolgroup_id: builtin::websearch + provider_id: tavily-search +- toolgroup_id: builtin::rag + provider_id: rag-runtime +- toolgroup_id: builtin::code_interpreter + provider_id: code-interpreter +server: + port: 8321 diff --git a/llama_stack/templates/sambanova/sambanova.py b/llama_stack/templates/sambanova/sambanova.py index c7a9428af..725c6abc4 100644 --- a/llama_stack/templates/sambanova/sambanova.py +++ b/llama_stack/templates/sambanova/sambanova.py @@ -14,7 +14,7 @@ from llama_stack.distribution.datatypes import ( ) from llama_stack.models.llama.sku_list import all_registered_models from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig -from llama_stack.providers.remote.inference.sambanova.sambanova import MODEL_ALIASES +from llama_stack.providers.remote.inference.sambanova.models import MODEL_ENTRIES from 
llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -47,7 +47,7 @@ def get_distribution_template() -> DistributionTemplate: provider_model_id=m.provider_model_id, provider_id=name, ) - for m in MODEL_ALIASES + for m in MODEL_ENTRIES ] default_tool_groups = [ diff --git a/llama_stack/templates/together/run-with-safety.yaml b/llama_stack/templates/together/run-with-safety.yaml index f101a5d60..9193a3ef6 100644 --- a/llama_stack/templates/together/run-with-safety.yaml +++ b/llama_stack/templates/together/run-with-safety.yaml @@ -144,6 +144,20 @@ models: provider_id: together provider_model_id: meta-llama/Llama-Guard-3-11B-Vision-Turbo model_type: llm +- metadata: + embedding_dimension: 768 + context_length: 8192 + model_id: togethercomputer/m2-bert-80M-8k-retrieval + provider_id: together + provider_model_id: togethercomputer/m2-bert-80M-8k-retrieval + model_type: embedding +- metadata: + embedding_dimension: 768 + context_length: 32768 + model_id: togethercomputer/m2-bert-80M-32k-retrieval + provider_id: together + provider_model_id: togethercomputer/m2-bert-80M-32k-retrieval + model_type: embedding - metadata: embedding_dimension: 384 model_id: all-MiniLM-L6-v2 diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml index 8af85979d..32ddf7b16 100644 --- a/llama_stack/templates/together/run.yaml +++ b/llama_stack/templates/together/run.yaml @@ -138,6 +138,20 @@ models: provider_id: together provider_model_id: meta-llama/Llama-Guard-3-11B-Vision-Turbo model_type: llm +- metadata: + embedding_dimension: 768 + context_length: 8192 + model_id: togethercomputer/m2-bert-80M-8k-retrieval + provider_id: together + provider_model_id: togethercomputer/m2-bert-80M-8k-retrieval + model_type: embedding +- metadata: + embedding_dimension: 768 + context_length: 32768 + model_id: togethercomputer/m2-bert-80M-32k-retrieval + provider_id: together + provider_model_id: togethercomputer/m2-bert-80M-32k-retrieval + model_type: embedding - metadata: embedding_dimension: 384 model_id: all-MiniLM-L6-v2 diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py index f7b18e32a..d275b7238 100644 --- a/llama_stack/templates/together/together.py +++ b/llama_stack/templates/together/together.py @@ -19,7 +19,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import ( ) from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.remote.inference.together import TogetherImplConfig -from llama_stack.providers.remote.inference.together.together import MODEL_ALIASES +from llama_stack.providers.remote.inference.together.models import MODEL_ENTRIES from llama_stack.templates.template import DistributionTemplate, RunConfigSettings @@ -61,11 +61,13 @@ def get_distribution_template() -> DistributionTemplate: core_model_to_hf_repo = {m.descriptor(): m.huggingface_repo for m in all_registered_models()} default_models = [ ModelInput( - model_id=core_model_to_hf_repo[m.llama_model], + model_id=core_model_to_hf_repo[m.llama_model] if m.llama_model else m.provider_model_id, provider_model_id=m.provider_model_id, provider_id="together", + metadata=m.metadata, + model_type=m.model_type, ) - for m in MODEL_ALIASES + for m in MODEL_ENTRIES ] default_tool_groups = [ ToolGroupInput( diff --git a/pyproject.toml b/pyproject.toml index 71af2cc99..c8ed5737b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ dependencies = [ dev = [ "pytest", 
"pytest-asyncio", + "pytest-html", "nbval", # For notebook testing "black", "ruff", @@ -49,6 +50,19 @@ dev = [ "pre-commit", "uvicorn", "fastapi", + "ruamel.yaml", # needed for openapi generator +] +test = [ + "openai", + "aiosqlite", + "ollama", + "torch>=2.6.0", + "fairscale>=0.4.13", + "torchvision>=0.21.0", + "lm-format-enforcer>=0.10.9", + "groq", + "opentelemetry-sdk", + "opentelemetry-exporter-otlp-proto-http", ] docs = [ "sphinx-autobuild", @@ -78,6 +92,10 @@ name = "pytorch-cpu" url = "https://download.pytorch.org/whl/cpu" explicit = true +[tool.uv.sources] +torch = [{ index = "pytorch-cpu" }] +torchvision = [{ index = "pytorch-cpu" }] + [tool.ruff] line-length = 120 exclude = [ diff --git a/requirements.txt b/requirements.txt index b72c240bc..02e1a8655 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ distro==1.9.0 exceptiongroup==1.2.2 ; python_full_version < '3.11' filelock==3.17.0 fire==0.7.0 -fsspec==2024.12.0 +fsspec==2025.2.0 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 @@ -23,11 +23,11 @@ jsonschema==4.23.0 jsonschema-specifications==2024.10.1 llama-models==0.1.3 llama-stack-client==0.1.3 -lxml==5.3.0 +lxml==5.3.1 markdown-it-py==3.0.0 markupsafe==3.0.2 mdurl==0.1.2 -numpy==2.2.2 +numpy==2.2.3 packaging==24.2 pandas==2.2.3 pillow==11.1.0 @@ -50,7 +50,7 @@ setuptools==75.8.0 six==1.17.0 sniffio==1.3.1 termcolor==2.5.0 -tiktoken==0.8.0 +tiktoken==0.9.0 tqdm==4.67.1 typing-extensions==4.12.2 tzdata==2025.1 diff --git a/tests/client-sdk/inference/test_text_inference.py b/tests/client-sdk/inference/test_text_inference.py index 6a113c463..1fe53ab86 100644 --- a/tests/client-sdk/inference/test_text_inference.py +++ b/tests/client-sdk/inference/test_text_inference.py @@ -7,6 +7,8 @@ import pytest from pydantic import BaseModel +from llama_stack.providers.tests.test_cases.test_case import TestCase + PROVIDER_TOOL_PROMPT_FORMAT = { "remote::ollama": "json", "remote::together": "json", @@ -120,16 +122,16 @@ def test_completion_log_probs_streaming(llama_stack_client, text_model_id, infer assert not chunk.logprobs, "Logprobs should be empty" -def test_text_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type): - user_input = """ - Michael Jordan was born in 1963. He played basketball for the Chicago Bulls. He retired in 2003. 
- """ - +@pytest.mark.parametrize("test_case", ["completion-01"]) +def test_text_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type, test_case): class AnswerFormat(BaseModel): name: str year_born: str year_retired: str + tc = TestCase(test_case) + + user_input = tc["user_input"] response = llama_stack_client.inference.completion( model_id=text_model_id, content=user_input, @@ -143,9 +145,10 @@ def test_text_completion_structured_output(llama_stack_client, text_model_id, in }, ) answer = AnswerFormat.model_validate_json(response.content) - assert answer.name == "Michael Jordan" - assert answer.year_born == "1963" - assert answer.year_retired == "2003" + expected = tc["expected"] + assert answer.name == expected["name"] + assert answer.year_born == expected["year_born"] + assert answer.year_retired == expected["year_retired"] @pytest.mark.parametrize( @@ -247,6 +250,7 @@ def test_text_chat_completion_with_tool_calling_and_streaming( assert tool_invocation_content == "[get_weather, {'location': 'San Francisco, CA'}]" +@pytest.mark.parametrize("test_case", ["chat_completion-01"]) def test_text_chat_completion_with_tool_choice_required( llama_stack_client, text_model_id, get_weather_tool_definition, provider_tool_format, inference_provider_type ): @@ -281,25 +285,18 @@ def test_text_chat_completion_with_tool_choice_none( assert tool_invocation_content == "" -def test_text_chat_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type): +def test_text_chat_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type, test_case): class AnswerFormat(BaseModel): first_name: str last_name: str year_of_birth: int num_seasons_in_nba: int + tc = TestCase(test_case) + response = llama_stack_client.inference.chat_completion( model_id=text_model_id, - messages=[ - { - "role": "system", - "content": "You are a helpful assistant. Michael Jordan was born in 1963. 
@@ -281,25 +284,19 @@ def test_text_chat_completion_with_tool_choice_none(
     assert tool_invocation_content == ""


-def test_text_chat_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type):
+@pytest.mark.parametrize("test_case", ["chat_completion-01"])
+def test_text_chat_completion_structured_output(llama_stack_client, text_model_id, inference_provider_type, test_case):
     class AnswerFormat(BaseModel):
         first_name: str
         last_name: str
         year_of_birth: int
         num_seasons_in_nba: int

+    tc = TestCase(test_case)
+
     response = llama_stack_client.inference.chat_completion(
         model_id=text_model_id,
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a helpful assistant. Michael Jordan was born in 1963. He played basketball for the Chicago Bulls for 15 seasons.",
-            },
-            {
-                "role": "user",
-                "content": "Please give me information about Michael Jordan.",
-            },
-        ],
+        messages=tc["messages"],
         response_format={
             "type": "json_schema",
             "json_schema": AnswerFormat.model_json_schema(),
@@ -307,10 +304,11 @@ def test_text_chat_completion_structured_output(llama_stack_client, text_model_i
         stream=False,
     )
     answer = AnswerFormat.model_validate_json(response.completion_message.content)
-    assert answer.first_name == "Michael"
-    assert answer.last_name == "Jordan"
-    assert answer.year_of_birth == 1963
-    assert answer.num_seasons_in_nba == 15
+    expected = tc["expected"]
+    assert answer.first_name == expected["first_name"]
+    assert answer.last_name == expected["last_name"]
+    assert answer.year_of_birth == expected["year_of_birth"]
+    assert answer.num_seasons_in_nba == expected["num_seasons_in_nba"]


 @pytest.mark.parametrize(
diff --git a/tests/client-sdk/report.py b/tests/client-sdk/report.py
index d36fa827f..b946b85ba 100644
--- a/tests/client-sdk/report.py
+++ b/tests/client-sdk/report.py
@@ -175,7 +175,7 @@ class Report:
             "|:-----|:-----|:-----|:-----|:-----|",
         ]
         provider = [p for p in providers if p.api == str(api_group.name)]
-        provider_str = provider[0].provider_type if provider else ""
+        provider_str = ",".join(p.provider_type for p in provider) if provider else ""
         for api, capa_map in API_MAPS[api_group].items():
             for capa, tests in capa_map.items():
                 for test_name in tests:
diff --git a/uv.lock b/uv.lock
index 336d67c0b..ce633c174 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,9 +1,21 @@
 version = 1
 requires-python = ">=3.10"
 resolution-markers = [
-    "python_full_version >= '3.12'",
-    "python_full_version == '3.11.*'",
     "python_full_version < '3.11'",
+    "python_full_version == '3.11.*'",
+    "python_full_version >= '3.12'",
+]
+
+[[package]]
+name = "aiosqlite"
+version = "0.21.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/13/7d/8bca2bf9a247c2c5dfeec1d7a5f40db6518f88d314b8bca9da29670d2671/aiosqlite-0.21.0.tar.gz", hash = "sha256:131bb8056daa3bc875608c631c678cda73922a2d4ba8aec373b19f18c17e7aa3", size = 13454 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792 },
 ]

 [[package]]
@@ -295,61 +307,62 @@ wheels = [

 [[package]]
 name = "coverage"
-version = "7.6.10"
+version = "7.6.12"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/84/ba/ac14d281f80aab516275012e8875991bb06203957aa1e19950139238d658/coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23", size = 803868 }
+sdist = { url = "https://files.pythonhosted.org/packages/0c/d6/2b53ab3ee99f2262e6f0b8369a43f6d66658eab45510331c0b3d5c8c4272/coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2", size = 805941 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/c5/12/2a2a923edf4ddabdffed7ad6da50d96a5c126dae7b80a33df7310e329a1e/coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78", size = 207982 },
-    { url =
"https://files.pythonhosted.org/packages/ca/49/6985dbca9c7be3f3cb62a2e6e492a0c88b65bf40579e16c71ae9c33c6b23/coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c", size = 208414 }, - { url = "https://files.pythonhosted.org/packages/35/93/287e8f1d1ed2646f4e0b2605d14616c9a8a2697d0d1b453815eb5c6cebdb/coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a", size = 236860 }, - { url = "https://files.pythonhosted.org/packages/de/e1/cfdb5627a03567a10031acc629b75d45a4ca1616e54f7133ca1fa366050a/coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165", size = 234758 }, - { url = "https://files.pythonhosted.org/packages/6d/85/fc0de2bcda3f97c2ee9fe8568f7d48f7279e91068958e5b2cc19e0e5f600/coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988", size = 235920 }, - { url = "https://files.pythonhosted.org/packages/79/73/ef4ea0105531506a6f4cf4ba571a214b14a884630b567ed65b3d9c1975e1/coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5", size = 234986 }, - { url = "https://files.pythonhosted.org/packages/c6/4d/75afcfe4432e2ad0405c6f27adeb109ff8976c5e636af8604f94f29fa3fc/coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3", size = 233446 }, - { url = "https://files.pythonhosted.org/packages/86/5b/efee56a89c16171288cafff022e8af44f8f94075c2d8da563c3935212871/coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5", size = 234566 }, - { url = "https://files.pythonhosted.org/packages/f2/db/67770cceb4a64d3198bf2aa49946f411b85ec6b0a9b489e61c8467a4253b/coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244", size = 210675 }, - { url = "https://files.pythonhosted.org/packages/8d/27/e8bfc43f5345ec2c27bc8a1fa77cdc5ce9dcf954445e11f14bb70b889d14/coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e", size = 211518 }, - { url = "https://files.pythonhosted.org/packages/85/d2/5e175fcf6766cf7501a8541d81778fd2f52f4870100e791f5327fd23270b/coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3", size = 208088 }, - { url = "https://files.pythonhosted.org/packages/4b/6f/06db4dc8fca33c13b673986e20e466fd936235a6ec1f0045c3853ac1b593/coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43", size = 208536 }, - { url = "https://files.pythonhosted.org/packages/0d/62/c6a0cf80318c1c1af376d52df444da3608eafc913b82c84a4600d8349472/coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132", size = 240474 }, - { url = 
"https://files.pythonhosted.org/packages/a3/59/750adafc2e57786d2e8739a46b680d4fb0fbc2d57fbcb161290a9f1ecf23/coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f", size = 237880 }, - { url = "https://files.pythonhosted.org/packages/2c/f8/ef009b3b98e9f7033c19deb40d629354aab1d8b2d7f9cfec284dbedf5096/coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994", size = 239750 }, - { url = "https://files.pythonhosted.org/packages/a6/e2/6622f3b70f5f5b59f705e680dae6db64421af05a5d1e389afd24dae62e5b/coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99", size = 238642 }, - { url = "https://files.pythonhosted.org/packages/2d/10/57ac3f191a3c95c67844099514ff44e6e19b2915cd1c22269fb27f9b17b6/coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd", size = 237266 }, - { url = "https://files.pythonhosted.org/packages/ee/2d/7016f4ad9d553cabcb7333ed78ff9d27248ec4eba8dd21fa488254dff894/coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377", size = 238045 }, - { url = "https://files.pythonhosted.org/packages/a7/fe/45af5c82389a71e0cae4546413266d2195c3744849669b0bab4b5f2c75da/coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8", size = 210647 }, - { url = "https://files.pythonhosted.org/packages/db/11/3f8e803a43b79bc534c6a506674da9d614e990e37118b4506faf70d46ed6/coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609", size = 211508 }, - { url = "https://files.pythonhosted.org/packages/86/77/19d09ea06f92fdf0487499283b1b7af06bc422ea94534c8fe3a4cd023641/coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853", size = 208281 }, - { url = "https://files.pythonhosted.org/packages/b6/67/5479b9f2f99fcfb49c0d5cf61912a5255ef80b6e80a3cddba39c38146cf4/coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078", size = 208514 }, - { url = "https://files.pythonhosted.org/packages/15/d1/febf59030ce1c83b7331c3546d7317e5120c5966471727aa7ac157729c4b/coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0", size = 241537 }, - { url = "https://files.pythonhosted.org/packages/4b/7e/5ac4c90192130e7cf8b63153fe620c8bfd9068f89a6d9b5f26f1550f7a26/coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50", size = 238572 }, - { url = "https://files.pythonhosted.org/packages/dc/03/0334a79b26ecf59958f2fe9dd1f5ab3e2f88db876f5071933de39af09647/coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022", size = 240639 }, - { url = 
"https://files.pythonhosted.org/packages/d7/45/8a707f23c202208d7b286d78ad6233f50dcf929319b664b6cc18a03c1aae/coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b", size = 240072 }, - { url = "https://files.pythonhosted.org/packages/66/02/603ce0ac2d02bc7b393279ef618940b4a0535b0868ee791140bda9ecfa40/coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0", size = 238386 }, - { url = "https://files.pythonhosted.org/packages/04/62/4e6887e9be060f5d18f1dd58c2838b2d9646faf353232dec4e2d4b1c8644/coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852", size = 240054 }, - { url = "https://files.pythonhosted.org/packages/5c/74/83ae4151c170d8bd071924f212add22a0e62a7fe2b149edf016aeecad17c/coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359", size = 210904 }, - { url = "https://files.pythonhosted.org/packages/c3/54/de0893186a221478f5880283119fc40483bc460b27c4c71d1b8bba3474b9/coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247", size = 211692 }, - { url = "https://files.pythonhosted.org/packages/25/6d/31883d78865529257bf847df5789e2ae80e99de8a460c3453dbfbe0db069/coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9", size = 208308 }, - { url = "https://files.pythonhosted.org/packages/70/22/3f2b129cc08de00c83b0ad6252e034320946abfc3e4235c009e57cfeee05/coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b", size = 208565 }, - { url = "https://files.pythonhosted.org/packages/97/0a/d89bc2d1cc61d3a8dfe9e9d75217b2be85f6c73ebf1b9e3c2f4e797f4531/coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690", size = 241083 }, - { url = "https://files.pythonhosted.org/packages/4c/81/6d64b88a00c7a7aaed3a657b8eaa0931f37a6395fcef61e53ff742b49c97/coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18", size = 238235 }, - { url = "https://files.pythonhosted.org/packages/9a/0b/7797d4193f5adb4b837207ed87fecf5fc38f7cc612b369a8e8e12d9fa114/coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c", size = 240220 }, - { url = "https://files.pythonhosted.org/packages/65/4d/6f83ca1bddcf8e51bf8ff71572f39a1c73c34cf50e752a952c34f24d0a60/coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd", size = 239847 }, - { url = "https://files.pythonhosted.org/packages/30/9d/2470df6aa146aff4c65fee0f87f58d2164a67533c771c9cc12ffcdb865d5/coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e", size = 237922 }, - { url = "https://files.pythonhosted.org/packages/08/dd/723fef5d901e6a89f2507094db66c091449c8ba03272861eaefa773ad95c/coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694", size = 239783 }, - { url = "https://files.pythonhosted.org/packages/3d/f7/64d3298b2baf261cb35466000628706ce20a82d42faf9b771af447cd2b76/coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6", size = 210965 }, - { url = "https://files.pythonhosted.org/packages/d5/58/ec43499a7fc681212fe7742fe90b2bc361cdb72e3181ace1604247a5b24d/coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e", size = 211719 }, - { url = "https://files.pythonhosted.org/packages/ab/c9/f2857a135bcff4330c1e90e7d03446b036b2363d4ad37eb5e3a47bbac8a6/coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe", size = 209050 }, - { url = "https://files.pythonhosted.org/packages/aa/b3/f840e5bd777d8433caa9e4a1eb20503495709f697341ac1a8ee6a3c906ad/coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273", size = 209321 }, - { url = "https://files.pythonhosted.org/packages/85/7d/125a5362180fcc1c03d91850fc020f3831d5cda09319522bcfa6b2b70be7/coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8", size = 252039 }, - { url = "https://files.pythonhosted.org/packages/a9/9c/4358bf3c74baf1f9bddd2baf3756b54c07f2cfd2535f0a47f1e7757e54b3/coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098", size = 247758 }, - { url = "https://files.pythonhosted.org/packages/cf/c7/de3eb6fc5263b26fab5cda3de7a0f80e317597a4bad4781859f72885f300/coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb", size = 250119 }, - { url = "https://files.pythonhosted.org/packages/3e/e6/43de91f8ba2ec9140c6a4af1102141712949903dc732cf739167cfa7a3bc/coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0", size = 249597 }, - { url = "https://files.pythonhosted.org/packages/08/40/61158b5499aa2adf9e37bc6d0117e8f6788625b283d51e7e0c53cf340530/coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf", size = 247473 }, - { url = "https://files.pythonhosted.org/packages/50/69/b3f2416725621e9f112e74e8470793d5b5995f146f596f133678a633b77e/coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2", size = 248737 }, - { url = "https://files.pythonhosted.org/packages/3c/6e/fe899fb937657db6df31cc3e61c6968cb56d36d7326361847440a430152e/coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312", size = 211611 }, - { url = "https://files.pythonhosted.org/packages/1c/55/52f5e66142a9d7bc93a15192eba7a78513d2abf6b3558d77b4ca32f5f424/coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d", size = 212781 }, - { url = 
"https://files.pythonhosted.org/packages/a1/70/de81bfec9ed38a64fc44a77c7665e20ca507fc3265597c28b0d989e4082e/coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f", size = 200223 }, + { url = "https://files.pythonhosted.org/packages/ba/67/81dc41ec8f548c365d04a29f1afd492d3176b372c33e47fa2a45a01dc13a/coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8", size = 208345 }, + { url = "https://files.pythonhosted.org/packages/33/43/17f71676016c8829bde69e24c852fef6bd9ed39f774a245d9ec98f689fa0/coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879", size = 208775 }, + { url = "https://files.pythonhosted.org/packages/86/25/c6ff0775f8960e8c0840845b723eed978d22a3cd9babd2b996e4a7c502c6/coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe", size = 237925 }, + { url = "https://files.pythonhosted.org/packages/b0/3d/5f5bd37046243cb9d15fff2c69e498c2f4fe4f9b42a96018d4579ed3506f/coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674", size = 235835 }, + { url = "https://files.pythonhosted.org/packages/b5/f1/9e6b75531fe33490b910d251b0bf709142e73a40e4e38a3899e6986fe088/coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb", size = 236966 }, + { url = "https://files.pythonhosted.org/packages/4f/bc/aef5a98f9133851bd1aacf130e754063719345d2fb776a117d5a8d516971/coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c", size = 236080 }, + { url = "https://files.pythonhosted.org/packages/eb/d0/56b4ab77f9b12aea4d4c11dc11cdcaa7c29130b837eb610639cf3400c9c3/coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c", size = 234393 }, + { url = "https://files.pythonhosted.org/packages/0d/77/28ef95c5d23fe3dd191a0b7d89c82fea2c2d904aef9315daf7c890e96557/coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e", size = 235536 }, + { url = "https://files.pythonhosted.org/packages/29/62/18791d3632ee3ff3f95bc8599115707d05229c72db9539f208bb878a3d88/coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425", size = 211063 }, + { url = "https://files.pythonhosted.org/packages/fc/57/b3878006cedfd573c963e5c751b8587154eb10a61cc0f47a84f85c88a355/coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa", size = 211955 }, + { url = "https://files.pythonhosted.org/packages/64/2d/da78abbfff98468c91fd63a73cccdfa0e99051676ded8dd36123e3a2d4d5/coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015", size = 208464 }, + { url = "https://files.pythonhosted.org/packages/31/f2/c269f46c470bdabe83a69e860c80a82e5e76840e9f4bbd7f38f8cebbee2f/coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45", size = 208893 }, + { url = "https://files.pythonhosted.org/packages/47/63/5682bf14d2ce20819998a49c0deadb81e608a59eed64d6bc2191bc8046b9/coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702", size = 241545 }, + { url = "https://files.pythonhosted.org/packages/6a/b6/6b6631f1172d437e11067e1c2edfdb7238b65dff965a12bce3b6d1bf2be2/coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0", size = 239230 }, + { url = "https://files.pythonhosted.org/packages/c7/01/9cd06cbb1be53e837e16f1b4309f6357e2dfcbdab0dd7cd3b1a50589e4e1/coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f", size = 241013 }, + { url = "https://files.pythonhosted.org/packages/4b/26/56afefc03c30871326e3d99709a70d327ac1f33da383cba108c79bd71563/coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f", size = 239750 }, + { url = "https://files.pythonhosted.org/packages/dd/ea/88a1ff951ed288f56aa561558ebe380107cf9132facd0b50bced63ba7238/coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d", size = 238462 }, + { url = "https://files.pythonhosted.org/packages/6e/d4/1d9404566f553728889409eff82151d515fbb46dc92cbd13b5337fa0de8c/coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba", size = 239307 }, + { url = "https://files.pythonhosted.org/packages/12/c1/e453d3b794cde1e232ee8ac1d194fde8e2ba329c18bbf1b93f6f5eef606b/coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f", size = 211117 }, + { url = "https://files.pythonhosted.org/packages/d5/db/829185120c1686fa297294f8fcd23e0422f71070bf85ef1cc1a72ecb2930/coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558", size = 212019 }, + { url = "https://files.pythonhosted.org/packages/e2/7f/4af2ed1d06ce6bee7eafc03b2ef748b14132b0bdae04388e451e4b2c529b/coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad", size = 208645 }, + { url = "https://files.pythonhosted.org/packages/dc/60/d19df912989117caa95123524d26fc973f56dc14aecdec5ccd7d0084e131/coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3", size = 208898 }, + { url = "https://files.pythonhosted.org/packages/bd/10/fecabcf438ba676f706bf90186ccf6ff9f6158cc494286965c76e58742fa/coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574", size = 242987 }, + { url = "https://files.pythonhosted.org/packages/4c/53/4e208440389e8ea936f5f2b0762dcd4cb03281a7722def8e2bf9dc9c3d68/coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985", size = 239881 }, + { url = 
"https://files.pythonhosted.org/packages/c4/47/2ba744af8d2f0caa1f17e7746147e34dfc5f811fb65fc153153722d58835/coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750", size = 242142 }, + { url = "https://files.pythonhosted.org/packages/e9/90/df726af8ee74d92ee7e3bf113bf101ea4315d71508952bd21abc3fae471e/coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea", size = 241437 }, + { url = "https://files.pythonhosted.org/packages/f6/af/995263fd04ae5f9cf12521150295bf03b6ba940d0aea97953bb4a6db3e2b/coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3", size = 239724 }, + { url = "https://files.pythonhosted.org/packages/1c/8e/5bb04f0318805e190984c6ce106b4c3968a9562a400180e549855d8211bd/coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a", size = 241329 }, + { url = "https://files.pythonhosted.org/packages/9e/9d/fa04d9e6c3f6459f4e0b231925277cfc33d72dfab7fa19c312c03e59da99/coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95", size = 211289 }, + { url = "https://files.pythonhosted.org/packages/53/40/53c7ffe3c0c3fff4d708bc99e65f3d78c129110d6629736faf2dbd60ad57/coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288", size = 212079 }, + { url = "https://files.pythonhosted.org/packages/76/89/1adf3e634753c0de3dad2f02aac1e73dba58bc5a3a914ac94a25b2ef418f/coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1", size = 208673 }, + { url = "https://files.pythonhosted.org/packages/ce/64/92a4e239d64d798535c5b45baac6b891c205a8a2e7c9cc8590ad386693dc/coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd", size = 208945 }, + { url = "https://files.pythonhosted.org/packages/b4/d0/4596a3ef3bca20a94539c9b1e10fd250225d1dec57ea78b0867a1cf9742e/coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9", size = 242484 }, + { url = "https://files.pythonhosted.org/packages/1c/ef/6fd0d344695af6718a38d0861408af48a709327335486a7ad7e85936dc6e/coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e", size = 239525 }, + { url = "https://files.pythonhosted.org/packages/0c/4b/373be2be7dd42f2bcd6964059fd8fa307d265a29d2b9bcf1d044bcc156ed/coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4", size = 241545 }, + { url = "https://files.pythonhosted.org/packages/a6/7d/0e83cc2673a7790650851ee92f72a343827ecaaea07960587c8f442b5cd3/coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6", size = 241179 }, + { url = 
"https://files.pythonhosted.org/packages/ff/8c/566ea92ce2bb7627b0900124e24a99f9244b6c8c92d09ff9f7633eb7c3c8/coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3", size = 239288 }, + { url = "https://files.pythonhosted.org/packages/7d/e4/869a138e50b622f796782d642c15fb5f25a5870c6d0059a663667a201638/coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc", size = 241032 }, + { url = "https://files.pythonhosted.org/packages/ae/28/a52ff5d62a9f9e9fe9c4f17759b98632edd3a3489fce70154c7d66054dd3/coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3", size = 211315 }, + { url = "https://files.pythonhosted.org/packages/bc/17/ab849b7429a639f9722fa5628364c28d675c7ff37ebc3268fe9840dda13c/coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef", size = 212099 }, + { url = "https://files.pythonhosted.org/packages/d2/1c/b9965bf23e171d98505eb5eb4fb4d05c44efd256f2e0f19ad1ba8c3f54b0/coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e", size = 209511 }, + { url = "https://files.pythonhosted.org/packages/57/b3/119c201d3b692d5e17784fee876a9a78e1b3051327de2709392962877ca8/coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703", size = 209729 }, + { url = "https://files.pythonhosted.org/packages/52/4e/a7feb5a56b266304bc59f872ea07b728e14d5a64f1ad3a2cc01a3259c965/coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0", size = 253988 }, + { url = "https://files.pythonhosted.org/packages/65/19/069fec4d6908d0dae98126aa7ad08ce5130a6decc8509da7740d36e8e8d2/coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924", size = 249697 }, + { url = "https://files.pythonhosted.org/packages/1c/da/5b19f09ba39df7c55f77820736bf17bbe2416bbf5216a3100ac019e15839/coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b", size = 252033 }, + { url = "https://files.pythonhosted.org/packages/1e/89/4c2750df7f80a7872267f7c5fe497c69d45f688f7b3afe1297e52e33f791/coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d", size = 251535 }, + { url = "https://files.pythonhosted.org/packages/78/3b/6d3ae3c1cc05f1b0460c51e6f6dcf567598cbd7c6121e5ad06643974703c/coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827", size = 249192 }, + { url = "https://files.pythonhosted.org/packages/6e/8e/c14a79f535ce41af7d436bbad0d3d90c43d9e38ec409b4770c894031422e/coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9", size = 250627 }, + { url = "https://files.pythonhosted.org/packages/cb/79/b7cee656cfb17a7f2c1b9c3cee03dd5d8000ca299ad4038ba64b61a9b044/coverage-7.6.12-cp313-cp313t-win32.whl", hash = 
"sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3", size = 212033 }, + { url = "https://files.pythonhosted.org/packages/b6/c3/f7aaa3813f1fa9a4228175a7bd368199659d392897e184435a3b66408dd3/coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f", size = 213240 }, + { url = "https://files.pythonhosted.org/packages/7a/7f/05818c62c7afe75df11e0233bd670948d68b36cdbf2a339a095bc02624a8/coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf", size = 200558 }, + { url = "https://files.pythonhosted.org/packages/fb/b2/f655700e1024dec98b10ebaafd0cedbc25e40e4abe62a3c8e2ceef4f8f0a/coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953", size = 200552 }, ] [[package]] @@ -386,6 +399,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, ] +[[package]] +name = "deprecated" +version = "1.2.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998 }, +] + [[package]] name = "distlib" version = "0.3.9" @@ -431,6 +456,16 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, ] +[[package]] +name = "fairscale" +version = "0.4.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "torch" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c1/08/b3334d7b543ac10dcb129cef4f84723ab696725512f18d69ab3a784b0bf5/fairscale-0.4.13.tar.gz", hash = "sha256:1b797825c427f5dba92253fd0d8daa574e8bd651a2423497775fab1b30cfb768", size = 266261 } + [[package]] name = "fastapi" version = "0.115.8" @@ -474,11 +509,40 @@ sdist = { url = "https://files.pythonhosted.org/packages/6b/b6/82c7e601d6d3c3278 [[package]] name = "fsspec" -version = "2024.12.0" +version = "2025.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/11/de70dee31455c546fbc88301971ec03c328f3d1138cfba14263f651e9551/fsspec-2024.12.0.tar.gz", hash = "sha256:670700c977ed2fb51e0d9f9253177ed20cbde4a3e5c0283cc5385b5870c8533f", size = 291600 } +sdist = { url = "https://files.pythonhosted.org/packages/b5/79/68612ed99700e6413de42895aa725463e821a6b3be75c87fcce1b4af4c70/fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd", size = 292283 } wheels = [ - { url = "https://files.pythonhosted.org/packages/de/86/5486b0188d08aa643e127774a99bac51ffa6cf343e3deb0583956dca5b22/fsspec-2024.12.0-py3-none-any.whl", hash = 
"sha256:b520aed47ad9804237ff878b504267a3b0b441e97508bd6d2d8774e3db85cee2", size = 183862 }, + { url = "https://files.pythonhosted.org/packages/e2/94/758680531a00d06e471ef649e4ec2ed6bf185356a7f9fbfbb7368a40bd49/fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b", size = 184484 }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.67.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/e1/fbffb85a624f1404133b5bb624834e77e0f549e2b8548146fe18c56e1411/googleapis_common_protos-1.67.0.tar.gz", hash = "sha256:21398025365f138be356d5923e9168737d94d46a72aefee4a6110a1f23463c86", size = 57344 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/30/2bd0eb03a7dee7727cd2ec643d1e992979e62d5e7443507381cce0455132/googleapis_common_protos-1.67.0-py2.py3-none-any.whl", hash = "sha256:579de760800d13616f51cf8be00c876f00a9f146d3e6510e19d1f4111758b741", size = 164985 }, +] + +[[package]] +name = "groq" +version = "0.18.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/8c/e72c164474a88dfed6c7327ad53cb87ff11566b74b3a76d41dc7b94fc51c/groq-0.18.0.tar.gz", hash = "sha256:8e2ccfea406d68b3525af4b7c0e321fcb3d2a73fc60bb70b4156e6cd88c72f03", size = 117322 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b0/6c/5a53d632b44ef7655ac8d9b34432e13160917f9307c94b1467efd34e336e/groq-0.18.0-py3-none-any.whl", hash = "sha256:81d5ac00057a45d8ce559d23ab5d3b3893011d1f12c35187ab35a9182d826ea6", size = 121911 }, ] [[package]] @@ -538,11 +602,11 @@ wheels = [ [[package]] name = "identify" -version = "2.6.6" +version = "2.6.7" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/82/bf/c68c46601bacd4c6fb4dd751a42b6e7087240eaabc6487f2ef7a48e0e8fc/identify-2.6.6.tar.gz", hash = "sha256:7bec12768ed44ea4761efb47806f0a41f86e7c0a5fdf5950d4648c90eca7e251", size = 99217 } +sdist = { url = "https://files.pythonhosted.org/packages/83/d1/524aa3350f78bcd714d148ade6133d67d6b7de2cdbae7d99039c024c9a25/identify-2.6.7.tar.gz", hash = "sha256:3fa266b42eba321ee0b2bb0936a6a6b9e36a1351cbb69055b3082f4193035684", size = 99260 } wheels = [ - { url = "https://files.pythonhosted.org/packages/74/a1/68a395c17eeefb04917034bd0a1bfa765e7654fa150cca473d669aa3afb5/identify-2.6.6-py2.py3-none-any.whl", hash = "sha256:cbd1810bce79f8b671ecb20f53ee0ae8e86ae84b557de31d89709dc2a48ba881", size = 99083 }, + { url = "https://files.pythonhosted.org/packages/03/00/1fd4a117c6c93f2dcc5b7edaeaf53ea45332ef966429be566ca16c2beb94/identify-2.6.7-py2.py3-none-any.whl", hash = "sha256:155931cb617a401807b09ecec6635d6c692d180090a1cedca8ef7d58ba5b6aa0", size = 99097 }, ] [[package]] @@ -563,6 +627,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, ] +[[package]] +name = "importlib-metadata" +version = "8.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 }, +] + [[package]] name = "iniconfig" version = "2.0.0" @@ -572,6 +648,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, ] +[[package]] +name = "interegular" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/dc/9d/8b6dde58a028a3962ce17e84d5fe73758df61378e00ef8ac3d85da34b0ff/interegular-0.3.3.tar.gz", hash = "sha256:d9b697b21b34884711399ba0f0376914b81899ce670032486d0d048344a76600", size = 24705 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/01/72d6472f80651673716d1deda2a5bbb633e563ecf94f4479da5519d69d25/interegular-0.3.3-py37-none-any.whl", hash = "sha256:b0c07007d48c89d6d19f7204972d369b2a77222722e126b6aa63aa721dc3b19c", size = 23635 }, +] + [[package]] name = "ipykernel" version = "6.29.5" @@ -642,6 +727,65 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, ] +[[package]] +name = "jiter" +version = "0.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/70/90bc7bd3932e651486861df5c8ffea4ca7c77d28e8532ddefe2abc561a53/jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d", size = 163007 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/f3/8c11e0e87bd5934c414f9b1cfae3cbfd4a938d4669d57cb427e1c4d11a7f/jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b", size = 303381 }, + { url = "https://files.pythonhosted.org/packages/ea/28/4cd3f0bcbf40e946bc6a62a82c951afc386a25673d3d8d5ee461f1559bbe/jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393", size = 311718 }, + { url = "https://files.pythonhosted.org/packages/0d/17/57acab00507e60bd954eaec0837d9d7b119b4117ff49b8a62f2b646f32ed/jiter-0.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c826a221851a8dc028eb6d7d6429ba03184fa3c7e83ae01cd6d3bd1d4bd17d", size = 335465 }, + { url = "https://files.pythonhosted.org/packages/74/b9/1a3ddd2bc95ae17c815b021521020f40c60b32137730126bada962ef32b4/jiter-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d35c864c2dff13dfd79fb070fc4fc6235d7b9b359efe340e1261deb21b9fcb66", size = 355570 }, + { url = "https://files.pythonhosted.org/packages/78/69/6d29e2296a934199a7d0dde673ecccf98c9c8db44caf0248b3f2b65483cb/jiter-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f557c55bc2b7676e74d39d19bcb8775ca295c7a028246175d6a8b431e70835e5", size = 
381383 }, + { url = "https://files.pythonhosted.org/packages/22/d7/fbc4c3fb1bf65f9be22a32759b539f88e897aeb13fe84ab0266e4423487a/jiter-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:580ccf358539153db147e40751a0b41688a5ceb275e6f3e93d91c9467f42b2e3", size = 390454 }, + { url = "https://files.pythonhosted.org/packages/4d/a0/3993cda2e267fe679b45d0bcc2cef0b4504b0aa810659cdae9737d6bace9/jiter-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af102d3372e917cffce49b521e4c32c497515119dc7bd8a75665e90a718bbf08", size = 345039 }, + { url = "https://files.pythonhosted.org/packages/b9/ef/69c18562b4c09ce88fab5df1dcaf643f6b1a8b970b65216e7221169b81c4/jiter-0.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cadcc978f82397d515bb2683fc0d50103acff2a180552654bb92d6045dec2c49", size = 376200 }, + { url = "https://files.pythonhosted.org/packages/4d/17/0b5a8de46a6ab4d836f70934036278b49b8530c292b29dde3483326d4555/jiter-0.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ba5bdf56969cad2019d4e8ffd3f879b5fdc792624129741d3d83fc832fef8c7d", size = 511158 }, + { url = "https://files.pythonhosted.org/packages/6c/b2/c401a0a2554b36c9e6d6e4876b43790d75139cf3936f0222e675cbc23451/jiter-0.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b94a33a241bee9e34b8481cdcaa3d5c2116f575e0226e421bed3f7a6ea71cff", size = 503956 }, + { url = "https://files.pythonhosted.org/packages/d4/02/a0291ed7d72c0ac130f172354ee3cf0b2556b69584de391463a8ee534f40/jiter-0.8.2-cp310-cp310-win32.whl", hash = "sha256:6e5337bf454abddd91bd048ce0dca5134056fc99ca0205258766db35d0a2ea43", size = 202846 }, + { url = "https://files.pythonhosted.org/packages/ad/20/8c988831ae4bf437e29f1671e198fc99ba8fe49f2895f23789acad1d1811/jiter-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:4a9220497ca0cb1fe94e3f334f65b9b5102a0b8147646118f020d8ce1de70105", size = 204414 }, + { url = "https://files.pythonhosted.org/packages/cb/b0/c1a7caa7f9dc5f1f6cfa08722867790fe2d3645d6e7170ca280e6e52d163/jiter-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2dd61c5afc88a4fda7d8b2cf03ae5947c6ac7516d32b7a15bf4b49569a5c076b", size = 303666 }, + { url = "https://files.pythonhosted.org/packages/f5/97/0468bc9eeae43079aaa5feb9267964e496bf13133d469cfdc135498f8dd0/jiter-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a6c710d657c8d1d2adbbb5c0b0c6bfcec28fd35bd6b5f016395f9ac43e878a15", size = 311934 }, + { url = "https://files.pythonhosted.org/packages/e5/69/64058e18263d9a5f1e10f90c436853616d5f047d997c37c7b2df11b085ec/jiter-0.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9584de0cd306072635fe4b89742bf26feae858a0683b399ad0c2509011b9dc0", size = 335506 }, + { url = "https://files.pythonhosted.org/packages/9d/14/b747f9a77b8c0542141d77ca1e2a7523e854754af2c339ac89a8b66527d6/jiter-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a90a923338531b7970abb063cfc087eebae6ef8ec8139762007188f6bc69a9f", size = 355849 }, + { url = "https://files.pythonhosted.org/packages/53/e2/98a08161db7cc9d0e39bc385415890928ff09709034982f48eccfca40733/jiter-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21974d246ed0181558087cd9f76e84e8321091ebfb3a93d4c341479a736f099", size = 381700 }, + { url = "https://files.pythonhosted.org/packages/7a/38/1674672954d35bce3b1c9af99d5849f9256ac8f5b672e020ac7821581206/jiter-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:32475a42b2ea7b344069dc1e81445cfc00b9d0e3ca837f0523072432332e9f74", size = 389710 }, + { url = "https://files.pythonhosted.org/packages/f8/9b/92f9da9a9e107d019bcf883cd9125fa1690079f323f5a9d5c6986eeec3c0/jiter-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9931fd36ee513c26b5bf08c940b0ac875de175341cbdd4fa3be109f0492586", size = 345553 }, + { url = "https://files.pythonhosted.org/packages/44/a6/6d030003394e9659cd0d7136bbeabd82e869849ceccddc34d40abbbbb269/jiter-0.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0820f4a3a59ddced7fce696d86a096d5cc48d32a4183483a17671a61edfddc", size = 376388 }, + { url = "https://files.pythonhosted.org/packages/ad/8d/87b09e648e4aca5f9af89e3ab3cfb93db2d1e633b2f2931ede8dabd9b19a/jiter-0.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ffc86ae5e3e6a93765d49d1ab47b6075a9c978a2b3b80f0f32628f39caa0c88", size = 511226 }, + { url = "https://files.pythonhosted.org/packages/77/95/8008ebe4cdc82eac1c97864a8042ca7e383ed67e0ec17bfd03797045c727/jiter-0.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5127dc1abd809431172bc3fbe8168d6b90556a30bb10acd5ded41c3cfd6f43b6", size = 504134 }, + { url = "https://files.pythonhosted.org/packages/26/0d/3056a74de13e8b2562e4d526de6dac2f65d91ace63a8234deb9284a1d24d/jiter-0.8.2-cp311-cp311-win32.whl", hash = "sha256:66227a2c7b575720c1871c8800d3a0122bb8ee94edb43a5685aa9aceb2782d44", size = 203103 }, + { url = "https://files.pythonhosted.org/packages/4e/1e/7f96b798f356e531ffc0f53dd2f37185fac60fae4d6c612bbbd4639b90aa/jiter-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:cde031d8413842a1e7501e9129b8e676e62a657f8ec8166e18a70d94d4682855", size = 206717 }, + { url = "https://files.pythonhosted.org/packages/a1/17/c8747af8ea4e045f57d6cfd6fc180752cab9bc3de0e8a0c9ca4e8af333b1/jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f", size = 302027 }, + { url = "https://files.pythonhosted.org/packages/3c/c1/6da849640cd35a41e91085723b76acc818d4b7d92b0b6e5111736ce1dd10/jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44", size = 310326 }, + { url = "https://files.pythonhosted.org/packages/06/99/a2bf660d8ccffee9ad7ed46b4f860d2108a148d0ea36043fd16f4dc37e94/jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f", size = 334242 }, + { url = "https://files.pythonhosted.org/packages/a7/5f/cea1c17864828731f11427b9d1ab7f24764dbd9aaf4648a7f851164d2718/jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60", size = 356654 }, + { url = "https://files.pythonhosted.org/packages/e9/13/62774b7e5e7f5d5043efe1d0f94ead66e6d0f894ae010adb56b3f788de71/jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57", size = 379967 }, + { url = "https://files.pythonhosted.org/packages/ec/fb/096b34c553bb0bd3f2289d5013dcad6074948b8d55212aa13a10d44c5326/jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e", size = 389252 }, + { url = 
"https://files.pythonhosted.org/packages/17/61/beea645c0bf398ced8b199e377b61eb999d8e46e053bb285c91c3d3eaab0/jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887", size = 345490 }, + { url = "https://files.pythonhosted.org/packages/d5/df/834aa17ad5dcc3cf0118821da0a0cf1589ea7db9832589278553640366bc/jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d", size = 376991 }, + { url = "https://files.pythonhosted.org/packages/67/80/87d140399d382fb4ea5b3d56e7ecaa4efdca17cd7411ff904c1517855314/jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152", size = 510822 }, + { url = "https://files.pythonhosted.org/packages/5c/37/3394bb47bac1ad2cb0465601f86828a0518d07828a650722e55268cdb7e6/jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29", size = 503730 }, + { url = "https://files.pythonhosted.org/packages/f9/e2/253fc1fa59103bb4e3aa0665d6ceb1818df1cd7bf3eb492c4dad229b1cd4/jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e", size = 203375 }, + { url = "https://files.pythonhosted.org/packages/41/69/6d4bbe66b3b3b4507e47aa1dd5d075919ad242b4b1115b3f80eecd443687/jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c", size = 204740 }, + { url = "https://files.pythonhosted.org/packages/6c/b0/bfa1f6f2c956b948802ef5a021281978bf53b7a6ca54bb126fd88a5d014e/jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84", size = 301190 }, + { url = "https://files.pythonhosted.org/packages/a4/8f/396ddb4e292b5ea57e45ade5dc48229556b9044bad29a3b4b2dddeaedd52/jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4", size = 309334 }, + { url = "https://files.pythonhosted.org/packages/7f/68/805978f2f446fa6362ba0cc2e4489b945695940656edd844e110a61c98f8/jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587", size = 333918 }, + { url = "https://files.pythonhosted.org/packages/b3/99/0f71f7be667c33403fa9706e5b50583ae5106d96fab997fa7e2f38ee8347/jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c", size = 356057 }, + { url = "https://files.pythonhosted.org/packages/8d/50/a82796e421a22b699ee4d2ce527e5bcb29471a2351cbdc931819d941a167/jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18", size = 379790 }, + { url = "https://files.pythonhosted.org/packages/3c/31/10fb012b00f6d83342ca9e2c9618869ab449f1aa78c8f1b2193a6b49647c/jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6", size = 388285 }, + { url = "https://files.pythonhosted.org/packages/c8/81/f15ebf7de57be488aa22944bf4274962aca8092e4f7817f92ffa50d3ee46/jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef", size = 344764 }, + { url = "https://files.pythonhosted.org/packages/b3/e8/0cae550d72b48829ba653eb348cdc25f3f06f8a62363723702ec18e7be9c/jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1", size = 376620 }, + { url = "https://files.pythonhosted.org/packages/b8/50/e5478ff9d82534a944c03b63bc217c5f37019d4a34d288db0f079b13c10b/jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9", size = 510402 }, + { url = "https://files.pythonhosted.org/packages/8e/1e/3de48bbebbc8f7025bd454cedc8c62378c0e32dd483dece5f4a814a5cb55/jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05", size = 503018 }, + { url = "https://files.pythonhosted.org/packages/d5/cd/d5a5501d72a11fe3e5fd65c78c884e5164eefe80077680533919be22d3a3/jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a", size = 203190 }, + { url = "https://files.pythonhosted.org/packages/51/bf/e5ca301245ba951447e3ad677a02a64a8845b185de2603dabd83e1e4b9c6/jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865", size = 203551 }, + { url = "https://files.pythonhosted.org/packages/2f/3c/71a491952c37b87d127790dd7a0b1ebea0514c6b6ad30085b16bbe00aee6/jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca", size = 308347 }, + { url = "https://files.pythonhosted.org/packages/a0/4c/c02408042e6a7605ec063daed138e07b982fdb98467deaaf1c90950cf2c6/jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0", size = 342875 }, + { url = "https://files.pythonhosted.org/packages/91/61/c80ef80ed8a0a21158e289ef70dac01e351d929a1c30cb0f49be60772547/jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566", size = 202374 }, +] + [[package]] name = "jsonschema" version = "4.23.0" @@ -744,6 +888,8 @@ dev = [ { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-asyncio" }, + { name = "pytest-html" }, + { name = "ruamel-yaml" }, { name = "ruff" }, { name = "types-requests" }, { name = "types-setuptools" }, @@ -760,28 +906,50 @@ docs = [ { name = "sphinxcontrib-redoc" }, { name = "sphinxcontrib-video" }, ] +test = [ + { name = "aiosqlite" }, + { name = "fairscale" }, + { name = "groq" }, + { name = "lm-format-enforcer" }, + { name = "ollama" }, + { name = "openai" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "torch" }, + { name = "torchvision" }, +] [package.metadata] requires-dist = [ + { name = "aiosqlite", marker = "extra == 'test'" }, { name = "black", marker = "extra == 'dev'" }, { name = "blobfile" }, + { name = "fairscale", marker = "extra == 'test'", specifier = ">=0.4.13" }, { name = "fastapi", marker = "extra == 'dev'" }, { name = "fire" }, + { name = "groq", marker = "extra == 'test'" }, { name = "httpx" }, { name = "huggingface-hub" }, { name = "jsonschema" }, { name = "llama-models", specifier = ">=0.1.3" }, { name = "llama-stack-client", specifier = ">=0.1.3" }, + { name = "lm-format-enforcer", marker = "extra == 'test'", specifier = 
">=0.10.9" }, { name = "myst-parser", marker = "extra == 'docs'" }, { name = "nbval", marker = "extra == 'dev'" }, + { name = "ollama", marker = "extra == 'test'" }, + { name = "openai", marker = "extra == 'test'" }, + { name = "opentelemetry-exporter-otlp-proto-http", marker = "extra == 'test'" }, + { name = "opentelemetry-sdk", marker = "extra == 'test'" }, { name = "pre-commit", marker = "extra == 'dev'" }, { name = "prompt-toolkit" }, { name = "pydantic", specifier = ">=2" }, { name = "pytest", marker = "extra == 'dev'" }, { name = "pytest-asyncio", marker = "extra == 'dev'" }, + { name = "pytest-html", marker = "extra == 'dev'" }, { name = "python-dotenv" }, { name = "requests" }, { name = "rich" }, + { name = "ruamel-yaml", marker = "extra == 'dev'" }, { name = "ruff", marker = "extra == 'dev'" }, { name = "setuptools" }, { name = "sphinx-autobuild", marker = "extra == 'docs'" }, @@ -793,6 +961,8 @@ requires-dist = [ { name = "sphinxcontrib-redoc", marker = "extra == 'docs'" }, { name = "sphinxcontrib-video", marker = "extra == 'docs'" }, { name = "termcolor" }, + { name = "torch", marker = "extra == 'test'", specifier = ">=2.6.0", index = "https://download.pytorch.org/whl/cpu" }, + { name = "torchvision", marker = "extra == 'test'", specifier = ">=0.21.0", index = "https://download.pytorch.org/whl/cpu" }, { name = "types-requests", marker = "extra == 'dev'" }, { name = "types-setuptools", marker = "extra == 'dev'" }, { name = "uvicorn", marker = "extra == 'dev'" }, @@ -823,85 +993,100 @@ wheels = [ ] [[package]] -name = "lxml" -version = "5.3.0" +name = "lm-format-enforcer" +version = "0.10.9" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/6b/20c3a4b24751377aaa6307eb230b66701024012c29dd374999cc92983269/lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f", size = 3679318 } +dependencies = [ + { name = "interegular" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/73/5d/401ffb7a8895e0f3206345e96c52b428c81e4a2af049d426023cb9cb0cdb/lm_format_enforcer-0.10.9.tar.gz", hash = "sha256:3e0bfeaf9fac9f69c8947da554db9a19a76d0be6e85075055f2c70d0aca420da", size = 39713 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/ce/2789e39eddf2b13fac29878bfa465f0910eb6b0096e29090e5176bc8cf43/lxml-5.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd36439be765e2dde7660212b5275641edbc813e7b24668831a5c8ac91180656", size = 8124570 }, - { url = "https://files.pythonhosted.org/packages/24/a8/f4010166a25d41715527129af2675981a50d3bbf7df09c5d9ab8ca24fbf9/lxml-5.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ae5fe5c4b525aa82b8076c1a59d642c17b6e8739ecf852522c6321852178119d", size = 4413042 }, - { url = "https://files.pythonhosted.org/packages/41/a4/7e45756cecdd7577ddf67a68b69c1db0f5ddbf0c9f65021ee769165ffc5a/lxml-5.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:501d0d7e26b4d261fca8132854d845e4988097611ba2531408ec91cf3fd9d20a", size = 5139213 }, - { url = "https://files.pythonhosted.org/packages/02/e2/ecf845b12323c92748077e1818b64e8b4dba509a4cb12920b3762ebe7552/lxml-5.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66442c2546446944437df74379e9cf9e9db353e61301d1a0e26482f43f0dd8", size = 4838814 }, - { url = 
"https://files.pythonhosted.org/packages/12/91/619f9fb72cf75e9ceb8700706f7276f23995f6ad757e6d400fbe35ca4990/lxml-5.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e41506fec7a7f9405b14aa2d5c8abbb4dbbd09d88f9496958b6d00cb4d45330", size = 5425084 }, - { url = "https://files.pythonhosted.org/packages/25/3b/162a85a8f0fd2a3032ec3f936636911c6e9523a8e263fffcfd581ce98b54/lxml-5.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f7d4a670107d75dfe5ad080bed6c341d18c4442f9378c9f58e5851e86eb79965", size = 4875993 }, - { url = "https://files.pythonhosted.org/packages/43/af/dd3f58cc7d946da6ae42909629a2b1d5dd2d1b583334d4af9396697d6863/lxml-5.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41ce1f1e2c7755abfc7e759dc34d7d05fd221723ff822947132dc934d122fe22", size = 5012462 }, - { url = "https://files.pythonhosted.org/packages/69/c1/5ea46b2d4c98f5bf5c83fffab8a0ad293c9bc74df9ecfbafef10f77f7201/lxml-5.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:44264ecae91b30e5633013fb66f6ddd05c006d3e0e884f75ce0b4755b3e3847b", size = 4815288 }, - { url = "https://files.pythonhosted.org/packages/1d/51/a0acca077ad35da458f4d3f729ef98effd2b90f003440d35fc36323f8ae6/lxml-5.3.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:3c174dc350d3ec52deb77f2faf05c439331d6ed5e702fc247ccb4e6b62d884b7", size = 5472435 }, - { url = "https://files.pythonhosted.org/packages/4d/6b/0989c9368986961a6b0f55b46c80404c4b758417acdb6d87bfc3bd5f4967/lxml-5.3.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:2dfab5fa6a28a0b60a20638dc48e6343c02ea9933e3279ccb132f555a62323d8", size = 4976354 }, - { url = "https://files.pythonhosted.org/packages/05/9e/87492d03ff604fbf656ed2bf3e2e8d28f5d58ea1f00ff27ac27b06509079/lxml-5.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b1c8c20847b9f34e98080da785bb2336ea982e7f913eed5809e5a3c872900f32", size = 5029973 }, - { url = "https://files.pythonhosted.org/packages/f9/cc/9ae1baf5472af88e19e2c454b3710c1be9ecafb20eb474eeabcd88a055d2/lxml-5.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c86bf781b12ba417f64f3422cfc302523ac9cd1d8ae8c0f92a1c66e56ef2e86", size = 4888837 }, - { url = "https://files.pythonhosted.org/packages/d2/10/5594ffaec8c120d75b17e3ad23439b740a51549a9b5fd7484b2179adfe8f/lxml-5.3.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c162b216070f280fa7da844531169be0baf9ccb17263cf5a8bf876fcd3117fa5", size = 5530555 }, - { url = "https://files.pythonhosted.org/packages/ea/9b/de17f05377c8833343b629905571fb06cff2028f15a6f58ae2267662e341/lxml-5.3.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:36aef61a1678cb778097b4a6eeae96a69875d51d1e8f4d4b491ab3cfb54b5a03", size = 5405314 }, - { url = "https://files.pythonhosted.org/packages/8a/b4/227be0f1f3cca8255925985164c3838b8b36e441ff0cc10c1d3c6bdba031/lxml-5.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f65e5120863c2b266dbcc927b306c5b78e502c71edf3295dfcb9501ec96e5fc7", size = 5079303 }, - { url = "https://files.pythonhosted.org/packages/5c/ee/19abcebb7fc40319bb71cd6adefa1ad94d09b5660228715854d6cc420713/lxml-5.3.0-cp310-cp310-win32.whl", hash = "sha256:ef0c1fe22171dd7c7c27147f2e9c3e86f8bdf473fed75f16b0c2e84a5030ce80", size = 3475126 }, - { url = "https://files.pythonhosted.org/packages/a1/35/183d32551447e280032b2331738cd850da435a42f850b71ebeaab42c1313/lxml-5.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:052d99051e77a4f3e8482c65014cf6372e61b0a6f4fe9edb98503bb5364cfee3", size = 3805065 }, - { url = 
"https://files.pythonhosted.org/packages/5c/a8/449faa2a3cbe6a99f8d38dcd51a3ee8844c17862841a6f769ea7c2a9cd0f/lxml-5.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:74bcb423462233bc5d6066e4e98b0264e7c1bed7541fff2f4e34fe6b21563c8b", size = 8141056 }, - { url = "https://files.pythonhosted.org/packages/ac/8a/ae6325e994e2052de92f894363b038351c50ee38749d30cc6b6d96aaf90f/lxml-5.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3d819eb6f9b8677f57f9664265d0a10dd6551d227afb4af2b9cd7bdc2ccbf18", size = 4425238 }, - { url = "https://files.pythonhosted.org/packages/f8/fb/128dddb7f9086236bce0eeae2bfb316d138b49b159f50bc681d56c1bdd19/lxml-5.3.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b8f5db71b28b8c404956ddf79575ea77aa8b1538e8b2ef9ec877945b3f46442", size = 5095197 }, - { url = "https://files.pythonhosted.org/packages/b4/f9/a181a8ef106e41e3086629c8bdb2d21a942f14c84a0e77452c22d6b22091/lxml-5.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3406b63232fc7e9b8783ab0b765d7c59e7c59ff96759d8ef9632fca27c7ee4", size = 4809809 }, - { url = "https://files.pythonhosted.org/packages/25/2f/b20565e808f7f6868aacea48ddcdd7e9e9fb4c799287f21f1a6c7c2e8b71/lxml-5.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ecdd78ab768f844c7a1d4a03595038c166b609f6395e25af9b0f3f26ae1230f", size = 5407593 }, - { url = "https://files.pythonhosted.org/packages/23/0e/caac672ec246d3189a16c4d364ed4f7d6bf856c080215382c06764058c08/lxml-5.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168f2dfcfdedf611eb285efac1516c8454c8c99caf271dccda8943576b67552e", size = 4866657 }, - { url = "https://files.pythonhosted.org/packages/67/a4/1f5fbd3f58d4069000522196b0b776a014f3feec1796da03e495cf23532d/lxml-5.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa617107a410245b8660028a7483b68e7914304a6d4882b5ff3d2d3eb5948d8c", size = 4967017 }, - { url = "https://files.pythonhosted.org/packages/ee/73/623ecea6ca3c530dd0a4ed0d00d9702e0e85cd5624e2d5b93b005fe00abd/lxml-5.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:69959bd3167b993e6e710b99051265654133a98f20cec1d9b493b931942e9c16", size = 4810730 }, - { url = "https://files.pythonhosted.org/packages/1d/ce/fb84fb8e3c298f3a245ae3ea6221c2426f1bbaa82d10a88787412a498145/lxml-5.3.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:bd96517ef76c8654446fc3db9242d019a1bb5fe8b751ba414765d59f99210b79", size = 5455154 }, - { url = "https://files.pythonhosted.org/packages/b1/72/4d1ad363748a72c7c0411c28be2b0dc7150d91e823eadad3b91a4514cbea/lxml-5.3.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ab6dd83b970dc97c2d10bc71aa925b84788c7c05de30241b9e96f9b6d9ea3080", size = 4969416 }, - { url = "https://files.pythonhosted.org/packages/42/07/b29571a58a3a80681722ea8ed0ba569211d9bb8531ad49b5cacf6d409185/lxml-5.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eec1bb8cdbba2925bedc887bc0609a80e599c75b12d87ae42ac23fd199445654", size = 5013672 }, - { url = "https://files.pythonhosted.org/packages/b9/93/bde740d5a58cf04cbd38e3dd93ad1e36c2f95553bbf7d57807bc6815d926/lxml-5.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a7095eeec6f89111d03dabfe5883a1fd54da319c94e0fb104ee8f23616b572d", size = 4878644 }, - { url = "https://files.pythonhosted.org/packages/56/b5/645c8c02721d49927c93181de4017164ec0e141413577687c3df8ff0800f/lxml-5.3.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:6f651ebd0b21ec65dfca93aa629610a0dbc13dbc13554f19b0113da2e61a4763", size = 5511531 }, - { url = "https://files.pythonhosted.org/packages/85/3f/6a99a12d9438316f4fc86ef88c5d4c8fb674247b17f3173ecadd8346b671/lxml-5.3.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f422a209d2455c56849442ae42f25dbaaba1c6c3f501d58761c619c7836642ec", size = 5402065 }, - { url = "https://files.pythonhosted.org/packages/80/8a/df47bff6ad5ac57335bf552babfb2408f9eb680c074ec1ba412a1a6af2c5/lxml-5.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:62f7fdb0d1ed2065451f086519865b4c90aa19aed51081979ecd05a21eb4d1be", size = 5069775 }, - { url = "https://files.pythonhosted.org/packages/08/ae/e7ad0f0fbe4b6368c5ee1e3ef0c3365098d806d42379c46c1ba2802a52f7/lxml-5.3.0-cp311-cp311-win32.whl", hash = "sha256:c6379f35350b655fd817cd0d6cbeef7f265f3ae5fedb1caae2eb442bbeae9ab9", size = 3474226 }, - { url = "https://files.pythonhosted.org/packages/c3/b5/91c2249bfac02ee514ab135e9304b89d55967be7e53e94a879b74eec7a5c/lxml-5.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c52100e2c2dbb0649b90467935c4b0de5528833c76a35ea1a2691ec9f1ee7a1", size = 3814971 }, - { url = "https://files.pythonhosted.org/packages/eb/6d/d1f1c5e40c64bf62afd7a3f9b34ce18a586a1cccbf71e783cd0a6d8e8971/lxml-5.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e99f5507401436fdcc85036a2e7dc2e28d962550afe1cbfc07c40e454256a859", size = 8171753 }, - { url = "https://files.pythonhosted.org/packages/bd/83/26b1864921869784355459f374896dcf8b44d4af3b15d7697e9156cb2de9/lxml-5.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:384aacddf2e5813a36495233b64cb96b1949da72bef933918ba5c84e06af8f0e", size = 4441955 }, - { url = "https://files.pythonhosted.org/packages/e0/d2/e9bff9fb359226c25cda3538f664f54f2804f4b37b0d7c944639e1a51f69/lxml-5.3.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:874a216bf6afaf97c263b56371434e47e2c652d215788396f60477540298218f", size = 5050778 }, - { url = "https://files.pythonhosted.org/packages/88/69/6972bfafa8cd3ddc8562b126dd607011e218e17be313a8b1b9cc5a0ee876/lxml-5.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65ab5685d56914b9a2a34d67dd5488b83213d680b0c5d10b47f81da5a16b0b0e", size = 4748628 }, - { url = "https://files.pythonhosted.org/packages/5d/ea/a6523c7c7f6dc755a6eed3d2f6d6646617cad4d3d6d8ce4ed71bfd2362c8/lxml-5.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aac0bbd3e8dd2d9c45ceb82249e8bdd3ac99131a32b4d35c8af3cc9db1657179", size = 5322215 }, - { url = "https://files.pythonhosted.org/packages/99/37/396fbd24a70f62b31d988e4500f2068c7f3fd399d2fd45257d13eab51a6f/lxml-5.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b369d3db3c22ed14c75ccd5af429086f166a19627e84a8fdade3f8f31426e52a", size = 4813963 }, - { url = "https://files.pythonhosted.org/packages/09/91/e6136f17459a11ce1757df864b213efbeab7adcb2efa63efb1b846ab6723/lxml-5.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24037349665434f375645fa9d1f5304800cec574d0310f618490c871fd902b3", size = 4923353 }, - { url = "https://files.pythonhosted.org/packages/1d/7c/2eeecf87c9a1fca4f84f991067c693e67340f2b7127fc3eca8fa29d75ee3/lxml-5.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:62d172f358f33a26d6b41b28c170c63886742f5b6772a42b59b4f0fa10526cb1", size = 4740541 }, - { url = 
"https://files.pythonhosted.org/packages/3b/ed/4c38ba58defca84f5f0d0ac2480fdcd99fc7ae4b28fc417c93640a6949ae/lxml-5.3.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:c1f794c02903c2824fccce5b20c339a1a14b114e83b306ff11b597c5f71a1c8d", size = 5346504 }, - { url = "https://files.pythonhosted.org/packages/a5/22/bbd3995437e5745cb4c2b5d89088d70ab19d4feabf8a27a24cecb9745464/lxml-5.3.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:5d6a6972b93c426ace71e0be9a6f4b2cfae9b1baed2eed2006076a746692288c", size = 4898077 }, - { url = "https://files.pythonhosted.org/packages/0a/6e/94537acfb5b8f18235d13186d247bca478fea5e87d224644e0fe907df976/lxml-5.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:3879cc6ce938ff4eb4900d901ed63555c778731a96365e53fadb36437a131a99", size = 4946543 }, - { url = "https://files.pythonhosted.org/packages/8d/e8/4b15df533fe8e8d53363b23a41df9be907330e1fa28c7ca36893fad338ee/lxml-5.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:74068c601baff6ff021c70f0935b0c7bc528baa8ea210c202e03757c68c5a4ff", size = 4816841 }, - { url = "https://files.pythonhosted.org/packages/1a/e7/03f390ea37d1acda50bc538feb5b2bda6745b25731e4e76ab48fae7106bf/lxml-5.3.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ecd4ad8453ac17bc7ba3868371bffb46f628161ad0eefbd0a855d2c8c32dd81a", size = 5417341 }, - { url = "https://files.pythonhosted.org/packages/ea/99/d1133ab4c250da85a883c3b60249d3d3e7c64f24faff494cf0fd23f91e80/lxml-5.3.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7e2f58095acc211eb9d8b5771bf04df9ff37d6b87618d1cbf85f92399c98dae8", size = 5327539 }, - { url = "https://files.pythonhosted.org/packages/7d/ed/e6276c8d9668028213df01f598f385b05b55a4e1b4662ee12ef05dab35aa/lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d", size = 5012542 }, - { url = "https://files.pythonhosted.org/packages/36/88/684d4e800f5aa28df2a991a6a622783fb73cf0e46235cfa690f9776f032e/lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30", size = 3486454 }, - { url = "https://files.pythonhosted.org/packages/fc/82/ace5a5676051e60355bd8fb945df7b1ba4f4fb8447f2010fb816bfd57724/lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f", size = 3816857 }, - { url = "https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, - { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, - { url = "https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, - { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, - { url = 
"https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, - { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, - { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", size = 4918597 }, - { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, - { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, - { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, - { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, - { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, - { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, - { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, - { url = "https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, - { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, - { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, - { url = 
"https://files.pythonhosted.org/packages/99/f7/b73a431c8500565aa500e99e60b448d305eaf7c0b4c893c7c5a8a69cc595/lxml-5.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7b1cd427cb0d5f7393c31b7496419da594fe600e6fdc4b105a54f82405e6626c", size = 3925431 }, - { url = "https://files.pythonhosted.org/packages/db/48/4a206623c0d093d0e3b15f415ffb4345b0bdf661a3d0b15a112948c033c7/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51806cfe0279e06ed8500ce19479d757db42a30fd509940b1701be9c86a5ff9a", size = 4216683 }, - { url = "https://files.pythonhosted.org/packages/54/47/577820c45dd954523ae8453b632d91e76da94ca6d9ee40d8c98dd86f916b/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee70d08fd60c9565ba8190f41a46a54096afa0eeb8f76bd66f2c25d3b1b83005", size = 4326732 }, - { url = "https://files.pythonhosted.org/packages/68/de/96cb6d3269bc994b4f5ede8ca7bf0840f5de0a278bc6e50cb317ff71cafa/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:8dc2c0395bea8254d8daebc76dcf8eb3a95ec2a46fa6fae5eaccee366bfe02ce", size = 4218377 }, - { url = "https://files.pythonhosted.org/packages/a5/43/19b1ef6cbffa4244a217f95cc5f41a6cb4720fed33510a49670b03c5f1a0/lxml-5.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6ba0d3dcac281aad8a0e5b14c7ed6f9fa89c8612b47939fc94f80b16e2e9bc83", size = 4351237 }, - { url = "https://files.pythonhosted.org/packages/ba/b2/6a22fb5c0885da3b00e116aee81f0b829ec9ac8f736cd414b4a09413fc7d/lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba", size = 3487557 }, + { url = "https://files.pythonhosted.org/packages/c1/01/e78fdf09de2b4e7750a402eaa4f6783c7215ededd4bc6fe4a3f6d69c49da/lm_format_enforcer-0.10.9-py3-none-any.whl", hash = "sha256:6f3602d3470f54b3ba10d356ea34cc136afbd13394a360949dd8d943a2f2471e", size = 43940 }, +] + +[[package]] +name = "lxml" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ef/f6/c15ca8e5646e937c148e147244817672cf920b56ac0bf2cc1512ae674be8/lxml-5.3.1.tar.gz", hash = "sha256:106b7b5d2977b339f1e97efe2778e2ab20e99994cbb0ec5e55771ed0795920c8", size = 3678591 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/4b/73426192004a643c11a644ed2346dbe72da164c8e775ea2e70f60e63e516/lxml-5.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4058f16cee694577f7e4dd410263cd0ef75644b43802a689c2b3c2a7e69453b", size = 8142766 }, + { url = "https://files.pythonhosted.org/packages/30/c2/3b28f642b43fdf9580d936e8fdd3ec43c01a97ecfe17fd67f76ce9099752/lxml-5.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:364de8f57d6eda0c16dcfb999af902da31396949efa0e583e12675d09709881b", size = 4422744 }, + { url = "https://files.pythonhosted.org/packages/1f/a5/45279e464174b99d72d25bc018b097f9211c0925a174ca582a415609f036/lxml-5.3.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:528f3a0498a8edc69af0559bdcf8a9f5a8bf7c00051a6ef3141fdcf27017bbf5", size = 5229609 }, + { url = "https://files.pythonhosted.org/packages/f0/e7/10cd8b9e27ffb6b3465b76604725b67b7c70d4e399750ff88de1b38ab9eb/lxml-5.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4743e30d6f5f92b6d2b7c86b3ad250e0bad8dee4b7ad8a0c44bfb276af89a3", size = 4943509 }, + { url = 
"https://files.pythonhosted.org/packages/ce/54/2d6f634924920b17122445136345d44c6d69178c9c49e161aa8f206739d6/lxml-5.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b5d7f8acf809465086d498d62a981fa6a56d2718135bb0e4aa48c502055f5c", size = 5561495 }, + { url = "https://files.pythonhosted.org/packages/a2/fe/7f5ae8fd1f357fcb21b0d4e20416fae870d654380b6487adbcaaf0df9b31/lxml-5.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:928e75a7200a4c09e6efc7482a1337919cc61fe1ba289f297827a5b76d8969c2", size = 4998970 }, + { url = "https://files.pythonhosted.org/packages/af/70/22fecb6f2ca8dc77d14ab6be3cef767ff8340040bc95dca384b5b1cb333a/lxml-5.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a997b784a639e05b9d4053ef3b20c7e447ea80814a762f25b8ed5a89d261eac", size = 5114205 }, + { url = "https://files.pythonhosted.org/packages/63/91/21619cc14f7fd1de3f1bdf86cc8106edacf4d685b540d658d84247a3a32a/lxml-5.3.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7b82e67c5feb682dbb559c3e6b78355f234943053af61606af126df2183b9ef9", size = 4940823 }, + { url = "https://files.pythonhosted.org/packages/50/0f/27183248fa3cdd2040047ceccd320ff1ed1344167f38a4ac26aed092268b/lxml-5.3.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:f1de541a9893cf8a1b1db9bf0bf670a2decab42e3e82233d36a74eda7822b4c9", size = 5585725 }, + { url = "https://files.pythonhosted.org/packages/c6/8d/9b7388d5b23ed2f239a992a478cbd0ce313aaa2d008dd73c4042b190b6a9/lxml-5.3.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:de1fc314c3ad6bc2f6bd5b5a5b9357b8c6896333d27fdbb7049aea8bd5af2d79", size = 5082641 }, + { url = "https://files.pythonhosted.org/packages/65/8e/590e20833220eac55b6abcde71d3ae629d38ac1c3543bcc2bfe1f3c2f5d1/lxml-5.3.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7c0536bd9178f754b277a3e53f90f9c9454a3bd108b1531ffff720e082d824f2", size = 5161219 }, + { url = "https://files.pythonhosted.org/packages/4e/77/cabdf5569fd0415a88ebd1d62d7f2814e71422439b8564aaa03e7eefc069/lxml-5.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68018c4c67d7e89951a91fbd371e2e34cd8cfc71f0bb43b5332db38497025d51", size = 5019293 }, + { url = "https://files.pythonhosted.org/packages/49/bd/f0b6d50ea7b8b54aaa5df4410cb1d5ae6ffa016b8e0503cae08b86c24674/lxml-5.3.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa826340a609d0c954ba52fd831f0fba2a4165659ab0ee1a15e4aac21f302406", size = 5651232 }, + { url = "https://files.pythonhosted.org/packages/fa/69/1793d00a4e3da7f27349edb5a6f3da947ed921263cd9a243fab11c6cbc07/lxml-5.3.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:796520afa499732191e39fc95b56a3b07f95256f2d22b1c26e217fb69a9db5b5", size = 5489527 }, + { url = "https://files.pythonhosted.org/packages/d3/c9/e2449129b6cb2054c898df8d850ea4dadd75b4c33695a6c4b0f35082f1e7/lxml-5.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3effe081b3135237da6e4c4530ff2a868d3f80be0bda027e118a5971285d42d0", size = 5227050 }, + { url = "https://files.pythonhosted.org/packages/ed/63/e5da540eba6ab9a0d4188eeaa5c85767b77cafa8efeb70da0593d6cd3b81/lxml-5.3.1-cp310-cp310-win32.whl", hash = "sha256:a22f66270bd6d0804b02cd49dae2b33d4341015545d17f8426f2c4e22f557a23", size = 3475345 }, + { url = "https://files.pythonhosted.org/packages/08/71/853a3ad812cd24c35b7776977cb0ae40c2b64ff79ad6d6c36c987daffc49/lxml-5.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0bcfadea3cdc68e678d2b20cb16a16716887dd00a881e16f7d806c2138b8ff0c", size = 3805093 }, + { url = 
"https://files.pythonhosted.org/packages/57/bb/2faea15df82114fa27f2a86eec220506c532ee8ce211dff22f48881b353a/lxml-5.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e220f7b3e8656ab063d2eb0cd536fafef396829cafe04cb314e734f87649058f", size = 8161781 }, + { url = "https://files.pythonhosted.org/packages/9f/d3/374114084abb1f96026eccb6cd48b070f85de82fdabae6c2f1e198fa64e5/lxml-5.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f2cfae0688fd01f7056a17367e3b84f37c545fb447d7282cf2c242b16262607", size = 4432571 }, + { url = "https://files.pythonhosted.org/packages/0f/fb/44a46efdc235c2dd763c1e929611d8ff3b920c32b8fcd9051d38f4d04633/lxml-5.3.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67d2f8ad9dcc3a9e826bdc7802ed541a44e124c29b7d95a679eeb58c1c14ade8", size = 5028919 }, + { url = "https://files.pythonhosted.org/packages/3b/e5/168ddf9f16a90b590df509858ae97a8219d6999d5a132ad9f72427454bed/lxml-5.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db0c742aad702fd5d0c6611a73f9602f20aec2007c102630c06d7633d9c8f09a", size = 4769599 }, + { url = "https://files.pythonhosted.org/packages/f9/0e/3e2742c6f4854b202eb8587c1f7ed760179f6a9fcb34a460497c8c8f3078/lxml-5.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:198bb4b4dd888e8390afa4f170d4fa28467a7eaf857f1952589f16cfbb67af27", size = 5369260 }, + { url = "https://files.pythonhosted.org/packages/b8/03/b2f2ab9e33c47609c80665e75efed258b030717e06693835413b34e797cb/lxml-5.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2a3e412ce1849be34b45922bfef03df32d1410a06d1cdeb793a343c2f1fd666", size = 4842798 }, + { url = "https://files.pythonhosted.org/packages/93/ad/0ecfb082b842358c8a9e3115ec944b7240f89821baa8cd7c0cb8a38e05cb/lxml-5.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8969dbc8d09d9cd2ae06362c3bad27d03f433252601ef658a49bd9f2b22d79", size = 4917531 }, + { url = "https://files.pythonhosted.org/packages/64/5b/3e93d8ebd2b7eb984c2ad74dfff75493ce96e7b954b12e4f5fc34a700414/lxml-5.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5be8f5e4044146a69c96077c7e08f0709c13a314aa5315981185c1f00235fe65", size = 4791500 }, + { url = "https://files.pythonhosted.org/packages/91/83/7dc412362ee7a0259c7f64349393262525061fad551a1340ef92c59d9732/lxml-5.3.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:133f3493253a00db2c870d3740bc458ebb7d937bd0a6a4f9328373e0db305709", size = 5404557 }, + { url = "https://files.pythonhosted.org/packages/1e/41/c337f121d9dca148431f246825e021fa1a3f66a6b975deab1950530fdb04/lxml-5.3.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:52d82b0d436edd6a1d22d94a344b9a58abd6c68c357ed44f22d4ba8179b37629", size = 4931386 }, + { url = "https://files.pythonhosted.org/packages/a5/73/762c319c4906b3db67e4abc7cfe7d66c34996edb6d0e8cb60f462954d662/lxml-5.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b6f92e35e2658a5ed51c6634ceb5ddae32053182851d8cad2a5bc102a359b33", size = 4982124 }, + { url = "https://files.pythonhosted.org/packages/c1/e7/d1e296cb3b3b46371220a31350730948d7bea41cc9123c5fd219dea33c29/lxml-5.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:203b1d3eaebd34277be06a3eb880050f18a4e4d60861efba4fb946e31071a295", size = 4852742 }, + { url = "https://files.pythonhosted.org/packages/df/90/4adc854475105b93ead6c0c736f762d29371751340dcf5588cfcf8191b8a/lxml-5.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:155e1a5693cf4b55af652f5c0f78ef36596c7f680ff3ec6eb4d7d85367259b2c", size = 5457004 }, + { url = "https://files.pythonhosted.org/packages/f0/0d/39864efbd231c13eb53edee2ab91c742c24d2f93efe2af7d3fe4343e42c1/lxml-5.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22ec2b3c191f43ed21f9545e9df94c37c6b49a5af0a874008ddc9132d49a2d9c", size = 5298185 }, + { url = "https://files.pythonhosted.org/packages/8d/7a/630a64ceb1088196de182e2e33b5899691c3e1ae21af688e394208bd6810/lxml-5.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7eda194dd46e40ec745bf76795a7cccb02a6a41f445ad49d3cf66518b0bd9cff", size = 5032707 }, + { url = "https://files.pythonhosted.org/packages/b2/3d/091bc7b592333754cb346c1507ca948ab39bc89d83577ac8f1da3be4dece/lxml-5.3.1-cp311-cp311-win32.whl", hash = "sha256:fb7c61d4be18e930f75948705e9718618862e6fc2ed0d7159b2262be73f167a2", size = 3474288 }, + { url = "https://files.pythonhosted.org/packages/12/8c/7d47cfc0d04fd4e3639ec7e1c96c2561d5e890eb900de8f76eea75e0964a/lxml-5.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c809eef167bf4a57af4b03007004896f5c60bd38dc3852fcd97a26eae3d4c9e6", size = 3815031 }, + { url = "https://files.pythonhosted.org/packages/3b/f4/5121aa9ee8e09b8b8a28cf3709552efe3d206ca51a20d6fa471b60bb3447/lxml-5.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e69add9b6b7b08c60d7ff0152c7c9a6c45b4a71a919be5abde6f98f1ea16421c", size = 8191889 }, + { url = "https://files.pythonhosted.org/packages/0a/ca/8e9aa01edddc74878f4aea85aa9ab64372f46aa804d1c36dda861bf9eabf/lxml-5.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e52e1b148867b01c05e21837586ee307a01e793b94072d7c7b91d2c2da02ffe", size = 4450685 }, + { url = "https://files.pythonhosted.org/packages/b2/b3/ea40a5c98619fbd7e9349df7007994506d396b97620ced34e4e5053d3734/lxml-5.3.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b382e0e636ed54cd278791d93fe2c4f370772743f02bcbe431a160089025c9", size = 5051722 }, + { url = "https://files.pythonhosted.org/packages/3a/5e/375418be35f8a695cadfe7e7412f16520e62e24952ed93c64c9554755464/lxml-5.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e49dc23a10a1296b04ca9db200c44d3eb32c8d8ec532e8c1fd24792276522a", size = 4786661 }, + { url = "https://files.pythonhosted.org/packages/79/7c/d258eaaa9560f6664f9b426a5165103015bee6512d8931e17342278bad0a/lxml-5.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4399b4226c4785575fb20998dc571bc48125dc92c367ce2602d0d70e0c455eb0", size = 5311766 }, + { url = "https://files.pythonhosted.org/packages/03/bc/a041415be4135a1b3fdf017a5d873244cc16689456166fbdec4b27fba153/lxml-5.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5412500e0dc5481b1ee9cf6b38bb3b473f6e411eb62b83dc9b62699c3b7b79f7", size = 4836014 }, + { url = "https://files.pythonhosted.org/packages/32/88/047f24967d5e3fc97848ea2c207eeef0f16239cdc47368c8b95a8dc93a33/lxml-5.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c93ed3c998ea8472be98fb55aed65b5198740bfceaec07b2eba551e55b7b9ae", size = 4961064 }, + { url = "https://files.pythonhosted.org/packages/3d/b5/ecf5a20937ecd21af02c5374020f4e3a3538e10a32379a7553fca3d77094/lxml-5.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:63d57fc94eb0bbb4735e45517afc21ef262991d8758a8f2f05dd6e4174944519", size = 4778341 }, + { url = 
"https://files.pythonhosted.org/packages/a4/05/56c359e07275911ed5f35ab1d63c8cd3360d395fb91e43927a2ae90b0322/lxml-5.3.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:b450d7cabcd49aa7ab46a3c6aa3ac7e1593600a1a0605ba536ec0f1b99a04322", size = 5345450 }, + { url = "https://files.pythonhosted.org/packages/b7/f4/f95e3ae12e9f32fbcde00f9affa6b0df07f495117f62dbb796a9a31c84d6/lxml-5.3.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:4df0ec814b50275ad6a99bc82a38b59f90e10e47714ac9871e1b223895825468", size = 4908336 }, + { url = "https://files.pythonhosted.org/packages/c5/f8/309546aec092434166a6e11c7dcecb5c2d0a787c18c072d61e18da9eba57/lxml-5.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d184f85ad2bb1f261eac55cddfcf62a70dee89982c978e92b9a74a1bfef2e367", size = 4986049 }, + { url = "https://files.pythonhosted.org/packages/71/1c/b951817cb5058ca7c332d012dfe8bc59dabd0f0a8911ddd7b7ea8e41cfbd/lxml-5.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b725e70d15906d24615201e650d5b0388b08a5187a55f119f25874d0103f90dd", size = 4860351 }, + { url = "https://files.pythonhosted.org/packages/31/23/45feba8dae1d35fcca1e51b051f59dc4223cbd23e071a31e25f3f73938a8/lxml-5.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a31fa7536ec1fb7155a0cd3a4e3d956c835ad0a43e3610ca32384d01f079ea1c", size = 5421580 }, + { url = "https://files.pythonhosted.org/packages/61/69/be245d7b2dbef81c542af59c97fcd641fbf45accf2dc1c325bae7d0d014c/lxml-5.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3c3c8b55c7fc7b7e8877b9366568cc73d68b82da7fe33d8b98527b73857a225f", size = 5285778 }, + { url = "https://files.pythonhosted.org/packages/69/06/128af2ed04bac99b8f83becfb74c480f1aa18407b5c329fad457e08a1bf4/lxml-5.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d61ec60945d694df806a9aec88e8f29a27293c6e424f8ff91c80416e3c617645", size = 5054455 }, + { url = "https://files.pythonhosted.org/packages/8a/2d/f03a21cf6cc75cdd083563e509c7b6b159d761115c4142abb5481094ed8c/lxml-5.3.1-cp312-cp312-win32.whl", hash = "sha256:f4eac0584cdc3285ef2e74eee1513a6001681fd9753b259e8159421ed28a72e5", size = 3486315 }, + { url = "https://files.pythonhosted.org/packages/2b/9c/8abe21585d20ef70ad9cec7562da4332b764ed69ec29b7389d23dfabcea0/lxml-5.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:29bfc8d3d88e56ea0a27e7c4897b642706840247f59f4377d81be8f32aa0cfbf", size = 3816925 }, + { url = "https://files.pythonhosted.org/packages/94/1c/724931daa1ace168e0237b929e44062545bf1551974102a5762c349c668d/lxml-5.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c093c7088b40d8266f57ed71d93112bd64c6724d31f0794c1e52cc4857c28e0e", size = 8171881 }, + { url = "https://files.pythonhosted.org/packages/67/0c/857b8fb6010c4246e66abeebb8639eaabba60a6d9b7c606554ecc5cbf1ee/lxml-5.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0884e3f22d87c30694e625b1e62e6f30d39782c806287450d9dc2fdf07692fd", size = 4440394 }, + { url = "https://files.pythonhosted.org/packages/61/72/c9e81de6a000f9682ccdd13503db26e973b24c68ac45a7029173237e3eed/lxml-5.3.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1637fa31ec682cd5760092adfabe86d9b718a75d43e65e211d5931809bc111e7", size = 5037860 }, + { url = "https://files.pythonhosted.org/packages/24/26/942048c4b14835711b583b48cd7209bd2b5f0b6939ceed2381a494138b14/lxml-5.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a364e8e944d92dcbf33b6b494d4e0fb3499dcc3bd9485beb701aa4b4201fa414", size = 4782513 }, + { url = 
"https://files.pythonhosted.org/packages/e2/65/27792339caf00f610cc5be32b940ba1e3009b7054feb0c4527cebac228d4/lxml-5.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:779e851fd0e19795ccc8a9bb4d705d6baa0ef475329fe44a13cf1e962f18ff1e", size = 5305227 }, + { url = "https://files.pythonhosted.org/packages/18/e1/25f7aa434a4d0d8e8420580af05ea49c3e12db6d297cf5435ac0a054df56/lxml-5.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4393600915c308e546dc7003d74371744234e8444a28622d76fe19b98fa59d1", size = 4829846 }, + { url = "https://files.pythonhosted.org/packages/fe/ed/faf235e0792547d24f61ee1448159325448a7e4f2ab706503049d8e5df19/lxml-5.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:673b9d8e780f455091200bba8534d5f4f465944cbdd61f31dc832d70e29064a5", size = 4949495 }, + { url = "https://files.pythonhosted.org/packages/e5/e1/8f572ad9ed6039ba30f26dd4c2c58fb90f79362d2ee35ca3820284767672/lxml-5.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2e4a570f6a99e96c457f7bec5ad459c9c420ee80b99eb04cbfcfe3fc18ec6423", size = 4773415 }, + { url = "https://files.pythonhosted.org/packages/a3/75/6b57166b9d1983dac8f28f354e38bff8d6bcab013a241989c4d54c72701b/lxml-5.3.1-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:71f31eda4e370f46af42fc9f264fafa1b09f46ba07bdbee98f25689a04b81c20", size = 5337710 }, + { url = "https://files.pythonhosted.org/packages/cc/71/4aa56e2daa83bbcc66ca27b5155be2f900d996f5d0c51078eaaac8df9547/lxml-5.3.1-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:42978a68d3825eaac55399eb37a4d52012a205c0c6262199b8b44fcc6fd686e8", size = 4897362 }, + { url = "https://files.pythonhosted.org/packages/65/10/3fa2da152cd9b49332fd23356ed7643c9b74cad636ddd5b2400a9730d12b/lxml-5.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8b1942b3e4ed9ed551ed3083a2e6e0772de1e5e3aca872d955e2e86385fb7ff9", size = 4977795 }, + { url = "https://files.pythonhosted.org/packages/de/d2/e1da0f7b20827e7b0ce934963cb6334c1b02cf1bb4aecd218c4496880cb3/lxml-5.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:85c4f11be9cf08917ac2a5a8b6e1ef63b2f8e3799cec194417e76826e5f1de9c", size = 4858104 }, + { url = "https://files.pythonhosted.org/packages/a5/35/063420e1b33d3308f5aa7fcbdd19ef6c036f741c9a7a4bd5dc8032486b27/lxml-5.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:231cf4d140b22a923b1d0a0a4e0b4f972e5893efcdec188934cc65888fd0227b", size = 5416531 }, + { url = "https://files.pythonhosted.org/packages/c3/83/93a6457d291d1e37adfb54df23498101a4701834258c840381dd2f6a030e/lxml-5.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5865b270b420eda7b68928d70bb517ccbe045e53b1a428129bb44372bf3d7dd5", size = 5273040 }, + { url = "https://files.pythonhosted.org/packages/39/25/ad4ac8fac488505a2702656550e63c2a8db3a4fd63db82a20dad5689cecb/lxml-5.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dbf7bebc2275016cddf3c997bf8a0f7044160714c64a9b83975670a04e6d2252", size = 5050951 }, + { url = "https://files.pythonhosted.org/packages/82/74/f7d223c704c87e44b3d27b5e0dde173a2fcf2e89c0524c8015c2b3554876/lxml-5.3.1-cp313-cp313-win32.whl", hash = "sha256:d0751528b97d2b19a388b302be2a0ee05817097bab46ff0ed76feeec24951f78", size = 3485357 }, + { url = "https://files.pythonhosted.org/packages/80/83/8c54533b3576f4391eebea88454738978669a6cad0d8e23266224007939d/lxml-5.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:91fb6a43d72b4f8863d21f347a9163eecbf36e76e2f51068d59cd004c506f332", size = 3814484 }, + { url = 
"https://files.pythonhosted.org/packages/d2/b4/89a68d05f267f05cc1b8b2f289a8242955705b1b0a9d246198227817ee46/lxml-5.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:afa578b6524ff85fb365f454cf61683771d0170470c48ad9d170c48075f86725", size = 3936118 }, + { url = "https://files.pythonhosted.org/packages/7f/0d/c034a541e7a1153527d7880c62493a74f2277f38e64de2480cadd0d4cf96/lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f5e80adf0aafc7b5454f2c1cb0cde920c9b1f2cbd0485f07cc1d0497c35c5d", size = 4233690 }, + { url = "https://files.pythonhosted.org/packages/35/5c/38e183c2802f14fbdaa75c3266e11d0ca05c64d78e8cdab2ee84e954a565/lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd0b80ac2d8f13ffc906123a6f20b459cb50a99222d0da492360512f3e50f84", size = 4349569 }, + { url = "https://files.pythonhosted.org/packages/18/5b/14f93b359b3c29673d5d282bc3a6edb3a629879854a77541841aba37607f/lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:422c179022ecdedbe58b0e242607198580804253da220e9454ffe848daa1cfd2", size = 4236731 }, + { url = "https://files.pythonhosted.org/packages/f6/08/8471de65f3dee70a3a50e7082fd7409f0ac7a1ace777c13fca4aea1a5759/lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:524ccfded8989a6595dbdda80d779fb977dbc9a7bc458864fc9a0c2fc15dc877", size = 4373119 }, + { url = "https://files.pythonhosted.org/packages/83/29/00b9b0322a473aee6cda87473401c9abb19506cd650cc69a8aa38277ea74/lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499", size = 3487718 }, ] [[package]] @@ -1007,6 +1192,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, ] +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1018,7 +1212,7 @@ wheels = [ [[package]] name = "myst-parser" -version = "4.0.0" +version = "4.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docutils" }, @@ -1028,9 +1222,9 @@ dependencies = [ { name = "pyyaml" }, { name = "sphinx" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/85/55/6d1741a1780e5e65038b74bce6689da15f620261c490c3511eb4c12bac4b/myst_parser-4.0.0.tar.gz", hash = "sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531", size = 93858 } +sdist = { url = "https://files.pythonhosted.org/packages/66/a5/9626ba4f73555b3735ad86247a8077d4603aa8628537687c839ab08bfe44/myst_parser-4.0.1.tar.gz", hash = "sha256:5cfea715e4f3574138aecbf7d54132296bfd72bb614d31168f48c477a830a7c4", size = 93985 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/ca/b4/b036f8fdb667587bb37df29dc6644681dd78b7a2a6321a34684b79412b28/myst_parser-4.0.0-py3-none-any.whl", hash = "sha256:b9317997552424448c6096c2558872fdb6f81d3ecb3a40ce84a7518798f3f28d", size = 84563 }, + { url = "https://files.pythonhosted.org/packages/5f/df/76d0321c3797b54b60fef9ec3bd6f4cfd124b9e422182156a1dd418722cf/myst_parser-4.0.1-py3-none-any.whl", hash = "sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d", size = 84579 }, ] [[package]] @@ -1073,6 +1267,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195 }, ] +[[package]] +name = "networkx" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 }, +] + [[package]] name = "nodeenv" version = "1.9.1" @@ -1084,64 +1287,178 @@ wheels = [ [[package]] name = "numpy" -version = "2.2.2" +version = "2.2.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ec/d0/c12ddfd3a02274be06ffc71f3efc6d0e457b0409c4481596881e748cb264/numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f", size = 20233295 } +sdist = { url = "https://files.pythonhosted.org/packages/fb/90/8956572f5c4ae52201fdec7ba2044b2c882832dcec7d5d0922c9e9acf2de/numpy-2.2.3.tar.gz", hash = "sha256:dbdc15f0c81611925f382dfa97b3bd0bc2c1ce19d4fe50482cb0ddc12ba30020", size = 20262700 } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/2a/69033dc22d981ad21325314f8357438078f5c28310a6d89fb3833030ec8a/numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e", size = 21215825 }, - { url = "https://files.pythonhosted.org/packages/31/2c/39f91e00bbd3d5639b027ac48c55dc5f2992bd2b305412d26be4c830862a/numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e", size = 14354996 }, - { url = "https://files.pythonhosted.org/packages/0a/2c/d468ebd253851af10de5b3e8f3418ebabfaab5f0337a75299fbeb8b8c17a/numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715", size = 5393621 }, - { url = "https://files.pythonhosted.org/packages/7f/f4/3d8a5a0da297034106c5de92be881aca7079cde6058934215a1de91334f6/numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a", size = 6928931 }, - { url = "https://files.pythonhosted.org/packages/47/a7/029354ab56edd43dd3f5efbfad292b8844f98b93174f322f82353fa46efa/numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97", size = 14333157 }, - { url = 
"https://files.pythonhosted.org/packages/e3/d7/11fc594838d35c43519763310c316d4fd56f8600d3fc80a8e13e325b5c5c/numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957", size = 16381794 }, - { url = "https://files.pythonhosted.org/packages/af/d4/dd9b19cd4aff9c79d3f54d17f8be815407520d3116004bc574948336981b/numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d", size = 15543990 }, - { url = "https://files.pythonhosted.org/packages/30/97/ab96b7650f27f684a9b1e46757a7294ecc50cab27701d05f146e9f779627/numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd", size = 18170896 }, - { url = "https://files.pythonhosted.org/packages/81/9b/bae9618cab20db67a2ca9d711795cad29b2ca4b73034dd3b5d05b962070a/numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160", size = 6573458 }, - { url = "https://files.pythonhosted.org/packages/92/9b/95678092febd14070cfb7906ea7932e71e9dd5a6ab3ee948f9ed975e905d/numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014", size = 12915812 }, - { url = "https://files.pythonhosted.org/packages/21/67/32c68756eed84df181c06528ff57e09138f893c4653448c4967311e0f992/numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189", size = 21220002 }, - { url = "https://files.pythonhosted.org/packages/3b/89/f43bcad18f2b2e5814457b1c7f7b0e671d0db12c8c0e43397ab8cb1831ed/numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323", size = 14391215 }, - { url = "https://files.pythonhosted.org/packages/9c/e6/efb8cd6122bf25e86e3dd89d9dbfec9e6861c50e8810eed77d4be59b51c6/numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac", size = 5391918 }, - { url = "https://files.pythonhosted.org/packages/47/e2/fccf89d64d9b47ffb242823d4e851fc9d36fa751908c9aac2807924d9b4e/numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e", size = 6933133 }, - { url = "https://files.pythonhosted.org/packages/34/22/5ece749c0e5420a9380eef6fbf83d16a50010bd18fef77b9193d80a6760e/numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c", size = 14338187 }, - { url = "https://files.pythonhosted.org/packages/5b/86/caec78829311f62afa6fa334c8dfcd79cffb4d24bcf96ee02ae4840d462b/numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f", size = 16393429 }, - { url = "https://files.pythonhosted.org/packages/c8/4e/0c25f74c88239a37924577d6ad780f3212a50f4b4b5f54f5e8c918d726bd/numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826", size = 15559103 }, - { url = "https://files.pythonhosted.org/packages/d4/bd/d557f10fa50dc4d5871fb9606af563249b66af2fc6f99041a10e8757c6f1/numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8", size = 18182967 }, - { url = 
"https://files.pythonhosted.org/packages/30/e9/66cc0f66386d78ed89e45a56e2a1d051e177b6e04477c4a41cd590ef4017/numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50", size = 6571499 }, - { url = "https://files.pythonhosted.org/packages/66/a3/4139296b481ae7304a43581046b8f0a20da6a0dfe0ee47a044cade796603/numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2", size = 12919805 }, - { url = "https://files.pythonhosted.org/packages/0c/e6/847d15770ab7a01e807bdfcd4ead5bdae57c0092b7dc83878171b6af97bb/numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467", size = 20912636 }, - { url = "https://files.pythonhosted.org/packages/d1/af/f83580891577b13bd7e261416120e036d0d8fb508c8a43a73e38928b794b/numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a", size = 14098403 }, - { url = "https://files.pythonhosted.org/packages/2b/86/d019fb60a9d0f1d4cf04b014fe88a9135090adfadcc31c1fadbb071d7fa7/numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825", size = 5128938 }, - { url = "https://files.pythonhosted.org/packages/7a/1b/50985edb6f1ec495a1c36452e860476f5b7ecdc3fc59ea89ccad3c4926c5/numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37", size = 6661937 }, - { url = "https://files.pythonhosted.org/packages/f4/1b/17efd94cad1b9d605c3f8907fb06bcffc4ce4d1d14d46b95316cccccf2b9/numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748", size = 14049518 }, - { url = "https://files.pythonhosted.org/packages/5b/73/65d2f0b698df1731e851e3295eb29a5ab8aa06f763f7e4188647a809578d/numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0", size = 16099146 }, - { url = "https://files.pythonhosted.org/packages/d5/69/308f55c0e19d4b5057b5df286c5433822e3c8039ede06d4051d96f1c2c4e/numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278", size = 15246336 }, - { url = "https://files.pythonhosted.org/packages/f0/d8/d8d333ad0d8518d077a21aeea7b7c826eff766a2b1ce1194dea95ca0bacf/numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba", size = 17863507 }, - { url = "https://files.pythonhosted.org/packages/82/6e/0b84ad3103ffc16d6673e63b5acbe7901b2af96c2837174c6318c98e27ab/numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283", size = 6276491 }, - { url = "https://files.pythonhosted.org/packages/fc/84/7f801a42a67b9772a883223a0a1e12069a14626c81a732bd70aac57aebc1/numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb", size = 12616372 }, - { url = "https://files.pythonhosted.org/packages/e1/fe/df5624001f4f5c3e0b78e9017bfab7fdc18a8d3b3d3161da3d64924dd659/numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc", size = 20899188 }, - { url = 
"https://files.pythonhosted.org/packages/a9/80/d349c3b5ed66bd3cb0214be60c27e32b90a506946857b866838adbe84040/numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369", size = 14113972 }, - { url = "https://files.pythonhosted.org/packages/9d/50/949ec9cbb28c4b751edfa64503f0913cbfa8d795b4a251e7980f13a8a655/numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd", size = 5114294 }, - { url = "https://files.pythonhosted.org/packages/8d/f3/399c15629d5a0c68ef2aa7621d430b2be22034f01dd7f3c65a9c9666c445/numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be", size = 6648426 }, - { url = "https://files.pythonhosted.org/packages/2c/03/c72474c13772e30e1bc2e558cdffd9123c7872b731263d5648b5c49dd459/numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84", size = 14045990 }, - { url = "https://files.pythonhosted.org/packages/83/9c/96a9ab62274ffafb023f8ee08c88d3d31ee74ca58869f859db6845494fa6/numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff", size = 16096614 }, - { url = "https://files.pythonhosted.org/packages/d5/34/cd0a735534c29bec7093544b3a509febc9b0df77718a9b41ffb0809c9f46/numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0", size = 15242123 }, - { url = "https://files.pythonhosted.org/packages/5e/6d/541717a554a8f56fa75e91886d9b79ade2e595918690eb5d0d3dbd3accb9/numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de", size = 17859160 }, - { url = "https://files.pythonhosted.org/packages/b9/a5/fbf1f2b54adab31510728edd06a05c1b30839f37cf8c9747cb85831aaf1b/numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9", size = 6273337 }, - { url = "https://files.pythonhosted.org/packages/56/e5/01106b9291ef1d680f82bc47d0c5b5e26dfed15b0754928e8f856c82c881/numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369", size = 12609010 }, - { url = "https://files.pythonhosted.org/packages/9f/30/f23d9876de0f08dceb707c4dcf7f8dd7588266745029debb12a3cdd40be6/numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391", size = 20924451 }, - { url = "https://files.pythonhosted.org/packages/6a/ec/6ea85b2da9d5dfa1dbb4cb3c76587fc8ddcae580cb1262303ab21c0926c4/numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39", size = 14122390 }, - { url = "https://files.pythonhosted.org/packages/68/05/bfbdf490414a7dbaf65b10c78bc243f312c4553234b6d91c94eb7c4b53c2/numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317", size = 5156590 }, - { url = "https://files.pythonhosted.org/packages/f7/ec/fe2e91b2642b9d6544518388a441bcd65c904cea38d9ff998e2e8ebf808e/numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49", size = 6671958 }, - { url = 
"https://files.pythonhosted.org/packages/b1/6f/6531a78e182f194d33ee17e59d67d03d0d5a1ce7f6be7343787828d1bd4a/numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2", size = 14019950 }, - { url = "https://files.pythonhosted.org/packages/e1/fb/13c58591d0b6294a08cc40fcc6b9552d239d773d520858ae27f39997f2ae/numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7", size = 16079759 }, - { url = "https://files.pythonhosted.org/packages/2c/f2/f2f8edd62abb4b289f65a7f6d1f3650273af00b91b7267a2431be7f1aec6/numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb", size = 15226139 }, - { url = "https://files.pythonhosted.org/packages/aa/29/14a177f1a90b8ad8a592ca32124ac06af5eff32889874e53a308f850290f/numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648", size = 17856316 }, - { url = "https://files.pythonhosted.org/packages/95/03/242ae8d7b97f4e0e4ab8dd51231465fb23ed5e802680d629149722e3faf1/numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4", size = 6329134 }, - { url = "https://files.pythonhosted.org/packages/80/94/cd9e9b04012c015cb6320ab3bf43bc615e248dddfeb163728e800a5d96f0/numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576", size = 12696208 }, - { url = "https://files.pythonhosted.org/packages/96/7e/1dd770ee68916ed358991ab62c2cc353ffd98d0b75b901d52183ca28e8bb/numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495", size = 21047291 }, - { url = "https://files.pythonhosted.org/packages/d1/3c/ccd08578dc532a8e6927952339d4a02682b776d5e85be49ed0760308433e/numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df", size = 6792494 }, - { url = "https://files.pythonhosted.org/packages/7c/28/8754b9aee4f97199f9a047f73bb644b5a2014994a6d7b061ba67134a42de/numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a", size = 16197312 }, - { url = "https://files.pythonhosted.org/packages/26/96/deb93f871f401045a684ca08a009382b247d14996d7a94fea6aa43c67b94/numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60", size = 12822674 }, + { url = "https://files.pythonhosted.org/packages/5e/e1/1816d5d527fa870b260a1c2c5904d060caad7515637bd54f495a5ce13ccd/numpy-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cbc6472e01952d3d1b2772b720428f8b90e2deea8344e854df22b0618e9cce71", size = 21232911 }, + { url = "https://files.pythonhosted.org/packages/29/46/9f25dc19b359f10c0e52b6bac25d3181eb1f4b4d04c9846a32cf5ea52762/numpy-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdfe0c22692a30cd830c0755746473ae66c4a8f2e7bd508b35fb3b6a0813d787", size = 14371955 }, + { url = "https://files.pythonhosted.org/packages/72/d7/de941296e6b09a5c81d3664ad912f1496a0ecdd2f403318e5e35604ff70f/numpy-2.2.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:e37242f5324ffd9f7ba5acf96d774f9276aa62a966c0bad8dae692deebec7716", size = 5410476 }, + 
{ url = "https://files.pythonhosted.org/packages/36/ce/55f685995110f8a268fdca0f198c9a84fa87b39512830965cc1087af6391/numpy-2.2.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95172a21038c9b423e68be78fd0be6e1b97674cde269b76fe269a5dfa6fadf0b", size = 6945730 }, + { url = "https://files.pythonhosted.org/packages/4f/84/abdb9f6e22576d89c259401c3234d4755b322539491bbcffadc8bcb120d3/numpy-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5b47c440210c5d1d67e1cf434124e0b5c395eee1f5806fdd89b553ed1acd0a3", size = 14350752 }, + { url = "https://files.pythonhosted.org/packages/e9/88/3870cfa9bef4dffb3a326507f430e6007eeac258ebeef6b76fc542aef66d/numpy-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0391ea3622f5c51a2e29708877d56e3d276827ac5447d7f45e9bc4ade8923c52", size = 16399386 }, + { url = "https://files.pythonhosted.org/packages/02/10/3f629682dd0b457525c131945329c4e81e2dadeb11256e6ce4c9a1a6fb41/numpy-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f6b3dfc7661f8842babd8ea07e9897fe3d9b69a1d7e5fbb743e4160f9387833b", size = 15561826 }, + { url = "https://files.pythonhosted.org/packages/da/18/fd35673ba9751eba449d4ce5d24d94e3b612cdbfba79348da71488c0b7ac/numpy-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1ad78ce7f18ce4e7df1b2ea4019b5817a2f6a8a16e34ff2775f646adce0a5027", size = 18188593 }, + { url = "https://files.pythonhosted.org/packages/ce/4c/c0f897b580ea59484b4cc96a441fea50333b26675a60a1421bc912268b5f/numpy-2.2.3-cp310-cp310-win32.whl", hash = "sha256:5ebeb7ef54a7be11044c33a17b2624abe4307a75893c001a4800857956b41094", size = 6590421 }, + { url = "https://files.pythonhosted.org/packages/e5/5b/aaabbfc7060c5c8f0124c5deb5e114a3b413a548bbc64e372c5b5db36165/numpy-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:596140185c7fa113563c67c2e894eabe0daea18cf8e33851738c19f70ce86aeb", size = 12925667 }, + { url = "https://files.pythonhosted.org/packages/96/86/453aa3949eab6ff54e2405f9cb0c01f756f031c3dc2a6d60a1d40cba5488/numpy-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:16372619ee728ed67a2a606a614f56d3eabc5b86f8b615c79d01957062826ca8", size = 21237256 }, + { url = "https://files.pythonhosted.org/packages/20/c3/93ecceadf3e155d6a9e4464dd2392d8d80cf436084c714dc8535121c83e8/numpy-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5521a06a3148686d9269c53b09f7d399a5725c47bbb5b35747e1cb76326b714b", size = 14408049 }, + { url = "https://files.pythonhosted.org/packages/8d/29/076999b69bd9264b8df5e56f2be18da2de6b2a2d0e10737e5307592e01de/numpy-2.2.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:7c8dde0ca2f77828815fd1aedfdf52e59071a5bae30dac3b4da2a335c672149a", size = 5408655 }, + { url = "https://files.pythonhosted.org/packages/e2/a7/b14f0a73eb0fe77cb9bd5b44534c183b23d4229c099e339c522724b02678/numpy-2.2.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:77974aba6c1bc26e3c205c2214f0d5b4305bdc719268b93e768ddb17e3fdd636", size = 6949996 }, + { url = "https://files.pythonhosted.org/packages/72/2f/8063da0616bb0f414b66dccead503bd96e33e43685c820e78a61a214c098/numpy-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d42f9c36d06440e34226e8bd65ff065ca0963aeecada587b937011efa02cdc9d", size = 14355789 }, + { url = "https://files.pythonhosted.org/packages/e6/d7/3cd47b00b8ea95ab358c376cf5602ad21871410950bc754cf3284771f8b6/numpy-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2712c5179f40af9ddc8f6727f2bd910ea0eb50206daea75f58ddd9fa3f715bb", size = 
16411356 }, + { url = "https://files.pythonhosted.org/packages/27/c0/a2379e202acbb70b85b41483a422c1e697ff7eee74db642ca478de4ba89f/numpy-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c8b0451d2ec95010d1db8ca733afc41f659f425b7f608af569711097fd6014e2", size = 15576770 }, + { url = "https://files.pythonhosted.org/packages/bc/63/a13ee650f27b7999e5b9e1964ae942af50bb25606d088df4229283eda779/numpy-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d9b4a8148c57ecac25a16b0e11798cbe88edf5237b0df99973687dd866f05e1b", size = 18200483 }, + { url = "https://files.pythonhosted.org/packages/4c/87/e71f89935e09e8161ac9c590c82f66d2321eb163893a94af749dfa8a3cf8/numpy-2.2.3-cp311-cp311-win32.whl", hash = "sha256:1f45315b2dc58d8a3e7754fe4e38b6fce132dab284a92851e41b2b344f6441c5", size = 6588415 }, + { url = "https://files.pythonhosted.org/packages/b9/c6/cd4298729826af9979c5f9ab02fcaa344b82621e7c49322cd2d210483d3f/numpy-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f48ba6f6c13e5e49f3d3efb1b51c8193215c42ac82610a04624906a9270be6f", size = 12929604 }, + { url = "https://files.pythonhosted.org/packages/43/ec/43628dcf98466e087812142eec6d1c1a6c6bdfdad30a0aa07b872dc01f6f/numpy-2.2.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:12c045f43b1d2915eca6b880a7f4a256f59d62df4f044788c8ba67709412128d", size = 20929458 }, + { url = "https://files.pythonhosted.org/packages/9b/c0/2f4225073e99a5c12350954949ed19b5d4a738f541d33e6f7439e33e98e4/numpy-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87eed225fd415bbae787f93a457af7f5990b92a334e346f72070bf569b9c9c95", size = 14115299 }, + { url = "https://files.pythonhosted.org/packages/ca/fa/d2c5575d9c734a7376cc1592fae50257ec95d061b27ee3dbdb0b3b551eb2/numpy-2.2.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:712a64103d97c404e87d4d7c47fb0c7ff9acccc625ca2002848e0d53288b90ea", size = 5145723 }, + { url = "https://files.pythonhosted.org/packages/eb/dc/023dad5b268a7895e58e791f28dc1c60eb7b6c06fcbc2af8538ad069d5f3/numpy-2.2.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a5ae282abe60a2db0fd407072aff4599c279bcd6e9a2475500fc35b00a57c532", size = 6678797 }, + { url = "https://files.pythonhosted.org/packages/3f/19/bcd641ccf19ac25abb6fb1dcd7744840c11f9d62519d7057b6ab2096eb60/numpy-2.2.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5266de33d4c3420973cf9ae3b98b54a2a6d53a559310e3236c4b2b06b9c07d4e", size = 14067362 }, + { url = "https://files.pythonhosted.org/packages/39/04/78d2e7402fb479d893953fb78fa7045f7deb635ec095b6b4f0260223091a/numpy-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b787adbf04b0db1967798dba8da1af07e387908ed1553a0d6e74c084d1ceafe", size = 16116679 }, + { url = "https://files.pythonhosted.org/packages/d0/a1/e90f7aa66512be3150cb9d27f3d9995db330ad1b2046474a13b7040dfd92/numpy-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34c1b7e83f94f3b564b35f480f5652a47007dd91f7c839f404d03279cc8dd021", size = 15264272 }, + { url = "https://files.pythonhosted.org/packages/dc/b6/50bd027cca494de4fa1fc7bf1662983d0ba5f256fa0ece2c376b5eb9b3f0/numpy-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4d8335b5f1b6e2bce120d55fb17064b0262ff29b459e8493d1785c18ae2553b8", size = 17880549 }, + { url = "https://files.pythonhosted.org/packages/96/30/f7bf4acb5f8db10a96f73896bdeed7a63373137b131ca18bd3dab889db3b/numpy-2.2.3-cp312-cp312-win32.whl", hash = "sha256:4d9828d25fb246bedd31e04c9e75714a4087211ac348cb39c8c5f99dbb6683fe", size = 6293394 }, + { url = 
"https://files.pythonhosted.org/packages/42/6e/55580a538116d16ae7c9aa17d4edd56e83f42126cb1dfe7a684da7925d2c/numpy-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:83807d445817326b4bcdaaaf8e8e9f1753da04341eceec705c001ff342002e5d", size = 12626357 }, + { url = "https://files.pythonhosted.org/packages/0e/8b/88b98ed534d6a03ba8cddb316950fe80842885709b58501233c29dfa24a9/numpy-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bfdb06b395385ea9b91bf55c1adf1b297c9fdb531552845ff1d3ea6e40d5aba", size = 20916001 }, + { url = "https://files.pythonhosted.org/packages/d9/b4/def6ec32c725cc5fbd8bdf8af80f616acf075fe752d8a23e895da8c67b70/numpy-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:23c9f4edbf4c065fddb10a4f6e8b6a244342d95966a48820c614891e5059bb50", size = 14130721 }, + { url = "https://files.pythonhosted.org/packages/20/60/70af0acc86495b25b672d403e12cb25448d79a2b9658f4fc45e845c397a8/numpy-2.2.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:a0c03b6be48aaf92525cccf393265e02773be8fd9551a2f9adbe7db1fa2b60f1", size = 5130999 }, + { url = "https://files.pythonhosted.org/packages/2e/69/d96c006fb73c9a47bcb3611417cf178049aae159afae47c48bd66df9c536/numpy-2.2.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:2376e317111daa0a6739e50f7ee2a6353f768489102308b0d98fcf4a04f7f3b5", size = 6665299 }, + { url = "https://files.pythonhosted.org/packages/5a/3f/d8a877b6e48103733ac224ffa26b30887dc9944ff95dffdfa6c4ce3d7df3/numpy-2.2.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fb62fe3d206d72fe1cfe31c4a1106ad2b136fcc1606093aeab314f02930fdf2", size = 14064096 }, + { url = "https://files.pythonhosted.org/packages/e4/43/619c2c7a0665aafc80efca465ddb1f260287266bdbdce517396f2f145d49/numpy-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52659ad2534427dffcc36aac76bebdd02b67e3b7a619ac67543bc9bfe6b7cdb1", size = 16114758 }, + { url = "https://files.pythonhosted.org/packages/d9/79/ee4fe4f60967ccd3897aa71ae14cdee9e3c097e3256975cc9575d393cb42/numpy-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b416af7d0ed3271cad0f0a0d0bee0911ed7eba23e66f8424d9f3dfcdcae1304", size = 15259880 }, + { url = "https://files.pythonhosted.org/packages/fb/c8/8b55cf05db6d85b7a7d414b3d1bd5a740706df00bfa0824a08bf041e52ee/numpy-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1402da8e0f435991983d0a9708b779f95a8c98c6b18a171b9f1be09005e64d9d", size = 17876721 }, + { url = "https://files.pythonhosted.org/packages/21/d6/b4c2f0564b7dcc413117b0ffbb818d837e4b29996b9234e38b2025ed24e7/numpy-2.2.3-cp313-cp313-win32.whl", hash = "sha256:136553f123ee2951bfcfbc264acd34a2fc2f29d7cdf610ce7daf672b6fbaa693", size = 6290195 }, + { url = "https://files.pythonhosted.org/packages/97/e7/7d55a86719d0de7a6a597949f3febefb1009435b79ba510ff32f05a8c1d7/numpy-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5b732c8beef1d7bc2d9e476dbba20aaff6167bf205ad9aa8d30913859e82884b", size = 12619013 }, + { url = "https://files.pythonhosted.org/packages/a6/1f/0b863d5528b9048fd486a56e0b97c18bf705e88736c8cea7239012119a54/numpy-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:435e7a933b9fda8126130b046975a968cc2d833b505475e588339e09f7672890", size = 20944621 }, + { url = "https://files.pythonhosted.org/packages/aa/99/b478c384f7a0a2e0736177aafc97dc9152fc036a3fdb13f5a3ab225f1494/numpy-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7678556eeb0152cbd1522b684dcd215250885993dd00adb93679ec3c0e6e091c", size = 14142502 }, + { url = 
"https://files.pythonhosted.org/packages/fb/61/2d9a694a0f9cd0a839501d362de2a18de75e3004576a3008e56bdd60fcdb/numpy-2.2.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:2e8da03bd561504d9b20e7a12340870dfc206c64ea59b4cfee9fceb95070ee94", size = 5176293 }, + { url = "https://files.pythonhosted.org/packages/33/35/51e94011b23e753fa33f891f601e5c1c9a3d515448659b06df9d40c0aa6e/numpy-2.2.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:c9aa4496fd0e17e3843399f533d62857cef5900facf93e735ef65aa4bbc90ef0", size = 6691874 }, + { url = "https://files.pythonhosted.org/packages/ff/cf/06e37619aad98a9d03bd8d65b8e3041c3a639be0f5f6b0a0e2da544538d4/numpy-2.2.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4ca91d61a4bf61b0f2228f24bbfa6a9facd5f8af03759fe2a655c50ae2c6610", size = 14036826 }, + { url = "https://files.pythonhosted.org/packages/0c/93/5d7d19955abd4d6099ef4a8ee006f9ce258166c38af259f9e5558a172e3e/numpy-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:deaa09cd492e24fd9b15296844c0ad1b3c976da7907e1c1ed3a0ad21dded6f76", size = 16096567 }, + { url = "https://files.pythonhosted.org/packages/af/53/d1c599acf7732d81f46a93621dab6aa8daad914b502a7a115b3f17288ab2/numpy-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:246535e2f7496b7ac85deffe932896a3577be7af8fb7eebe7146444680297e9a", size = 15242514 }, + { url = "https://files.pythonhosted.org/packages/53/43/c0f5411c7b3ea90adf341d05ace762dad8cb9819ef26093e27b15dd121ac/numpy-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:daf43a3d1ea699402c5a850e5313680ac355b4adc9770cd5cfc2940e7861f1bf", size = 17872920 }, + { url = "https://files.pythonhosted.org/packages/5b/57/6dbdd45ab277aff62021cafa1e15f9644a52f5b5fc840bc7591b4079fb58/numpy-2.2.3-cp313-cp313t-win32.whl", hash = "sha256:cf802eef1f0134afb81fef94020351be4fe1d6681aadf9c5e862af6602af64ef", size = 6346584 }, + { url = "https://files.pythonhosted.org/packages/97/9b/484f7d04b537d0a1202a5ba81c6f53f1846ae6c63c2127f8df869ed31342/numpy-2.2.3-cp313-cp313t-win_amd64.whl", hash = "sha256:aee2512827ceb6d7f517c8b85aa5d3923afe8fc7a57d028cffcd522f1c6fd082", size = 12706784 }, + { url = "https://files.pythonhosted.org/packages/0a/b5/a7839f5478be8f859cb880f13d90fcfe4b0ec7a9ebaff2bcc30d96760596/numpy-2.2.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3c2ec8a0f51d60f1e9c0c5ab116b7fc104b165ada3f6c58abf881cb2eb16044d", size = 21064244 }, + { url = "https://files.pythonhosted.org/packages/29/e8/5da32ffcaa7a72f7ecd82f90c062140a061eb823cb88e90279424e515cf4/numpy-2.2.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:ed2cf9ed4e8ebc3b754d398cba12f24359f018b416c380f577bbae112ca52fc9", size = 6809418 }, + { url = "https://files.pythonhosted.org/packages/a8/a9/68aa7076c7656a7308a0f73d0a2ced8c03f282c9fd98fa7ce21c12634087/numpy-2.2.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39261798d208c3095ae4f7bc8eaeb3481ea8c6e03dc48028057d3cbdbdb8937e", size = 16215461 }, + { url = "https://files.pythonhosted.org/packages/17/7f/d322a4125405920401450118dbdc52e0384026bd669939484670ce8b2ab9/numpy-2.2.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:783145835458e60fa97afac25d511d00a1eca94d4a8f3ace9fe2043003c678e4", size = 12839607 }, +] + +[[package]] +name = "ollama" +version = "0.4.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "pydantic" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b0/6d/dc77539c735bbed5d0c873fb029fb86aa9f0163df169b34152914331c369/ollama-0.4.7.tar.gz", hash = "sha256:891dcbe54f55397d82d289c459de0ea897e103b86a3f1fad0fdb1895922a75ff", size = 12843 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/83/c3ffac86906c10184c88c2e916460806b072a2cfe34cdcaf3a0c0e836d39/ollama-0.4.7-py3-none-any.whl", hash = "sha256:85505663cca67a83707be5fb3aeff0ea72e67846cea5985529d8eca4366564a1", size = 13210 }, +] + +[[package]] +name = "openai" +version = "1.63.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4f/32/2049e973a646801df425aecdf88c6504ca878bdb3951fe12076fc30f2977/openai-1.63.0.tar.gz", hash = "sha256:597d7a1b35b113e5a09fcb953bdb1eef44f404a39985f3d7573b3ab09221fd66", size = 356710 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/a0/e1fe4e87218639fc0a0927da5266c2978eaa0e2eb5437479ee64a11535bb/openai-1.63.0-py3-none-any.whl", hash = "sha256:a664dfc78f0a05ca46c3e21f344f840cf6bf7174f13cfa9de214ed28bfca1dda", size = 472282 }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "importlib-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2b/6d/bbbf879826b7f3c89a45252010b5796fb1f1a0d45d9dc4709db0ef9a06c8/opentelemetry_api-1.30.0.tar.gz", hash = "sha256:375893400c1435bf623f7dfb3bcd44825fe6b56c34d0667c542ea8257b1a1240", size = 63703 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/0a/eea862fae6413d8181b23acf8e13489c90a45f17986ee9cf4eab8a0b9ad9/opentelemetry_api-1.30.0-py3-none-any.whl", hash = "sha256:d5f5284890d73fdf47f843dda3210edf37a38d66f44f2b5aedc1e89ed455dc09", size = 64955 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/d7/44098bf1ef89fc5810cdbda05faa2ae9322a0dbda4921cdc965dc68a9856/opentelemetry_exporter_otlp_proto_common-1.30.0.tar.gz", hash = "sha256:ddbfbf797e518411857d0ca062c957080279320d6235a279f7b64ced73c13897", size = 19640 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/54/f4b3de49f8d7d3a78fd6e6e1a6fd27dd342eb4d82c088b9078c6a32c3808/opentelemetry_exporter_otlp_proto_common-1.30.0-py3-none-any.whl", hash = "sha256:5468007c81aa9c44dc961ab2cf368a29d3475977df83b4e30aeed42aa7bc3b38", size = 18747 }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/f9/abb9191d536e6a2e2b7903f8053bf859a76bf784e3ca19a5749550ef19e4/opentelemetry_exporter_otlp_proto_http-1.30.0.tar.gz", hash = "sha256:c3ae75d4181b1e34a60662a6814d0b94dd33b628bee5588a878bed92cee6abdc", size = 15073 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e1/3c/cdf34bc459613f2275aff9b258f35acdc4c4938dad161d17437de5d4c034/opentelemetry_exporter_otlp_proto_http-1.30.0-py3-none-any.whl", hash = "sha256:9578e790e579931c5ffd50f1e6975cbdefb6a0a0a5dea127a6ae87df10e0a589", size = 17245 }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/6e/c1ff2e3b0cd3a189a6be03fd4d63441d73d7addd9117ab5454e667b9b6c7/opentelemetry_proto-1.30.0.tar.gz", hash = "sha256:afe5c9c15e8b68d7c469596e5b32e8fc085eb9febdd6fb4e20924a93a0389179", size = 34362 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/d7/85de6501f7216995295f7ec11e470142e6a6e080baacec1753bbf272e007/opentelemetry_proto-1.30.0-py3-none-any.whl", hash = "sha256:c6290958ff3ddacc826ca5abbeb377a31c2334387352a259ba0df37c243adc11", size = 55854 }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/93/ee/d710062e8a862433d1be0b85920d0c653abe318878fef2d14dfe2c62ff7b/opentelemetry_sdk-1.30.0.tar.gz", hash = "sha256:c9287a9e4a7614b9946e933a67168450b9ab35f08797eb9bc77d998fa480fa18", size = 158633 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/28/64d781d6adc6bda2260067ce2902bd030cf45aec657e02e28c5b4480b976/opentelemetry_sdk-1.30.0-py3-none-any.whl", hash = "sha256:14fe7afc090caad881addb6926cec967129bd9260c4d33ae6a217359f6b61091", size = 118717 }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.51b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "opentelemetry-api" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/c0/0f9ef4605fea7f2b83d55dd0b0d7aebe8feead247cd6facd232b30907b4f/opentelemetry_semantic_conventions-0.51b0.tar.gz", hash = "sha256:3fabf47f35d1fd9aebcdca7e6802d86bd5ebc3bc3408b7e3248dde6e87a18c47", size = 107191 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/75/d7bdbb6fd8630b4cafb883482b75c4fc276b6426619539d266e32ac53266/opentelemetry_semantic_conventions-0.51b0-py3-none-any.whl", hash = "sha256:fdc777359418e8d06c86012c3dc92c88a6453ba662e941593adb062e48c2eeae", size = 177416 }, ] [[package]] @@ -1345,18 +1662,32 @@ wheels = [ ] [[package]] -name = "psutil" -version = "6.1.1" +name = "protobuf" +version = "5.29.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +sdist = { url = "https://files.pythonhosted.org/packages/f7/d1/e0a911544ca9993e0f17ce6d3cc0932752356c1b0a834397f28e63479344/protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620", size = 424945 } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, - { url = 
"https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, - { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, - { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, - { url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, - { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, - { url = "https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, + { url = "https://files.pythonhosted.org/packages/dc/7a/1e38f3cafa022f477ca0f57a1f49962f21ad25850c3ca0acd3b9d0091518/protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888", size = 422708 }, + { url = "https://files.pythonhosted.org/packages/61/fa/aae8e10512b83de633f2646506a6d835b151edf4b30d18d73afd01447253/protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a", size = 434508 }, + { url = "https://files.pythonhosted.org/packages/dd/04/3eaedc2ba17a088961d0e3bd396eac764450f431621b58a04ce898acd126/protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e", size = 417825 }, + { url = "https://files.pythonhosted.org/packages/4f/06/7c467744d23c3979ce250397e26d8ad8eeb2bea7b18ca12ad58313c1b8d5/protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84", size = 319573 }, + { url = "https://files.pythonhosted.org/packages/a8/45/2ebbde52ad2be18d3675b6bee50e68cd73c9e0654de77d595540b5129df8/protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f", size = 319672 }, + { url = "https://files.pythonhosted.org/packages/fd/b2/ab07b09e0f6d143dfb839693aa05765257bceaa13d03bf1a696b78323e7a/protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f", size = 172550 }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = 
"sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051 }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535 }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004 }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986 }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544 }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053 }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885 }, ] [[package]] @@ -1549,6 +1880,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/17/3493c5624e48fd97156ebaec380dcaafee9506d7e2c46218ceebbb57d7de/pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3", size = 19467 }, ] +[[package]] +name = "pytest-html" +version = "4.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "pytest" }, + { name = "pytest-metadata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/ab/4862dcb5a8a514bd87747e06b8d55483c0c9e987e1b66972336946e49b49/pytest_html-4.1.1.tar.gz", hash = "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07", size = 150773 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/c7/c160021cbecd956cc1a6f79e5fe155f7868b2e5b848f1320dad0b3e3122f/pytest_html-4.1.1-py3-none-any.whl", hash = "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71", size = 23491 }, +] + +[[package]] +name = "pytest-metadata" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/85/8c969f8bec4e559f8f2b958a15229a35495f5b4ce499f6b865eac54b878d/pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8", size = 9952 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3e/43/7e7b2ec865caa92f67b8f0e9231a798d102724ca4c0e1f414316be1c1ef2/pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b", size = 11428 }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1913,28 +2270,84 @@ wheels = [ ] [[package]] -name = "ruff" -version = "0.9.4" +name = "ruamel-yaml" +version = "0.18.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c0/17/529e78f49fc6f8076f50d985edd9a2cf011d1dbadb1cdeacc1d12afc1d26/ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7", size = 3599458 } +dependencies = [ + { name = "ruamel-yaml-clib", marker = "python_full_version < '3.13' and platform_python_implementation == 'CPython'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ea/46/f44d8be06b85bc7c4d8c95d658be2b68f27711f279bf9dd0612a5e4794f5/ruamel.yaml-0.18.10.tar.gz", hash = "sha256:20c86ab29ac2153f80a428e1254a8adf686d3383df04490514ca3b79a362db58", size = 143447 } wheels = [ - { url = "https://files.pythonhosted.org/packages/b6/f8/3fafb7804d82e0699a122101b5bee5f0d6e17c3a806dcbc527bb7d3f5b7a/ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706", size = 11668400 }, - { url = "https://files.pythonhosted.org/packages/2e/a6/2efa772d335da48a70ab2c6bb41a096c8517ca43c086ea672d51079e3d1f/ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf", size = 11628395 }, - { url = "https://files.pythonhosted.org/packages/dc/d7/cd822437561082f1c9d7225cc0d0fbb4bad117ad7ac3c41cd5d7f0fa948c/ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b", size = 11090052 }, - { url = "https://files.pythonhosted.org/packages/9e/67/3660d58e893d470abb9a13f679223368ff1684a4ef40f254a0157f51b448/ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137", size = 11882221 }, - { url = "https://files.pythonhosted.org/packages/79/d1/757559995c8ba5f14dfec4459ef2dd3fcea82ac43bc4e7c7bf47484180c0/ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e", size = 11424862 }, - { url = "https://files.pythonhosted.org/packages/c0/96/7915a7c6877bb734caa6a2af424045baf6419f685632469643dbd8eb2958/ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec", size = 12626735 }, - { url = "https://files.pythonhosted.org/packages/0e/cc/dadb9b35473d7cb17c7ffe4737b4377aeec519a446ee8514123ff4a26091/ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b", size = 13255976 }, - { url = "https://files.pythonhosted.org/packages/5f/c3/ad2dd59d3cabbc12df308cced780f9c14367f0321e7800ca0fe52849da4c/ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a", size = 12752262 }, - { url = "https://files.pythonhosted.org/packages/c7/17/5f1971e54bd71604da6788efd84d66d789362b1105e17e5ccc53bba0289b/ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214", size = 14401648 }, - { url = "https://files.pythonhosted.org/packages/30/24/6200b13ea611b83260501b6955b764bb320e23b2b75884c60ee7d3f0b68e/ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231", size = 12414702 }, - { url = "https://files.pythonhosted.org/packages/34/cb/f5d50d0c4ecdcc7670e348bd0b11878154bc4617f3fdd1e8ad5297c0d0ba/ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b", size = 11859608 }, - { url = "https://files.pythonhosted.org/packages/d6/f4/9c8499ae8426da48363bbb78d081b817b0f64a9305f9b7f87eab2a8fb2c1/ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6", size = 11485702 }, - { url = "https://files.pythonhosted.org/packages/18/59/30490e483e804ccaa8147dd78c52e44ff96e1c30b5a95d69a63163cdb15b/ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c", size = 12067782 }, - { url = "https://files.pythonhosted.org/packages/3d/8c/893fa9551760b2f8eb2a351b603e96f15af167ceaf27e27ad873570bc04c/ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0", size = 12483087 }, - { url = "https://files.pythonhosted.org/packages/23/15/f6751c07c21ca10e3f4a51ea495ca975ad936d780c347d9808bcedbd7182/ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402", size = 9852302 }, - { url = "https://files.pythonhosted.org/packages/12/41/2d2d2c6a72e62566f730e49254f602dfed23019c33b5b21ea8f8917315a1/ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e", size = 10850051 }, - { url = "https://files.pythonhosted.org/packages/c6/e6/3d6ec3bc3d254e7f005c543a661a41c3e788976d0e52a1ada195bd664344/ruff-0.9.4-py3-none-win_arm64.whl", hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41", size = 10078251 }, + { url = "https://files.pythonhosted.org/packages/c2/36/dfc1ebc0081e6d39924a2cc53654497f967a084a436bb64402dfce4254d9/ruamel.yaml-0.18.10-py3-none-any.whl", hash = "sha256:30f22513ab2301b3d2b577adc121c6471f28734d3d9728581245f1e76468b4f1", size = 117729 }, +] + +[[package]] +name = "ruamel-yaml-clib" +version = "0.2.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/84/80203abff8ea4993a87d823a5f632e4d92831ef75d404c9fc78d0176d2b5/ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f", size = 225315 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/57/40a958e863e299f0c74ef32a3bde9f2d1ea8d69669368c0c502a0997f57f/ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5", size = 131301 }, + { url = "https://files.pythonhosted.org/packages/98/a8/29a3eb437b12b95f50a6bcc3d7d7214301c6c529d8fdc227247fa84162b5/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969", size = 633728 }, + { url = 
"https://files.pythonhosted.org/packages/35/6d/ae05a87a3ad540259c3ad88d71275cbd1c0f2d30ae04c65dcbfb6dcd4b9f/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df", size = 722230 }, + { url = "https://files.pythonhosted.org/packages/7f/b7/20c6f3c0b656fe609675d69bc135c03aac9e3865912444be6339207b6648/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76", size = 686712 }, + { url = "https://files.pythonhosted.org/packages/cd/11/d12dbf683471f888d354dac59593873c2b45feb193c5e3e0f2ebf85e68b9/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6", size = 663936 }, + { url = "https://files.pythonhosted.org/packages/72/14/4c268f5077db5c83f743ee1daeb236269fa8577133a5cfa49f8b382baf13/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd", size = 696580 }, + { url = "https://files.pythonhosted.org/packages/30/fc/8cd12f189c6405a4c1cf37bd633aa740a9538c8e40497c231072d0fef5cf/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a", size = 663393 }, + { url = "https://files.pythonhosted.org/packages/80/29/c0a017b704aaf3cbf704989785cd9c5d5b8ccec2dae6ac0c53833c84e677/ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da", size = 100326 }, + { url = "https://files.pythonhosted.org/packages/3a/65/fa39d74db4e2d0cd252355732d966a460a41cd01c6353b820a0952432839/ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28", size = 118079 }, + { url = "https://files.pythonhosted.org/packages/fb/8f/683c6ad562f558cbc4f7c029abcd9599148c51c54b5ef0f24f2638da9fbb/ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6", size = 132224 }, + { url = "https://files.pythonhosted.org/packages/3c/d2/b79b7d695e2f21da020bd44c782490578f300dd44f0a4c57a92575758a76/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e", size = 641480 }, + { url = "https://files.pythonhosted.org/packages/68/6e/264c50ce2a31473a9fdbf4fa66ca9b2b17c7455b31ef585462343818bd6c/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e", size = 739068 }, + { url = "https://files.pythonhosted.org/packages/86/29/88c2567bc893c84d88b4c48027367c3562ae69121d568e8a3f3a8d363f4d/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52", size = 703012 }, + { url = "https://files.pythonhosted.org/packages/11/46/879763c619b5470820f0cd6ca97d134771e502776bc2b844d2adb6e37753/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642", size = 704352 }, + { url = 
"https://files.pythonhosted.org/packages/02/80/ece7e6034256a4186bbe50dee28cd032d816974941a6abf6a9d65e4228a7/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2", size = 737344 }, + { url = "https://files.pythonhosted.org/packages/f0/ca/e4106ac7e80efbabdf4bf91d3d32fc424e41418458251712f5672eada9ce/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3", size = 714498 }, + { url = "https://files.pythonhosted.org/packages/67/58/b1f60a1d591b771298ffa0428237afb092c7f29ae23bad93420b1eb10703/ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4", size = 100205 }, + { url = "https://files.pythonhosted.org/packages/b4/4f/b52f634c9548a9291a70dfce26ca7ebce388235c93588a1068028ea23fcc/ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb", size = 118185 }, + { url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433 }, + { url = "https://files.pythonhosted.org/packages/ec/b0/b850385604334c2ce90e3ee1013bd911aedf058a934905863a6ea95e9eb4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d", size = 647362 }, + { url = "https://files.pythonhosted.org/packages/44/d0/3f68a86e006448fb6c005aee66565b9eb89014a70c491d70c08de597f8e4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c", size = 754118 }, + { url = "https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497 }, + { url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042 }, + { url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831 }, + { url = "https://files.pythonhosted.org/packages/db/5d/36619b61ffa2429eeaefaab4f3374666adf36ad8ac6330d855848d7d36fd/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d", size = 715692 }, + { url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777 }, + { url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", 
hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523 }, + { url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011 }, + { url = "https://files.pythonhosted.org/packages/7f/5e/212f473a93ae78c669ffa0cb051e3fee1139cb2d385d2ae1653d64281507/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475", size = 642488 }, + { url = "https://files.pythonhosted.org/packages/1f/8f/ecfbe2123ade605c49ef769788f79c38ddb1c8fa81e01f4dbf5cf1a44b16/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef", size = 745066 }, + { url = "https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785 }, + { url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017 }, + { url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270 }, + { url = "https://files.pythonhosted.org/packages/87/b8/01c29b924dcbbed75cc45b30c30d565d763b9c4d540545a0eeecffb8f09c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01", size = 709059 }, + { url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583 }, + { url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190 }, +] + +[[package]] +name = "ruff" +version = "0.9.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/e1/e265aba384343dd8ddd3083f5e33536cd17e1566c41453a5517b5dd443be/ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9", size = 3639454 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/e3/3d2c022e687e18cf5d93d6bfa2722d46afc64eaa438c7fbbdd603b3597be/ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba", size = 11714128 }, + { url = "https://files.pythonhosted.org/packages/e1/22/aff073b70f95c052e5c58153cba735748c9e70107a77d03420d7850710a0/ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504", size = 11682539 }, + { url 
= "https://files.pythonhosted.org/packages/75/a7/f5b7390afd98a7918582a3d256cd3e78ba0a26165a467c1820084587cbf9/ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83", size = 11132512 }, + { url = "https://files.pythonhosted.org/packages/a6/e3/45de13ef65047fea2e33f7e573d848206e15c715e5cd56095589a7733d04/ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc", size = 11929275 }, + { url = "https://files.pythonhosted.org/packages/7d/f2/23d04cd6c43b2e641ab961ade8d0b5edb212ecebd112506188c91f2a6e6c/ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b", size = 11466502 }, + { url = "https://files.pythonhosted.org/packages/b5/6f/3a8cf166f2d7f1627dd2201e6cbc4cb81f8b7d58099348f0c1ff7b733792/ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e", size = 12676364 }, + { url = "https://files.pythonhosted.org/packages/f5/c4/db52e2189983c70114ff2b7e3997e48c8318af44fe83e1ce9517570a50c6/ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666", size = 13335518 }, + { url = "https://files.pythonhosted.org/packages/66/44/545f8a4d136830f08f4d24324e7db957c5374bf3a3f7a6c0bc7be4623a37/ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5", size = 12823287 }, + { url = "https://files.pythonhosted.org/packages/c5/26/8208ef9ee7431032c143649a9967c3ae1aae4257d95e6f8519f07309aa66/ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5", size = 14592374 }, + { url = "https://files.pythonhosted.org/packages/31/70/e917781e55ff39c5b5208bda384fd397ffd76605e68544d71a7e40944945/ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217", size = 12500173 }, + { url = "https://files.pythonhosted.org/packages/84/f5/e4ddee07660f5a9622a9c2b639afd8f3104988dc4f6ba0b73ffacffa9a8c/ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6", size = 11906555 }, + { url = "https://files.pythonhosted.org/packages/f1/2b/6ff2fe383667075eef8656b9892e73dd9b119b5e3add51298628b87f6429/ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897", size = 11538958 }, + { url = "https://files.pythonhosted.org/packages/3c/db/98e59e90de45d1eb46649151c10a062d5707b5b7f76f64eb1e29edf6ebb1/ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08", size = 12117247 }, + { url = "https://files.pythonhosted.org/packages/ec/bc/54e38f6d219013a9204a5a2015c09e7a8c36cedcd50a4b01ac69a550b9d9/ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656", size = 12554647 }, + { url = "https://files.pythonhosted.org/packages/a5/7d/7b461ab0e2404293c0627125bb70ac642c2e8d55bf590f6fce85f508f1b2/ruff-0.9.6-py3-none-win32.whl", hash = 
"sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d", size = 9949214 }, + { url = "https://files.pythonhosted.org/packages/ee/30/c3cee10f915ed75a5c29c1e57311282d1a15855551a64795c1b2bbe5cf37/ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa", size = 10999914 }, + { url = "https://files.pythonhosted.org/packages/e8/a8/d71f44b93e3aa86ae232af1f2126ca7b95c0f515ec135462b3e1f351441c/ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a", size = 10177499 }, ] [[package]] @@ -2200,6 +2613,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d9/61/f2b52e107b1fc8944b33ef56bf6ac4ebbe16d91b94d2b87ce013bf63fb84/starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d", size = 71507 }, ] +[[package]] +name = "sympy" +version = "1.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ca/99/5a5b6f19ff9f083671ddf7b9632028436167cd3d33e11015754e41b249a4/sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", size = 7533040 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177 }, +] + [[package]] name = "termcolor" version = "2.5.0" @@ -2211,38 +2636,38 @@ wheels = [ [[package]] name = "tiktoken" -version = "0.8.0" +version = "0.9.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "regex" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/37/02/576ff3a6639e755c4f70997b2d315f56d6d71e0d046f4fb64cb81a3fb099/tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2", size = 35107 } +sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/756fedf6981e82897f2d570dd25fa597eb3f4459068ae0572d7e888cfd6f/tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d", size = 35991 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/ba/a35fad753bbca8ba0cc1b0f3402a70256a110ced7ac332cf84ba89fc87ab/tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e", size = 1039905 }, - { url = "https://files.pythonhosted.org/packages/91/05/13dab8fd7460391c387b3e69e14bf1e51ff71fe0a202cd2933cc3ea93fb6/tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21", size = 982417 }, - { url = "https://files.pythonhosted.org/packages/e9/98/18ec4a8351a6cf4537e40cd6e19a422c10cce1ef00a2fcb716e0a96af58b/tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560", size = 1144915 }, - { url = "https://files.pythonhosted.org/packages/2e/28/cf3633018cbcc6deb7805b700ccd6085c9a5a7f72b38974ee0bffd56d311/tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2", size = 1177221 }, - { url = 
"https://files.pythonhosted.org/packages/57/81/8a5be305cbd39d4e83a794f9e80c7f2c84b524587b7feb27c797b2046d51/tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9", size = 1237398 }, - { url = "https://files.pythonhosted.org/packages/dc/da/8d1cc3089a83f5cf11c2e489332752981435280285231924557350523a59/tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005", size = 884215 }, - { url = "https://files.pythonhosted.org/packages/f6/1e/ca48e7bfeeccaf76f3a501bd84db1fa28b3c22c9d1a1f41af9fb7579c5f6/tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1", size = 1039700 }, - { url = "https://files.pythonhosted.org/packages/8c/f8/f0101d98d661b34534769c3818f5af631e59c36ac6d07268fbfc89e539ce/tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a", size = 982413 }, - { url = "https://files.pythonhosted.org/packages/ac/3c/2b95391d9bd520a73830469f80a96e3790e6c0a5ac2444f80f20b4b31051/tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d", size = 1144242 }, - { url = "https://files.pythonhosted.org/packages/01/c4/c4a4360de845217b6aa9709c15773484b50479f36bb50419c443204e5de9/tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47", size = 1176588 }, - { url = "https://files.pythonhosted.org/packages/f8/a3/ef984e976822cd6c2227c854f74d2e60cf4cd6fbfca46251199914746f78/tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419", size = 1237261 }, - { url = "https://files.pythonhosted.org/packages/1e/86/eea2309dc258fb86c7d9b10db536434fc16420feaa3b6113df18b23db7c2/tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99", size = 884537 }, - { url = "https://files.pythonhosted.org/packages/c1/22/34b2e136a6f4af186b6640cbfd6f93400783c9ef6cd550d9eab80628d9de/tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586", size = 1039357 }, - { url = "https://files.pythonhosted.org/packages/04/d2/c793cf49c20f5855fd6ce05d080c0537d7418f22c58e71f392d5e8c8dbf7/tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b", size = 982616 }, - { url = "https://files.pythonhosted.org/packages/b3/a1/79846e5ef911cd5d75c844de3fa496a10c91b4b5f550aad695c5df153d72/tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab", size = 1144011 }, - { url = "https://files.pythonhosted.org/packages/26/32/e0e3a859136e95c85a572e4806dc58bf1ddf651108ae8b97d5f3ebe1a244/tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04", size = 1175432 }, - { url = "https://files.pythonhosted.org/packages/c7/89/926b66e9025b97e9fbabeaa59048a736fe3c3e4530a204109571104f921c/tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc", size = 1236576 }, - { url = "https://files.pythonhosted.org/packages/45/e2/39d4aa02a52bba73b2cd21ba4533c84425ff8786cc63c511d68c8897376e/tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db", size = 883824 }, - { url = "https://files.pythonhosted.org/packages/e3/38/802e79ba0ee5fcbf240cd624143f57744e5d411d2e9d9ad2db70d8395986/tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24", size = 1039648 }, - { url = "https://files.pythonhosted.org/packages/b1/da/24cdbfc302c98663fbea66f5866f7fa1048405c7564ab88483aea97c3b1a/tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a", size = 982763 }, - { url = "https://files.pythonhosted.org/packages/e4/f0/0ecf79a279dfa41fc97d00adccf976ecc2556d3c08ef3e25e45eb31f665b/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5", size = 1144417 }, - { url = "https://files.pythonhosted.org/packages/ab/d3/155d2d4514f3471a25dc1d6d20549ef254e2aa9bb5b1060809b1d3b03d3a/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953", size = 1175108 }, - { url = "https://files.pythonhosted.org/packages/19/eb/5989e16821ee8300ef8ee13c16effc20dfc26c777d05fbb6825e3c037b81/tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7", size = 1236520 }, - { url = "https://files.pythonhosted.org/packages/40/59/14b20465f1d1cb89cfbc96ec27e5617b2d41c79da12b5e04e96d689be2a7/tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69", size = 883849 }, + { url = "https://files.pythonhosted.org/packages/64/f3/50ec5709fad61641e4411eb1b9ac55b99801d71f1993c29853f256c726c9/tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382", size = 1065770 }, + { url = "https://files.pythonhosted.org/packages/d6/f8/5a9560a422cf1755b6e0a9a436e14090eeb878d8ec0f80e0cd3d45b78bf4/tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108", size = 1009314 }, + { url = "https://files.pythonhosted.org/packages/bc/20/3ed4cfff8f809cb902900ae686069e029db74567ee10d017cb254df1d598/tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd", size = 1143140 }, + { url = "https://files.pythonhosted.org/packages/f1/95/cc2c6d79df8f113bdc6c99cdec985a878768120d87d839a34da4bd3ff90a/tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de", size = 1197860 }, + { url = "https://files.pythonhosted.org/packages/c7/6c/9c1a4cc51573e8867c9381db1814223c09ebb4716779c7f845d48688b9c8/tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990", size = 1259661 }, + { url = 
"https://files.pythonhosted.org/packages/cd/4c/22eb8e9856a2b1808d0a002d171e534eac03f96dbe1161978d7389a59498/tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4", size = 894026 }, + { url = "https://files.pythonhosted.org/packages/4d/ae/4613a59a2a48e761c5161237fc850eb470b4bb93696db89da51b79a871f1/tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e", size = 1065987 }, + { url = "https://files.pythonhosted.org/packages/3f/86/55d9d1f5b5a7e1164d0f1538a85529b5fcba2b105f92db3622e5d7de6522/tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348", size = 1009155 }, + { url = "https://files.pythonhosted.org/packages/03/58/01fb6240df083b7c1916d1dcb024e2b761213c95d576e9f780dfb5625a76/tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33", size = 1142898 }, + { url = "https://files.pythonhosted.org/packages/b1/73/41591c525680cd460a6becf56c9b17468d3711b1df242c53d2c7b2183d16/tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136", size = 1197535 }, + { url = "https://files.pythonhosted.org/packages/7d/7c/1069f25521c8f01a1a182f362e5c8e0337907fae91b368b7da9c3e39b810/tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336", size = 1259548 }, + { url = "https://files.pythonhosted.org/packages/6f/07/c67ad1724b8e14e2b4c8cca04b15da158733ac60136879131db05dda7c30/tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb", size = 893895 }, + { url = "https://files.pythonhosted.org/packages/cf/e5/21ff33ecfa2101c1bb0f9b6df750553bd873b7fb532ce2cb276ff40b197f/tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03", size = 1065073 }, + { url = "https://files.pythonhosted.org/packages/8e/03/a95e7b4863ee9ceec1c55983e4cc9558bcfd8f4f80e19c4f8a99642f697d/tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210", size = 1008075 }, + { url = "https://files.pythonhosted.org/packages/40/10/1305bb02a561595088235a513ec73e50b32e74364fef4de519da69bc8010/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794", size = 1140754 }, + { url = "https://files.pythonhosted.org/packages/1b/40/da42522018ca496432ffd02793c3a72a739ac04c3794a4914570c9bb2925/tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22", size = 1196678 }, + { url = "https://files.pythonhosted.org/packages/5c/41/1e59dddaae270ba20187ceb8aa52c75b24ffc09f547233991d5fd822838b/tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2", size = 1259283 }, + { url = "https://files.pythonhosted.org/packages/5b/64/b16003419a1d7728d0d8c0d56a4c24325e7b10a21a9dd1fc0f7115c02f0a/tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16", size = 894897 }, + { url = "https://files.pythonhosted.org/packages/7a/11/09d936d37f49f4f494ffe660af44acd2d99eb2429d60a57c71318af214e0/tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb", size = 1064919 }, + { url = "https://files.pythonhosted.org/packages/80/0e/f38ba35713edb8d4197ae602e80837d574244ced7fb1b6070b31c29816e0/tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63", size = 1007877 }, + { url = "https://files.pythonhosted.org/packages/fe/82/9197f77421e2a01373e27a79dd36efdd99e6b4115746ecc553318ecafbf0/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01", size = 1140095 }, + { url = "https://files.pythonhosted.org/packages/f2/bb/4513da71cac187383541facd0291c4572b03ec23c561de5811781bbd988f/tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139", size = 1195649 }, + { url = "https://files.pythonhosted.org/packages/fa/5c/74e4c137530dd8504e97e3a41729b1103a4ac29036cbfd3250b11fd29451/tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a", size = 1258465 }, + { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669 }, ] [[package]] @@ -2284,6 +2709,56 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, ] +[[package]] +name = "torch" +version = "2.6.0+cpu" +source = { registry = "https://download.pytorch.org/whl/cpu" } +dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "sympy" }, + { name = "typing-extensions" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:35a9e78b7e4096968b54c1a198687b981569c50ae93e661aa430f9fd208da102" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:90832f4d118c566b8652a2196ac695fc1f14cf420db27b5a1b41c7eaaf2141e9" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp310-cp310-win_amd64.whl", hash = "sha256:6e22f0b13db8d53e55bcb3b46c9dd4b6676d1c44051b56753e745cec3075b333" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:5b6ae523bfb67088a17ca7734d131548a2e60346c622621e4248ed09dd0790cc" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d3dab9fb0294f268aec28e8aaba834e9d006b90a50db5bc2fe2191a9d48c6084" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:24c9d3d13b9ea769dd7bd5c11cfa1fc463fd7391397156565484565ca685d908" }, + { url = 
"https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:59e78aa0c690f70734e42670036d6b541930b8eabbaa18d94e090abf14cc4d91" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:318290e8924353c61b125cdc8768d15208704e279e7757c113b9620740deca98" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:4027d982eb2781c93825ab9527f17fbbb12dbabf422298e4b954be60016f87d8" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp313-cp313-linux_x86_64.whl", hash = "sha256:e70ee2e37ad27a90201d101a41c2e10df7cf15a9ebd17c084f54cf2518c57bdf" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b5e7e8d561b263b5ad8049736281cd12c78e51e7bc1a913fd4098fd0e0b96347" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp313-cp313-win_amd64.whl", hash = "sha256:b436a6c62d086dc5b32f5721b59f0ca8ad3bf9de09ee9b5b83dbf1e7a7e22c60" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp313-cp313t-linux_x86_64.whl", hash = "sha256:fb34d6cc4e6e20e66d74852c3d84e0301dc5e1a7c822076ef288886f978390f0" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:7cac05af909ee1c5c2915e8f3efaa1ea015e7e414be0ff53071402b9e4f3c7df" }, +] + +[[package]] +name = "torchvision" +version = "0.21.0+cpu" +source = { registry = "https://download.pytorch.org/whl/cpu" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, + { name = "torch" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:4ed0a1be50676a7c589ba83b62c9dc0267a87e852b8cd9b7d6db27ab36c6d552" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp310-cp310-win_amd64.whl", hash = "sha256:554ca0f5948ac89911299f8bfb6f23936d867387ea213ab235adc2814b510d0c" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:d67081026aad9642c46d3b14035f8ae69117468c09a07d628f3eafc7ae74841f" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:852b96738a68592223f01a04e4bcc1b3906bef7eee41c99f27f3be5706046862" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:d6874431e678ba107b60a83f255c33f3755f06bad587b1b919aa514ec325dcd8" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:667f3d983240f41eaff5a3f78bdcbc144473978a37cd15a4db6dad92b1e8b6f0" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp313-cp313-linux_x86_64.whl", hash = "sha256:a76478c0f547e032116282d61a5a7d943142cf040f6c7d97941d7e96813c4c14" }, + { url = "https://download.pytorch.org/whl/cpu/torchvision-0.21.0%2Bcpu-cp313-cp313-win_amd64.whl", hash = "sha256:883f8668b923781f1152a20d75e75ad94a4f1016328d86a7b889006a9156fb14" }, +] + [[package]] name = "tornado" version = "6.4.2" @@ -2337,11 +2812,11 @@ wheels = [ [[package]] name = "types-setuptools" -version = "75.8.0.20250110" +version = "75.8.0.20250210" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/f7/42/5713e90d4f9683f2301d900f33e4fc2405ad8ac224dda30f6cb7f4cd215b/types_setuptools-75.8.0.20250110.tar.gz", hash = "sha256:96f7ec8bbd6e0a54ea180d66ad68ad7a1d7954e7281a710ea2de75e355545271", size = 48185 } +sdist = { url = "https://files.pythonhosted.org/packages/c3/20/794589df23b1e7d3c1a1f86285e749f2a83ef845d90f2461bc2912b8f989/types_setuptools-75.8.0.20250210.tar.gz", hash = "sha256:c1547361b2441f07c94e25dce8a068e18c611593ad4b6fdd727b1a8f5d1fda33", size = 48240 } wheels = [ - { url = "https://files.pythonhosted.org/packages/cf/a3/dbfd106751b11c728cec21cc62cbfe7ff7391b935c4b6e8f0bdc2e6fd541/types_setuptools-75.8.0.20250110-py3-none-any.whl", hash = "sha256:a9f12980bbf9bcdc23ecd80755789085bad6bfce4060c2275bc2b4ca9f2bc480", size = 71521 }, + { url = "https://files.pythonhosted.org/packages/2d/b4/5978a63dac80d9a653fdb73f58e08b208486d303f9a3ee481f0c807630de/types_setuptools-75.8.0.20250210-py3-none-any.whl", hash = "sha256:a217d7b4d59be04c29e23d142c959a0f85e71292fd3fc4313f016ca11f0b56dc", size = 71535 }, ] [[package]] @@ -2387,16 +2862,16 @@ wheels = [ [[package]] name = "virtualenv" -version = "20.29.1" +version = "20.29.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, { name = "filelock" }, { name = "platformdirs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/ca/f23dcb02e161a9bba141b1c08aa50e8da6ea25e6d780528f1d385a3efe25/virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35", size = 7658028 } +sdist = { url = "https://files.pythonhosted.org/packages/f1/88/dacc875dd54a8acadb4bcbfd4e3e86df8be75527116c91d8f9784f5e9cab/virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728", size = 4320272 } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/9b/599bcfc7064fbe5740919e78c5df18e5dceb0887e676256a1061bb5ae232/virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779", size = 4282379 }, + { url = "https://files.pythonhosted.org/packages/93/fa/849483d56773ae29740ae70043ad88e068f98a6401aa819b5d6bee604683/virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a", size = 4301478 }, ] [[package]] @@ -2531,3 +3006,76 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ff/1e/e47dedac8bf7140e59aa6a679e850c4df9610ae844d71b6015263ddea37b/websockets-14.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dec254fcabc7bd488dab64846f588fc5b6fe0d78f641180030f8ea27b76d72c3", size = 164465 }, { url = "https://files.pythonhosted.org/packages/7b/c8/d529f8a32ce40d98309f4470780631e971a5a842b60aec864833b3615786/websockets-14.2-py3-none-any.whl", hash = "sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b", size = 157416 }, ] + +[[package]] +name = "wrapt" +version = "1.17.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/d1/1daec934997e8b160040c78d7b31789f19b122110a75eca3d4e8da0049e1/wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984", size = 53307 }, + { url = 
"https://files.pythonhosted.org/packages/1b/7b/13369d42651b809389c1a7153baa01d9700430576c81a2f5c5e460df0ed9/wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22", size = 38486 }, + { url = "https://files.pythonhosted.org/packages/62/bf/e0105016f907c30b4bd9e377867c48c34dc9c6c0c104556c9c9126bd89ed/wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7", size = 38777 }, + { url = "https://files.pythonhosted.org/packages/27/70/0f6e0679845cbf8b165e027d43402a55494779295c4b08414097b258ac87/wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c", size = 83314 }, + { url = "https://files.pythonhosted.org/packages/0f/77/0576d841bf84af8579124a93d216f55d6f74374e4445264cb378a6ed33eb/wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72", size = 74947 }, + { url = "https://files.pythonhosted.org/packages/90/ec/00759565518f268ed707dcc40f7eeec38637d46b098a1f5143bff488fe97/wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061", size = 82778 }, + { url = "https://files.pythonhosted.org/packages/f8/5a/7cffd26b1c607b0b0c8a9ca9d75757ad7620c9c0a9b4a25d3f8a1480fafc/wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2", size = 81716 }, + { url = "https://files.pythonhosted.org/packages/7e/09/dccf68fa98e862df7e6a60a61d43d644b7d095a5fc36dbb591bbd4a1c7b2/wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c", size = 74548 }, + { url = "https://files.pythonhosted.org/packages/b7/8e/067021fa3c8814952c5e228d916963c1115b983e21393289de15128e867e/wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62", size = 81334 }, + { url = "https://files.pythonhosted.org/packages/4b/0d/9d4b5219ae4393f718699ca1c05f5ebc0c40d076f7e65fd48f5f693294fb/wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563", size = 36427 }, + { url = "https://files.pythonhosted.org/packages/72/6a/c5a83e8f61aec1e1aeef939807602fb880e5872371e95df2137142f5c58e/wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f", size = 38774 }, + { url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308 }, + { url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488 }, + { url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776 }, + { url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776 }, + { url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420 }, + { url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199 }, + { url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307 }, + { url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025 }, + { url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879 }, + { url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419 }, + { url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773 }, + { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799 }, + { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821 }, + { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919 }, + { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721 }, + { url = 
"https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899 }, + { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222 }, + { url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707 }, + { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685 }, + { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, + { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, + { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, + { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, + { url = 
"https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, + { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, + { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, + { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, + { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, + { url = 
"https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, +] + +[[package]] +name = "zipp" +version = "3.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630 }, +]