Refactor log streaming and report generation logic

Simplified the log-streaming logic by consolidating it into a single async generator (`log_stream`). Removed redundant tasks and streamlined report generation to improve code readability and maintainability.
This commit is contained in:
ThomasTaroni 2025-04-25 22:11:19 +02:00
parent 1472e277bc
commit e2b4ba5a7d

View file

@@ -1,4 +1,4 @@
from fastapi import FastAPI, HTTPException, Request, Depends
from fastapi import FastAPI, HTTPException, Request, Depends
from pydantic import BaseModel
from phoenix_technologies import ReportGenerator, CustomLogsHandler
from fastapi.responses import StreamingResponse
@@ -8,17 +8,21 @@ import asyncio
# FastAPI app instance
app = FastAPI()
# Define a request body structure using Pydantic
class ReportRequest(BaseModel):
    """Request body for the /get_report endpoint."""
    # The research query passed to ReportGenerator — presumably free-form text; TODO confirm
    query: str
    # The kind of report to produce — valid values are defined by ReportGenerator; TODO confirm
    report_type: str
# Define a dependency to validate the API Key
def verify_api_key(request: Request):
# Define the API key from the environment variables
expected_api_key = os.getenv("API_KEY", None)
if not expected_api_key:
raise HTTPException(status_code=500, detail="API key is not configured on the server.")
raise HTTPException(
status_code=500, detail="API key is not configured on the server."
)
# Get the API key from the request headers
provided_api_key = request.headers.get("X-API-KEY", None)
@ -27,6 +31,7 @@ def verify_api_key(request: Request):
if not provided_api_key or provided_api_key != expected_api_key:
raise HTTPException(status_code=403, detail="Invalid or missing API key.")
@app.post("/get_report", dependencies=[Depends(verify_api_key)])
async def get_report_endpoint(request: ReportRequest):
"""
@@ -40,12 +45,13 @@ async def get_report_endpoint(request: ReportRequest):
# Define a coroutine to run `generate_report` in a separate thread
async def generate_report_thread(generator: ReportGenerator):
try:
# Run blocking code in a thread pool
await asyncio.to_thread(generator.generate_report)
except Exception as e:
print(f"Error during report generation: {str(e)}")
# Define a coroutine for streaming logs
async def get_logs_thread(generator: ReportGenerator, custom_logs_handler: CustomLogsHandler):
# Define an asynchronous generator for streaming logs
async def log_stream():
try:
index = 0
while not generator.complete:
@@ -57,27 +63,15 @@
else:
# Wait briefly to avoid aggressive looping
await asyncio.sleep(0.1)
# After completion, include a success message
yield "\nReport generation completed successfully!\n"
except Exception as e:
print(f"Error while fetching logs: {str(e)}")
yield f"Error: {str(e)}"
# Define an asynchronous generator to stream output
async def combined_stream():
try:
# Run both tasks concurrently
task1 = asyncio.create_task(generate_report_thread(generator))
task2 = asyncio.create_task(get_logs_thread(generator, custom_logs_handler))
# Run the report generation task concurrently with the log streaming
asyncio.create_task(generate_report_thread(generator))
# Wait for logs and stream output
async for log_entry in get_logs_thread(generator, custom_logs_handler):
yield log_entry
# Wait for both tasks to finish
await asyncio.gather(task1, task2)
yield "\nReport generation completed successfully!\n"
except Exception as e:
yield f"Error: {str(e)}"
# Return the combined async generator as a streaming response
return StreamingResponse(combined_stream(), media_type="text/plain")
# Return the `log_stream` async generator as a streaming response
return StreamingResponse(log_stream(), media_type="text/plain")