Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support streaming invoke responses #23

Merged
merged 9 commits on Sep 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
## 0.0.6

* **Support streaming response types for /invoke if callable is async generator**

## 0.0.5

* **Improve logging to hide body in case of sensitive data unless TRACE level**
Expand Down
2 changes: 1 addition & 1 deletion unstructured_platform_plugins/__version__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.0.5" # pragma: no cover
__version__ = "0.0.6" # pragma: no cover
24 changes: 19 additions & 5 deletions unstructured_platform_plugins/etl_uvicorn/api_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from typing import Any, Callable, Optional

from fastapi import FastAPI, status
from fastapi.responses import StreamingResponse
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from pydantic import BaseModel
from starlette.responses import RedirectResponse
Expand Down Expand Up @@ -110,16 +111,29 @@ class InvokeResponse(BaseModel):

logging.getLogger("etl_uvicorn.fastapi")

async def wrap_fn(func: Callable, kwargs: Optional[dict[str, Any]] = None) -> InvokeResponse:
ResponseType = StreamingResponse if inspect.isasyncgenfunction(func) else InvokeResponse

async def wrap_fn(func: Callable, kwargs: Optional[dict[str, Any]] = None) -> ResponseType:
usage: list[UsageData] = []
request_dict = kwargs if kwargs else {}
if "usage" in inspect.signature(func).parameters:
request_dict["usage"] = usage
else:
vangheem marked this conversation as resolved.
Show resolved Hide resolved
logger.warning("usage data not an expected parameter, omitting")
try:
output = await invoke_func(func=func, kwargs=request_dict)
return InvokeResponse(usage=usage, status_code=status.HTTP_200_OK, output=output)
if inspect.isasyncgenfunction(func):
# Stream response if function is an async generator

async def _stream_response():
async for output in func(**(request_dict or {})):
yield InvokeResponse(
usage=usage, status_code=status.HTTP_200_OK, output=output
).model_dump_json() + "\n"

return StreamingResponse(_stream_response(), media_type="application/x-ndjson")
else:
output = await invoke_func(func=func, kwargs=request_dict)
return InvokeResponse(usage=usage, status_code=status.HTTP_200_OK, output=output)
except Exception as invoke_error:
logger.error(f"failed to invoke plugin: {invoke_error}", exc_info=True)
return InvokeResponse(
Expand All @@ -132,7 +146,7 @@ async def wrap_fn(func: Callable, kwargs: Optional[dict[str, Any]] = None) -> In
if input_schema_model.model_fields:

@fastapi_app.post("/invoke", response_model=InvokeResponse)
async def run_job(request: input_schema_model) -> InvokeResponse:
async def run_job(request: input_schema_model) -> ResponseType:
log_func_and_body(func=func, body=request.json())
# Create dictionary from pydantic model while preserving underlying types
request_dict = {f: getattr(request, f) for f in request.model_fields}
Expand All @@ -144,7 +158,7 @@ async def run_job(request: input_schema_model) -> InvokeResponse:
else:

@fastapi_app.post("/invoke", response_model=InvokeResponse)
async def run_job() -> InvokeResponse:
async def run_job() -> ResponseType:
log_func_and_body(func=func)
return await wrap_fn(
func=func,
Expand Down
Loading