vllm.entrypoints.pooling.classify.api_router

router module-attribute

router = APIRouter()
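
A minimal sketch of how a router like this can be mounted on a FastAPI application. The include_router call is standard FastAPI; the app construction and the handler assignment shown here are illustrative assumptions, not vLLM's actual startup code, which attaches openai_serving_classification to app.state during server initialization.

from fastapi import FastAPI

from vllm.entrypoints.pooling.classify.api_router import router

app = FastAPI()
# Mounting the router exposes POST /classify on this app.
app.include_router(router)
# classify() below reads the handler from app.state; the real server
# assigns a configured ServingClassification instance here at startup.
app.state.openai_serving_classification = None  # placeholder in this sketch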

classify

classify(request: Request) -> ServingClassification | None
Source code in vllm/entrypoints/pooling/classify/api_router.py
def classify(request: Request) -> ServingClassification | None:
    return request.app.state.openai_serving_classification
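
classify is a plain accessor: it returns the ServingClassification handler stored on the application state, or None when the served model does not support classification. A hypothetical test-style sketch of the None path, using SimpleNamespace stand-ins for the Starlette Request (the stand-in objects are assumptions for illustration):

from types import SimpleNamespace

from vllm.entrypoints.pooling.classify.api_router import classify

# Stand-in request carrying only what classify() actually reads.
state = SimpleNamespace(openai_serving_classification=None)
request = SimpleNamespace(app=SimpleNamespace(state=state))

# With no handler registered, classify() returns None, and create_classify
# below answers with an error response instead of serving the request.
assert classify(request) is None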

create_classify async

create_classify(request: ClassificationRequest, raw_request: Request)
Source code in vllm/entrypoints/pooling/classify/api_router.py
@router.post("/classify", dependencies=[Depends(validate_json_request)])
@with_cancellation
@load_aware_call
async def create_classify(request: ClassificationRequest, raw_request: Request):
    handler = classify(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Classification API"
        )

    try:
        generator = await handler.create_classify(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e
    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )

    elif isinstance(generator, ClassificationResponse):
        return JSONResponse(content=generator.model_dump())

    assert_never(generator)
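
End to end, the endpoint resolves the classification handler, returns an error response when the model does not support the Classification API, and otherwise serves a ClassificationResponse, mapping an ErrorResponse to its embedded status code and any unexpected exception to a 500. A hedged client-side sketch against a running server; the host, port, model name, and input text are placeholder assumptions, and the exact request fields should be checked against ClassificationRequest:

import requests

# Placeholder endpoint and model name; adjust to your deployment.
response = requests.post(
    "http://localhost:8000/classify",
    json={"model": "my-classifier", "input": "vLLM makes serving easy."},
)
response.raise_for_status()
# On success the body is a ClassificationResponse serialized via model_dump().
print(response.json())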