vllm.entrypoints.pooling.embed.api_router

router module-attribute

router = APIRouter()
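
This module-level APIRouter groups the embedding endpoints so they can be attached to the main FastAPI application. A minimal sketch of how such a router is typically mounted (the standalone app built here is illustrative only; the actual vLLM server constructs its application elsewhere):

from fastapi import FastAPI

from vllm.entrypoints.pooling.embed.api_router import router as embed_router

# Illustrative wiring: attach the embedding routes to a FastAPI app,
# exposing POST /v1/embeddings.
app = FastAPI()
app.include_router(embed_router)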

create_embedding async

create_embedding(
    request: EmbeddingRequest, raw_request: Request
)
Source code in vllm/entrypoints/pooling/embed/api_router.py
@router.post(
    "/v1/embeddings",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_embedding(
    request: EmbeddingRequest,
    raw_request: Request,
):
    handler = embedding(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Embeddings API"
        )

    try:
        generator = await handler.create_embedding(request, raw_request)
    except Exception as e:
        raise HTTPException(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value, detail=str(e)
        ) from e

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, EmbeddingResponse):
        return JSONResponse(content=generator.model_dump())
    elif isinstance(generator, EmbeddingBytesResponse):
        return StreamingResponse(
            content=generator.body,
            headers={"metadata": generator.metadata},
            media_type=generator.media_type,
        )

    assert_never(generator)
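
With an OpenAI-compatible vLLM server already running, the endpoint can be exercised through the standard OpenAI client. A minimal sketch, assuming a server on localhost:8000 and a placeholder model name:

from openai import OpenAI

# Assumes the server was started with an embedding-capable model;
# the base_url, api_key, and model name below are placeholders.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

resp = client.embeddings.create(
    model="my-embedding-model",
    input=["vLLM turns text into vectors"],
)
print(len(resp.data[0].embedding))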

embedding

embedding(
    request: Request,
) -> OpenAIServingEmbedding | None
Source code in vllm/entrypoints/pooling/embed/api_router.py
def embedding(request: Request) -> OpenAIServingEmbedding | None:
    return request.app.state.openai_serving_embedding