Skip to content

vllm.entrypoints.pooling.score.protocol

RerankDocument

Bases: BaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class RerankDocument(BaseModel):
    """A single document echoed back in a rerank result.

    Exactly one of the two fields is expected to be populated, depending on
    whether the original document was plain text or multi-modal content
    (presumably enforced by the caller — not validated here; TODO confirm).
    """

    # Plain-text document content, if the document was textual.
    text: str | None = None
    # Multi-modal document content, if the document was not plain text.
    multi_modal: ScoreContentPartParam | None = None

multi_modal class-attribute instance-attribute

multi_modal: ScoreContentPartParam | None = None

text class-attribute instance-attribute

text: str | None = None

RerankRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class RerankRequest(OpenAIBaseModel):
    """Request body for the rerank endpoint.

    Scores each entry of ``documents`` against ``query`` and returns the
    ``top_n`` highest-scoring documents.
    """

    # Name of the served model; None falls back to the server's default.
    model: str | None = None
    # Query to score every document against; text or multi-modal content.
    query: str | ScoreMultiModalParam
    # Candidate documents to be ranked; text or multi-modal content.
    documents: list[str] | ScoreMultiModalParam
    # How many top-scoring documents to return. The default of 0 is a
    # sentinel (presumably meaning "return all documents", resolved by the
    # request handler — TODO confirm). A plain default replaces the
    # original `default_factory=lambda: 0`, which needlessly constructed
    # an immutable value through a factory.
    top_n: int = Field(default=0)
    # If set, prompts longer than this many tokens are truncated; -1 has
    # special meaning per the ge=-1 bound (presumably "model max length"
    # — TODO confirm against the handler).
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # --8<-- [start:rerank-extra-params]

    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description="Additional kwargs to pass to the HF processor.",
    )

    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )

    # Deprecated alias for use_activation (per its own description).
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    # Deprecated alias for use_activation (per its own description).
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:rerank-extra-params]

    def to_pooling_params(self):
        """Build the PoolingParams for this request.

        get_use_activation reconciles use_activation with the deprecated
        softmax/activation aliases (per the field descriptions above).
        """
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )

activation class-attribute instance-attribute

activation: bool | None = Field(
    default=None,
    description="activation will be deprecated, please use use_activation instead.",
)

documents instance-attribute

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

model class-attribute instance-attribute

model: str | None = None

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

query instance-attribute

softmax class-attribute instance-attribute

softmax: bool | None = Field(
    default=None,
    description="softmax will be deprecated, please use use_activation instead.",
)

top_n class-attribute instance-attribute

top_n: int = Field(default_factory=lambda: 0)

truncate_prompt_tokens class-attribute instance-attribute

truncate_prompt_tokens: (
    Annotated[int, Field(ge=-1)] | None
) = None

use_activation class-attribute instance-attribute

use_activation: bool | None = Field(
    default=None,
    description="Whether to use activation for classification outputs. Default is True.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/score/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        use_activation=get_use_activation(self),
    )

RerankResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class RerankResponse(OpenAIBaseModel):
    """Response body for the rerank endpoint."""

    # Unique identifier of this response.
    id: str
    # Name of the model that produced the scores.
    model: str
    # Token accounting for the request.
    usage: RerankUsage
    # Ranked results, one per returned document.
    results: list[RerankResult]

id instance-attribute

id: str

model instance-attribute

model: str

results instance-attribute

results: list[RerankResult]

usage instance-attribute

usage: RerankUsage

RerankResult

Bases: BaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class RerankResult(BaseModel):
    """One scored document within a rerank response."""

    # Index of the document in the original request's `documents` list.
    index: int
    # The document itself, echoed back to the client.
    document: RerankDocument
    # Relevance score of this document with respect to the query.
    relevance_score: float

document instance-attribute

document: RerankDocument

index instance-attribute

index: int

relevance_score instance-attribute

relevance_score: float

RerankUsage

Bases: BaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class RerankUsage(BaseModel):
    """Token usage accounting attached to a rerank response."""

    # Total number of tokens consumed by the request.
    total_tokens: int

total_tokens instance-attribute

total_tokens: int

ScoreRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class ScoreRequest(OpenAIBaseModel):
    """Request body for the score endpoint.

    Scores pairs formed from ``text_1`` and ``text_2`` (pairing semantics
    — e.g. one-to-many vs. element-wise — are resolved by the handler, not
    here; TODO confirm).
    """

    # Name of the served model; None falls back to the server's default.
    model: str | None = None
    # First side of the pair(s) to score; text or multi-modal content.
    text_1: list[str] | str | ScoreMultiModalParam
    # Second side of the pair(s) to score; text or multi-modal content.
    text_2: list[str] | str | ScoreMultiModalParam
    # If set, prompts longer than this many tokens are truncated; -1 has
    # special meaning per the ge=-1 bound (presumably "model max length"
    # — TODO confirm against the handler).
    truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None

    # --8<-- [start:score-extra-params]

    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )

    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )

    # Deprecated alias for use_activation (per its own description).
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )

    # Deprecated alias for use_activation (per its own description).
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )

    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "Default is True.",
    )
    # --8<-- [end:score-extra-params]

    def to_pooling_params(self):
        """Build the PoolingParams for this request.

        get_use_activation reconciles use_activation with the deprecated
        softmax/activation aliases (per the field descriptions above).
        """
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            use_activation=get_use_activation(self),
        )

activation class-attribute instance-attribute

activation: bool | None = Field(
    default=None,
    description="activation will be deprecated, please use use_activation instead.",
)

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

model class-attribute instance-attribute

model: str | None = None

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

softmax class-attribute instance-attribute

softmax: bool | None = Field(
    default=None,
    description="softmax will be deprecated, please use use_activation instead.",
)

text_1 instance-attribute

text_2 instance-attribute

truncate_prompt_tokens class-attribute instance-attribute

truncate_prompt_tokens: (
    Annotated[int, Field(ge=-1)] | None
) = None

use_activation class-attribute instance-attribute

use_activation: bool | None = Field(
    default=None,
    description="Whether to use activation for classification outputs. Default is True.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/score/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        use_activation=get_use_activation(self),
    )

ScoreResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class ScoreResponse(OpenAIBaseModel):
    """Response body for the score endpoint."""

    # Unique response id, auto-generated with an "embd-" prefix.
    id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
    # Constant object-type discriminator (OpenAI-style list envelope).
    object: str = "list"
    # Unix timestamp (seconds) captured when the response is created.
    created: int = Field(default_factory=lambda: int(time.time()))
    # Name of the model that produced the scores.
    model: str
    # One score entry per scored pair.
    data: list[ScoreResponseData]
    # Token accounting for the request.
    usage: UsageInfo

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

data instance-attribute

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"embd-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'list'

usage instance-attribute

usage: UsageInfo

ScoreResponseData

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/score/protocol.py
class ScoreResponseData(OpenAIBaseModel):
    """One score entry within a score response."""

    # Index of the corresponding input pair in the request.
    index: int
    # Constant object-type discriminator for this entry.
    object: str = "score"
    # Similarity/classification score for the pair.
    score: float

index instance-attribute

index: int

object class-attribute instance-attribute

object: str = 'score'

score instance-attribute

score: float