vllm.entrypoints.openai.engine.protocol

AnyResponseFormat module-attribute

AnyStructuralTagResponseFormat module-attribute

LogitsProcessors module-attribute

LogitsProcessors = list[str | LogitsProcessorConstructor]

_LONG_INFO module-attribute

_LONG_INFO = iinfo(long)

logger module-attribute

logger = init_logger(__name__)

DeltaFunctionCall

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaFunctionCall(BaseModel):
    name: str | None = None
    arguments: str | None = None

arguments class-attribute instance-attribute

arguments: str | None = None

name class-attribute instance-attribute

name: str | None = None

DeltaMessage

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaMessage(OpenAIBaseModel):
    role: str | None = None
    content: str | None = None
    reasoning: str | None = None
    reasoning_content: str | None = None
    """Deprecated: use `reasoning` instead."""
    tool_calls: list[DeltaToolCall] = Field(default_factory=list)

    @model_validator(mode="after")
    def handle_deprecated_reasoning_content(self):
        """Copy reasoning to reasoning_content for backward compatibility."""
        self.reasoning_content = self.reasoning
        return self

content class-attribute instance-attribute

content: str | None = None

reasoning class-attribute instance-attribute

reasoning: str | None = None

reasoning_content class-attribute instance-attribute

reasoning_content: str | None = None

Deprecated: use reasoning instead.

role class-attribute instance-attribute

role: str | None = None

tool_calls class-attribute instance-attribute

tool_calls: list[DeltaToolCall] = Field(
    default_factory=list
)

handle_deprecated_reasoning_content

handle_deprecated_reasoning_content()

Copy reasoning to reasoning_content for backward compatibility.

Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="after")
def handle_deprecated_reasoning_content(self):
    """Copy reasoning to reasoning_content for backward compatibility."""
    self.reasoning_content = self.reasoning
    return self
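
A minimal usage sketch (assuming the module path shown on this page): setting reasoning on a streaming delta also populates the deprecated reasoning_content field through the validator above.

from vllm.entrypoints.openai.engine.protocol import DeltaMessage

delta = DeltaMessage(role="assistant", reasoning="thinking...")
# The after-validator copies `reasoning` into `reasoning_content`.
assert delta.reasoning_content == "thinking..."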

DeltaToolCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaToolCall(OpenAIBaseModel):
    id: str | None = None
    type: Literal["function"] | None = None
    index: int
    function: DeltaFunctionCall | None = None

function class-attribute instance-attribute

function: DeltaFunctionCall | None = None

id class-attribute instance-attribute

id: str | None = None

index instance-attribute

index: int

type class-attribute instance-attribute

type: Literal['function'] | None = None
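
A minimal sketch of incremental tool-call deltas as they might appear across streamed chunks; only index is required, and the function arguments arrive as partial strings (the names and values are illustrative).

from vllm.entrypoints.openai.engine.protocol import DeltaFunctionCall, DeltaToolCall

first_chunk = DeltaToolCall(
    index=0,
    id="call_abc123",
    type="function",
    function=DeltaFunctionCall(name="get_weather", arguments='{"city": "'),
)
# Later chunks carry only the continuation of the arguments string.
next_chunk = DeltaToolCall(index=0, function=DeltaFunctionCall(arguments='Paris"}'))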

ErrorInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ErrorInfo(OpenAIBaseModel):
    message: str
    type: str
    param: str | None = None
    code: int

code instance-attribute

code: int

message instance-attribute

message: str

param class-attribute instance-attribute

param: str | None = None

type instance-attribute

type: str

ErrorResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ErrorResponse(OpenAIBaseModel):
    error: ErrorInfo

error instance-attribute

error: ErrorInfo
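
A minimal sketch of building the error payload returned to clients; the message, type, and code values are illustrative.

from vllm.entrypoints.openai.engine.protocol import ErrorInfo, ErrorResponse

resp = ErrorResponse(
    error=ErrorInfo(
        message="The model `my-model` does not exist.",
        type="NotFoundError",
        code=404,
    )
)
print(resp.model_dump_json())  # {"error": {"message": ..., "type": ..., "code": 404, ...}}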

ExtractedToolCallInformation

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ExtractedToolCallInformation(BaseModel):
    # indicate if tools were called
    tools_called: bool

    # extracted tool calls
    tool_calls: list[ToolCall]

    # content - per the OpenAI spec, content AND tool calls should rarely be
    # returned together, but some models do this intentionally
    content: str | None = None

content class-attribute instance-attribute

content: str | None = None

tool_calls instance-attribute

tool_calls: list[ToolCall]

tools_called instance-attribute

tools_called: bool
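
A minimal sketch of what a tool-call parser might return; arguments is a JSON-encoded string, mirroring the OpenAI function-calling format (the tool name and arguments are illustrative).

from vllm.entrypoints.openai.engine.protocol import (
    ExtractedToolCallInformation,
    FunctionCall,
    ToolCall,
)

info = ExtractedToolCallInformation(
    tools_called=True,
    tool_calls=[
        ToolCall(function=FunctionCall(name="get_weather", arguments='{"city": "Paris"}'))
    ],
    content=None,  # content and tool calls can, rarely, both be present
)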

FunctionCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class FunctionCall(OpenAIBaseModel):
    name: str
    arguments: str

arguments instance-attribute

arguments: str

name instance-attribute

name: str

FunctionDefinition

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class FunctionDefinition(OpenAIBaseModel):
    name: str
    description: str | None = None
    parameters: dict[str, Any] | None = None

description class-attribute instance-attribute

description: str | None = None

name instance-attribute

name: str

parameters class-attribute instance-attribute

parameters: dict[str, Any] | None = None
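
A minimal sketch of a tool definition; parameters holds a JSON Schema object describing the function arguments (the tool itself is illustrative).

from vllm.entrypoints.openai.engine.protocol import FunctionDefinition

weather_tool = FunctionDefinition(
    name="get_weather",
    description="Look up the current weather for a city.",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
)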

GenerateRequest

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class GenerateRequest(BaseModel):
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    token_ids: list[int]
    """The token ids to generate text from."""

    # features: MultiModalFeatureSpec
    # TODO (NickLucche): implement once Renderer work is completed
    features: str | None = None
    """The processed MM inputs for the model."""

    sampling_params: SamplingParams
    """The sampling parameters for the model."""

    model: str | None = None

    stream: bool | None = False
    stream_options: StreamOptions | None = None
    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None,
        description="KVTransfer parameters used for disaggregated serving.",
    )

cache_salt class-attribute instance-attribute

cache_salt: str | None = Field(
    default=None,
    description="If specified, the prefix cache will be salted with the provided string to prevent an attacker to guess prompts in multi-user environments. The salt should be random, protected from access by 3rd parties, and long enough to be unpredictable (e.g., 43 characters base64-encoded, corresponding to 256 bit).",
)

features class-attribute instance-attribute

features: str | None = None

The processed MM inputs for the model.

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None,
    description="KVTransfer parameters used for disaggregated serving.",
)

model class-attribute instance-attribute

model: str | None = None

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

request_id class-attribute instance-attribute

request_id: str = Field(
    default_factory=random_uuid,
    description="The request_id related to this request. If the caller does not set it, a random_uuid will be generated. This id is used through out the inference process and return in response.",
)

sampling_params instance-attribute

sampling_params: SamplingParams

The sampling parameters for the model.

stream class-attribute instance-attribute

stream: bool | None = False

stream_options class-attribute instance-attribute

stream_options: StreamOptions | None = None

token_ids instance-attribute

token_ids: list[int]

The token ids to generate text from.
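
A minimal sketch of a generate request built from pre-tokenized input, assuming SamplingParams is importable from the top-level vllm package; the token ids are illustrative.

from vllm import SamplingParams
from vllm.entrypoints.openai.engine.protocol import GenerateRequest

req = GenerateRequest(
    token_ids=[1, 15043, 3186],  # pre-tokenized prompt
    sampling_params=SamplingParams(max_tokens=32, temperature=0.0),
    stream=False,
)
print(req.request_id)  # auto-generated when the caller does not set one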

JsonSchemaResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class JsonSchemaResponseFormat(OpenAIBaseModel):
    name: str
    description: str | None = None
    # schema is the field in openai but that causes conflicts with pydantic so
    # instead use json_schema with an alias
    json_schema: dict[str, Any] | None = Field(default=None, alias="schema")
    strict: bool | None = None

description class-attribute instance-attribute

description: str | None = None

json_schema class-attribute instance-attribute

json_schema: dict[str, Any] | None = Field(
    default=None, alias="schema"
)

name instance-attribute

name: str

strict class-attribute instance-attribute

strict: bool | None = None
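
A minimal sketch; note that the wire-format field is named schema, which maps onto the json_schema attribute via the pydantic alias above.

from vllm.entrypoints.openai.engine.protocol import JsonSchemaResponseFormat

fmt = JsonSchemaResponseFormat.model_validate(
    {
        "name": "city_info",
        "schema": {"type": "object", "properties": {"city": {"type": "string"}}},
        "strict": True,
    }
)
assert fmt.json_schema is not None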

LegacyStructuralTag

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LegacyStructuralTag(OpenAIBaseModel):
    begin: str
    # schema is the field, but that causes conflicts with pydantic so
    # instead use structural_tag_schema with an alias
    structural_tag_schema: dict[str, Any] | None = Field(default=None, alias="schema")
    end: str

begin instance-attribute

begin: str

end instance-attribute

end: str

structural_tag_schema class-attribute instance-attribute

structural_tag_schema: dict[str, Any] | None = Field(
    default=None, alias="schema"
)

LegacyStructuralTagResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LegacyStructuralTagResponseFormat(OpenAIBaseModel):
    type: Literal["structural_tag"]
    structures: list[LegacyStructuralTag]
    triggers: list[str]

structures instance-attribute

structures: list[LegacyStructuralTag]

triggers instance-attribute

triggers: list[str]

type instance-attribute

type: Literal['structural_tag']
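
A minimal sketch of the legacy structural-tag format; each structure again uses the schema alias, and the tag strings are illustrative.

from vllm.entrypoints.openai.engine.protocol import (
    LegacyStructuralTag,
    LegacyStructuralTagResponseFormat,
)

tag_format = LegacyStructuralTagResponseFormat(
    type="structural_tag",
    structures=[
        LegacyStructuralTag.model_validate(
            {"begin": "<tool_call>", "schema": {"type": "object"}, "end": "</tool_call>"}
        )
    ],
    triggers=["<tool_call>"],
)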

LogitsProcessorConstructor

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LogitsProcessorConstructor(BaseModel):
    qualname: str
    args: list[Any] | None = None
    kwargs: dict[str, Any] | None = None

    model_config = ConfigDict(extra="forbid")

args class-attribute instance-attribute

args: list[Any] | None = None

kwargs class-attribute instance-attribute

kwargs: dict[str, Any] | None = None

model_config class-attribute instance-attribute

model_config = ConfigDict(extra='forbid')

qualname instance-attribute

qualname: str
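
A minimal sketch of the two accepted entries in a LogitsProcessors list: a bare qualified name, or a constructor entry with args and kwargs. The qualified names here are hypothetical placeholders, not shipped processors.

from vllm.entrypoints.openai.engine.protocol import LogitsProcessorConstructor

logits_processors = [
    "my_pkg.processors.NoRepeatNGramProcessor",  # hypothetical qualname
    LogitsProcessorConstructor(
        qualname="my_pkg.processors.MinLengthProcessor",  # hypothetical qualname
        args=[8],
        kwargs={"eos_token_id": 2},
    ),
]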

ModelCard

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelCard(OpenAIBaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "vllm"
    root: str | None = None
    parent: str | None = None
    max_model_len: int | None = None
    permission: list[ModelPermission] = Field(default_factory=list)

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

id instance-attribute

id: str

max_model_len class-attribute instance-attribute

max_model_len: int | None = None

object class-attribute instance-attribute

object: str = 'model'

owned_by class-attribute instance-attribute

owned_by: str = 'vllm'

parent class-attribute instance-attribute

parent: str | None = None

permission class-attribute instance-attribute

permission: list[ModelPermission] = Field(
    default_factory=list
)

root class-attribute instance-attribute

root: str | None = None

ModelList

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelList(OpenAIBaseModel):
    object: str = "list"
    data: list[ModelCard] = Field(default_factory=list)

data class-attribute instance-attribute

data: list[ModelCard] = Field(default_factory=list)

object class-attribute instance-attribute

object: str = 'list'
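
A minimal sketch of the payload behind a model listing; created, owned_by, and permission are filled in by the defaults above, and the model id is illustrative.

from vllm.entrypoints.openai.engine.protocol import ModelCard, ModelList

models = ModelList(
    data=[ModelCard(id="meta-llama/Llama-3.1-8B-Instruct", max_model_len=8192)]
)
print(models.model_dump_json())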

ModelPermission

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelPermission(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}")
    object: str = "model_permission"
    created: int = Field(default_factory=lambda: int(time.time()))
    allow_create_engine: bool = False
    allow_sampling: bool = True
    allow_logprobs: bool = True
    allow_search_indices: bool = False
    allow_view: bool = True
    allow_fine_tuning: bool = False
    organization: str = "*"
    group: str | None = None
    is_blocking: bool = False

allow_create_engine class-attribute instance-attribute

allow_create_engine: bool = False

allow_fine_tuning class-attribute instance-attribute

allow_fine_tuning: bool = False

allow_logprobs class-attribute instance-attribute

allow_logprobs: bool = True

allow_sampling class-attribute instance-attribute

allow_sampling: bool = True

allow_search_indices class-attribute instance-attribute

allow_search_indices: bool = False

allow_view class-attribute instance-attribute

allow_view: bool = True

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

group class-attribute instance-attribute

group: str | None = None

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"modelperm-{random_uuid()}"
)

is_blocking class-attribute instance-attribute

is_blocking: bool = False

object class-attribute instance-attribute

object: str = 'model_permission'

organization class-attribute instance-attribute

organization: str = '*'

OpenAIBaseModel

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class OpenAIBaseModel(BaseModel):
    # OpenAI API does allow extra fields
    model_config = ConfigDict(extra="allow")

    # Cache class field names
    field_names: ClassVar[set[str] | None] = None

    @model_validator(mode="wrap")
    @classmethod
    def __log_extra_fields__(cls, data, handler):
        result = handler(data)
        if not isinstance(data, dict):
            return result
        field_names = cls.field_names
        if field_names is None:
            # Get all class field names and their potential aliases
            field_names = set()
            for field_name, field in cls.model_fields.items():
                field_names.add(field_name)
                if alias := getattr(field, "alias", None):
                    field_names.add(alias)
            cls.field_names = field_names

        # Compare against both field names and aliases
        if any(k not in field_names for k in data):
            logger.warning(
                "The following fields were present in the request but ignored: %s",
                data.keys() - field_names,
            )
        return result

field_names class-attribute

field_names: set[str] | None = None

model_config class-attribute instance-attribute

model_config = ConfigDict(extra='allow')

__log_extra_fields__ classmethod

__log_extra_fields__(data, handler)

Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="wrap")
@classmethod
def __log_extra_fields__(cls, data, handler):
    result = handler(data)
    if not isinstance(data, dict):
        return result
    field_names = cls.field_names
    if field_names is None:
        # Get all class field names and their potential aliases
        field_names = set()
        for field_name, field in cls.model_fields.items():
            field_names.add(field_name)
            if alias := getattr(field, "alias", None):
                field_names.add(alias)
        cls.field_names = field_names

    # Compare against both field names and aliases
    if any(k not in field_names for k in data):
        logger.warning(
            "The following fields were present in the request but ignored: %s",
            data.keys() - field_names,
        )
    return result
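
A minimal sketch of the behavior implemented by this wrap validator: unknown request fields are accepted (extra="allow") but a warning listing the ignored keys is logged. StreamOptions is used here only because it subclasses OpenAIBaseModel.

from vllm.entrypoints.openai.engine.protocol import StreamOptions

opts = StreamOptions.model_validate(
    {"include_usage": True, "unknown_field": 123}  # logs a warning naming 'unknown_field'
)
assert opts.include_usage is True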

PromptTokenUsageInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class PromptTokenUsageInfo(OpenAIBaseModel):
    cached_tokens: int | None = None

cached_tokens class-attribute instance-attribute

cached_tokens: int | None = None

RequestResponseMetadata

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class RequestResponseMetadata(BaseModel):
    request_id: str
    final_usage_info: UsageInfo | None = None

final_usage_info class-attribute instance-attribute

final_usage_info: UsageInfo | None = None

request_id instance-attribute

request_id: str

ResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseFormat(OpenAIBaseModel):
    # type must be "json_schema", "json_object", or "text"
    type: Literal["text", "json_object", "json_schema"]
    json_schema: JsonSchemaResponseFormat | None = None

json_schema class-attribute instance-attribute

json_schema: JsonSchemaResponseFormat | None = None

type instance-attribute

type: Literal['text', 'json_object', 'json_schema']
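
A minimal sketch of requesting structured JSON output; the nested payload uses the schema alias documented for JsonSchemaResponseFormat above, and the schema itself is illustrative.

from vllm.entrypoints.openai.engine.protocol import ResponseFormat

rf = ResponseFormat.model_validate(
    {
        "type": "json_schema",
        "json_schema": {
            "name": "answer",
            "schema": {"type": "object", "properties": {"value": {"type": "integer"}}},
        },
    }
)
assert rf.json_schema is not None and rf.json_schema.name == "answer"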

StreamOptions

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class StreamOptions(OpenAIBaseModel):
    include_usage: bool | None = True
    continuous_usage_stats: bool | None = False

continuous_usage_stats class-attribute instance-attribute

continuous_usage_stats: bool | None = False

include_usage class-attribute instance-attribute

include_usage: bool | None = True

StructuralTagResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class StructuralTagResponseFormat(OpenAIBaseModel):
    type: Literal["structural_tag"]
    format: Any

format instance-attribute

format: Any

type instance-attribute

type: Literal['structural_tag']

ToolCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ToolCall(OpenAIBaseModel):
    id: str = Field(default_factory=make_tool_call_id)
    type: Literal["function"] = "function"
    function: FunctionCall

function instance-attribute

function: FunctionCall

id class-attribute instance-attribute

id: str = Field(default_factory=make_tool_call_id)

type class-attribute instance-attribute

type: Literal['function'] = 'function'

UsageInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class UsageInfo(OpenAIBaseModel):
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: int | None = 0
    prompt_tokens_details: PromptTokenUsageInfo | None = None

completion_tokens class-attribute instance-attribute

completion_tokens: int | None = 0

prompt_tokens class-attribute instance-attribute

prompt_tokens: int = 0

prompt_tokens_details class-attribute instance-attribute

prompt_tokens_details: PromptTokenUsageInfo | None = None

total_tokens class-attribute instance-attribute

total_tokens: int = 0
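
A minimal sketch of a usage block that also reports prefix-cache hits via prompt_tokens_details.cached_tokens; the numbers are illustrative.

from vllm.entrypoints.openai.engine.protocol import PromptTokenUsageInfo, UsageInfo

usage = UsageInfo(
    prompt_tokens=128,
    completion_tokens=32,
    total_tokens=160,
    prompt_tokens_details=PromptTokenUsageInfo(cached_tokens=96),
)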

get_logits_processors

get_logits_processors(
    processors: LogitsProcessors | None, pattern: str | None
) -> list[Any] | None

Source code in vllm/entrypoints/openai/engine/protocol.py
def get_logits_processors(
    processors: LogitsProcessors | None, pattern: str | None
) -> list[Any] | None:
    if processors and pattern:
        logits_processors = []
        for processor in processors:
            qualname = processor if isinstance(processor, str) else processor.qualname
            if not re.match(pattern, qualname):
                raise ValueError(
                    f"Logits processor '{qualname}' is not allowed by this "
                    "server. See --logits-processor-pattern engine argument "
                    "for more information."
                )
            try:
                logits_processor = resolve_obj_by_qualname(qualname)
            except Exception as e:
                raise ValueError(
                    f"Logits processor '{qualname}' could not be resolved: {e}"
                ) from e
            if isinstance(processor, LogitsProcessorConstructor):
                logits_processor = logits_processor(
                    *processor.args or [], **processor.kwargs or {}
                )
            logits_processors.append(logits_processor)
        return logits_processors
    elif processors:
        raise ValueError(
            "The `logits_processors` argument is not supported by this "
            "server. See --logits-processor-pattern engine argument "
            "for more information."
        )
    return None
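
A minimal sketch of how the allow-list pattern gates resolution. The qualname below is a hypothetical placeholder, so resolution fails and the ValueError branch shown above is taken; with a real, importable class the resolved (and, for constructor entries, instantiated) processors are returned instead.

from vllm.entrypoints.openai.engine.protocol import get_logits_processors

try:
    get_logits_processors(
        processors=["my_pkg.processors.NoRepeatNGramProcessor"],  # hypothetical qualname
        pattern=r"my_pkg\.processors\..*",
    )
except ValueError as e:
    print(e)  # "Logits processor '...' could not be resolved: ..."
# Passing processors while pattern is None raises a ValueError instead.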