vllm.renderers.terratorch

logger module-attribute

logger = init_logger(__name__)

TerratorchRenderer

Bases: RendererLike

Source code in vllm/renderers/terratorch.py
class TerratorchRenderer(RendererLike):
    @classmethod
    def from_config(
        cls,
        config: "ModelConfig",
        tokenizer_kwargs: dict[str, Any],
    ) -> "RendererLike":
        return cls(config)

    def __init__(self, config: ModelConfig) -> None:
        super().__init__()

        self.config = config

        if not config.skip_tokenizer_init:
            raise ValueError("Terratorch renderer requires `skip_tokenizer_init=True`")

    @property
    def tokenizer(self) -> TokenizerLike | None:
        return None

    def get_tokenizer(self) -> TokenizerLike:
        raise ValueError("Tokenizer not available for Terratorch renderer")

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        **kwargs,
    ) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
        model_config = self.config

        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            model_config,
            content_format="string",
        )

        prompt = TokensPrompt(prompt_token_ids=[1])  # Dummy token IDs
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids

        return conversation, prompt

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        **kwargs,
    ) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
        model_config = self.config

        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            model_config,
            content_format="string",
        )

        prompt = TokensPrompt(prompt_token_ids=[1])  # Dummy token IDs
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids

        return conversation, prompt
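
TerratorchRenderer never tokenizes text: every prompt it returns is a TokensPrompt whose prompt_token_ids is the single dummy token [1], and the real model input travels in multi_modal_data. A minimal construction sketch follows; the ModelConfig arguments and the model name are illustrative assumptions, not taken from this module.

from vllm.config import ModelConfig
from vllm.renderers.terratorch import TerratorchRenderer

# Assumption: ModelConfig accepts `model` and `skip_tokenizer_init` as
# constructor arguments. skip_tokenizer_init=True is the one requirement
# this renderer enforces.
config = ModelConfig(
    model="ibm-nasa-geospatial/Prithvi-EO-2.0-300M",  # hypothetical model name
    skip_tokenizer_init=True,
)
renderer = TerratorchRenderer.from_config(config, tokenizer_kwargs={})
assert renderer.tokenizer is None  # no tokenizer is ever attached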

config instance-attribute

config = config

tokenizer property

tokenizer: TokenizerLike | None

__init__

__init__(config: ModelConfig) -> None
Source code in vllm/renderers/terratorch.py
def __init__(self, config: ModelConfig) -> None:
    super().__init__()

    self.config = config

    if not config.skip_tokenizer_init:
        raise ValueError("Terratorch renderer requires `skip_tokenizer_init=True`")
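
The constructor performs exactly one check. A sketch of both paths, using types.SimpleNamespace as a stand-in for ModelConfig (an illustrative assumption; only the skip_tokenizer_init attribute is read here):

from types import SimpleNamespace

from vllm.renderers.terratorch import TerratorchRenderer

TerratorchRenderer(SimpleNamespace(skip_tokenizer_init=True))   # accepted
TerratorchRenderer(SimpleNamespace(skip_tokenizer_init=False))  # raises ValueError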

from_config classmethod

from_config(
    config: ModelConfig, tokenizer_kwargs: dict[str, Any]
) -> RendererLike
Source code in vllm/renderers/terratorch.py
@classmethod
def from_config(
    cls,
    config: "ModelConfig",
    tokenizer_kwargs: dict[str, Any],
) -> "RendererLike":
    return cls(config)
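
tokenizer_kwargs is accepted only to match the RendererLike factory signature and is discarded, so from_config(config, tokenizer_kwargs) is equivalent to TerratorchRenderer(config). A sketch, assuming config is a ModelConfig with skip_tokenizer_init=True:

# The kwargs below are silently ignored by this renderer.
renderer = TerratorchRenderer.from_config(config, tokenizer_kwargs={"use_fast": True})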

get_tokenizer

get_tokenizer() -> TokenizerLike
Source code in vllm/renderers/terratorch.py
def get_tokenizer(self) -> TokenizerLike:
    raise ValueError("Tokenizer not available for Terratorch renderer")
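
Because get_tokenizer always raises, callers that may receive this renderer should probe the tokenizer property instead, which returns None. A defensive-access sketch:

tok = renderer.tokenizer
if tok is None:
    # Terratorch models carry no tokenizer; skip any text decoding.
    pass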

render_messages

render_messages(
    messages: list[ChatCompletionMessageParam], **kwargs
) -> tuple[
    list[ConversationMessage], TextPrompt | TokensPrompt
]
Source code in vllm/renderers/terratorch.py
def render_messages(
    self,
    messages: list[ChatCompletionMessageParam],
    **kwargs,
) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
    model_config = self.config

    conversation, mm_data, mm_uuids = parse_chat_messages(
        messages,
        model_config,
        content_format="string",
    )

    prompt = TokensPrompt(prompt_token_ids=[1])  # Dummy token IDs
    if mm_data is not None:
        prompt["multi_modal_data"] = mm_data
    if mm_uuids is not None:
        prompt["multi_modal_uuids"] = mm_uuids

    return conversation, prompt
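
A usage sketch for an image-bearing message; the message shape follows the OpenAI-style chat format that parse_chat_messages consumes, and the URL is illustrative:

messages = [{
    "role": "user",
    "content": [
        {"type": "image_url",
         "image_url": {"url": "https://example.com/scene.tif"}},  # hypothetical input
    ],
}]
conversation, prompt = renderer.render_messages(messages)

assert prompt["prompt_token_ids"] == [1]   # dummy placeholder token
mm_data = prompt.get("multi_modal_data")   # parsed media, present only if any was given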

render_messages_async async

render_messages_async(
    messages: list[ChatCompletionMessageParam], **kwargs
) -> tuple[
    list[ConversationMessage], TextPrompt | TokensPrompt
]
Source code in vllm/renderers/terratorch.py
async def render_messages_async(
    self,
    messages: list[ChatCompletionMessageParam],
    **kwargs,
) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
    model_config = self.config

    conversation, mm_data, mm_uuids = await parse_chat_messages_async(
        messages,
        model_config,
        content_format="string",
    )

    prompt = TokensPrompt(prompt_token_ids=[1])  # Dummy token IDs
    if mm_data is not None:
        prompt["multi_modal_data"] = mm_data
    if mm_uuids is not None:
        prompt["multi_modal_uuids"] = mm_uuids

    return conversation, prompt
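
The async variant differs only in awaiting parse_chat_messages_async, which lets any media fetching run without blocking the event loop. A sketch, reusing renderer and messages from the synchronous example above:

import asyncio

async def main():
    conversation, prompt = await renderer.render_messages_async(messages)
    return prompt

prompt = asyncio.run(main())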