Add `{% generation %}` markers to support `tokenizer.apply_chat_template` with `return_assistant_tokens_mask`

#25
by zeromquan - opened
# Repro: Qwen3-Coder's chat template lacks a `{% generation %}` block, so
# `return_assistant_tokens_mask=True` yields an all-zero assistant mask.
from transformers import AutoTokenizer  # fix: original snippet never imported this

model_name_or_path = "Qwen/Qwen3-Coder-30B-A3B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(
    model_name_or_path,
    trust_remote_code=True,
    use_fast=True,
)

example = {
    "messages": [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi! How can I help you? "},
    ],
}
processed = tokenizer.apply_chat_template(  # fix: stray space before .apply_chat_template
    example["messages"],
    return_dict=True,
    return_assistant_tokens_mask=True,
    # `example` has no "tools" key, so .get() returns None (no tools passed).
    tools=example.get("tools"),
)

# Use an explicit check instead of `assert`: asserts are stripped under `python -O`,
# and this is the condition the issue is about (mask should flag assistant tokens).
if 1 not in processed["assistant_masks"]:
    raise RuntimeError(
        "assistant_masks contains no 1s — chat template is missing "
        "{% generation %} markers around assistant content"
    )

Sign up or log in to comment