{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "e0cc43b9-b88b-4d72-8533-a6d442d41f3e",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor\n",
"from peft import PeftModel\n",
"from qwen_vl_utils import process_vision_info\n",
"\n",
"# --------------------------------\n",
"# System/User Prompts - EXACTLY as in training\n",
"# --------------------------------\n",
"SYSTEM_PROMPT = \"\"\"A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within and tags, respectively.\"\"\"\n",
"USER_PROMPT = \"Classify the given image into: product, non product, loading or captcha. If product, also classify product flags.\"\n",
"\n",
"# --------------------------------\n",
"# Load base model exactly as in training\n",
"# --------------------------------\n",
"model_id = \"Qwen/Qwen2.5-VL-7B-Instruct\"\n",
"base_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
" model_id, \n",
" torch_dtype=torch.bfloat16,\n",
" device_map=\"auto\"\n",
")\n",
"\n",
"# --------------------------------\n",
"# Load LoRA weights\n",
"# --------------------------------\n",
"lora_checkpoint_path = \"checkpoint-376\"\n",
"model = PeftModel.from_pretrained(\n",
" base_model,\n",
" lora_checkpoint_path,\n",
" torch_dtype=torch.bfloat16\n",
")\n",
"\n",
"# MUST merge the model for it to work properly\n",
"model = model.merge_and_unload()\n",
"\n",
"# --------------------------------\n",
"# Load processor - same as training\n",
"# --------------------------------\n",
"processor = AutoProcessor.from_pretrained(model_id)\n",
"\n",
"# --------------------------------\n",
"# Prepare input - formatted exactly like in training\n",
"# --------------------------------\n",
"image_url = \"https://f005.backblazeb2.com/file/prod-ss-product-images-compressed/00126be6-a52d-45f3-9548-69d12d0213eb.webp\"\n",
"\n",
"# Match the exact message format from collate_fn in training\n",
"messages = [\n",
" {\n",
" \"role\": \"system\",\n",
" \"content\": [{\"type\": \"text\", \"text\": SYSTEM_PROMPT}]\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\"type\": \"image\", \"image\": image_url},\n",
" {\"type\": \"text\", \"text\": USER_PROMPT}\n",
" ]\n",
" }\n",
"]\n",
"\n",
"# Apply chat template exactly like in training\n",
"text = processor.apply_chat_template(\n",
" messages, tokenize=False, add_generation_prompt=True\n",
")\n",
"image_inputs, video_inputs = process_vision_info(messages)\n",
"inputs = processor(\n",
" text=[text],\n",
" images=image_inputs,\n",
" videos=video_inputs,\n",
" padding=True,\n",
" return_tensors=\"pt\"\n",
")\n",
"inputs = inputs.to(model.device)\n",
"\n",
"# --------------------------------\n",
"# Generation - use deterministic settings\n",
"# --------------------------------\n",
"model.eval()\n",
"with torch.no_grad():\n",
" generated_ids = model.generate(\n",
" **inputs, \n",
" max_new_tokens=1024,\n",
" )\n",
"\n",
"# --------------------------------\n",
"# Process output - no post-processing\n",
"# --------------------------------\n",
"prompt_len = inputs[\"input_ids\"].shape[1]\n",
"generated_ids_trimmed = [out[prompt_len:] for out in generated_ids]\n",
"output_text = processor.batch_decode(\n",
" generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False\n",
")\n",
"\n",
"print(\"\\nModel output:\")\n",
"print(output_text[0])"
]
}
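,
{
"cell_type": "code",
"execution_count": null,
"id": "d7e1f2a3-parse-answer",
"metadata": {},
"outputs": [],
"source": [
"# Optional: extract the final answer from the tagged output.\n",
"# Minimal sketch assuming the fine-tuned model follows the training format and wraps its\n",
"# answer in <answer> ... </answer> tags; falls back to the raw text if no tags are found.\n",
"import re\n",
"\n",
"match = re.search(\"<answer>(.*?)</answer>\", output_text[0], re.DOTALL)\n",
"answer = match.group(1).strip() if match else output_text[0].strip()\n",
"print(\"Parsed answer:\", answer)"
]
}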
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}