{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "e0cc43b9-b88b-4d72-8533-a6d442d41f3e",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor\n",
"from peft import PeftModel\n",
"from qwen_vl_utils import process_vision_info\n",
"\n",
"# --------------------------------\n",
"# System/User Prompts - EXACTLY as in training\n",
"# --------------------------------\n",
"SYSTEM_PROMPT = \"\"\"A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively.\"\"\"\n",
"USER_PROMPT = \"Classify the given image into: product, non product, loading or captcha. If product, also classify product flags.\"\n",
"\n",
"# --------------------------------\n",
"# Load base model exactly as in training\n",
"# --------------------------------\n",
"model_id = \"Qwen/Qwen2.5-VL-7B-Instruct\"\n",
"base_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(\n",
" model_id, \n",
" torch_dtype=torch.bfloat16,\n",
" device_map=\"auto\"\n",
")\n",
"\n",
"# --------------------------------\n",
"# Load LoRA weights\n",
"# --------------------------------\n",
"lora_checkpoint_path = \"checkpoint-376\"\n",
"model = PeftModel.from_pretrained(\n",
" base_model,\n",
" lora_checkpoint_path,\n",
" torch_dtype=torch.bfloat16\n",
")\n",
"\n",
"# MUST merge the model for it to work properly\n",
"model = model.merge_and_unload()\n",
"\n",
"# --------------------------------\n",
"# Load processor - same as training\n",
"# --------------------------------\n",
"processor = AutoProcessor.from_pretrained(model_id)\n",
"\n",
"# --------------------------------\n",
"# Prepare input - formatted exactly like in training\n",
"# --------------------------------\n",
"image_url = \"https://f005.backblazeb2.com/file/prod-ss-product-images-compressed/00126be6-a52d-45f3-9548-69d12d0213eb.webp\"\n",
"\n",
"# Match the exact message format from collate_fn in training\n",
"messages = [\n",
" {\n",
" \"role\": \"system\",\n",
" \"content\": [{\"type\": \"text\", \"text\": SYSTEM_PROMPT}]\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\"type\": \"image\", \"image\": image_url},\n",
" {\"type\": \"text\", \"text\": USER_PROMPT}\n",
" ]\n",
" }\n",
"]\n",
"\n",
"# Apply chat template exactly like in training\n",
"text = processor.apply_chat_template(\n",
" messages, tokenize=False, add_generation_prompt=True\n",
")\n",
"image_inputs, video_inputs = process_vision_info(messages)\n",
"inputs = processor(\n",
" text=[text],\n",
" images=image_inputs,\n",
" videos=video_inputs,\n",
" padding=True,\n",
" return_tensors=\"pt\"\n",
")\n",
"inputs = inputs.to(model.device)\n",
"\n",
"# --------------------------------\n",
"# Generation - use deterministic settings\n",
"# --------------------------------\n",
"model.eval()\n",
"with torch.no_grad():\n",
" generated_ids = model.generate(\n",
" **inputs, \n",
" max_new_tokens=1024,\n",
" )\n",
"\n",
"# --------------------------------\n",
"# Process output - no post-processing\n",
"# --------------------------------\n",
"prompt_len = inputs[\"input_ids\"].shape[1]\n",
"generated_ids_trimmed = [out[prompt_len:] for out in generated_ids]\n",
"output_text = processor.batch_decode(\n",
" generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False\n",
")\n",
"\n",
"print(\"\\nModel output:\")\n",
"print(output_text[0])"
]
}
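,
{
"cell_type": "markdown",
"id": "c1f2a3b4-parse-note",
"metadata": {},
"source": [
"The prompt format asks the model to wrap its reasoning in `<think> </think>` and the final classification in `<answer> </answer>` tags, so the decoded text can be split with a small regex. The cell below is a minimal sketch, not part of the training code: it assumes the fine-tuned model emits well-formed tags and falls back to the raw text when a tag is missing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5e6f7a8-parse-output",
"metadata": {},
"outputs": [],
"source": [
"import re\n",
"\n",
"def extract_tag(text, tag):\n",
"    # Return the content of the first <tag>...</tag> span, or None if absent\n",
"    match = re.search(rf\"<{tag}>(.*?)</{tag}>\", text, re.DOTALL)\n",
"    return match.group(1).strip() if match else None\n",
"\n",
"raw = output_text[0]\n",
"reasoning = extract_tag(raw, \"think\")\n",
"answer = extract_tag(raw, \"answer\")\n",
"\n",
"print(\"Reasoning:\", reasoning)\n",
"print(\"Answer:\", answer if answer is not None else raw)"
]
}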
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}