Upload folder using huggingface_hub
- Eagle2-2B/.gitattributes +35 -0
- Eagle2-2B/README.md +576 -0
- Eagle2-2B/added_tokens.json +33 -0
- Eagle2-2B/config.json +206 -0
- Eagle2-2B/configuration_eagle_chat.py +102 -0
- Eagle2-2B/demo.py +428 -0
- Eagle2-2B/generation_config.json +4 -0
- Eagle2-2B/merges.txt +0 -0
- Eagle2-2B/model.safetensors +3 -0
- Eagle2-2B/modeling_eagle_chat.py +450 -0
- Eagle2-2B/special_tokens_map.json +40 -0
- Eagle2-2B/tokenizer_config.json +289 -0
- Eagle2-2B/vocab.json +0 -0
- empty_language_adapter/README.md +202 -0
- empty_language_adapter/adapter_config.json +32 -0
- empty_language_adapter/adapter_model.safetensors +3 -0
Eagle2-2B/.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
Eagle2-2B/README.md
ADDED
@@ -0,0 +1,576 @@
---
license: cc-by-nc-4.0
pipeline_tag: image-text-to-text
library_name: transformers
base_model:
- google/paligemma-3b-mix-448
- Qwen/Qwen2.5-1.5B-Instruct
- google/siglip-so400m-patch14-384
base_model_relation: merge
language:
- multilingual
tags:
- eagle
- VLM
---

# Eagle-2

[\[📂 GitHub\]](https://github.com/NVlabs/EAGLE) [\[📜 Eagle2 Tech Report\]](http://arxiv.org/abs/2501.14818)
[\[🗨️ Chat Demo\]](http://eagle-vlm.xyz/) [\[🤗 HF Demo\]](TODO)

## Introduction

We are thrilled to release our latest Eagle2 series of vision-language models. Open-source vision-language models (VLMs) have made significant strides in narrowing the gap with proprietary models. However, critical details about data strategies and implementation are often missing, limiting reproducibility and innovation. In this project, we focus on VLM post-training from a data-centric perspective, sharing insights into building effective data strategies from scratch. By combining these strategies with robust training recipes and model design, we introduce Eagle2, a family of performant VLMs. Our work aims to empower the open-source community to develop competitive VLMs with transparent processes.

In this repo, we are open-sourcing Eagle2-2B, a lightweight model that achieves remarkable efficiency and speed while maintaining solid performance.

## Model Zoo

We provide the following models:

| model name | LLM | Vision | Max Length | HF Link |
| ---------- | --- | ------ | ---------- | ------- |
| Eagle2-1B | [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) | Siglip | 16K | [🤗 link](https://huggingface.co/NVIDIA/Eagle2-1B) |
| Eagle2-2B | [Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) | Siglip | 16K | [🤗 link](https://huggingface.co/NVIDIA/Eagle2-2B) |
| Eagle2-9B | [Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) | Siglip+ConvNext | 16K | [🤗 link](https://huggingface.co/NVIDIA/Eagle2-9B) |

## Benchmark Results

| Benchmark | InternVL2-2B | InternVL2.5-2B | InternVL2-4B | Qwen2-VL-2B | Eagle2-2B |
| :--------------------------: | :---: | :---: | :---: | :---: | :---: |
| DocVQA<sub>test</sub> | 86.9 | 88.7 | 89.2 | 90.1 | 88.0 |
| ChartQA<sub>test</sub> | 76.2 | 79.2 | 81.5 | 73.0 | 82.0 |
| InfoVQA<sub>test</sub> | 58.9 | 60.9 | 67.0 | 65.5 | 65.8 |
| TextVQA<sub>val</sub> | 73.4 | 74.3 | 74.4 | 79.7 | 79.1 |
| OCRBench | 784 | 804 | 788 | 809 | 818 |
| MME<sub>sum</sub> | 1876.8 | 2138.2 | 2059.8 | 1872.0 | 2109.8 |
| RealWorldQA | 57.3 | 60.1 | 60.7 | 62.6 | 63.1 |
| AI2D<sub>test</sub> | 74.1 | 74.9 | 74.7 | 78.9 | 79.3 |
| MMMU<sub>val</sub> | 36.3 | 43.6 | 47.9 | 41.1 | 43.1 |
| MMVet<sub>GPT-4-Turbo</sub> | 39.5 | 60.8 | 51.0 | 49.5 | 53.8 |
| HallBench<sub>avg</sub> | 37.9 | 42.6 | 41.9 | 41.7 | 45.8 |
| MathVista<sub>testmini</sub> | 46.3 | 51.3 | 58.6 | 43.0 | 54.7 |
| MMstar | 50.1 | 53.7 | 54.3 | 48.0 | 56.4 |

## Quick Start

We provide a [demo inference script](./demo.py) to help you quickly start using the model. We support different input types:
- pure text input
- single image input
- multiple image input
- video input

### 0. Install the dependencies

```bash
pip install transformers
pip install flash-attn
```
**Note**: the latest version of transformers is not compatible with the model. The bundled `config.json` was produced with transformers 4.37.2, so pinning to a 4.37.x release is a reasonable starting point.
### 1. Prepare the Model worker

<details>
<summary>Click to expand</summary>

```python
"""
A model worker executes the model.
Copied and modified from https://github.com/OpenGVLab/InternVL/blob/main/streamlit_demo/model_worker.py
"""
# Importing torch before transformers can cause `segmentation fault`
from transformers import AutoModel, AutoTokenizer, TextIteratorStreamer, AutoConfig

import argparse
import base64
import json
import os
import decord
import threading
import time
from io import BytesIO
from threading import Thread
import math
import requests
import torch
import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
import numpy as np


IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

SIGLIP_MEAN = (0.5, 0.5, 0.5)
SIGLIP_STD = (0.5, 0.5, 0.5)


def get_seq_frames(total_num_frames, desired_num_frames=-1, stride=-1):
    """
    Calculate the indices of frames to extract from a video.

    Parameters:
        total_num_frames (int): Total number of frames in the video.
        desired_num_frames (int): Desired number of frames to extract.

    Returns:
        list: List of indices of frames to extract.
    """
    # Exactly one of desired_num_frames / stride must be set
    assert desired_num_frames > 0 or stride > 0 and not (desired_num_frames > 0 and stride > 0)

    if stride > 0:
        return list(range(0, total_num_frames, stride))

    # Calculate the size of each segment from which a frame will be extracted
    seg_size = float(total_num_frames - 1) / desired_num_frames

    seq = []
    for i in range(desired_num_frames):
        # Calculate the start and end indices of each segment
        start = int(np.round(seg_size * i))
        end = int(np.round(seg_size * (i + 1)))

        # Append the middle index of the segment to the list
        seq.append((start + end) // 2)

    return seq

def build_video_prompt(meta_list, num_frames, time_position=False):
    # if time_position is True, the frame timestamp is used.
    # 1. pass time_position, 2. use env TIME_POSITION
    time_position = os.environ.get("TIME_POSITION", time_position)
    prefix = "This is a video:\n"
    for i in range(num_frames):
        if time_position:
            frame_txt = f"Frame {i+1} sampled at {meta_list[i]:.2f} seconds: <image>\n"
        else:
            frame_txt = f"Frame {i+1}: <image>\n"
        prefix += frame_txt
    return prefix

def load_video(video_path, num_frames=64, frame_cache_root=None):
    if isinstance(video_path, str):
        video = decord.VideoReader(video_path)
    elif isinstance(video_path, dict):
        assert False, 'dict-style "video_path" input is not supported'
    fps = video.get_avg_fps()
    sampled_frames = get_seq_frames(len(video), num_frames)
    sampled_timestamps = [i / fps for i in sampled_frames]
    frames = video.get_batch(sampled_frames).asnumpy()
    images = [Image.fromarray(frame) for frame in frames]

    return images, build_video_prompt(sampled_timestamps, len(images), time_position=True)

def load_image(image):
    if isinstance(image, str) and os.path.exists(image):
        return Image.open(image)
    elif isinstance(image, dict):
        if 'disk_path' in image:
            return Image.open(image['disk_path'])
        elif 'base64' in image:
            return Image.open(BytesIO(base64.b64decode(image['base64'])))
        elif 'url' in image:
            response = requests.get(image['url'])
            return Image.open(BytesIO(response.content))
        elif 'bytes' in image:
            return Image.open(BytesIO(image['bytes']))
        else:
            raise ValueError(f'Invalid image: {image}')
    else:
        raise ValueError(f'Invalid image: {image}')

def build_transform(input_size, norm_type='imagenet'):
    if norm_type == 'imagenet':
        MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    elif norm_type == 'siglip':
        MEAN, STD = SIGLIP_MEAN, SIGLIP_STD

    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """
    The previous version mainly focused on the aspect ratio.
    We also consider the area ratio here.
    """
    best_factor = float('-inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        area_ratio = (ratio[0] * ratio[1] * image_size * image_size) / area
        """
        new area > 60% of original image area is enough.
        """
        factor_based_on_area_n_ratio = min((ratio[0] * ratio[1] * image_size * image_size) / area, 0.6) * \
            min(target_aspect_ratio / aspect_ratio, aspect_ratio / target_aspect_ratio)

        if factor_based_on_area_n_ratio > best_factor:
            best_factor = factor_based_on_area_n_ratio
            best_ratio = ratio

    return best_ratio


def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

def split_model(model_path, device):
    # Pin the vision tower, projector, embeddings, norm/lm_head and the last LLM layer
    # to `device`; distribute the remaining LLM layers across all visible GPUs.
    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers

    print('world_size', world_size)
    num_layers_per_gpu_ = math.floor(num_layers / (world_size - 1))
    num_layers_per_gpu = [num_layers_per_gpu_] * world_size
    num_layers_per_gpu[device] = num_layers - num_layers_per_gpu_ * (world_size - 1)
    print(num_layers_per_gpu)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = device
    device_map['mlp1'] = device
    device_map['language_model.model.tok_embeddings'] = device
    device_map['language_model.model.embed_tokens'] = device
    device_map['language_model.output'] = device
    device_map['language_model.model.norm'] = device
    device_map['language_model.lm_head'] = device
    device_map['language_model.model.rotary_emb'] = device
    device_map[f'language_model.model.layers.{num_layers - 1}'] = device
    return device_map

class ModelWorker:
    def __init__(self, model_path, model_name,
                 load_8bit, device):

        if model_path.endswith('/'):
            model_path = model_path[:-1]
        if model_name is None:
            model_paths = model_path.split('/')
            if model_paths[-1].startswith('checkpoint-'):
                self.model_name = model_paths[-2] + '_' + model_paths[-1]
            else:
                self.model_name = model_paths[-1]
        else:
            self.model_name = model_name

        print(f'Loading the model {self.model_name}')

        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
        tokens_to_keep = ['<box>', '</box>', '<ref>', '</ref>']
        tokenizer.additional_special_tokens = [item for item in tokenizer.additional_special_tokens if item not in tokens_to_keep]
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
        model_type = config.vision_config.model_type
        self.device = torch.cuda.current_device()
        if model_type == 'siglip_vision_model':
            self.norm_type = 'siglip'
        elif model_type == 'MOB':
            self.norm_type = 'siglip'
        else:
            self.norm_type = 'imagenet'

        if any(x in model_path.lower() for x in ['34b']):
            device_map = split_model(model_path, self.device)
        else:
            device_map = None

        if device_map is not None:
            self.model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16,
                                                   low_cpu_mem_usage=True,
                                                   device_map=device_map,
                                                   trust_remote_code=True,
                                                   load_in_8bit=load_8bit).eval()
        else:
            self.model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16,
                                                   trust_remote_code=True,
                                                   load_in_8bit=load_8bit).eval()

        if not load_8bit and device_map is None:
            self.model = self.model.to(device)
        self.load_8bit = load_8bit

        self.model_path = model_path
        self.image_size = self.model.config.force_image_size
        self.context_len = tokenizer.model_max_length
        self.per_tile_len = 256

    def reload_model(self):
        del self.model
        torch.cuda.empty_cache()
        if self.device == 'auto':
            os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
            # This can make distributed deployment work properly
            self.model = AutoModel.from_pretrained(
                self.model_path,
                load_in_8bit=self.load_8bit,
                torch_dtype=torch.bfloat16,
                device_map=self.device_map,
                trust_remote_code=True).eval()
        else:
            self.model = AutoModel.from_pretrained(
                self.model_path,
                load_in_8bit=self.load_8bit,
                torch_dtype=torch.bfloat16,
                trust_remote_code=True).eval()
        if not self.load_8bit and not self.device == 'auto':
            self.model = self.model.cuda()

    @torch.inference_mode()
    def generate(self, params):
        system_message = params['prompt'][0]['content']
        send_messages = params['prompt'][1:]
        max_input_tiles = params['max_input_tiles']
        temperature = params['temperature']
        top_p = params['top_p']
        max_new_tokens = params['max_new_tokens']
        repetition_penalty = params['repetition_penalty']
        video_frame_num = params.get('video_frame_num', 64)
        do_sample = True if temperature > 0.0 else False

        global_image_cnt = 0
        history, pil_images, max_input_tile_list = [], [], []
        for message in send_messages:
            if message['role'] == 'user':
                prefix = ''
                if 'image' in message:
                    for image_data in message['image']:
                        pil_images.append(load_image(image_data))
                        prefix = prefix + f'<image {global_image_cnt + 1}><image>\n'
                        global_image_cnt += 1
                        max_input_tile_list.append(max_input_tiles)
                if 'video' in message:
                    for video_data in message['video']:
                        video_frames, tmp_prefix = load_video(video_data, num_frames=video_frame_num)
                        pil_images.extend(video_frames)
                        prefix = prefix + tmp_prefix
                        global_image_cnt += len(video_frames)
                        max_input_tile_list.extend([1] * len(video_frames))
                content = prefix + message['content']
                history.append([content, ])
            else:
                history[-1].append(message['content'])
        question, history = history[-1][0], history[:-1]

        if global_image_cnt == 1:
            question = question.replace('<image 1><image>\n', '<image>\n')
            history = [[item[0].replace('<image 1><image>\n', '<image>\n'), item[1]] for item in history]

        try:
            assert len(max_input_tile_list) == len(pil_images), 'The number of max_input_tile_list and pil_images should be the same.'
        except Exception as e:
            print(f'Error: {e}')
            print(f'max_input_tile_list: {max_input_tile_list}, pil_images: {pil_images}')
            raise e

        old_system_message = self.model.system_message
        self.model.system_message = system_message

        transform = build_transform(input_size=self.image_size, norm_type=self.norm_type)
        if len(pil_images) > 0:
            max_input_tiles_limited_by_context = params['max_input_tiles']
            while True:
                image_tiles = []
                for current_max_input_tiles, pil_image in zip(max_input_tile_list, pil_images):
                    if self.model.config.dynamic_image_size:
                        tiles = dynamic_preprocess(
                            pil_image, image_size=self.image_size, max_num=min(current_max_input_tiles, max_input_tiles_limited_by_context),
                            use_thumbnail=self.model.config.use_thumbnail)
                    else:
                        tiles = [pil_image]
                    image_tiles += tiles
                if (len(image_tiles) * self.per_tile_len < self.context_len):
                    break
                else:
                    max_input_tiles_limited_by_context -= 2

                if max_input_tiles_limited_by_context < 1:
                    break

            pixel_values = [transform(item) for item in image_tiles]
            pixel_values = torch.stack(pixel_values).to(self.model.device, dtype=torch.bfloat16)
            print(f'Split images to {pixel_values.shape}')
        else:
            pixel_values = None

        generation_config = dict(
            num_beams=1,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            max_length=self.context_len,
            top_p=top_p,
        )

        response = self.model.chat(
            tokenizer=self.tokenizer,
            pixel_values=pixel_values,
            question=question,
            history=history,
            return_history=False,
            generation_config=generation_config,
        )
        self.model.system_message = old_system_message
        return {'text': response, 'error_code': 0}


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-path', type=str, default='nvidia/Eagle2-2B')
    parser.add_argument('--model-name', type=str, default='Eagle2-2B')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--load-8bit', action='store_true')
    args = parser.parse_args()
    print(f'args: {args}')

    worker = ModelWorker(
        args.model_path,
        args.model_name,
        args.load_8bit,
        args.device)
```
</details>
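If you only need a one-off reply for a single local image, you can also call `model.chat` directly. The following is a minimal, untested sketch: it assumes the helper functions defined in the worker above (`load_image`, `build_transform`, `dynamic_preprocess`) are already in scope, and the image path is a hypothetical placeholder.

```python
# Minimal single-image sketch (assumes the worker's helper functions are in scope).
import torch
from transformers import AutoModel, AutoTokenizer

path = 'nvidia/Eagle2-2B'
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16,
                                  trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)

image = load_image('path/to/your/image.png')  # hypothetical local path
tiles = dynamic_preprocess(image,
                           image_size=model.config.force_image_size,
                           max_num=model.config.max_dynamic_patch,
                           use_thumbnail=model.config.use_thumbnail)
transform = build_transform(input_size=model.config.force_image_size, norm_type='siglip')
pixel_values = torch.stack([transform(t) for t in tiles]).to(model.device, dtype=torch.bfloat16)

# Single image, so a plain '<image>\n' prefix matches what the worker builds.
response = model.chat(
    tokenizer=tokenizer,
    pixel_values=pixel_values,
    question='<image>\nDescribe this image in details.',
    history=[],
    return_history=False,
    generation_config=dict(max_new_tokens=1024, do_sample=False),
)
print(response)
```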
### 2. Prepare the Prompt

- Single image input
```python
prompt = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Describe this image in details.',
     'image': [
         {'url': 'https://www.nvidia.com/content/dam/en-zz/Solutions/about-nvidia/logo-and-brand/[email protected]'}
     ],
    }
]
```

- Multiple image input
```python
prompt = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Describe these two images in details.',
     'image': [
         {'url': 'https://www.nvidia.com/content/dam/en-zz/Solutions/about-nvidia/logo-and-brand/[email protected]'},
         {'url': 'https://www.nvidia.com/content/dam/en-zz/Solutions/about-nvidia/logo-and-brand/[email protected]'}
     ],
    }
]
```

- Video input
```python
prompt = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Describe this video in details.',
     'video': [
         'path/to/your/video.mp4'
     ],
    }
]
```
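The worker treats every non-`user` message as the reply to the preceding turn (see how `generate` builds `history` above), so multi-turn conversations are expressed by interleaving `assistant` entries. A small sketch; the assistant text shown here is made up purely for illustration:

```python
# Hypothetical multi-turn prompt: assistant turns become `history` inside ModelWorker.generate.
prompt = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {'role': 'user', 'content': 'Describe this image in details.',
     'image': [
         {'url': 'https://www.nvidia.com/content/dam/en-zz/Solutions/about-nvidia/logo-and-brand/[email protected]'}
     ]},
    {'role': 'assistant', 'content': 'The image shows the NVIDIA logo.'},  # made-up earlier reply
    {'role': 'user', 'content': 'What colors appear in the logo?'},
]
```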
### 3. Generate the response
```python
params = {
    'prompt': prompt,
    'max_input_tiles': 24,
    'temperature': 0.7,
    'top_p': 1.0,
    'max_new_tokens': 4096,
    'repetition_penalty': 1.0,
}
worker.generate(params)
```
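Since `generate` derives `do_sample` from the temperature (`do_sample = temperature > 0.0`), greedy decoding only requires setting `temperature` to 0. A short sketch:

```python
# Greedy decoding: temperature 0.0 makes the worker set do_sample=False.
greedy_params = dict(params, temperature=0.0)
result = worker.generate(greedy_params)
print(result['text'])  # generate() returns {'text': ..., 'error_code': 0}
```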
## TODO
- [ ] Support vLLM inference
- [ ] Provide AWQ quantization weights
- [ ] Provide fine-tuning scripts


## License/Terms of Use
- The code is released under the Apache 2.0 license, as found in the [LICENSE](https://huggingface.co/NVEagle/Eagle-X5-13B-Chat/blob/main/LICENSE) file.
- The pretrained model weights are released under the [Creative Commons Attribution: Non-Commercial 4.0 International](https://spdx.org/licenses/CC-BY-NC-4.0) license.
- The service is a research preview intended for non-commercial use only, and is subject to the following licenses and terms:
  - Model License of Qwen2.5-1.5B-Instruct: [Apache-2.0](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct/blob/main/LICENSE)
  - Model License of PaliGemma: [Gemma license](https://ai.google.dev/gemma/terms)


## Citation

## Ethical Considerations
NVIDIA believes Trustworthy AI is a shared responsibility, and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets the requirements for the relevant industry and use case and addresses unforeseen product misuse.

Please report security vulnerabilities or NVIDIA AI concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
Eagle2-2B/added_tokens.json
ADDED
@@ -0,0 +1,33 @@
{
  "</box>": 151673,
  "</img>": 151666,
  "</quad>": 151669,
  "</ref>": 151671,
  "</tool_call>": 151658,
  "<IMG_CONTEXT>": 151667,
  "<box>": 151672,
  "<img>": 151665,
  "<quad>": 151668,
  "<ref>": 151670,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
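These IDs extend the Qwen2.5 vocabulary with Eagle2's image and grounding tokens. A quick sanity check that the tokenizer picked them up (a sketch, assuming the checkpoint is reachable on the Hub or cached locally):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('nvidia/Eagle2-2B', trust_remote_code=True, use_fast=False)
# Should print 151667 and 151665, matching added_tokens.json above.
print(tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>'))
print(tokenizer.convert_tokens_to_ids('<img>'))
```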
Eagle2-2B/config.json
ADDED
@@ -0,0 +1,206 @@
{
  "_commit_hash": null,
  "_name_or_path": "",
  "architectures": [
    "Eagle2ChatModel"
  ],
  "auto_map": {
    "AutoConfig": "configuration_eagle_chat.Eagle2ChatConfig",
    "AutoModel": "modeling_eagle_chat.Eagle2ChatModel",
    "AutoModelForCausalLM": "modeling_eagle_chat.Eagle2ChatModel"
  },
  "downsample_ratio": 0.5,
  "dynamic_image_size": true,
  "efficient_loss": true,
  "force_image_size": 448,
  "keep_aspect_ratio": false,
  "llm_config": {
    "_name_or_path": "./pretrained/Qwen2_5-1_5B-Instruct",
    "add_cross_attention": false,
    "architectures": [
      "Qwen2ForCausalLM"
    ],
    "attention_dropout": 0.0,
    "attn_implementation": "flash_attention_2",
    "auto_map": {
      "AutoConfig": "configuration_qwen2.Qwen2Config",
      "AutoModel": "modeling_qwen2.Qwen2Model",
      "AutoModelForCausalLM": "modeling_qwen2.Qwen2ForCausalLM"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": 151643,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 151645,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 1536,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 8960,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 32768,
    "max_window_layers": 21,
    "min_length": 0,
    "model_type": "qwen2",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 28,
    "num_key_value_heads": 2,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-06,
    "rope_theta": 1000000.0,
    "sep_token_id": null,
    "sliding_window": 32768,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.37.2",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": false,
    "use_sliding_window": false,
    "vocab_size": 151674
  },
  "loss_version": "v4",
  "max_dynamic_patch": 12,
  "min_dynamic_patch": 1,
  "mlp_checkpoint": true,
  "model_type": "eagle_chat",
  "pad2square": false,
  "pre_feature_reduction": false,
  "ps_version": "v2",
  "select_layer": -1,
  "template": "qwen2-chat",
  "torch_dtype": "bfloat16",
  "transformers_version": null,
  "use_backbone_lora": 0,
  "use_llm_lora": 0,
  "use_thumbnail": true,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": [
      "SiglipVisionModel"
    ],
    "attention_dropout": 0.0,
    "auto_map": {
      "AutoConfig": "configuration_siglip.SiglipVisionConfig",
      "AutoModel": "modeling_siglip.SiglipVisionModel"
    },
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "drop_path_rate": 0.1,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu_pytorch_tanh",
    "hidden_size": 1152,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 448,
    "intermediate_size": 4304,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "siglip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 27,
    "num_image_tokens": 1024,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 2048,
    "projector_hidden_act": "gelu_fast",
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "bfloat16",
    "torchscript": false,
    "transformers_version": "4.37.2",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "vision_use_head": false,
    "_attn_implementation": "flash_attention_2"
  }
}
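The nested `llm_config` and `vision_config` blocks are resolved by `Eagle2ChatConfig` at load time. A short sketch for inspecting the fields that drive image tiling, assuming the repo is reachable via `trust_remote_code`:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained('nvidia/Eagle2-2B', trust_remote_code=True)
print(config.model_type)            # 'eagle_chat'
print(config.force_image_size,      # 448-pixel tile edge
      config.min_dynamic_patch,     # 1
      config.max_dynamic_patch,     # 12
      config.use_thumbnail)         # True
print(config.llm_config.model_type, config.vision_config.model_type)  # 'qwen2', 'siglip_vision_model'
```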
Eagle2-2B/configuration_eagle_chat.py
ADDED
@@ -0,0 +1,102 @@
# --------------------------------------------------------
# Eagle2
# Copyright (c) 2025 NVIDIA
# Licensed under The Apache License [see LICENSE for details]
# --------------------------------------------------------

import copy

from transformers import AutoConfig, LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.models.siglip.configuration_siglip import SiglipVisionConfig
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
logger = logging.get_logger(__name__)

class Eagle2ChatConfig(PretrainedConfig):
    model_type = 'eagle_chat'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            select_layer=-1,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            mlp_checkpoint=True,
            pre_feature_reduction=False,
            keep_aspect_ratio=False,
            vocab_size=-1,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing Vision Encoders with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the LLM config with default values.')

        if vision_config['model_type'] == 'siglip_vision_model':
            self.vision_config = SiglipVisionConfig(**vision_config)
        else:
            raise ValueError('Unsupported model_type: {}'.format(vision_config['model_type']))

        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.select_layer = select_layer
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch
        self.mlp_checkpoint = mlp_checkpoint
        self.pre_feature_reduction = pre_feature_reduction
        self.keep_aspect_ratio = keep_aspect_ratio
        self.vocab_size = self.llm_config.vocab_size
        logger.info(f'keep_aspect_ratio: {self.keep_aspect_ratio}')
        logger.info(f'vision_select_layer: {self.select_layer}')
        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['select_layer'] = self.select_layer
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch
        output['keep_aspect_ratio'] = self.keep_aspect_ratio

        return output
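As the constructor above shows, `Eagle2ChatConfig` requires `vision_config['model_type'] == 'siglip_vision_model'` and an `architectures` entry it recognizes in `llm_config`. A minimal instantiation sketch, assuming this file is importable locally and using only placeholder values for the nested configs:

```python
from configuration_eagle_chat import Eagle2ChatConfig  # assumes the file above is on sys.path

cfg = Eagle2ChatConfig(
    vision_config={'model_type': 'siglip_vision_model', 'image_size': 448, 'patch_size': 14},
    llm_config={'architectures': ['Qwen2ForCausalLM'], 'hidden_size': 1536},
    force_image_size=448,
    dynamic_image_size=True,
    use_thumbnail=True,
    max_dynamic_patch=12,
    template='qwen2-chat',
)
print(cfg.to_dict()['vision_config']['model_type'])  # 'siglip_vision_model'
```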
Eagle2-2B/demo.py
ADDED
|
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
"""
|
| 3 |
+
A model worker executes the model.
|
| 4 |
+
"""
|
| 5 |
+
from transformers import AutoModel, AutoTokenizer, TextIteratorStreamer, AutoConfig
|
| 6 |
+
import argparse
|
| 7 |
+
import base64
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
import decord
|
| 11 |
+
import threading
|
| 12 |
+
import time
|
| 13 |
+
from io import BytesIO
|
| 14 |
+
from threading import Thread
|
| 15 |
+
import math
|
| 16 |
+
import requests
|
| 17 |
+
import torch
|
| 18 |
+
import torchvision.transforms as T
|
| 19 |
+
from PIL import Image
|
| 20 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
|
| 24 |
+
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
| 25 |
+
IMAGENET_STD = (0.229, 0.224, 0.225)
|
| 26 |
+
|
| 27 |
+
SIGLIP_MEAN = (0.5, 0.5, 0.5)
|
| 28 |
+
SIGLIP_STD = (0.5, 0.5, 0.5)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_seq_frames(total_num_frames, desired_num_frames=-1, stride=-1):
|
| 32 |
+
"""
|
| 33 |
+
Calculate the indices of frames to extract from a video.
|
| 34 |
+
|
| 35 |
+
Parameters:
|
| 36 |
+
total_num_frames (int): Total number of frames in the video.
|
| 37 |
+
desired_num_frames (int): Desired number of frames to extract.
|
| 38 |
+
|
| 39 |
+
Returns:
|
| 40 |
+
list: List of indices of frames to extract.
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
assert desired_num_frames > 0 or stride > 0 and not (desired_num_frames > 0 and stride > 0)
|
| 44 |
+
|
| 45 |
+
if stride > 0:
|
| 46 |
+
return list(range(0, total_num_frames, stride))
|
| 47 |
+
|
| 48 |
+
# Calculate the size of each segment from which a frame will be extracted
|
| 49 |
+
seg_size = float(total_num_frames - 1) / desired_num_frames
|
| 50 |
+
|
| 51 |
+
seq = []
|
| 52 |
+
for i in range(desired_num_frames):
|
| 53 |
+
# Calculate the start and end indices of each segment
|
| 54 |
+
start = int(np.round(seg_size * i))
|
| 55 |
+
end = int(np.round(seg_size * (i + 1)))
|
| 56 |
+
|
| 57 |
+
# Append the middle index of the segment to the list
|
| 58 |
+
seq.append((start + end) // 2)
|
| 59 |
+
|
| 60 |
+
return seq
|
| 61 |
+
|
| 62 |
+
def build_video_prompt(meta_list, num_frames, time_position=False):
|
| 63 |
+
# if time_position is True, the frame_timestamp is used.
|
| 64 |
+
# 1. pass time_position, 2. use env TIME_POSITION
|
| 65 |
+
time_position = os.environ.get("TIME_POSITION", time_position)
|
| 66 |
+
prefix = f"This is a video:\n"
|
| 67 |
+
for i in range(num_frames):
|
| 68 |
+
if time_position:
|
| 69 |
+
frame_txt = f"Frame {i+1} sampled at {meta_list[i]:.2f} seconds: <image>\n"
|
| 70 |
+
else:
|
| 71 |
+
frame_txt = f"Frame {i+1}: <image>\n"
|
| 72 |
+
prefix += frame_txt
|
| 73 |
+
return prefix
|
| 74 |
+
|
| 75 |
+
def load_video(video_path, num_frames=64, frame_cache_root=None):
|
| 76 |
+
if isinstance(video_path, str):
|
| 77 |
+
video = decord.VideoReader(video_path)
|
| 78 |
+
elif isinstance(video_path, dict):
|
| 79 |
+
assert False, 'we not support vidoe: "video_path" as input'
|
| 80 |
+
fps = video.get_avg_fps()
|
| 81 |
+
sampled_frames = get_seq_frames(len(video), num_frames)
|
| 82 |
+
samepld_timestamps = [i / fps for i in sampled_frames]
|
| 83 |
+
frames = video.get_batch(sampled_frames).asnumpy()
|
| 84 |
+
images = [Image.fromarray(frame) for frame in frames]
|
| 85 |
+
|
| 86 |
+
return images, build_video_prompt(samepld_timestamps, len(images), time_position=True)
|
| 87 |
+
|
| 88 |
+
def load_image(image):
|
| 89 |
+
if isinstance(image, str) and os.path.exists(image):
|
| 90 |
+
return Image.open(image)
|
| 91 |
+
elif isinstance(image, dict):
|
| 92 |
+
if 'disk_path' in image:
|
| 93 |
+
return Image.open(image['disk_path'])
|
| 94 |
+
elif 'base64' in image:
|
| 95 |
+
return Image.open(BytesIO(base64.b64decode(image['base64'])))
|
| 96 |
+
elif 'url' in image:
|
| 97 |
+
response = requests.get(image['url'])
|
| 98 |
+
return Image.open(BytesIO(response.content))
|
| 99 |
+
elif 'bytes' in image:
|
| 100 |
+
return Image.open(BytesIO(image['bytes']))
|
| 101 |
+
else:
|
| 102 |
+
raise ValueError(f'Invalid image: {image}')
|
| 103 |
+
else:
|
| 104 |
+
raise ValueError(f'Invalid image: {image}')
|
| 105 |
+
|
| 106 |
+
def build_transform(input_size, norm_type='imagenet'):
|
| 107 |
+
if norm_type == 'imagenet':
|
| 108 |
+
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
|
| 109 |
+
elif norm_type == 'siglip':
|
| 110 |
+
MEAN, STD = SIGLIP_MEAN, SIGLIP_STD
|
| 111 |
+
|
| 112 |
+
transform = T.Compose([
|
| 113 |
+
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
|
| 114 |
+
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
|
| 115 |
+
T.ToTensor(),
|
| 116 |
+
T.Normalize(mean=MEAN, std=STD)
|
| 117 |
+
])
|
| 118 |
+
return transform
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
|
| 122 |
+
"""
|
| 123 |
+
previous version mainly foucs on ratio.
|
| 124 |
+
We also consider area ratio here.
|
| 125 |
+
"""
|
| 126 |
+
best_factor = float('-inf')
|
| 127 |
+
best_ratio = (1, 1)
|
| 128 |
+
area = width * height
|
| 129 |
+
for ratio in target_ratios:
|
| 130 |
+
target_aspect_ratio = ratio[0] / ratio[1]
|
| 131 |
+
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
|
| 132 |
+
area_ratio = (ratio[0]*ratio[1]*image_size*image_size)/ area
|
| 133 |
+
"""
|
| 134 |
+
new area > 60% of original image area is enough.
|
| 135 |
+
"""
|
| 136 |
+
factor_based_on_area_n_ratio = min((ratio[0]*ratio[1]*image_size*image_size)/ area, 0.6)* \
|
| 137 |
+
min(target_aspect_ratio/aspect_ratio, aspect_ratio/target_aspect_ratio)
|
| 138 |
+
|
| 139 |
+
if factor_based_on_area_n_ratio > best_factor:
|
| 140 |
+
best_factor = factor_based_on_area_n_ratio
|
| 141 |
+
best_ratio = ratio
|
| 142 |
+
|
| 143 |
+
return best_ratio
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def split_model(model_path, device):

    device_map = {}
    world_size = torch.cuda.device_count()
    config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
    num_layers = config.llm_config.num_hidden_layers

    # Spread the LLM layers evenly over the other GPUs; keep the remainder, together with the
    # vision tower, projector and embeddings, on `device`.
    num_layers_per_gpu_ = math.floor(num_layers / (world_size - 1))
    num_layers_per_gpu = [num_layers_per_gpu_] * world_size
    num_layers_per_gpu[device] = num_layers - num_layers_per_gpu_ * (world_size - 1)
    layer_cnt = 0
    for i, num_layer in enumerate(num_layers_per_gpu):
        for j in range(num_layer):
            device_map[f'language_model.model.layers.{layer_cnt}'] = i
            layer_cnt += 1
    device_map['vision_model'] = device
    device_map['mlp1'] = device
    device_map['language_model.model.tok_embeddings'] = device
    device_map['language_model.model.embed_tokens'] = device
    device_map['language_model.output'] = device
    device_map['language_model.model.norm'] = device
    device_map['language_model.lm_head'] = device
    device_map['language_model.model.rotary_emb'] = device
    device_map[f'language_model.model.layers.{num_layers - 1}'] = device
    return device_map


class ModelWorker:
    def __init__(self, model_path, model_name,
                 load_8bit, device):

        if model_path.endswith('/'):
            model_path = model_path[:-1]
        if model_name is None:
            model_paths = model_path.split('/')
            if model_paths[-1].startswith('checkpoint-'):
                self.model_name = model_paths[-2] + '_' + model_paths[-1]
            else:
                self.model_name = model_paths[-1]
        else:
            self.model_name = model_name

        print(f'Loading the model {self.model_name}')

        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
        tokens_to_keep = ['<box>', '</box>', '<ref>', '</ref>']
        tokenizer.additional_special_tokens = [item for item in tokenizer.additional_special_tokens if item not in tokens_to_keep]
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
        model_type = config.vision_config.model_type
        self.device = torch.cuda.current_device()
        if model_type == 'siglip_vision_model':
            self.norm_type = 'siglip'
        elif model_type == 'MOB':
            self.norm_type = 'siglip'
        else:
            self.norm_type = 'imagenet'
        print('norm_type: ', self.norm_type)
        if any(x in model_path.lower() for x in ['34b']):
            device_map = split_model(model_path, self.device)
        else:
            device_map = None

        if device_map is not None:
            self.model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16,
                                                   low_cpu_mem_usage=True,
                                                   device_map=device_map,
                                                   trust_remote_code=True,
                                                   load_in_8bit=load_8bit).eval()
        else:
            self.model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16,
                                                   trust_remote_code=True,
                                                   load_in_8bit=load_8bit).eval()
        if not load_8bit and device_map is None:
            self.model = self.model.to(device)
        self.load_8bit = load_8bit

        self.model_path = model_path
        self.image_size = self.model.config.force_image_size
        self.context_len = tokenizer.model_max_length
        self.per_tile_len = 256
        print(self.model)

    def reload_model(self):
        del self.model
        torch.cuda.empty_cache()
        if self.device == 'auto':
            os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
            # This can make distributed deployment work properly
            self.model = AutoModel.from_pretrained(
                self.model_path,
                load_in_8bit=self.load_8bit,
                torch_dtype=torch.bfloat16,
                device_map=self.device_map,
                trust_remote_code=True).eval()
        else:
            self.model = AutoModel.from_pretrained(
                self.model_path,
                load_in_8bit=self.load_8bit,
                torch_dtype=torch.bfloat16,
                trust_remote_code=True).eval()
        if not self.load_8bit and not self.device == 'auto':
            self.model = self.model.cuda()

    @torch.inference_mode()
    def generate(self, params):
        system_message = params['prompt'][0]['content']
        send_messages = params['prompt'][1:]
        max_input_tiles = params['max_input_tiles']
        temperature = params['temperature']
        top_p = params['top_p']
        max_new_tokens = params['max_new_tokens']
        repetition_penalty = params['repetition_penalty']
        video_frame_num = params.get('video_frame_num', 64)
        do_sample = True if temperature > 0.0 else False

        global_image_cnt = 0
        history, pil_images, max_input_tile_list = [], [], []

        for message in send_messages:
            if message['role'] == 'user':
                prefix = ''
                if 'image' in message:
                    for image_data in message['image']:
                        pil_images.append(load_image(image_data))
                        prefix = prefix + f'<image {global_image_cnt + 1}><image>\n'
                        global_image_cnt += 1
                        max_input_tile_list.append(max_input_tiles)
                if 'video' in message:
                    for video_data in message['video']:
                        video_frames, tmp_prefix = load_video(video_data, num_frames=video_frame_num)
                        pil_images.extend(video_frames)
                        prefix = prefix + tmp_prefix
                        global_image_cnt += len(video_frames)
                        max_input_tile_list.extend([1] * len(video_frames))
                content = prefix + message['content']
                history.append([content, ])
            else:
                history[-1].append(message['content'])
        question, history = history[-1][0], history[:-1]

        if global_image_cnt == 1:
            question = question.replace('<image 1><image>\n', '<image>\n')
            history = [[item[0].replace('<image 1><image>\n', '<image>\n'), item[1]] for item in history]

        assert len(max_input_tile_list) == len(pil_images), \
            'The number of max_input_tile_list and pil_images should be the same.'

        old_system_message = self.model.system_message
        self.model.system_message = system_message

        transform = build_transform(input_size=self.image_size, norm_type=self.norm_type)
        num_patches_list = []
        if len(pil_images) > 0:
            max_input_tiles_limited_by_context = params['max_input_tiles']
            while True:
                image_tiles = []
                num_patches_list = []
                for current_max_input_tiles, pil_image in zip(max_input_tile_list, pil_images):
                    if self.model.config.dynamic_image_size:
                        tiles = dynamic_preprocess(
                            pil_image, image_size=self.image_size,
                            max_num=min(current_max_input_tiles, max_input_tiles_limited_by_context),
                            use_thumbnail=self.model.config.use_thumbnail)
                    else:
                        tiles = [pil_image]
                    num_patches_list.append(len(tiles))
                    image_tiles += tiles
                # Shrink the tile budget until the visual tokens fit into the context window.
                if (len(image_tiles) * self.per_tile_len < self.context_len):
                    break
                else:
                    max_input_tiles_limited_by_context -= 2

                if max_input_tiles_limited_by_context < 1:
                    break

            pixel_values = [transform(item) for item in image_tiles]
            pixel_values = torch.stack(pixel_values).to(self.model.device, dtype=torch.bfloat16)
            print(f'pixel_values: {pixel_values.shape}')
        else:
            pixel_values = None

        generation_config = dict(
            num_beams=1,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            max_length=self.context_len,
            top_p=top_p,
        )
        response = self.model.chat(
            tokenizer=self.tokenizer,
            pixel_values=pixel_values,
            question=question,
            history=history,
            return_history=False,
            num_patches_list=num_patches_list,
            generation_config=generation_config,
        )
        self.model.system_message = old_system_message
        return {'text': response, 'error_code': 0}


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model-path', type=str, default='/home/zhidingy/workspace/eagle-next/internvl_chat/work_dirs/release/test/Eagle2-2B')
    parser.add_argument('--model-name', type=str, default='Eagle2')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--load-8bit', action='store_true')
    args = parser.parse_args()
    print(f'args: {args}')

    worker = ModelWorker(
        args.model_path,
        args.model_name,
        args.load_8bit,
        args.device)
    prompt = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': 'Describe these two images in details respectively.',
         'image': [
             {'url': 'https://www.nvidia.com/content/dam/en-zz/Solutions/about-nvidia/logo-and-brand/[email protected]'},
             {'url': "https://www.google.com.hk/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"}
         ]
         }
    ]
    params = {
        'prompt': prompt,
        'max_input_tiles': 24,
        'temperature': 0.7,
        'top_p': 1.0,
        'max_new_tokens': 4096,
        'repetition_penalty': 1.0,
    }
    print(worker.generate(params))
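For reference, a minimal sketch of how the tiling helpers above can be exercised on their own. This is not part of the uploaded demo.py; it assumes dynamic_preprocess and build_transform are importable from the script and uses a synthetic stand-in image instead of a real photo.

from PIL import Image
import torch

# Stand-in image; any RGB PIL.Image works here.
img = Image.new('RGB', (1920, 1080), color=(127, 127, 127))
tiles = dynamic_preprocess(img, min_num=1, max_num=12, image_size=448, use_thumbnail=True)
transform = build_transform(input_size=448, norm_type='siglip')
pixel_values = torch.stack([transform(t) for t in tiles])
print(len(tiles), pixel_values.shape)  # (number_of_tiles [+ thumbnail], 3, 448, 448)
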
Eagle2-2B/generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.37.2"
}
Eagle2-2B/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
Eagle2-2B/model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f73a9fc7fb024f2405af6970c5945c3d3bfe160757119acf8ef335f1f10a4235
size 4428702200
Eagle2-2B/modeling_eagle_chat.py
ADDED
@@ -0,0 +1,450 @@
# --------------------------------------------------------
# Eagle2
# Copyright (c) 2025 NVIDIA
# Licensed under The Apache License [see LICENSE for details]
# --------------------------------------------------------

import warnings
from typing import Any, List, Optional, Tuple, Union

import torch.utils.checkpoint
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import (AutoModel, GenerationConfig,
                          LlamaTokenizer, LlamaForCausalLM)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import ModelOutput, logging
from peft import LoraConfig, get_peft_model
from transformers.models.siglip.modeling_siglip import SiglipVisionModel

from transformers.models.qwen2.modeling_qwen2 import Qwen2ForCausalLM
import functools


logger = logging.get_logger(__name__)
from .configuration_eagle_chat import Eagle2ChatConfig


def cleanup_xlora_pre_hooks(model, verbose=True):
    cleaned = 0
    for _, m in model.named_modules():
        d = getattr(m, "_forward_pre_hooks", None)
        if not isinstance(d, dict):
            continue
        for k, cb in list(d.items()):
            is_xlora = isinstance(cb, functools.partial) and getattr(cb.func, "__name__", "") == "scalings_injection_hook"
            if is_xlora:
                try:
                    d.pop(k, None)
                    cleaned += 1
                except Exception:
                    pass
    if verbose and cleaned:
        print(f"[XLORA] cleaned {cleaned} stale pre_hooks")


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class Eagle2ChatModel(PreTrainedModel):
    config_class = Eagle2ChatConfig
    main_input_name = 'pixel_values'
    _no_split_modules = ['LlamaDecoderLayer']
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = False
    _supports_cache_class = False
    _supports_quantized_cache = False
    _supports_static_cache = False
    _supports_attention_backend = False

    def __init__(self, config: Eagle2ChatConfig, vision_model=None, language_model=None):
        super().__init__(config)

        image_size = config.force_image_size or config.vision_config.image_size

        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))

        self.select_layer = config.select_layer
        self.template = config.template
        self.downsample_ratio = config.downsample_ratio

        logger.info(f'num_image_token: {self.num_image_token}')
        if vision_model is not None:
            self.vision_model = vision_model
        else:
            if config.vision_config.model_type == 'siglip_vision_model':
                if version_cmp(transformers.__version__, '4.43.0', 'le'):
                    config.vision_config._attn_implementation = 'eager'
                self.vision_model = SiglipVisionModel(config.vision_config)

        if language_model is not None:
            self.language_model = language_model
        else:
            if config.llm_config.architectures[0] == 'LlamaForCausalLM':
                self.language_model = LlamaForCausalLM(config.llm_config)
            elif config.llm_config.architectures[0] == 'Qwen2ForCausalLM':
                self.language_model = Qwen2ForCausalLM(config.llm_config)
            else:
                raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')

        vit_hidden_size = config.vision_config.hidden_size

        llm_hidden_size = config.llm_config.hidden_size

        self.mlp1 = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )
        self.img_context_token_id = None
        self.system_message = 'You are a helpful assistant.'  # Default system message

        if config.use_backbone_lora:
            self.wrap_backbone_lora(r=config.use_backbone_lora, lora_alpha=2 * config.use_backbone_lora)

        if config.use_llm_lora:
            self.wrap_llm_lora(r=config.use_llm_lora, lora_alpha=2 * config.use_llm_lora)

    def wrap_backbone_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        lora_config = LoraConfig(
            r=r,
            target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'],
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
        )
        self.vision_model = get_peft_model(self.vision_model, lora_config)
        self.vision_model.print_trainable_parameters()

    def wrap_llm_lora(self, r=128, lora_alpha=256, lora_dropout=0.05):
        lora_config = LoraConfig(
            r=r,
            target_modules=['self_attn.q_proj', 'self_attn.k_proj', 'self_attn.v_proj', 'self_attn.o_proj',
                            'mlp.gate_proj', 'mlp.down_proj', 'mlp.up_proj'],
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            task_type='CAUSAL_LM'
        )
        self.language_model = get_peft_model(self.language_model, lora_config)
        self.language_model.enable_input_require_grads()
        self.language_model.print_trainable_parameters()

    def forward(
            self,
            pixel_values: torch.FloatTensor,
            input_ids: torch.LongTensor = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            image_flags: Optional[torch.LongTensor] = None,
            past_key_values: Optional[List[torch.FloatTensor]] = None,
            labels: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            num_patches_list: Optional[List[torch.Tensor]] = None,
            **kwargs
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_embeds = self.language_model.get_input_embeddings()(input_ids)

        vit_embeds = self.extract_feature(pixel_values)

        if not isinstance(image_flags, list):
            image_flags = image_flags.squeeze(-1)
            vit_embeds = vit_embeds[image_flags == 1]

        vit_batch_size = pixel_values.shape[0]

        B, N, C = input_embeds.shape
        input_embeds = input_embeds.reshape(B * N, C)

        # if torch.distributed.get_rank() == 0:
        #     print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)
        try:
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            # Fallback: the number of <IMG_CONTEXT> positions and visual tokens can disagree
            # (e.g. after truncation); keep as many visual tokens as there are positions.
            vit_embeds = vit_embeds.reshape(-1, C)
            if torch.distributed.get_rank() == 0:
                print(pixel_values.shape, input_embeds.shape)
                print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
                      f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]

        input_embeds = input_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=input_embeds,
            labels=labels,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            **kwargs
        )
        logits = outputs.logits

        loss = None
        if labels is not None and outputs.loss is None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        else:
            loss = outputs.loss

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
        """Encode image tiles with the vision tower, pixel-shuffle the patch grid,
        and project the result into the LLM embedding space."""

        if self.select_layer == -1:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=False,
                return_dict=True)
            # if there is vit_embeds.last_hidden_state, use it.
            if hasattr(vit_embeds, 'last_hidden_state'):
                vit_embeds = vit_embeds.last_hidden_state
        else:
            vit_embeds = self.vision_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
                return_dict=True).hidden_states[self.select_layer]
        if type(self.vision_model) == SiglipVisionModel:
            pass
        else:
            vit_embeds = vit_embeds[:, 1:, :]  # drop the class token for non-SigLIP backbones

        if self.training and getattr(self, 'neftune_alpha', None) is not None:
            vit_embeds = self.noised_embed(vit_embeds, self.neftune_alpha)

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)

        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)  # e.g. [B, 32, 32, C] -> [B, 16, 16, 4C]
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])   # -> [B, 256, 4C]
        vit_embeds = self.mlp1(vit_embeds)

        return vit_embeds

    def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
                   history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
                   IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
        if history is not None or return_history:
            print('Now multi-turn chat is not supported in batch_chat.')
            raise NotImplementedError

        if image_counts is not None:
            num_patches_list = image_counts
            print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        queries = []
        sep = tokenizer.eos_token
        for idx, num_patches in enumerate(num_patches_list):
            question = questions[idx]
            if pixel_values is not None and '<image>' not in question:
                question = '<image>\n' + question
            template_messages = []
            template_messages.append(('<|im_start|>user', question))
            # The final assistant turn is left empty so that generation starts right after it.
            template_messages.append(('<|im_start|>assistant', None))
            query = self.get_prompt(self.system_message, template_messages, sep)

            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            query = query.replace('<image>', image_tokens, 1)
            queries.append(query)

        tokenizer.padding_side = 'left'
        model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
        input_ids = model_inputs['input_ids'].cuda()
        attention_mask = model_inputs['attention_mask'].cuda()
        eos_token_id = tokenizer.convert_tokens_to_ids(sep)
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
        responses = [response.split(sep)[0].strip() for response in responses]
        return responses

    def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
             num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
             verbose=False, llm_only=False):

        if history is None and pixel_values is not None and '<image>' not in question:
            question = '<image>\n' + question

        if num_patches_list is None:
            num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
        assert pixel_values is None or len(pixel_values) == sum(num_patches_list)

        img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
        self.img_context_token_id = img_context_token_id

        template_messages = []
        system_message = f'<|im_start|>system\n{self.system_message}'
        sep = tokenizer.eos_token
        eos_token_id = tokenizer.convert_tokens_to_ids(sep)

        history = [] if history is None else history
        for (old_question, old_answer) in history:
            template_messages.append(('<|im_start|>user', old_question))
            template_messages.append(('<|im_start|>assistant', old_answer))
        template_messages.append(('<|im_start|>user', question))
        # The final assistant turn is left empty so that generation starts right after it.
        template_messages.append(('<|im_start|>assistant', None))
        query = self.get_prompt(system_message, template_messages, sep)

        if verbose and pixel_values is not None:
            image_bs = pixel_values.shape[0]
            print(f'dynamic ViT batch size: {image_bs}')

        for num_patches in num_patches_list:
            image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
            if llm_only:
                query = query.replace('<image>', '', 1)
            else:
                query = query.replace('<image>', image_tokens, 1)

        model_inputs = tokenizer(query, return_tensors='pt')
        input_ids = model_inputs['input_ids'].cuda()
        attention_mask = model_inputs['attention_mask'].cuda()
        generation_config['eos_token_id'] = eos_token_id
        generation_output = self.generate(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            **generation_config
        )
        response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
        response = response.split(sep)[0].strip()
        history.append((question, response))
        if return_history:
            return response, history
        else:
            query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
            query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
            if verbose:
                print(query_to_print, response)
            return response

    def get_prompt(self, system_prompt, messages, sep) -> str:
        """Get the prompt for generation."""

        ret = '' if system_prompt == '' else system_prompt + sep + '\n'
        for role, message in messages:
            if message:
                ret += role + '\n' + message + sep + '\n'
            else:
                ret += role + '\n'
        return ret

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            visual_features: Optional[torch.FloatTensor] = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        assert self.img_context_token_id is not None
        if pixel_values is not None:
            if visual_features is not None:
                vit_embeds = visual_features
            else:
                vit_embeds = self.extract_feature(pixel_values)

            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.img_context_token_id)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device, dtype=input_embeds.dtype)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            # return_dict=return_dict, # default is True
            **generate_kwargs,
        )

        # Drop any stale X-LoRA scaling hooks left behind by a previous adapter run.
        cleanup_xlora_pre_hooks(self.language_model)

        return outputs

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()
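As a quick cross-check of the vision-token bookkeeping in Eagle2ChatModel.__init__ above, the snippet below reproduces the num_image_token arithmetic. It is not part of the upload; the image_size, patch_size and downsample_ratio values are assumptions chosen to be consistent with per_tile_len = 256 in demo.py.

image_size, patch_size, downsample_ratio = 448, 14, 0.5  # assumed configuration values
num_image_token = int((image_size // patch_size) ** 2 * downsample_ratio ** 2)
print(num_image_token)  # 256, i.e. one run of 256 <IMG_CONTEXT> placeholders per image tile
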
Eagle2-2B/special_tokens_map.json
ADDED
@@ -0,0 +1,40 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>",
    "<img>",
    "</img>",
    "<IMG_CONTEXT>",
    "<quad>",
    "</quad>",
    "<ref>",
    "</ref>",
    "<box>",
    "</box>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
Eagle2-2B/tokenizer_config.json
ADDED
@@ -0,0 +1,289 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151644": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151645": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151646": {"content": "<|object_ref_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151647": {"content": "<|object_ref_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151648": {"content": "<|box_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151649": {"content": "<|box_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151650": {"content": "<|quad_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151651": {"content": "<|quad_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151652": {"content": "<|vision_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151653": {"content": "<|vision_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151654": {"content": "<|vision_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151655": {"content": "<|image_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151656": {"content": "<|video_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151657": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151658": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151659": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151660": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151661": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151662": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151663": {"content": "<|repo_name|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151664": {"content": "<|file_sep|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "151665": {"content": "<img>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151666": {"content": "</img>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151667": {"content": "<IMG_CONTEXT>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151668": {"content": "<quad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151669": {"content": "</quad>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151670": {"content": "<ref>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151671": {"content": "</ref>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151672": {"content": "<box>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "151673": {"content": "</box>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>", "<img>", "</img>", "<IMG_CONTEXT>", "<quad>", "</quad>", "<ref>", "</ref>", "<box>", "</box>"],
  "bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "model_max_length": 16384,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
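A minimal sketch of loading the tokenizer configured above and resolving the image placeholder token that modeling_eagle_chat.py relies on. It is not part of the upload; the local path is an assumption.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('./Eagle2-2B', trust_remote_code=True, use_fast=False)
print(tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>'))  # 151667, per added_tokens_decoder above
print(tokenizer.eos_token)  # '<|im_end|>'
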
Eagle2-2B/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
empty_language_adapter/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: ./pretrained/Qwen2_5-1_5B-Instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.15.2
empty_language_adapter/adapter_config.json
ADDED
@@ -0,0 +1,32 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "./pretrained/Qwen2_5-1_5B-Instruct",
  "bias": "none",
  "fan_in_fan_out": false,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 256,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 128,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "self_attn.o_proj",
    "mlp.up_proj",
    "self_attn.v_proj",
    "self_attn.k_proj",
    "mlp.down_proj",
    "self_attn.q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
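For context, a minimal sketch of attaching this LoRA adapter to its base model with PEFT. It is not part of the upload; the paths follow adapter_config.json and are assumptions about the local layout. Since the config uses init_lora_weights=true and the folder is labelled "empty", the LoRA B matrices are presumably still zero-initialised, in which case the adapter leaves the base model's outputs unchanged.

from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained('./pretrained/Qwen2_5-1_5B-Instruct')  # assumed local path
model = PeftModel.from_pretrained(base, './empty_language_adapter')
model.print_trainable_parameters()  # adapter weights load frozen by default (is_trainable=False)
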
empty_language_adapter/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dada5c48f8e5936e18bda0930a998cecb8fa9e2aea0abea387f5647eae7134a7
size 440447544