zharer commited on
Commit
a413f2a
·
verified ·
1 Parent(s): 85322a1

Upload folder using huggingface_hub

Browse files
README.md CHANGED
@@ -1,3 +1,11 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
 
 
 
 
 
1
+ # Janus-Pro-7B WebGPU
2
+
3
+ WebGPU-optimized Janus-Pro-7B for transformers.js.
4
+
5
+ ## Usage
6
+ ```javascript
7
+ import { loadJanus } from './usage_example.js';
8
+ const { model, processor } = await loadJanus();
9
+ ```
10
+
11
+ Ready for browser deployment! 🚀
config.json ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "aligner_config": {
3
+ "cls": "MlpProjector",
4
+ "model_type": "aligner",
5
+ "params": {
6
+ "depth": 2,
7
+ "input_dim": 1024,
8
+ "n_embed": 4096,
9
+ "projector_type": "mlp_gelu"
10
+ }
11
+ },
12
+ "gen_aligner_config": {
13
+ "cls": "MlpProjector",
14
+ "model_type": "gen_aligner",
15
+ "params": {
16
+ "depth": 2,
17
+ "input_dim": 8,
18
+ "n_embed": 4096,
19
+ "projector_type": "mlp_gelu"
20
+ }
21
+ },
22
+ "gen_head_config": {
23
+ "cls": "vision_head",
24
+ "model_type": "gen_head",
25
+ "params": {
26
+ "image_token_embed": 4096,
27
+ "image_token_size": 16384,
28
+ "n_embed": 4096
29
+ }
30
+ },
31
+ "gen_vision_config": {
32
+ "cls": "VQ-16",
33
+ "model_type": "gen_vision",
34
+ "params": {
35
+ "image_token_size": 16384,
36
+ "n_embed": 8
37
+ }
38
+ },
39
+ "language_config": {
40
+ "max_position_embeddings": 16384,
41
+ "model_type": "llama",
42
+ "num_hidden_layers": 30,
43
+ "torch_dtype": "bfloat16",
44
+ "vocab_size": 102400
45
+ },
46
+ "model_type": "janus",
47
+ "torch_dtype": "float16",
48
+ "transformers_version": "4.33.1",
49
+ "vision_config": {
50
+ "cls": "CLIPVisionTower",
51
+ "model_type": "vision",
52
+ "params": {
53
+ "image_size": 384,
54
+ "model_name": "siglip_large_patch16_384",
55
+ "select_feature": "same",
56
+ "select_layer": -1
57
+ }
58
+ },
59
+ "architectures": [
60
+ "JanusForConditionalGeneration"
61
+ ],
62
+ "use_cache": true,
63
+ "webgpu_compatible": true,
64
+ "quantization": "q4f16",
65
+ "pipeline_tag": "text-to-image"
66
+ }
generation_config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 100000,
3
+ "eos_token_id": 100001,
4
+ "pad_token_id": 100001,
5
+ "max_length": 2048,
6
+ "max_new_tokens": 1024,
7
+ "do_sample": true,
8
+ "temperature": 0.7,
9
+ "top_p": 0.9,
10
+ "use_cache": true,
11
+ "num_image_tokens": 576
12
+ }
model_index.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_name": "Janus-Pro-7B",
3
+ "model_type": "multimodal",
4
+ "architecture": "janus",
5
+ "format": "onnx",
6
+ "quantization": "q4f16",
7
+ "device": "webgpu",
8
+ "total_size_mb": 4935.2,
9
+ "components_count": 6,
10
+ "status": "complete",
11
+ "capabilities": [
12
+ "text-to-image",
13
+ "image-to-text",
14
+ "multimodal-chat"
15
+ ],
16
+ "transformers_js_compatible": true,
17
+ "webgpu_optimized": true,
18
+ "export_date": "2025-09-27",
19
+ "notes": "All 6 components successfully exported for WebGPU deployment"
20
+ }
onnx/gen_head_fp16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22eb336ab33e4d0b9ee5c09c2d3906682c4aeff408a189a2eb45077ff3f1de30
3
+ size 335628308
onnx/gen_img_embeds_fp16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:184a1b1eb70e501a8a5831e7ab5193364f973a4d9946dc12caf410cf85294f13
3
+ size 67125624
onnx/language_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df9b2ef1d20b7314c4c877b75939bbbed5dacb3aa1f4e75fc47fe0b6e88e0e3a
3
+ size 1073941695
onnx/lm_head_fp16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6599c2b692b6efd30d7b2ab4ae04434ba352f8f4ed4fdc967ca71ef69debdf8f
3
+ size 1677721863
onnx/prepare_inputs_embeds_q4.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b94e5d5ccaf0a02a804dbcb35c7f0d0b57f22fd30b1f187c2ec797fb34df6c5b
3
+ size 1677721872
onnx/vision_encoder_fp16.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c17e3ed46cb81ac30d8cf1c9c6c723afc77108a52a63df4c6b74e7c1dd8431c
3
+ size 1215274800
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "background_color": [
3
+ 127,
4
+ 127,
5
+ 127
6
+ ],
7
+ "do_normalize": true,
8
+ "image_mean": [
9
+ 0.5,
10
+ 0.5,
11
+ 0.5
12
+ ],
13
+ "image_processor_type": "VLMImageProcessor",
14
+ "image_size": 384,
15
+ "image_std": [
16
+ 0.5,
17
+ 0.5,
18
+ 0.5
19
+ ],
20
+ "min_size": 14,
21
+ "processor_class": "VLChatProcessor",
22
+ "rescale_factor": 0.00392156862745098
23
+ }
processor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_special_token": false,
3
+ "ignore_id": -100,
4
+ "image_tag": "<image_placeholder>",
5
+ "mask_prompt": true,
6
+ "num_image_tokens": 576,
7
+ "processor_class": "VLChatProcessor",
8
+ "sft_format": "deepseek"
9
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<image_placeholder>",
4
+ "<patch_placeholder>",
5
+ "<|ref|>",
6
+ "<|/ref|>",
7
+ "<|det|>",
8
+ "<|/det|>",
9
+ "<|grounding|>",
10
+ "<|User|>",
11
+ "<|Assistant|>"
12
+ ],
13
+ "bos_token": "<|begin▁of▁sentence|>",
14
+ "eos_token": "<|end▁of▁sentence|>",
15
+ "pad_token": "<|▁pad▁|>"
16
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|begin▁of▁sentence|>",
3
+ "clean_up_tokenization_spaces": false,
4
+ "eos_token": "<|end▁of▁sentence|>",
5
+ "model_max_length": 16384,
6
+ "pad_token": "<|▁pad▁|>",
7
+ "tokenizer_class": "LlamaTokenizer",
8
+ "unk_token": null,
9
+ "use_default_system_prompt": true
10
+ }
usage_example.js ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
// Janus-Pro-7B WebGPU usage example.
import { AutoProcessor, AutoModelForCausalLM } from "@huggingface/transformers";

/**
 * Load the Janus-Pro-7B processor and model for WebGPU inference.
 *
 * The processor and model are independent artifacts, so they are fetched in
 * parallel rather than awaited sequentially.
 *
 * @returns {Promise<{ model: object, processor: object }>} resolves with the
 *   q4f16-quantized model (WebGPU device) and its chat/image processor.
 * @throws rethrows any load/download failure from `from_pretrained`.
 */
async function loadJanus() {
  const [processor, model] = await Promise.all([
    AutoProcessor.from_pretrained("./janus-pro-7b-webgpu"),
    AutoModelForCausalLM.from_pretrained("./janus-pro-7b-webgpu", {
      device: "webgpu",
      dtype: "q4f16",
    }),
  ]);
  return { model, processor };
}

export { loadJanus };