Update README.md
Browse files
README.md — CHANGED
@@ -1,3 +1,46 @@
Removed (old README, lines 1–3):
1 - ---
2 - license: apache-2.0
3 - ---
Added (new README):

---
license: apache-2.0
language:
- en
base_model:
- Qwen/Qwen-Image
pipeline_tag: text-to-image
library_name: diffusers
---

# **gguf quantized version of qwen-image**

- test it with:

```py
# Example: text-to-image generation with a GGUF-quantized Qwen-Image transformer.
import torch
from diffusers import DiffusionPipeline, GGUFQuantizationConfig, QwenImageTransformer2DModel

# GGUF checkpoint (q2_k quantization) hosted on the Hugging Face Hub.
model_path = "https://huggingface.co/calcuis/krea-gguf/blob/main/qwen-image-q2_k.gguf"

# Load the quantized transformer; GGUF weights are dequantized to bfloat16
# at compute time. Config/architecture metadata comes from "callgg/qi-decoder".
transformer = QwenImageTransformer2DModel.from_single_file(
    model_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
    config="callgg/qi-decoder",
    subfolder="transformer",
)

# Assemble the full pipeline around the quantized transformer.
pipe = DiffusionPipeline.from_pretrained(
    "callgg/qi-decoder",
    transformer=transformer,
    torch_dtype=torch.bfloat16,
)
# Offload idle submodules to CPU to lower peak GPU memory.
pipe.enable_model_cpu_offload()

prompt = "a pig holding a sign that says hello world"
# Quality-boosting suffix appended to the user prompt.
positive_magic = {"en": "Ultra HD, 4K, cinematic composition."}
# NOTE(review): a single-space negative prompt — presumably a deliberate
# "no negative" placeholder used with true_cfg_scale; confirm against the
# QwenImage pipeline docs.
negative_prompt = " "

image = pipe(
    # Fix: join with a space — plain concatenation produced
    # "...hello worldUltra HD, 4K..." with the suffix fused onto the prompt.
    prompt=f"{prompt} {positive_magic['en']}",
    negative_prompt=negative_prompt,
    height=1024,
    width=1024,
    num_inference_steps=24,
    true_cfg_scale=2.5,
    generator=torch.Generator().manual_seed(0),  # fixed seed for reproducibility
).images[0]
image.save("output.png")
```