chestnutlzj committed on
Commit
849a228
·
verified ·
1 Parent(s): bc1b6fa

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +54 -3
README.md CHANGED
@@ -1,3 +1,54 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ language:
4
+ - en
5
+ - zh
6
+ library_name: diffusers
7
+ pipeline_tag: image-to-image
8
+ ---
9
+
10
+ # Edit-R1
11
+
12
+ <p align="center">
13
+ <a href="https://github.com/PKU-YuanGroup/Edit-R1"><b>Code</b></a> | <a href="https://github.com/PKU-YuanGroup/Edit-R1"><b>Dataset</b></a>
14
+ </p>
15
+
16
+ # Usage
17
+
18
+ ```python
19
+ import os
20
+ import torch
21
+ from PIL import Image
22
+ from diffusers import QwenImageEditPlusPipeline
23
+
24
+ pipeline = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16)
25
+ print("pipeline loaded")
26
+
27
+ pipeline.load_lora_weights(
28
+ "chestnutlzj/Edit-R1-Qwen-Image-Edit-2509",
29
+ adapter_name="lora",
30
+ )
31
+ pipeline.set_adapters(["lora"], adapter_weights=[1])
32
+
33
+ pipeline.to('cuda')
34
+ pipeline.set_progress_bar_config(disable=None)
35
+ image1 = Image.open("input1.png")
36
+ image2 = Image.open("input2.png")
37
+ prompt = "The magician bear is on the left, the alchemist bear is on the right, facing each other in the central park square."
38
+ inputs = {
39
+ "image": [image1, image2],
40
+ "prompt": prompt,
41
+ "generator": torch.manual_seed(0),
42
+ "true_cfg_scale": 4.0,
43
+ "negative_prompt": " ",
44
+ "num_inference_steps": 40,
45
+ "guidance_scale": 1.0,
46
+ "num_images_per_prompt": 1,
47
+ }
48
+ with torch.inference_mode():
49
+ output = pipeline(**inputs)
50
+ output_image = output.images[0]
51
+ output_image.save("output_image_edit_plus.png")
52
+ print("image saved at", os.path.abspath("output_image_edit_plus.png"))
53
+
54
+ ```