Dfrost8-2 committed
Commit 6d377ad · verified · 1 Parent(s): ae821ec

Upload live_preview_helpers.py

Files changed (1)
  1. live_preview_helpers.py +169 -0
live_preview_helpers.py ADDED
@@ -0,0 +1,169 @@
import torch
import numpy as np
from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
from typing import Any, Dict, List, Optional, Union

# Helper functions
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    # Linearly interpolate the scheduler shift `mu` between base_shift and max_shift
    # according to the length of the image token sequence.
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu

def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    # Configure the scheduler from custom timesteps, custom sigmas, or a plain step count,
    # and return the resulting (timesteps, num_inference_steps) pair.
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

@torch.inference_mode()
def flux_pipe_call_that_returns_an_iterable_of_images(
    self,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 28,
    timesteps: List[int] = None,
    guidance_scale: float = 3.5,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    max_sequence_length: int = 512,
    good_vae: Optional[Any] = None,
):
    height = height or self.default_sample_size * self.vae_scale_factor
    width = width or self.default_sample_size * self.vae_scale_factor

    # 1. Check inputs
    self.check_inputs(
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        max_sequence_length=max_sequence_length,
    )

    self._guidance_scale = guidance_scale
    self._joint_attention_kwargs = joint_attention_kwargs
    self._interrupt = False

    # 2. Define call parameters
    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = self._execution_device

    # 3. Encode prompt
    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
        prompt=prompt,
        prompt_2=prompt_2,
        prompt_embeds=prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        device=device,
        num_images_per_prompt=num_images_per_prompt,
        max_sequence_length=max_sequence_length,
        lora_scale=lora_scale,
    )

    # 4. Prepare latent variables
    num_channels_latents = self.transformer.config.in_channels // 4
    latents, latent_image_ids = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    # 5. Prepare timesteps
    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
    image_seq_len = latents.shape[1]
    mu = calculate_shift(
        image_seq_len,
        self.scheduler.config.base_image_seq_len,
        self.scheduler.config.max_image_seq_len,
        self.scheduler.config.base_shift,
        self.scheduler.config.max_shift,
    )
    timesteps, num_inference_steps = retrieve_timesteps(
        self.scheduler,
        num_inference_steps,
        device,
        timesteps,
        sigmas,
        mu=mu,
    )
    self._num_timesteps = len(timesteps)

    # Handle guidance
    guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) \
        if self.transformer.config.guidance_embeds else None

    # 6. Denoising loop
    for i, t in enumerate(timesteps):
        if self.interrupt:
            continue

        timestep = t.expand(latents.shape[0]).to(latents.dtype)

        noise_pred = self.transformer(
            hidden_states=latents,
            timestep=timestep / 1000,
            guidance=guidance,
            pooled_projections=pooled_prompt_embeds,
            encoder_hidden_states=prompt_embeds,
            txt_ids=text_ids,
            img_ids=latent_image_ids,
            joint_attention_kwargs=self.joint_attention_kwargs,
            return_dict=False,
        )[0]

        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

        # Yield intermediate result
        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
        image = self.vae.decode(latents_for_image, return_dict=False)[0]
        yield self.image_processor.postprocess(image, output_type=output_type)[0]
        torch.cuda.empty_cache()

    # Final image using good_vae
    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
    image = good_vae.decode(latents, return_dict=False)[0]
    self.maybe_free_model_hooks()
    torch.cuda.empty_cache()
    yield self.image_processor.postprocess(image, output_type=output_type)[0]
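
For reference, a minimal sketch of how these helpers are typically wired onto a pipeline (not part of this upload). The checkpoint IDs (`black-forest-labs/FLUX.1-dev`, `madebyollin/taef1`), the prompt, and the step count are illustrative assumptions; the point is that the pipeline runs with a small, fast VAE for the per-step previews while the full-quality VAE is passed as `good_vae` for the final frame.

# usage_sketch.py (assumed caller, not part of the commit)
import torch
from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Tiny VAE makes intermediate decodes cheap; the full VAE is kept for the last yield.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype
).to(device)
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1
).to(device)

# Bind the generator as a method so `self` inside it resolves to the pipeline instance.
pipe.flux_pipe_call_that_returns_an_iterable_of_images = \
    flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)

for preview in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
    prompt="a photo of a forest at dawn",
    num_inference_steps=8,
    guidance_scale=3.5,
    width=1024,
    height=1024,
    generator=torch.Generator(device).manual_seed(0),
    output_type="pil",
    good_vae=good_vae,
):
    # Each yield is a PIL image; the final one is the full-quality good_vae decode.
    preview.save("preview_latest.png")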