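# Gradio demo for WiNE-iNEFF/Mineskin-Diffusion-v1.0: samples Minecraft skin
# textures with a diffusion UNet and previews each result in an embedded
# 3D skin viewer (SkinViewFrame).
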
import gradio as gr
import torch, torchvision
from torchvision import transforms
import torch.nn.functional as F
import numpy as np
import PIL
from PIL import Image, ImageColor
from diffusers import DiffusionPipeline
from diffusers import DDIMScheduler, DDPMScheduler, DEISMultistepScheduler, LCMScheduler, PNDMScheduler, UniPCMultistepScheduler
import base64
from urllib.parse import quote_plus
from io import BytesIO
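
# Prefer Apple MPS, then CUDA, and fall back to CPU.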
| device = ("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu") | |
class MSPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self, batch_size=1, num_inference_steps=1000):
        # Start from pure Gaussian noise shaped to the UNet's expected input.
        x = torch.randn(
            batch_size,
            self.unet.config.in_channels,
            self.unet.config.sample_size,
            self.unet.config.sample_size,
        ).to(device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # x = self.scheduler.scale_model_input(x, t)
            # Predict the noise residual and take one denoising step.
            model_output = self.unet(x, t).sample
            x = self.scheduler.step(model_output, t, x).prev_sample
        # Convert the NCHW tensor to HWC arrays in [0, 1], then to PIL images.
        x = x.cpu().permute(0, 2, 3, 1).clip(0, 1).numpy()
        x = self.numpy_to_pil(x)
        return x
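
# Load the pretrained UNet and scheduler from the Hub into the custom pipeline.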
pipe = MSPipeline.from_pretrained("WiNE-iNEFF/Mineskin-Diffusion-v1.0", use_safetensors=True).to(device)
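
# Normalize a skin's alpha channel: near-transparent pixels become fully
# transparent, everything else fully opaque.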
def clear_pix(x):
    datas = []
    for pixel in list(x.getdata()):
        r, g, b, a = pixel
        if a < 150:
            # Mostly-transparent pixel: make it fully transparent.
            datas.append((0, 0, 0, 0))
        else:
            # Otherwise keep the color and make it fully opaque.
            datas.append((r, g, b, 255))
    x.putdata(datas)
    return x
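
# Build a SkinViewFrame URL that renders the skin in 3D. The skin is passed
# as a URL-encoded base64 PNG data URI in the query string.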
def show_3D(image, print_link=False):
    # Accept either a PIL image or a path to a PNG file.
    if isinstance(image, PIL.Image.Image):
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        skin_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
    else:
        with open(image, "rb") as f:
            skin_base64 = base64.b64encode(f.read()).decode("utf-8")
    data = f"data:image/png;base64,{skin_base64}"
    quoted = quote_plus(data)
    url = f"https://wine-ineff.github.io/SkinViewFrame/skin.html?color=c2b4ff&data={quoted}"
    if print_link:
        print(url)
    return url
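
# Gradio callback: switch the pipeline to the selected scheduler, sample the
# requested number of skins, and return them plus the 3D-preview HTML.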
def generate(schedulers, inference_steps, images_num):
    scheduler_classes = {
        "DDIMScheduler": DDIMScheduler,
        "DDPMScheduler": DDPMScheduler,
        "DEISMultistepScheduler": DEISMultistepScheduler,
        "LCMScheduler": LCMScheduler,
        "PNDMScheduler": PNDMScheduler,
        "UniPCMultistepScheduler": UniPCMultistepScheduler,
    }
    # Swap in the selected scheduler, loading its config from the model repo.
    if schedulers in scheduler_classes:
        pipe.scheduler = scheduler_classes[schedulers].from_pretrained(
            "WiNE-iNEFF/Mineskin-Diffusion-v1.0", subfolder="scheduler"
        )
    # gr.Number may return floats, so cast to int before sampling.
    images = pipe(batch_size=int(images_num), num_inference_steps=int(inference_steps))
    return images, update_iframe(images)
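
# Wrap each generated skin in an <iframe> pointing at the 3D viewer page.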
def update_iframe(images):
    # One embedded 3D viewer per generated skin, stacked in a CSS grid.
    iframe_html = "<div style='display: grid; gap: 10px'>"  # grid-template-columns: repeat(2, 1fr);
    for img in images:
        iframe_url = show_3D(clear_pix(img), print_link=True)
        iframe_html += f"<iframe style='min-width: 100%;' src='{iframe_url}'></iframe>"
    iframe_html += "</div>"
    return iframe_html
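
# Gradio UI: scheduler dropdown, step / skin-count inputs, a gallery of raw
# outputs, and the embedded 3D previews.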
interface = gr.Blocks()

with interface:
    # Hardware note shown at the top of the page; the Colab notebook is offered
    # as a faster alternative when the Space is running on CPU.
    if device != "cpu":
        device_note = f"<b>{device.upper()}</b>."
    else:
        device_note = (
            f"<b>{device.upper()}</b>, so generation will be very slow. Please use the "
            "<a href='https://colab.research.google.com/#fileId=https://huggingface.co/WiNE-iNEFF/Mineskin-Diffusion-v1.0/blob/main/MineskinDiffusion.ipynb' "
            "style='text-decoration: none; color: orange;'>Google Colab notebook</a> "
            "to get the full experience of the model, or duplicate this Space on your own hardware."
        )
    gr.HTML(f"""
        <h1 style="min-width: 100%; text-align: center;">Mineskin Diffusion</h1>
        <p>This Space is running on {device_note}</p>
    """)
    with gr.Tabs():
        with gr.TabItem("v1.0"):
            with gr.Column():
                scheduler_type = gr.Dropdown(
                    ["DDIMScheduler", "DDPMScheduler", "DEISMultistepScheduler", "LCMScheduler", "PNDMScheduler", "UniPCMultistepScheduler"],
                    label="Type of scheduler",
                )
                with gr.Row():
                    inference_steps = gr.Number(
                        label="Amount of denoising steps",
                        value=30, minimum=5, maximum=1000,
                    )
                    images_num = gr.Number(
                        label="Amount of skins",
                        value=4, minimum=1, maximum=4,
                    )
                gallery = gr.Gallery(columns=4, object_fit="scale-down", min_width=76)
                iframe_output = gr.HTML()
                gen_btn = gr.Button("Generate")
                gen_btn.click(fn=generate, inputs=[scheduler_type, inference_steps, images_num], outputs=[gallery, iframe_output])

interface.launch()