# app.py — "Find my Butterfly" Hugging Face Space (author: sasha, HF Staff)
# Last update: "Update app.py", commit 466791c (verified), 4.66 kB.
# (The lines above are Hub page metadata, converted to comments so the
# file parses as Python.)
import pickle
import gradio as gr
from datasets import load_dataset
from transformers import AutoModel, AutoFeatureExtractor
from PIL import Image
# ---------------------------------------------------------------------------
# One-time setup: only runs once when the script is first run.
# ---------------------------------------------------------------------------

# Precomputed nearest-neighbour index over butterfly image embeddings.
# NOTE(review): pickle.load on a file bundled with the Space is fine, but
# never unpickle untrusted input.
with open("butts_1024_new.pickle", "rb") as handle:
    index = pickle.load(handle)

# Load model for computing embeddings of the query image; the feature
# extractor performs the model-specific preprocessing.
feature_extractor = AutoFeatureExtractor.from_pretrained(
    "sasha/autotrain-butterfly-similarity-2490576840"
)
model = AutoModel.from_pretrained("sasha/autotrain-butterfly-similarity-2490576840")

# Candidate images: the dataset whose rows the index entries refer back to.
dataset = load_dataset("sasha/butterflies_10k_names_multiple")
ds = dataset["train"]
def query(image, top_k=1):
    """Find the nearest-neighbour butterfly image for an input photo.

    Args:
        image: Query photo as delivered by the Gradio Image component
            (numpy array) — TODO confirm; it is forwarded unchanged to
            ``Image.fromarray`` downstream.
        top_k (int): Number of nearest neighbours to retrieve from the index.

    Returns:
        The input photo and its closest butterfly match side by side, with
        the "cadre.png" frame composited on top (a PIL image).
    """
    # Embed the query image with the fine-tuned similarity model.
    inputs = feature_extractor(image, return_tensors="pt")
    model_output = model(**inputs)
    embedding = model_output.pooler_output.detach()

    # Nearest-neighbour search: results[0] holds index positions,
    # results[1] the scores (scores were previously extracted into an
    # unused `logits` local — dropped).
    results = index.query(embedding, k=top_k)
    inx = results[0][0].tolist()

    # Fetch the best match's image from the candidate dataset.
    butterfly = ds.select(inx)["image"][0]
    return overlay_png_on_side_by_side_images(
        image, butterfly, "cadre.png", png_position=(0, 0)
    )
def overlay_png_on_side_by_side_images(
    person_image_path, insect_image_path, overlay_image_path, png_position=(0, 0)
):
    """Place two images side by side and composite a PNG frame on top.

    Despite their names, the first two arguments are in-memory images, not
    paths (names kept for backward compatibility with existing callers).

    Args:
        person_image_path: Query photo as a numpy array (converted via
            ``Image.fromarray``).
        insect_image_path: Matched butterfly as a PIL image.
        overlay_image_path (str): Path to the PNG frame to composite on top.
        png_position (tuple): (x, y) top-left corner for the PNG overlay.

    Returns:
        PIL.Image.Image: 576x384 RGBA collage with the frame applied.
    """
    # Normalise all three inputs to RGBA PIL images.
    img1 = Image.fromarray(person_image_path).convert("RGBA")
    img2 = insect_image_path.convert("RGBA")
    png_img = Image.open(overlay_image_path).convert("RGBA")

    # Fixed panel geometry: each image becomes 288x384, so the combined
    # canvas is 576x384 (matches the Gradio input component's size).
    panel_width, panel_height = 288, 384
    img1 = img1.resize((panel_width, panel_height), Image.LANCZOS)
    img2 = img2.resize((panel_width, panel_height), Image.LANCZOS)

    # Transparent canvas wide enough for both panels.
    combined_width = img1.width + img2.width
    combined_image = Image.new(
        "RGBA", (combined_width, panel_height), (0, 0, 0, 0)
    )

    # Paste the two panels side by side.
    combined_image.paste(img1, (0, 0))
    combined_image.paste(img2, (img1.width, 0))

    # Stretch the PNG frame to cover the whole canvas, then composite it
    # using its own alpha channel as the paste mask.
    # (The original body repeated every step above a second time verbatim —
    # a copy-paste duplicate with no effect on the result; removed.)
    png_img = png_img.resize((combined_width, panel_height), Image.LANCZOS)
    combined_image.paste(png_img, png_position, png_img)

    return combined_image
# ---------------------------------------------------------------------------
# Gradio UI. Components render in declaration order inside each context
# manager, so the statement order below defines the page layout.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Find my Butterfly 🦋")
    gr.Markdown(
        "## Use this Space to find your butterfly, based on the [iNaturalist butterfly dataset](https://huggingface.co/datasets/huggan/inat_butterflies_top10k)!"
    )
    with gr.Row():
        with gr.Column(scale=1):
            # Input photo sized to match one panel of the output collage.
            inputs = gr.Image(width=288, height=384)
            btn = gr.Button("Find my butterfly!")
            # NOTE(review): `description` is declared but never written to
            # anywhere in this file — possibly vestigial; kept as-is.
            description = gr.Markdown()
        with gr.Column(scale=2):
            outputs = gr.Image()
    gr.Markdown("### Image Examples")
    # Pre-cached example inputs; clicking one runs query() immediately.
    gr.Examples(
        examples=["elton.jpg", "ken.jpg", "gaga.jpg", "taylor.jpg"],
        inputs=inputs,
        outputs=outputs,
        fn=query,
        cache_examples=True,
    )
    # Wire the button: run query() on the uploaded image, show the collage.
    btn.click(query, inputs, outputs)
demo.launch()