# WasteWise — app.py (Hugging Face Space by fabianjkrueger; commit a54d745, "Deactivate example images.")
import gradio as gr
from fastai.vision.all import *
import skimage
# load image classifier
# Loads a fastai Learner exported as a pickle; the file must sit next to this
# script. NOTE(review): filename suggests a ResNet-101 backbone — confirm.
learn = load_learner('resnet101_waste_recogniser.pkl')
# get classes of waste
# `labels` is the DataLoaders vocab: the ordered list of class names the model
# predicts over; the dicts below are keyed by these exact strings.
labels = learn.dls.vocab
# Human-readable, singular display name for each model class label.
# Note: within this file the prediction function does not currently read this
# mapping — presumably it was meant for the user-facing output.
spelling_dict = dict(
    aluminum_foil="aluminum foil",
    apples="apple",
    banana_peels="banana peel",
    cardboard="cardboard",
    condoms="condom",
    diapers="diaper",
    food_waste="food waste",
    glass_bottle="glass bottle",
    old_books="book",
    oranges="orange",
    pans="pan",
    pizza_box="pizza box",
    plastic_bags="plastic bag",
    plastic_packaging="plastic packaging",
    plastic_toys="plastic toy",
    smartphone="smartphone",
    tampons="tampon",
    tea_bags="tea bag",
    tetrapack="tetra pak",
    toothbrush="toothbrush",
)
# Recommended disposal bin for each model class label (German household
# waste categories, e.g. "gelbe sack" / "wertstoffsammlung").
bin_dict = dict(
    aluminum_foil="gelbe sack",
    apples="bio waste",
    banana_peels="bio waste",
    cardboard="paper waste",
    condoms="residual waste",
    diapers="residual waste",
    food_waste="residual waste",
    glass_bottle="glass waste",
    old_books="paper waste",
    oranges="bio waste",
    pans="residual waste",
    pizza_box="residual waste",
    plastic_bags="plastic waste",
    plastic_packaging="plastic waste",
    plastic_toys="residual waste",
    smartphone="wertstoffsammlung",
    tampons="residual waste",
    tea_bags="bio waste",
    tetrapack="plastic waste",
    toothbrush="residual waste",
)
# define a function for the learner
def predict_full(img):
    """Classify a waste image and recommend a disposal bin.

    Parameters
    ----------
    img : image input from the Gradio webcam component (array/filepath form
        accepted by ``PILImage.create``).

    Returns
    -------
    tuple
        (``{class_label: probability}`` for the Label component,
        recommendation sentence for the text output).
    """
    img = PILImage.create(img)
    pred, pred_idx, probs = learn.predict(img)
    # fastai may return a Category object; normalize to str for dict lookups.
    pred = str(pred)
    confidences = {labels[i]: float(probs[i]) for i in range(len(labels))}
    # Bug fix: spelling_dict was declared "to adapt class spelling for output"
    # but never used — include the readable name in the recommendation.
    name = spelling_dict.get(pred, pred)
    return confidences, f'This looks like a {name}. It belongs into the {bin_dict[pred]}.'
# Interface copy: app name and the instruction text shown above the widgets.
title = "WasteWise"
# Built from adjacent string literals purely for readability; the resulting
# string is identical to a single one-line literal.
description = (
    "Hey! What do you want to throw away? \n "
    "Just take a picture by clicking the camera icon, then hit 'submit'. \n "
    "You will see the result and confidence to the right hand side as well as a recommendation below. \n "
    "To label the image as misclassified, just hit 'Flag'. \n "
    "To see what parts of the image are responsible for the output, hit 'Interpret' and wait for a few seconds. \n "
    "The parts of the image that contributed to increase the likelihood of the outputted class are marked red. \n "
    "The parts that decrease the class confidence are highlighted blue. \n "
    "The intensity of color corresponds to the importance of that part of the input. \n "
    "At the bottom you can load some example images."
)
# examples = [...] # enter examples and path here, save them in GitHub
# make a gradio interface
# Wires predict_full to a webcam input and two outputs (top-3 label + text),
# then starts the server (blocking call).
# NOTE(review): `gr.Webcam`, `gr.outputs.Label`, `interpretation=`, and
# `enable_queue=` are legacy Gradio APIs; newer releases replace them with
# gr.Image(sources=["webcam"]), gr.Label, and Interface.queue() — confirm the
# pinned gradio version before upgrading.
gr.Interface(fn=predict_full, inputs = gr.Webcam(shape=(512, 512)), title=title, description=description, interpretation = "default", enable_queue = True, outputs=[gr.outputs.Label(num_top_classes=3), "text"]).launch()