Spaces: Build error
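A "Build error" status on Spaces typically means the container build itself failed, which is often a dependency problem (a package missing from `requirements.txt` or an incompatible pin) rather than a problem in the app code. Purely as a hedged starting point derived from the imports in the code below, a minimal `requirements.txt` for this Space might look like:

```
streamlit
tensorflow
numpy
pillow
huggingface_hub
requests
```

The Space's app code follows.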
```python
import streamlit as st
import tensorflow as tf
import numpy as np
from PIL import Image
import os
from huggingface_hub import login
from tensorflow import keras
import requests

# Get the Hugging Face token from environment variables
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    st.error("HF_TOKEN environment variable not set. Please set it before running the app.")
    st.stop()  # Stop execution if the token is missing

# Authenticate with Hugging Face
try:
    login(token=HF_TOKEN)
    st.success("Successfully logged in to Hugging Face Hub!")
except Exception as e:
    st.error(f"Hugging Face login failed: {e}")
    st.stop()

model_url = "https://huggingface.co/louiecerv/cats_dogs_recognition_tf_cnn/resolve/main/cats_dogs_recognition_tf_cnn.keras"
model_filename = "cats_dogs_recognition_tf_cnn.keras"  # Or any name you prefer
model_filepath = model_filename  # Save in the current directory

try:
    # 1. Download the model file
    response = requests.get(model_url, stream=True)
    response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
    with open(model_filepath, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

    # 2. Load the model from the local file
    model = tf.keras.models.load_model(model_filepath)
    st.success(f"Model loaded successfully from: {model_filepath}")

    # Optional: clean up the downloaded file
    # os.remove(model_filepath)  # Uncomment to delete the file after loading
except requests.exceptions.RequestException as e:
    st.error(f"Error downloading model: {e}")
    st.stop()
except Exception as e:
    st.error(f"Error loading model: {e}")
    st.stop()
```
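Because Streamlit re-runs the whole script on every interaction, the `requests` download and `tf.keras.models.load_model` call above execute again on each rerun. A minimal sketch of an alternative, assuming Streamlit 1.18+ for `st.cache_resource` and that the repo ID and filename match the `model_url` above (the helper name `load_cats_dogs_model` is made up for illustration):

```python
# Sketch only: cache the loaded model across reruns and let huggingface_hub
# handle the download (repo_id and filename are read off model_url above).
from huggingface_hub import hf_hub_download

@st.cache_resource
def load_cats_dogs_model():
    local_path = hf_hub_download(
        repo_id="louiecerv/cats_dogs_recognition_tf_cnn",
        filename="cats_dogs_recognition_tf_cnn.keras",
        token=HF_TOKEN,
    )
    return tf.keras.models.load_model(local_path)

# model = load_cats_dogs_model()  # would replace the manual download block above
```

`hf_hub_download` also keeps its own local cache, so the file is only fetched once per Space restart.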
```python
# Image preprocessing function with cropping
def preprocess_image(image):
    # 1. Crop to a square aspect ratio (center crop)
    width, height = image.size
    new_size = min(width, height)
    left = (width - new_size) // 2
    top = (height - new_size) // 2
    right = (width + new_size) // 2
    bottom = (height + new_size) // 2
    image = image.crop((left, top, right, bottom))

    # 2. Resize to the model's input size
    image = image.resize((128, 128))  # Use PIL's resize for consistency

    # 3. Ensure 3 channels (RGB); this also handles grayscale and RGBA uploads
    image = image.convert("RGB")

    # 4. Convert to a NumPy array and normalize to [0, 1]
    image_np = np.array(image).astype(np.float32) / 255.0

    # 5. Add a batch dimension -> shape (1, 128, 128, 3)
    image_np = np.expand_dims(image_np, axis=0)
    return image_np
```
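As a quick sanity check of `preprocess_image` in isolation, a hypothetical smoke test with a synthetic PIL image can confirm the batch shape and value range the model expects:

```python
# Hypothetical smoke test for preprocess_image (not part of the app itself).
from PIL import Image  # already imported at the top of the app

test_image = Image.new("RGB", (300, 200), color=(120, 60, 30))
batch = preprocess_image(test_image)
assert batch.shape == (1, 128, 128, 3)            # one image, 128x128, 3 channels
assert 0.0 <= batch.min() <= batch.max() <= 1.0   # normalized to [0, 1]
```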
```python
# Streamlit app
st.title("Cat vs. Dog Image Classifier")

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)

    processed_image = preprocess_image(image)  # Pass the PIL Image directly

    # Display the preprocessed image (optional, but helpful for debugging)
    processed_image_display = (processed_image[0] * 255.0).astype(np.uint8)
    processed_image_display = Image.fromarray(processed_image_display)
    st.image(processed_image_display, caption="Preprocessed Image", use_container_width=True)

    predictions = model.predict(processed_image)
    class_index = np.argmax(predictions)
    class_names = ["Cat", "Dog"]
    predicted_class = class_names[class_index]
    confidence = predictions[0][class_index] * 100

    st.write(f"## Prediction: {predicted_class} (Confidence: {confidence:.2f}%)")
```