Create app.py
app.py (ADDED)
@@ -0,0 +1,378 @@
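"""OCR + AI-content-detection Space: TrOCR extracts text from images, then a
RoBERTa-based classifier estimates AI vs. Human likelihood for that text."""
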
import gradio as gr
# --- Import necessary classes ---
from transformers import (
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    pipeline,
    AutoTokenizer,
    RobertaForSequenceClassification,
    AutoConfig  # <--- Import AutoConfig
)
# ---
from PIL import Image
import traceback
import warnings
import json
import os
import shutil  # Not used directly, but keep for potential manual use

# --- Model IDs ---
TROCR_MODELS = {
    "Printed Text": "microsoft/trocr-large-printed",
    "Handwritten": "microsoft/trocr-large-handwritten",
}
DETECTOR_MODEL_ID = "SuperAnnotate/roberta-large-llm-content-detector"
print(f"Using AI Detector Model: {DETECTOR_MODEL_ID}")

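# The loaders below keep everything on CPU. A possible speed-up (an assumption,
# not something this commit does) would be to pick a device once and move each
# model to it after from_pretrained(), e.g.:
#   import torch
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   mdl.to(device)  # then also move pixel_values with .to(device) before generate()
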
# --- Load OCR Models (no changes here) ---
print("Loading OCR models...")
OCR_PIPELINES = {}
for name, model_id in TROCR_MODELS.items():
    try:
        proc = TrOCRProcessor.from_pretrained(model_id)
        mdl = VisionEncoderDecoderModel.from_pretrained(model_id)
        OCR_PIPELINES[name] = (proc, mdl)
        print(f"Loaded {name} OCR model.")
    except Exception as e:
        print(f"Error loading OCR model {name} ({model_id}): {e}")

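# OCR_PIPELINES maps display name -> (processor, model). A model that failed to
# load is simply absent, which analyze_image() detects via OCR_PIPELINES.get(...).
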
# --- Explicitly load config, tokenizer, and model ---
print(f"Loading AI detector components ({DETECTOR_MODEL_ID})...")
DETECTOR_PIPELINE = None
detector_tokenizer = None
detector_model = None
try:
    # 1. Load Configuration FIRST
    print("Loading detector config...")
    detector_config = AutoConfig.from_pretrained(DETECTOR_MODEL_ID)
    print(f"Loaded config. Expected hidden size: {detector_config.hidden_size}")  # Should be 1024

    # Add an assertion to halt if config is wrong (optional but helpful)
    if detector_config.hidden_size != 1024:
        raise ValueError(
            f"Loaded config specifies hidden size {detector_config.hidden_size}, "
            f"but expected 1024 for roberta-large. Check cache for {DETECTOR_MODEL_ID}."
        )

    # 2. Load Tokenizer
    print("Loading detector tokenizer...")
    detector_tokenizer = AutoTokenizer.from_pretrained(DETECTOR_MODEL_ID)

    # 3. Load Model using the specific class AND the loaded config
    print("Loading detector model with loaded config...")
    detector_model = RobertaForSequenceClassification.from_pretrained(
        DETECTOR_MODEL_ID,
        config=detector_config  # <--- Pass the loaded config
    )
    print("AI detector model and tokenizer loaded successfully.")

    # 4. Create Pipeline
    print("Creating AI detector pipeline...")
    DETECTOR_PIPELINE = pipeline(
        "text-classification",
        model=detector_model,
        tokenizer=detector_tokenizer,
        top_k=None
    )
    print("Created AI detector pipeline.")

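    # With top_k=None the pipeline returns a score for every class. Depending on
    # the transformers version and input shape, the result is either
    #   [{'label': 'AI', 'score': 0.93}, {'label': 'Human', 'score': 0.07}]
    # or the same list nested one level deeper; get_ai_and_human_scores() below
    # handles both shapes.
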
    # --- Optional: Label Test (keep from previous version) ---
    if DETECTOR_PIPELINE:
        try:
            print("Testing detector pipeline labels...")
            sample_output = DETECTOR_PIPELINE("This is a reasonably long test sentence to check the model labels.", truncation=True)
            print(f"Sample detector output structure: {sample_output}")
            # ... (rest of label testing code) ...
            if sample_output and isinstance(sample_output, list) and len(sample_output) > 0:
                if isinstance(sample_output[0], list) and len(sample_output[0]) > 0:
                    labels = [item.get('label', 'N/A') for item in sample_output[0] if isinstance(item, dict)]
                    print(f"Detected labels from sample run: {labels}")
                elif isinstance(sample_output[0], dict):
                    labels = [item.get('label', 'N/A') for item in sample_output if isinstance(item, dict)]
                    print(f"Detected labels from sample run (non-nested): {labels}")
            if detector_model and detector_model.config and detector_model.config.id2label:
                print(f"Labels from model config: {detector_model.config.id2label}")  # Should show {0: 'Human', 1: 'AI'}
        except Exception as test_e:
            print(f"Could not perform detector label test: {test_e}")
            traceback.print_exc()

except Exception as e:
    print(f"CRITICAL Error loading AI detector components ({DETECTOR_MODEL_ID}): {e}")
    traceback.print_exc()

    # --- Simplified Cache Clearing Suggestion ---
    # Get cache path using environment variable or default
    hf_home = os.environ.get("HF_HOME", os.path.expanduser("~/.cache/huggingface"))
    hub_cache_path = os.path.join(hf_home, "hub")  # Models are usually in the 'hub' subfolder

    print("\n--- TROUBLESHOOTING SUGGESTION ---")
    print(f"The model loading failed: {e}")
    print("\nThis *strongly* indicates a problem with the cached files for this model.")
    print("The most likely solution is to MANUALLY clear the cache for this model.")
    print("\n1. Stop this application.")
    print(f"2. Go to your Hugging Face hub cache directory (usually found under '{hub_cache_path}').")
    print(f"   (If you've set the HF_HOME environment variable, check there instead: '{hf_home}')")
    # Construct the model-specific cache folder name
    model_cache_folder_name = f"models--{DETECTOR_MODEL_ID.replace('/', '--')}"
    print(f"3. Delete the specific folder for this model: '{model_cache_folder_name}'")
    print(f"   Full path example: {os.path.join(hub_cache_path, model_cache_folder_name)}")
    print("4. Restart the application. This will force a fresh download.")
    print("\nMake sure no other applications are using the cache while deleting.")
    print("--- END TROUBLESHOOTING ---")
    # ---

    # DETECTOR_PIPELINE remains None

# --- Functions get_ai_and_human_scores, analyze_image, classify_text remain the same ---
# (Ensure get_ai_and_human_scores correctly handles "AI" and "Human" based on config)
def get_ai_and_human_scores(results):
    """
    Processes detector results to get likelihood scores for both AI and Human classes.
    Handles various label formats including 'AI'/'Human', 'LABEL_0'/'LABEL_1', etc.
    Returns:
        tuple: (ai_display_string, human_display_string)
    """
    ai_prob = 0.0
    human_prob = 0.0
    status_message = "Status: Initializing..."  # Default status

    if not results:
        print("Warning: Received empty results for AI detection.")
        status_message = "Error: No results received"
        return status_message, "N/A"

    # Handle potential nested list structure
    score_list = []
    if isinstance(results, list) and len(results) > 0:
        if isinstance(results[0], list) and len(results[0]) > 0:
            score_list = results[0]
        elif isinstance(results[0], dict):
            score_list = results
        else:
            status_message = f"Error: Unexpected detector output format (inner list type: {type(results[0])})"
            print(f"Warning: {status_message}. Results[0]: {results[0]}")
            return status_message, "N/A"
    else:
        status_message = f"Error: Unexpected detector output format (outer type: {type(results)})"
        print(f"Warning: {status_message}. Results: {results}")
        return status_message, "N/A"

    # Build label→score map (uppercase labels for robust matching)
    lbl2score = {}
    parse_errors = []
    for entry in score_list:
        if isinstance(entry, dict) and "label" in entry and "score" in entry:
            try:
                score = float(entry["score"])
                lbl2score[entry["label"].upper()] = score
            except (ValueError, TypeError):
                parse_errors.append(f"Invalid score format: {entry}")
        else:
            parse_errors.append(f"Invalid entry format: {entry}")

    if parse_errors:
        print(f"Warning: Encountered parsing errors in score list: {parse_errors}")

    if not lbl2score:
        status_message = "Error: Could not parse any valid scores from detector output"
        print(f"Warning: {status_message}. Score list was: {score_list}")
        return status_message, "N/A"

    label_keys_found = ", ".join(lbl2score.keys())
    found_pair = False
    inferred = False

    # --- Determine AI and Human probabilities based on labels ---
    upper_keys = lbl2score.keys()

    # Prioritize AI/HUMAN as per model config
    if "AI" in upper_keys and "HUMAN" in upper_keys:
        ai_prob = lbl2score["AI"]
        human_prob = lbl2score["HUMAN"]
        found_pair = True
        status_message = "OK (Used AI/HUMAN labels)"
    # Fallbacks
    elif "LABEL_1" in upper_keys and "LABEL_0" in upper_keys:
        ai_prob = lbl2score["LABEL_1"]
        human_prob = lbl2score["LABEL_0"]
        found_pair = True
        status_message = "OK (Used LABEL_1/LABEL_0 - Check Mapping)"
        print("Warning: Used fallback LABEL_1/LABEL_0. Config expects AI/HUMAN.")
    # Add other fallbacks if necessary (FAKE/REAL, MACHINE/HUMAN)

    # Inference logic
    if not found_pair:
        if "AI" in upper_keys:
            ai_prob = lbl2score["AI"]
            human_prob = max(0.0, 1.0 - ai_prob)
            inferred = True
            status_message = "OK (Inferred from AI label)"
        elif "HUMAN" in upper_keys:
            human_prob = lbl2score["HUMAN"]
            ai_prob = max(0.0, 1.0 - human_prob)
            inferred = True
            status_message = "OK (Inferred from HUMAN label)"
        # Add fallback inference if needed

    if not found_pair and not inferred:  # Only an error if neither path produced scores
        status_message = f"Error: Could not determine AI/Human pair from labels [{label_keys_found}]"
        print(f"Warning: {status_message}")

    # --- Format output strings ---
    ai_display_str = f"{ai_prob*100:.2f}%"
    human_display_str = f"{human_prob*100:.2f}%"

    if "Error:" in status_message:
        ai_display_str = status_message
        human_display_str = "N/A"

    print(f"Score Status: {status_message}. AI={ai_display_str}, Human={human_display_str}")
    return ai_display_str, human_display_str

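# Example of the mapping (scores invented for illustration):
#   get_ai_and_human_scores([[{'label': 'AI', 'score': 0.93},
#                             {'label': 'Human', 'score': 0.07}]])
#   -> ('93.00%', '7.00%')
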
# --- analyze_image function (no changes needed) ---
def analyze_image(image: Image.Image, ocr_choice: str):
    """Performs OCR and AI Content Detection, returns both AI and Human %."""
    extracted = ""
    ai_result_str = "N/A"
    human_result_str = "N/A"
    status_update = "Awaiting input..."

    if image is None:
        status_update = "Please upload an image first."
        return extracted, ai_result_str, human_result_str, status_update
    if not ocr_choice or ocr_choice not in TROCR_MODELS:
        status_update = "Please select a valid OCR model."
        return extracted, ai_result_str, human_result_str, status_update
    if OCR_PIPELINES.get(ocr_choice) is None:
        return "", "N/A", "N/A", f"Error: OCR model '{ocr_choice}' failed to load or is unavailable."
    if DETECTOR_PIPELINE is None:
        return "", "N/A", "N/A", f"Critical Error: AI Detector model ({DETECTOR_MODEL_ID}) failed during startup. Check logs for details (possible cache issue?)."

    try:
        status_update = f"Processing with {ocr_choice} OCR..."
        print(status_update)
        proc, mdl = OCR_PIPELINES[ocr_choice]
        if image.mode != "RGB":
            image = image.convert("RGB")
        pix = proc(images=image, return_tensors="pt").pixel_values
        tokens = mdl.generate(pix, max_length=1024)
        extracted = proc.batch_decode(tokens, skip_special_tokens=True)[0]
        extracted = extracted.strip()

        if not extracted:
            status_update = "OCR completed, but no text was extracted."
            print(status_update)
            return extracted, "N/A", "N/A", status_update

        status_update = f"Detecting AI/Human content in {len(extracted)} characters..."
        print(status_update)
        results = DETECTOR_PIPELINE(extracted, truncation=True)  # truncate to the model's 512-token limit

        ai_result_str, human_result_str = get_ai_and_human_scores(results)

        if "Error:" in ai_result_str:
            status_update = ai_result_str
        else:
            status_update = "Analysis complete."
        print(f"Final Status: {status_update}")

        return extracted, ai_result_str, human_result_str, status_update

    except Exception as e:
        error_msg = f"Error during image analysis: {e}"
        print(error_msg)
        traceback.print_exc()
        status_update = error_msg
        return extracted, "Error", "Error", status_update

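# Note: the DETECTOR_PIPELINE calls in analyze_image() above and classify_text()
# below pass truncation=True, matching the label test earlier: the underlying
# roberta-large encoder accepts at most 512 tokens, so longer text is scored on
# its first 512 tokens instead of raising an error.
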
# --- classify_text function (no changes needed) ---
def classify_text(text: str):
    """Classifies provided text, returning both AI and Human %."""
    ai_result_str = "N/A"
    human_result_str = "N/A"

    if DETECTOR_PIPELINE is None:
        return f"Critical Error: AI Detector model ({DETECTOR_MODEL_ID}) failed during startup. Check logs for details (possible cache issue?).", "N/A"
    if not text or text.isspace():
        return "Please enter some text.", "N/A"

    print("Classifying text...")
    try:
        results = DETECTOR_PIPELINE(text, truncation=True)  # truncate to the model's 512-token limit

        ai_result_str, human_result_str = get_ai_and_human_scores(results)

        if "Error:" not in ai_result_str:
            print("Classification complete.")
        else:
            print(f"Classification completed with issues: {ai_result_str}")

        return ai_result_str, human_result_str

    except Exception as e:
        error_msg = f"Error during text classification: {e}"
        print(error_msg)
        traceback.print_exc()
        return error_msg, "Error"


# --- Gradio Interface (no changes needed) ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        f"""
        ## OCR + AI/Human Content Detection
        Upload an image or paste text. The tool extracts text via OCR (if image) and analyzes it
        using an AI content detector (`{DETECTOR_MODEL_ID}`)
        to estimate the likelihood of it being AI-generated vs. Human-written.

        **Disclaimer:** AI content detection is challenging and not 100% accurate. These likelihoods
        are estimates based on the model's training data and may not be definitive.
        Performance varies with text type, length, and AI generation methods.

        **Label Assumption:** Uses the model's configured labels (`AI`/`Human`). Fallbacks for other label formats are included but may be less reliable if the model deviates from its configuration.
        """
    )
    with gr.Tab("Analyze Image"):
        with gr.Row():
            with gr.Column(scale=2):
                img_in = gr.Image(type="pil", label="Upload Image", sources=["upload", "clipboard"])
            with gr.Column(scale=1):
                ocr_dd = gr.Dropdown(
                    list(TROCR_MODELS.keys()), label="1. Select OCR Model", info="Choose based on text type in image."
                )
                run_btn = gr.Button("2. Analyze Image", variant="primary")
                status_img = gr.Label(value="Awaiting image analysis...", label="Status")

        with gr.Row():
            text_out_img = gr.Textbox(label="Extracted Text", lines=10, interactive=False)
            with gr.Column(scale=1):
                ai_out_img = gr.Textbox(label="AI Likelihood %", interactive=False)
            with gr.Column(scale=1):
                human_out_img = gr.Textbox(label="Human Likelihood %", interactive=False)

        run_btn.click(
            fn=analyze_image,
            inputs=[img_in, ocr_dd],
            outputs=[text_out_img, ai_out_img, human_out_img, status_img],
            queue=True
        )

    with gr.Tab("Classify Text"):
        with gr.Column():
            text_in_classify = gr.Textbox(label="Paste or type text here", lines=10)
            classify_btn = gr.Button("Classify Text", variant="primary")
            with gr.Row():
                with gr.Column(scale=1):
                    ai_out_classify = gr.Textbox(label="AI Likelihood %", interactive=False)
                with gr.Column(scale=1):
                    human_out_classify = gr.Textbox(label="Human Likelihood %", interactive=False)

        classify_btn.click(
            fn=classify_text,
            inputs=[text_in_classify],
            outputs=[ai_out_classify, human_out_classify],
            queue=True
        )

    gr.HTML(f"<footer style='text-align:center; margin-top: 20px; color: grey;'>Powered by TrOCR & {DETECTOR_MODEL_ID}</footer>")

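# queue=True routes both click events through Gradio's request queue, which is
# how the long-running OCR + detection calls avoid per-request timeouts when
# several users submit at once.
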
if __name__ == "__main__":
    print("Starting Gradio demo...")
    demo.launch(share=False, server_name="0.0.0.0")
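
# To run locally (assumed environment; this commit pins no versions):
#   pip install gradio transformers torch pillow
#   python app.py
# server_name="0.0.0.0" binds all interfaces so the app is reachable inside a
# container or hosted Space; share=False skips the public gradio.live link.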