GitHub Actions committed
Commit cbb3515 · 0 Parent(s)

Deploy to Hugging Face Space: product-image-update-port-6

Files changed (41)
  1. .gitattributes +72 -0
  2. Dockerfile +116 -0
  3. README.md +0 -0
  4. app.py +0 -0
  5. failed-background-removal/8988497117405_7.webp +3 -0
  6. failed-background-removal/8988526510301_5.webp +3 -0
  7. failed-background-removal/8988526510301_6.webp +3 -0
  8. failed-background-removal/8988526772445_4.webp +3 -0
  9. failed-background-removal/8988526870749_10.webp +3 -0
  10. requirements.txt +0 -0
  11. src/api/__init__.py +0 -0
  12. src/api/gpu_functions.py +0 -0
  13. src/api/routes.py +0 -0
  14. src/config/__init__.py +0 -0
  15. src/config/constants.py +0 -0
  16. src/models/__init__.py +0 -0
  17. src/models/model_loader.py +0 -0
  18. src/pipeline/__init__.py +0 -0
  19. src/pipeline/executor.py +0 -0
  20. src/pipeline/pipeline_steps.py +0 -0
  21. src/processing/bounding_box/bounding_box.py +0 -0
  22. src/processing/bounding_box/flow_diagram.mermaid +98 -0
  23. src/processing/bounding_box/head_model.py +0 -0
  24. src/processing/bounding_box/rtdetr_model.py +0 -0
  25. src/processing/bounding_box/yolos_fashionpedia_model.py +0 -0
  26. src/processing/cropping_padding/cropping_padding.py +0 -0
  27. src/processing/cropping_padding/flow_diagram.mermaid +224 -0
  28. src/processing/image_download/flow_diagram.md +0 -0
  29. src/processing/image_download/flow_diagram.mermaid +0 -0
  30. src/processing/image_download/image_download.py +0 -0
  31. src/processing/remove_background/flow_diagram.mermaid +349 -0
  32. src/processing/remove_background/remove_background.py +0 -0
  33. src/processing/return_images/flow_diagram.mermaid +61 -0
  34. src/processing/return_images/return_images.py +0 -0
  35. src/processing/under_development/under_development.py +0 -0
  36. src/utils/__init__.py +0 -0
  37. src/utils/context_utils.py +0 -0
  38. src/utils/logging_utils.py +0 -0
  39. tests/__init__.py +0 -0
  40. tests/config.py +0 -0
  41. tests/test_full_pipeline.py +0 -0
.gitattributes ADDED
@@ -0,0 +1,72 @@
+ # Standard LFS patterns
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+
+ # Model-specific patterns
+ models/**/*.bin filter=lfs diff=lfs merge=lfs -text
+ models/**/*.safetensors filter=lfs diff=lfs merge=lfs -text
+ models/**/*.onnx filter=lfs diff=lfs merge=lfs -text
+ models/**/*.pt filter=lfs diff=lfs merge=lfs -text
+ models/**/*.pth filter=lfs diff=lfs merge=lfs -text
+
+ # Cache patterns
+ .cache/**/*.bin filter=lfs diff=lfs merge=lfs -text
+ .cache/**/*.safetensors filter=lfs diff=lfs merge=lfs -text
+ .cache/**/*.json filter=lfs diff=lfs merge=lfs -text
+
+ # Image patterns (for sample images)
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+
+ # Video patterns (if needed)
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.avi filter=lfs diff=lfs merge=lfs -text
+ *.mov filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
+
+ # Text files (not LFS)
+ *.txt -filter=lfs -diff=lfs -merge=lfs text
+ *.md -filter=lfs -diff=lfs -merge=lfs text
+ *.json -filter=lfs -diff=lfs -merge=lfs text
+ *.py -filter=lfs -diff=lfs -merge=lfs text
+ *.yml -filter=lfs -diff=lfs -merge=lfs text
+ *.yaml -filter=lfs -diff=lfs -merge=lfs text
+ requirements.txt -filter=lfs -diff=lfs -merge=lfs text
+ README.md -filter=lfs -diff=lfs -merge=lfs text
Dockerfile ADDED
@@ -0,0 +1,116 @@
+ version: '3.8'
+
+ services:
+   product-image-update:
+     build:
+       context: .
+       dockerfile: Dockerfile
+     ports:
+       - "7860:7860"
+     environment:
+       # Copy from .env file or set directly
+       - HF_TOKEN=${HF_TOKEN}
+       - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0}
+       - PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
+       - TRANSFORMERS_CACHE=/app/.cache/huggingface
+       - HF_HOME=/app/.cache/huggingface
+       - GRADIO_SERVER_NAME=0.0.0.0
+       - GRADIO_SERVER_PORT=7860
+       - DEBUG=${DEBUG:-false}
+     volumes:
+       # Mount cache directories for faster restarts
+       - huggingface-cache:/app/.cache/huggingface
+       - torch-cache:/app/.cache/torch
+       - models-cache:/app/models
+       # Mount processed images directory
+       - ./processed_imgs:/app/processed_imgs
+       # For development - mount source code
+       - ./app.py:/app/app.py:ro
+       - ./utils.py:/app/utils.py:ro
+       - ./image-download:/app/image-download:ro
+       - ./remove-background:/app/remove-background:ro
+       - ./bounding-box:/app/bounding-box:ro
+       - ./cropping-padding:/app/cropping-padding:ro
+       - ./return-images:/app/return-images:ro
+     deploy:
+       resources:
+         limits:
+           memory: 16G
+         reservations:
+           devices:
+             - driver: nvidia
+               count: 1
+               capabilities: [gpu]
+     healthcheck:
+       test: ["CMD", "curl", "-f", "http://localhost:7860/health"]
+       interval: 30s
+       timeout: 10s
+       retries: 3
+       start_period: 60s
+     restart: unless-stopped
+     networks:
+       - app-network
+
+   # Optional: Nginx reverse proxy for production
+   nginx:
+     image: nginx:alpine
+     ports:
+       - "80:80"
+       - "443:443"
+     volumes:
+       - ./nginx.conf:/etc/nginx/nginx.conf:ro
+       - ./ssl:/etc/nginx/ssl:ro
+     depends_on:
+       - product-image-update
+     networks:
+       - app-network
+     profiles:
+       - production
+
+   # Optional: Monitoring with Prometheus
+   prometheus:
+     image: prom/prometheus:latest
+     ports:
+       - "9090:9090"
+     volumes:
+       - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+       - prometheus-data:/prometheus
+     command:
+       - '--config.file=/etc/prometheus/prometheus.yml'
+       - '--storage.tsdb.path=/prometheus'
+     networks:
+       - app-network
+     profiles:
+       - monitoring
+
+   # Optional: Grafana for visualization
+   grafana:
+     image: grafana/grafana:latest
+     ports:
+       - "3000:3000"
+     environment:
+       - GF_SECURITY_ADMIN_PASSWORD=admin
+     volumes:
+       - grafana-data:/var/lib/grafana
+     depends_on:
+       - prometheus
+     networks:
+       - app-network
+     profiles:
+       - monitoring
+
+ volumes:
+   huggingface-cache:
+     driver: local
+   torch-cache:
+     driver: local
+   models-cache:
+     driver: local
+   prometheus-data:
+     driver: local
+   grafana-data:
+     driver: local
+
+ networks:
+   app-network:
+     driver: bridge
README.md ADDED
Binary file (247 Bytes). View file
 
app.py ADDED
Binary file (36.2 kB). View file
 
failed-background-removal/8988497117405_7.webp ADDED

Git LFS Details

  • SHA256: 50e6301503e86898bcaea59082e5580560aea73f93466cd27a503661e7b0d9b9
  • Pointer size: 131 Bytes
  • Size of remote file: 219 kB
failed-background-removal/8988526510301_5.webp ADDED

Git LFS Details

  • SHA256: 6b8b32442181728395159d48e4472dffadd6fd7cd97430e31e960d3643e8e80b
  • Pointer size: 131 Bytes
  • Size of remote file: 460 kB
failed-background-removal/8988526510301_6.webp ADDED

Git LFS Details

  • SHA256: d609ea0020c9f89b4eed9d3a5a484b3ca0424629fdfe661050d774cec8473db2
  • Pointer size: 131 Bytes
  • Size of remote file: 472 kB
failed-background-removal/8988526772445_4.webp ADDED

Git LFS Details

  • SHA256: 2c2df0ba7342e7674e9e78a42b60f9b893ea2dd766c683aa89dc99038c3dda34
  • Pointer size: 130 Bytes
  • Size of remote file: 85.7 kB
failed-background-removal/8988526870749_10.webp ADDED

Git LFS Details

  • SHA256: 92392b76352a17d39b6591f2e6f2fa8a5a801db814ac3bb29e9e61e064a991e6
  • Pointer size: 131 Bytes
  • Size of remote file: 232 kB
requirements.txt ADDED
Binary file (1.34 kB). View file
 
src/api/__init__.py ADDED
Binary file (159 Bytes). View file
 
src/api/gpu_functions.py ADDED
Binary file (3.03 kB). View file
 
src/api/routes.py ADDED
Binary file (1.62 kB). View file
 
src/config/__init__.py ADDED
Binary file (2.13 kB). View file
 
src/config/constants.py ADDED
Binary file (3.16 kB). View file
 
src/models/__init__.py ADDED
Binary file (3.25 kB). View file
 
src/models/model_loader.py ADDED
Binary file (26.5 kB). View file
 
src/pipeline/__init__.py ADDED
Binary file (325 Bytes). View file
 
src/pipeline/executor.py ADDED
Binary file (1.05 kB). View file
 
src/pipeline/pipeline_steps.py ADDED
Binary file (808 Bytes). View file
 
src/processing/bounding_box/bounding_box.py ADDED
Binary file (49.2 kB). View file
 
src/processing/bounding_box/flow_diagram.mermaid ADDED
@@ -0,0 +1,98 @@
+ flowchart TD
+ classDef blueBox fill:#0000ff33,stroke:#0000ff,color:#000
+ classDef greenBox fill:#00ff0033,stroke:#00ff00,color:#000
+ classDef violetBox fill:#ee82ee33,stroke:#ee82ee,color:#000
+ classDef orangeBox fill:#ffa50033,stroke:#ffa500,color:#000
+ classDef redBox fill:#ff000033,stroke:#ff0000,color:#000
+ classDef multiBox fill:lightyellow,stroke:#333,color:#000
+ classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+ classDef dataNode fill:#d5e8d4,stroke:#82b366,color:#000
+
+ Input[/"Input Image"/] --> Step1
+
+ subgraph Models["Detection Models"]
+ direction TB
+ RTDETR["RT-DETR Model
+ 🟦 Main Product Detection:
+ person, coat, dress, jacket,
+ shirt, skirt, pants, shorts
+
+ 🟧 Some Features:
+ tie → collar"]:::multiBox
+
+ RTDETR_A["RT-DETR Artifact Detection
+ 🟥 Artifacts:
+ backpack, handbag → bag,
+ bottle, cup → cup,
+ book, cell phone, camera,
+ umbrella"]:::redBox
+
+ YOLO["YOLOv11 Model
+ 🟦 Main Products:
+ jacket, coat, shirt, dress,
+ vest, pants, jeans, shorts
+
+ 🟪 Shoes:
+ footwear, shoes, boots,
+ high heels, sandals
+
+ 🟧 Some Features:
+ tie → collar"]:::multiBox
+
+ HEAD["Head Detection Model
+ 🟩 Head:
+ face, head"]:::greenBox
+ end
+
+ Step1["Step 1: Define Largest Box
+ Use RT-DETR to identify
+ the main product region"]:::processNode
+ RTDETR --> Step1
+ Step1 --> Step2
+
+ Step2["Step 2: Multi-model Detection
+ Run all models on
+ largest box region"]:::processNode
+ RTDETR --> Step2
+ RTDETR_A --> Step2
+ YOLO --> Step2
+ HEAD --> Step2
+ Step2 --> Step3
+
+ Step3["Step 3: Color Assignment
+ & Keyword Mapping"]:::processNode --> Categories
+
+ subgraph Categories["Map objects to categories"]
+ direction TB
+ Product["🟦 BLUE: Product Type (0.4)
+ jacket, shirt, vest,
+ jeans, shorts, skirt,
+ overall, dress"]:::blueBox
+
+ Head["🟩 GREEN: Head (0.5)
+ head"]:::greenBox
+
+ Shoes["🟪 VIOLET: Shoes (0.5)
+ shoes"]:::violetBox
+
+ Features["🟧 ORANGE: Features (0.4)
+ neckline, collar, sleeve,
+ closure, pocket"]:::orangeBox
+
+ Artifacts["🟥 RED: Artifacts (0.7)
+ bag, cup, hanger, book,
+ phone, camera, umbrella"]:::redBox
+ end
+
+ Categories --> Step4
+
+ Step4["Step 4: Adjust Blue Box
+ Refine product box based on
+ head and shoe positions"]:::processNode --> Step5
+
+ Step5["Step 5: Draw Boxes
+ Add colored bbox
+ to image"]:::processNode --> Output
+
+ Output["Processed Image
+ With Boxes"]:::dataNode
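
For reference, a minimal Python sketch of the Step 3 "Color Assignment & Keyword Mapping" logic described in the diagram above. The per-category confidence thresholds (0.4 / 0.5 / 0.5 / 0.4 / 0.7) and remaps such as tie → collar and handbag → bag are taken from the diagram; the dictionary layout and the helper name assign_category are illustrative assumptions, not the code shipped in bounding_box.py (added above as a binary blob).

# Illustrative sketch of keyword-to-category mapping with per-category thresholds.
# Values come from the diagram; names are assumptions, not the repository's code.

CATEGORY_THRESHOLDS = {
    "product": 0.4,   # blue box
    "head": 0.5,      # green box
    "shoes": 0.5,     # violet box
    "feature": 0.4,   # orange box
    "artifact": 0.7,  # red box
}

# Raw detector labels remapped to pipeline keywords (e.g. tie -> collar).
KEYWORD_MAP = {
    "tie": "collar",
    "backpack": "bag",
    "handbag": "bag",
    "bottle": "cup",
}

CATEGORY_KEYWORDS = {
    "product": {"jacket", "coat", "shirt", "vest", "jeans", "pants", "shorts", "skirt", "overall", "dress"},
    "head": {"head", "face"},
    "shoes": {"shoes", "footwear", "boots", "high heels", "sandals"},
    "feature": {"neckline", "collar", "sleeve", "closure", "pocket"},
    "artifact": {"bag", "cup", "hanger", "book", "phone", "camera", "umbrella"},
}


def assign_category(label: str, score: float):
    """Map a detector label to a category if it clears that category's threshold."""
    keyword = KEYWORD_MAP.get(label, label)
    for category, keywords in CATEGORY_KEYWORDS.items():
        if keyword in keywords and score >= CATEGORY_THRESHOLDS[category]:
            return category, keyword
    return None  # detection ignored


if __name__ == "__main__":
    print(assign_category("tie", 0.55))      # ('feature', 'collar')
    print(assign_category("handbag", 0.65))  # None: below the 0.7 artifact threshold
    print(assign_category("dress", 0.45))    # ('product', 'dress')
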
src/processing/bounding_box/head_model.py ADDED
Binary file (5.25 kB). View file
 
src/processing/bounding_box/rtdetr_model.py ADDED
Binary file (13.9 kB). View file
 
src/processing/bounding_box/yolos_fashionpedia_model.py ADDED
Binary file (12.1 kB). View file
 
src/processing/cropping_padding/cropping_padding.py ADDED
Binary file (40.1 kB). View file
 
src/processing/cropping_padding/flow_diagram.mermaid ADDED
@@ -0,0 +1,224 @@
+ flowchart TD
+ classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+ classDef imageNode fill:#d5e8d4,stroke:#82b366,color:#000
+ classDef decisionNode fill:#fff2cc,stroke:#d6b656,color:#000
+ classDef squareNode fill:#e3f2fd,stroke:#1976d2,color:#000
+ classDef landscapeNode fill:#e8f5e8,stroke:#388e3c,color:#000
+ classDef portraitNode fill:#fce4ec,stroke:#c2185b,color:#000
+ classDef transformNode fill:#f3e5f5,stroke:#7b1fa2,color:#000
+ classDef borderNode fill:#fff3e0,stroke:#f57c00,color:#000
+
+ Input[/"Input RGBA Image
+ with transparent background"/]:::imageNode
+ Input --> InitialCrop
+
+ InitialCrop["Initial Cropping
+ • Use blue box coordinates from detection
+ • Remove excess vertical space
+ • Preserve product boundaries"]:::processNode
+ InitialCrop --> ShrinkBox
+
+ ShrinkBox["Shrink Primary Box
+ • Analyze alpha channel transparency
+ • Remove white-only regions
+ • Create tight bounding box"]:::processNode
+ ShrinkBox --> BorderDetect
+
+ BorderDetect["Border Detection Analysis
+ • Measure edge coverage ratios
+ • Calculate feathering smoothness
+ • Classify border types as straight lines"]:::processNode
+ BorderDetect --> OrientCheck
+
+ OrientCheck{"Determine Image Orientation
+ Width vs Height Analysis"}:::decisionNode
+ OrientCheck -->|"Width ≈ Height"| SquareFlow
+ OrientCheck -->|"Width > Height"| LandscapeFlow
+ OrientCheck -->|"Height > Width"| PortraitFlow
+
+ subgraph SquareFlow["Square Image Transformation Pipeline □"]
+ SqInput["Square Input □
+ Width ≈ Height"]:::squareNode
+ SqInput --> SqCheck{"Detect Special Cases"}:::decisionNode
+
+ SqCheck -->|"Shoes Detected"| SqShoes["Shoes Transformation:
+ □ → Pad bottom only
+ □ → Ignore lower border flags
+ □ → Result: Slightly taller square"]:::squareNode
+
+ SqCheck -->|"Head + Borders"| SqHeadBorder["Head with Borders:
+ □ → No padding changes
+ □ → Preserve existing borders
+ □ → Result: Original square maintained"]:::squareNode
+
+ SqCheck -->|"Border Lines"| SqBorders["Border Lines Present:
+ □ → No additional padding
+ □ → Maintain current dimensions
+ □ → Result: Original square preserved"]:::squareNode
+
+ SqCheck -->|"Default Case"| SqDefault["Universal Padding:
+ □ → UNIVERSAL_PAD_RATIO on all sides
+ □ → Equal padding: top, bottom, left, right
+ □ → Result: Larger square with even spacing"]:::squareNode
+
+ SqShoes --> SqOutput["□ Final Square Output"]:::squareNode
+ SqHeadBorder --> SqOutput
+ SqBorders --> SqOutput
+ SqDefault --> SqOutput
+ end
+
+ subgraph LandscapeFlow["Landscape Image Transformation Pipeline ▭"]
+ LsInput["Landscape Input ▭
+ Width > Height"]:::landscapeNode
+ LsInput --> LsCheck{"Detect Special Cases"}:::decisionNode
+
+ LsCheck -->|"Shoes Detected"| LsShoes["Shoes Transformation:
+ ▭ → Add bottom padding
+ ▭ → Crop to square format
+ ▭ → Result: Square with shoes at bottom"]:::transformNode
+
+ LsCheck -->|"Head Detected"| LsHead["Head Transformation:
+ ▭ → Remove top padding
+ ▭ → Keep landscape shape square
+ ▭ → Result: Square with head positioned well"]:::transformNode
+
+ LsCheck -->|"Border Lines"| LsBorders["Border Lines Transformation:
+ ▭ → Coverage-based cropping
+ ▭ → Analyze content distribution
+ ▭ → Result: Square from content analysis"]:::transformNode
+
+ LsCheck -->|"Default Case"| LsDefault["Two-Step Transformation:
+ ▭ → Step 1: Pad height to approach square
+ ▭ → Step 2: Equalize to perfect square
+ ▭ → Result: Centered square with even padding"]:::transformNode
+
+ LsShoes --> LsOutput["□ Final Square Output"]:::landscapeNode
+ LsHead --> LsOutput
+ LsBorders --> LsOutput
+ LsDefault --> LsOutput
+ end
+
+ subgraph PortraitFlow["Portrait Image Transformation Pipeline ▯"]
+ PtInput["Portrait Input ▯
+ Height > Width"]:::portraitNode
+ PtInput --> PtCheck{"Detect Special Cases"}:::decisionNode
+
+ PtCheck -->|"Shoes Detected"| PtShoes["Shoes Transformation:
+ ▯ → Never pad top
+ ▯ → Always pad bottom
+ ▯ → Pad sides to square
+ ▯ → Result: Square with shoes at bottom"]:::transformNode
+
+ PtCheck -->|"Head Detected"| PtHead["Head Transformation:
+ ▯ → Pad left and right only
+ ▯ → Maintain head position
+ ▯ → Result: Square with head positioned"]:::transformNode
+
+ PtCheck -->|"L/R Borders"| PtLR["L/R Borders Transformation:
+ ▯ → Coverage-based cropping
+ ▯ → Remove excess based on content density
+ ▯ → Result: Square from intelligent cropping"]:::transformNode
+
+ PtCheck -->|"U+L Borders"| PtUL["U+L Borders Transformation:
+ ▯ → Pad left and right only
+ ▯ → Maintain vertical borders
+ ▯ → Result: Square with preserved content"]:::transformNode
+
+ PtCheck -->|"One Border"| PtOne["Single Border Transformation:
+ ▯ → Two-step padding process
+ ▯ → Step 1: Pad width toward square
+ ▯ → Result: Balanced square output"]:::transformNode
+
+ PtCheck -->|"Default Case"| PtDefault["Default Transformation:
+ ▯ → Two-step padding process
+ ▯ → Step 1: Add horizontal padding
+ ▯ → Result: Centered square with side padding"]:::transformNode
+
+ PtShoes --> PtOutput["□ Final Square Output"]:::portraitNode
+ PtHead --> PtOutput
+ PtLR --> PtOutput
+ PtUL --> PtOutput
+ PtOne --> PtOutput
+ PtDefault --> PtOutput
+ end
+
+ SqOutput --> Centering
+ LsOutput --> Centering
+ PtOutput --> Centering
+
+ Centering["Final Object Centering
+ • Analyze alpha channel boundaries
+ • Calculate product midpoint
+ • Shift horizontally to center
+ • Maintain padding relationships"]:::processNode
+ Centering --> FinalOutput
+
+ FinalOutput[/"□ Final Processed Square Image
+ Consistently sized and centered"/]:::imageNode
+
+ subgraph BorderDetectionDetails["Border Detection Algorithm Details"]
+ BorderParams["Detection Parameters:
+ COVERAGE_THRESHOLD = 0.25
+ FEATHER_THRESHOLD_MIN = 0.3
+ FEATHER_THRESHOLD_MAX = 0.7"]:::borderNode
+
+ CoverageAnalysis["Coverage Analysis:
+ • Left Border: Vertical edge pattern
+ • Right Border: Opposite vertical edge
+ • Upper Border: Top horizontal edge
+ • Lower Border: Bottom horizontal edge"]:::borderNode
+
+ FeatheringAnalysis["Feathering Analysis:
+ • Hard Edges: Sharp transitions
+ • Soft Edges: Gradual transitions
+ • Feathering Ratio: Transition zone proportion"]:::borderNode
+
+ Classification["Classification Criteria:
+ Border = Straight Line when:
+ • Coverage > COVERAGE_THRESHOLD
+ • Feathering outside MIN-MAX range"]:::borderNode
+
+ BorderParams --> CoverageAnalysis --> FeatheringAnalysis --> Classification
+ end
+
+ BorderDetect -.-> BorderDetectionDetails
+
+ subgraph TransformationExamples["Visual Transformation Examples"]
+ ExampleSquare["Square Examples:
+ □ 1000×1000 → □ 1150×1150 (universal pad)
+ □ 1000×1000 → □ 1000×1000 (preserved)"]:::squareNode
+
+ ExampleLandscape["Landscape Examples:
+ ▭ 1200×800 → ▭ 1200×1200 (height pad) → □ 1200×1200
+ ▭ 1400×900 → Analysis → □ 1200×1200 (coverage crop)"]:::landscapeNode
+
+ ExamplePortrait["Portrait Examples:
+ ▯ 800×1200 → ▯ 1200×1200 (width pad) → □ 1200×1200
+ ▯ 600×1000 → ▯ 1000×1000 (side pad) → □ 1000×1000"]:::portraitNode
+ end
+
+ FinalOutput -.-> TransformationExamples
+
+ subgraph PaddingStrategies["Padding Strategy Implementation"]
+ UniversalPadding["Universal Padding:
+ UNIVERSAL_PAD_RATIO = 0.075
+ Applied as 7.5% of image dimension
+ 1000px image = 75px padding per side"]:::borderNode
+
+ TwoStepPadding["Two-Step Padding:
+ Step 1: Pad longer dimension toward square
+ Step 2: Equalize shorter dimension
+ Step 3: Complete perfect square formation"]:::borderNode
+
+ CoverageCropping["Coverage-Based Cropping:
+ Step 1: Analyze content distribution
+ Step 2: Remove excess based on density
+ Step 3: Apply minimal padding
+ Step 4: Achieve square format"]:::borderNode
+
+ UniversalPadding --> TwoStepPadding --> CoverageCropping
+ end
+
+ SquareFlow -.-> PaddingStrategies
+ LandscapeFlow -.-> PaddingStrategies
+ PortraitFlow -.-> PaddingStrategies
Binary file (5.97 kB). View file
 
src/processing/image_download/flow_diagram.mermaid ADDED
File without changes
src/processing/image_download/image_download.py ADDED
Binary file (7.03 kB). View file
 
src/processing/remove_background/flow_diagram.mermaid ADDED
@@ -0,0 +1,349 @@
+ flowchart TD
+ classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+ classDef imageNode fill:#d5e8d4,stroke:#82b366,color:#000
+ classDef modelNode fill:#ffe6cc,stroke:#d79b00,color:#000
+ classDef paramNode fill:#fff2cc,stroke:#d6b656,color:#000
+ classDef analysisNode fill:#e1d5e7,stroke:#9673a6,color:#000
+ classDef scaleNode fill:#ffa50033,stroke:#ffa500,color:#000
+ classDef configNode fill:#f0f8ff,stroke:#4682b4,color:#000
+ classDef postProcessNode fill:#e1d5e7,stroke:#9673a6,color:#000
+ classDef componentNode fill:#ffb6c1,stroke:#c71585,color:#000
+
+ Input[/"Downloaded Image Content
+ (_download_content)"/]:::imageNode --> Stage1
+
+ Stage1["Stage 1: Preprocessing & Analysis
+ • ImageOps.exif_transpose()
+ • RGB conversion
+ • Dynamic size configuration
+ • Image analysis & enhancement"]:::processNode --> Stage2
+
+ Stage2["Stage 2: Size-Grouped Model Application
+ • Group by identical dimensions
+ • Multi-scale tensor preparation
+ • RMBG neural network inference
+ • Scale-specific processing"]:::processNode --> Stage3
+
+ Stage3["Stage 3: Advanced Post-Processing
+ • guided_filter() edge preservation
+ • Morphological operations pipeline
+ • Bilateral filtering
+ • Gaussian blur smoothing"]:::processNode --> Stage4
+
+ Stage4["Stage 4: Component Analysis & Selection
+ • Connected component detection
+ • Left-side preference logic
+ • Area-based optimization
+ • Best component selection"]:::postProcessNode --> Stage5
+
+ Stage5["Stage 5: Final Composition
+ • final_pad_sq() processing
+ • Content cleanup
+ • Context update (pil_img)"]:::processNode --> Output
+
+ Output[/"RGBA Image with Transparent Background"/]:::imageNode
+
+ subgraph ImageAnalysis["Enhanced Image Analysis Pipeline"]
+ BlueDetect["detect_blue_background()
+ HSV: [100,100,80] - [130,255,255]
+ Threshold: >25% coverage"]:::analysisNode
+
+ SkinDetect["detect_skin_tones()
+ HSV ranges for diverse skin tones
+ Threshold: >10% coverage"]:::analysisNode
+
+ DenimDetect["detect_denim()
+ HSV: [80,30,30] - [130,220,230]
+ Additional range: [85,15,100] - [115,130,230]
+ Threshold: >12% coverage"]:::analysisNode
+
+ OutdoorDetect["detect_outdoor_scene()
+ Green: [35,40,40] - [85,255,255]
+ Sky: [90,40,150] - [130,120,255]
+ Threshold: >15% coverage"]:::analysisNode
+
+ DiffCheck["analyze_image_difficulty()
+ contrast = std(histogram * pixel_values)
+ edge_ratio = mean(canny_edges > 0)
+ Difficult: contrast < 50 AND edge_ratio < 0.05"]:::analysisNode
+
+ ApplyEnhancements["Context-Aware Enhancement:
+ • Base: RBC_CONTRAST_FACTOR (1.25)
+ • Outdoor: factor * 1.1
+ • Person: factor * 0.75
+ • Sharpness: RBC_SHARPNESS_FACTOR (1.15)
+ • Edge enhancement for difficult images"]:::analysisNode
+
+ BlueDetect --> SkinDetect --> DenimDetect --> OutdoorDetect --> DiffCheck --> ApplyEnhancements
+ end
+
+ Stage1 -.-> ImageAnalysis
+
+ subgraph DynamicSizing["Dynamic Size Configuration System v2"]
+ SizeDetection["determine_size_config()
+ max_dim = max(width, height)
+ SIZE_CONFIG_SCALE = [1024, 1536]
+ • < 1024px → 'small' config
+ • 1024-1536px → 'medium' config
+ • > 1536px → 'large' config"]:::scaleNode
+
+ OptimalScale["calculate_optimal_scale()
+ if max_original <= target_side: return 1.0
+ else: return (target_side * 0.98) / max_original
+ Prevents unnecessary upscaling"]:::scaleNode
+
+ ConfigMapping["SIZE_CONFIGS Mapping:
+ Small: final_side=1024, scales=[2.0,1.5,1.0,0.5]
+ Medium: final_side=1536, scales=[1.25,1.0,0.75,0.5]
+ Large: final_side=2048, scales=[1.0,0.75,0.5,0.25]"]:::scaleNode
+
+ SizeDetection --> OptimalScale --> ConfigMapping
+ end
+
+ Stage1 -.-> DynamicSizing
+
+ subgraph SizeGroupedProcessing["Size-Grouped RMBG Processing v16_balanced"]
+ GroupBySize["Group Tensors by Size:
+ • Create size_key: 'WxH' format
+ • Group tensors with identical dimensions
+ • Separate metas and indices by size
+ • Prevent concatenation errors"]:::scaleNode
+
+ PrepareScales["Scale Preparation per Size Group:
+ For each scale in RBC_SCALES:
+ • Resize enhanced image
+ • Apply rmbg_trans normalization
+ • Create tensor batch"]:::scaleNode
+
+ ModelInference["Model Inference per Size Group:
+ • Batch tensor to device (same sizes only)
+ • RMBG model forward pass
+ • Extract logits (handle list/tuple)
+ • Apply sigmoid for probabilities"]:::modelNode
+
+ CombineResults["Probability Combination:
+ • Weight scale contributions
+ • Merge probability maps
+ • Apply THRESH (0.42)
+ • Generate binary mask"]:::scaleNode
+
+ GroupBySize --> PrepareScales --> ModelInference --> CombineResults
+
+ subgraph ScaleOptimization["Scale-Specific Processing"]
+ SmallConfig["Small Images (< 1024px):
+ final_side: 1024px
+ scales: [2.0, 1.5, 1.0, 0.5]
+ • 2.0x: Fine detail capture
+ • 1.5x: Enhanced edges
+ • 1.0x: Reference
+ • 0.5x: Context"]:::scaleNode
+
+ MediumConfig["Medium Images (1024-1536px):
+ final_side: 1536px
+ scales: [1.25, 1.0, 0.75, 0.5]
+ • 1.25x: Slight enhancement
+ • 1.0x: Native resolution
+ • 0.75x: Noise reduction
+ • 0.5x: Fast context"]:::scaleNode
+
+ LargeConfig["Large Images (> 1536px):
+ final_side: 2048px
+ scales: [1.0, 0.75, 0.5, 0.25]
+ • 1.0x: Full resolution
+ • 0.75x: Balanced quality
+ • 0.5x: Efficient processing
+ • 0.25x: Overall structure"]:::scaleNode
+ end
+
+ ModelInference -.-> ScaleOptimization
+ end
+
+ Stage2 -.-> SizeGroupedProcessing
+
+ subgraph MaskRefinement["Advanced Mask Refinement Pipeline"]
+ GuidedFilter["guided_filter()
+ DO_GUIDED_FILTER = True
+ radius: 4, eps: 0.01
+ Edge-aware smoothing"]:::processNode
+
+ MorphClose["cv2.morphologyEx(MORPH_CLOSE)
+ MORPH_CLOSE_ITER = 1
+ MORPH_KERNEL_SIZE = (3,3)
+ Fill small mask holes"]:::processNode
+
+ MorphOpen["cv2.morphologyEx(MORPH_OPEN)
+ MORPH_OPEN_ITER = 1
+ Remove isolated pixels"]:::processNode
+
+ Erosion["cv2.erode()
+ EROSION_ITER = 1
+ Shrink mask boundaries"]:::processNode
+
+ BilateralFilter["cv2.bilateralFilter()
+ USE_BILATERAL = True
+ Parameters: (9, 75, 75)
+ Edge-preserving noise reduction"]:::processNode
+
+ GaussianBlur["cv2.GaussianBlur()
+ GAUSSIAN_KERNEL_SIZE = (7,7)
+ Final edge smoothing"]:::processNode
+
+ GuidedFilter --> MorphClose --> MorphOpen --> Erosion --> BilateralFilter --> GaussianBlur
+ end
+
+ Stage3 -.-> MaskRefinement
+
+ subgraph ComponentAnalysis["Component Analysis with Left-Side Selection"]
+ AlphaAnalysis["Alpha Channel Analysis:
+ • Convert final_rgba alpha to numpy array
+ • Apply scipy.label() for connected components
+ • Count and identify separate regions"]:::componentNode
+
+ BoundingBoxCalc["Bounding Box Calculation:
+ • Use find_objects() to get region coordinates
+ • Calculate sy, ey, sx, ex for each component
+ • Crop sub-images for area analysis"]:::componentNode
+
+ SpatialCategorization["Spatial Categorization:
+ • Calculate center_x = (sx + ex) / 2.0
+ • Left components: center_x < image_width / 2
+ • Other components: center_x >= image_width / 2
+ • Store area and sub_img for each"]:::componentNode
+
+ SelectionLogic["Selection Logic:
+ if left_components AND other_components:
+ best_crop = max(left_components, key=area)
+ else:
+ all_components = left + other
+ best_crop = max(all_components, key=area)
+ fallback: use original final_rgba"]:::componentNode
+
+ DetailedLogging["Enhanced Logging:
+ • num_components: total detected
+ • split_component_boxes: coordinates & areas
+ • passed_image_dimensions: final selection
+ • time_seconds: processing duration"]:::componentNode
+
+ AlphaAnalysis --> BoundingBoxCalc --> SpatialCategorization --> SelectionLogic --> DetailedLogging
+ end
+
+ Stage4 -.-> ComponentAnalysis
+
+ subgraph CalibrationV16["Calibration v16_balanced Parameters"]
+ ImageParams["Image Enhancement:
+ • RBC_CONTRAST_FACTOR = 1.25
+ • RBC_SHARPNESS_FACTOR = 1.15
+ • PAD_COLOR = '#ffffff'"]:::paramNode
+
+ ThresholdParams["Threshold Settings:
+ • THRESH = 0.42 (primary)
+ • RESCUE_THRESH = 0.20 (fallback)
+ • Balanced precision/recall"]:::paramNode
+
+ MorphParams["Morphological Settings:
+ • MORPH_KERNEL_SIZE = (3,3)
+ • MORPH_CLOSE_ITER = 1
+ • MORPH_OPEN_ITER = 1
+ • EROSION_ITER = 1"]:::paramNode
+
+ FilterParams["Filter Configuration:
+ • GAUSSIAN_KERNEL_SIZE = (7,7)
+ • DO_GUIDED_FILTER = True
+ • USE_BILATERAL = True
+ • FILL_HOLES = False"]:::paramNode
+
+ BatchParams["Batch Settings:
+ • MAX_IMAGES_PER_BATCH = 4
+ • CALIBRATION_VERSION = 'v16_balanced'
+ • SIZE_CONFIG_SCALE = [1024, 1536]"]:::paramNode
+
+ ImageParams --> ThresholdParams --> MorphParams --> FilterParams --> BatchParams
+ end
+
+ SizeGroupedProcessing -.-> CalibrationV16
+ MaskRefinement -.-> CalibrationV16
+ ComponentAnalysis -.-> CalibrationV16
+
+ subgraph ErrorHandling["Error Handling & Performance"]
+ SkipLogic["Skip Condition Handling:
+ • Check skip_run flag
+ • Check skip_processing flag
+ • Validate _download_content existence"]:::processNode
+
+ SizeGroupHandling["Size Group Error Handling:
+ • Isolate errors per size group
+ • Continue processing other size groups
+ • Detailed size-specific error logging"]:::processNode
+
+ MemoryMgmt["Memory Management:
+ • Immediate _download_content cleanup
+ • Tensor device management per size group
+ • Component analysis memory optimization"]:::processNode
+
+ PerformanceLog["Performance Logging:
+ • Processing time tracking per stage
+ • Success/error/skip counters per size group
+ • Component analysis metrics
+ • Size configuration logging"]:::processNode
+
+ SkipLogic --> SizeGroupHandling --> MemoryMgmt --> PerformanceLog
+ end
+
+ Stage1 -.-> ErrorHandling
+ Stage2 -.-> ErrorHandling
+ Stage4 -.-> ErrorHandling
+ Stage5 -.-> ErrorHandling
+
+ subgraph BatchProcessing["Enhanced Batch Processing Flow"]
+ PreprocessBatch["preprocess_images_batch()
+ • Load from _download_content
+ • Size config determination using SIZE_CONFIG_SCALE
+ • Image analysis & enhancement
+ • Error isolation per context"]:::processNode
+
+ ModelBatch["apply_model_batch() with Size Grouping
+ • Group tensors by identical dimensions
+ • Process each size group independently
+ • Device placement & precision per group
+ • Component analysis per image"]:::modelNode
+
+ PostprocessBatch["Batch Post-processing:
+ • Mask refinement per image
+ • Component selection per image
+ • Performance summary per size group
+ • Context updates with selected components"]:::processNode
+
+ PreprocessBatch --> ModelBatch --> PostprocessBatch
+ end
+
+ Stage1 -.-> BatchProcessing
+ Stage2 -.-> BatchProcessing
+ Stage3 -.-> BatchProcessing
+ Stage4 -.-> BatchProcessing
+
+ subgraph TensorSizeHandling["Tensor Size Compatibility Solution"]
+ SizeDetection2["Image Size Detection:
+ • Extract width x height from enhanced image
+ • Create size_key string (e.g., '1536x1536')
+ • Group compatible images together"]:::configNode
+
+ GroupingStrategy["Grouping Strategy:
+ tensors_by_size = {}
+ metas_by_size = {}
+ idxs_by_size = {}
+ • Separate data structures per size"]:::configNode
+
+ ProcessingLoop["Processing Loop:
+ for size_key, tensors in tensors_by_size.items():
+ batch = torch.cat(tensors) # All same size!
+ # Process batch normally"]:::configNode
+
+ ErrorPrevention["Error Prevention:
+ • No more 'Sizes of tensors must match'
+ • Each size group processes independently
+ • Maintains batch efficiency within groups"]:::configNode
+
+ SizeDetection2 --> GroupingStrategy --> ProcessingLoop --> ErrorPrevention
+ end
+
+ Stage2 -.-> TensorSizeHandling
Binary file (47.2 kB). View file
 
src/processing/return_images/flow_diagram.mermaid ADDED
@@ -0,0 +1,61 @@
+ flowchart TD
+ classDef processNode fill:#dae8fc,stroke:#6c8ebf,color:#000
+ classDef dataNode fill:#d5e8d4,stroke:#82b366,color:#000
+ classDef colorNode fill:#ffe6cc,stroke:#d79b00,color:#000
+ classDef typeNode fill:#e1d5e7,stroke:#9673a6,color:#000
+ classDef outputNode fill:#f8cecc,stroke:#b85450,color:#000
+
+ Input[/"Processed Image"/]:::dataNode --> Step1
+
+ Step1["Step 1: Color Extraction & Mapping
+ Sample non-transparent pixels,
+ quantize colors, determine dominant color"]:::processNode --> Step2
+
+ Step2["Step 2: Image Type Detection
+ Analyze padding information
+ and detection keywords"]:::processNode --> Step3
+
+ Step3["Step 3: WebP Conversion & Base64 Encoding
+ Check for artifacts, convert format,
+ encode for API response"]:::processNode --> Output
+
+ Output[/"Final Response Data"/]:::dataNode
+
+ subgraph ColorAnalysis["Color Classification Logic"]
+ RGBToHSL["RGB → HSL conversion"]:::processNode
+
+ MonochromeCheck{"Is monochrome?
+ (low saturation)"}:::colorNode
+ RGBToHSL --> MonochromeCheck
+
+ MonochromeCheck -->|"Yes"| GreyscaleGrouping{"Lightness values"}:::colorNode
+ GreyscaleGrouping -->|"< 0.28"| Black["Black"]:::colorNode
+ GreyscaleGrouping -->|"0.28-0.88"| Grey["Grey"]:::colorNode
+ GreyscaleGrouping -->|"> 0.88"| White["White"]:::colorNode
+
+ MonochromeCheck -->|"No"| BlueRangeCheck{"Is blue range?
+ (160° ≤ H < 260°)"}:::colorNode
+ BlueRangeCheck -->|"Yes"| BlueShades{"Lightness values"}:::colorNode
+ BlueShades -->|"< 0.40"| DarkBlue["Dark Blue"]:::colorNode
+ BlueShades -->|"0.40-0.65"| Blue["Blue"]:::colorNode
+ BlueShades -->|"> 0.65"| LightBlue["Light Blue"]:::colorNode
+
+ BlueRangeCheck -->|"No"| PrimaryColors{"Primary color groups
+ based on hue angle"}:::colorNode
+ PrimaryColors -->|"0°-70°"| YellowGroup["Yellow Group"]:::colorNode
+ PrimaryColors -->|"70°-160°"| GreenGroup["Green Group"]:::colorNode
+ PrimaryColors -->|"260°-360°"| RedGroup["Red Group"]:::colorNode
+ end
+
+ Step1 -.-> ColorAnalysis
+
+ subgraph TypeAssignment["Image Type Assignment"]
+ PaddingCheck{"All sides padded?"}:::typeNode
+ PaddingCheck -->|"Yes"| PaddedProduct["padded_product"]:::typeNode
+ PaddingCheck -->|"No"| HeadShoesCheck{"Head or shoes
+ detected?"}:::typeNode
+ HeadShoesCheck -->|"No"| DetailType["detail"]:::typeNode
+ HeadShoesCheck -->|"Yes"| DefaultType["none"]:::typeNode
+ end
+
+ Step2 -.-> TypeAssignment
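
A minimal Python sketch of the "Color Classification Logic" above. The lightness breakpoints (0.28 / 0.88), the 160°-260° blue range, and the 0.40 / 0.65 blue shades come from the diagram; the saturation cut-off for "monochrome" and the name classify_color are assumptions, not the code in return_images.py.

# Sketch only: hue/lightness breakpoints from the diagram, saturation cut-off assumed.
import colorsys

MONO_SATURATION = 0.12  # assumed "low saturation" threshold


def classify_color(r: int, g: int, b: int) -> str:
    """Map an RGB dominant colour to a coarse colour name."""
    h, l, s = colorsys.rgb_to_hls(r / 255.0, g / 255.0, b / 255.0)
    hue_deg = h * 360.0

    if s < MONO_SATURATION:                  # monochrome branch
        if l < 0.28:
            return "black"
        return "white" if l > 0.88 else "grey"

    if 160.0 <= hue_deg < 260.0:             # blue range branch
        if l < 0.40:
            return "dark blue"
        return "light blue" if l > 0.65 else "blue"

    if hue_deg < 70.0:                       # primary colour groups by hue angle
        return "yellow group"
    if hue_deg < 160.0:
        return "green group"
    return "red group"


if __name__ == "__main__":
    print(classify_color(20, 20, 25))     # black
    print(classify_color(30, 60, 160))    # dark blue
    print(classify_color(180, 40, 160))   # red group (magenta/red hues)
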
src/processing/return_images/return_images.py ADDED
Binary file (14.4 kB). View file
 
src/processing/under_development/under_development.py ADDED
Binary file (19.8 kB). View file
 
src/utils/__init__.py ADDED
Binary file (1.29 kB). View file
 
src/utils/context_utils.py ADDED
Binary file (11.2 kB). View file
 
src/utils/logging_utils.py ADDED
Binary file (10.3 kB). View file
 
tests/__init__.py ADDED
Binary file (177 Bytes). View file
 
tests/config.py ADDED
Binary file (402 Bytes). View file
 
tests/test_full_pipeline.py ADDED
Binary file (6.97 kB). View file