Carlexxx committed on
Commit 802ea4a · 1 Parent(s): 5bbabfc

[Automatic Synchronization] - 2025-09-23 15:50:50

Files changed (3)
  1. .gitignore +3 -1
  2. Dockerfile +59 -424
  3. requirements.txt +2 -8
.gitignore CHANGED
@@ -13,4 +13,6 @@ __pycache__/
  # Ignore logs and environment files
  *.log
  aduc_log.txt
- .env
+ .env
+
+ comid.sh
Dockerfile CHANGED
@@ -1,480 +1,115 @@
1
  # =============================================================================
2
- # DOCKERFILE - Complete AI Video Suite v2.0.0
3
- # Optimized for 8x NVIDIA L40S GPUs (384GB Total VRAM)
4
- # Production-Ready Multi-GPU Video Generation Suite
5
  # =============================================================================
6
 
 
7
  FROM nvidia/cuda:12.8.0-devel-ubuntu22.04
8
 
9
  # =============================================================================
10
- # METADATA AND LABELS
11
  # =============================================================================
12
-
13
- LABEL maintainer="Complete AI Video Suite Team"
14
- LABEL description="Multi-GPU AI Video Generation Suite with LTX FP8, Q8 Kernels, SeedVR, Wan2.2, VINCIE, MMAudio"
15
- LABEL version="2.0.0"
16
- LABEL build_date="2025-09-18"
17
- LABEL cuda_version="12.4.0"
18
  LABEL python_version="3.10"
19
- LABEL pytorch_version="2.8.0+cu128"
20
- LABEL architecture="amd64"
21
- LABEL gpu_optimized="8x_L40S"
22
- LABEL total_vram="384GB"
23
- LABEL license="MIT"
24
-
25
 
26
  # =============================================================================
27
- # ENVIRONMENT VARIABLES - PRODUCTION OPTIMIZED
28
  # =============================================================================
29
-
30
  ENV DEBIAN_FRONTEND=noninteractive
31
  ENV TZ=UTC
32
- ENV LC_ALL=C.UTF-8
33
  ENV LANG=C.UTF-8
34
-
35
- # Python optimization
36
  ENV PYTHONUNBUFFERED=1
37
  ENV PYTHONDONTWRITEBYTECODE=1
38
- ENV PYTHONIOENCODING=utf-8
39
  ENV PIP_NO_CACHE_DIR=1
40
- ENV PIP_DISABLE_PIP_VERSION_CHECK=0
41
 
42
- # CUDA optimizations for 8x L40S GPUs
43
  ENV NVIDIA_VISIBLE_DEVICES=all
44
- ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility,graphics
45
- ENV NVIDIA_REQUIRE_CUDA="cuda>=12.8"
46
- ENV CUDA_LAUNCH_BLOCKING=0
47
  ENV TORCH_CUDA_ARCH_LIST="8.9"
48
- ENV CUDA_CACHE_MAXSIZE=2147483648
49
-
50
- # Multi-GPU distributed training
51
- ENV NCCL_DEBUG=DEBUG
52
- ENV NCCL_TREE_THRESHOLD=1
53
- ENV NCCL_P2P_DISABLE=0
54
- ENV NCCL_IB_DISABLE=0
55
- ENV NCCL_NVLS_ENABLE=1
56
- ENV NCCL_CROSS_NIC=1
57
-
58
- # PyTorch optimizations
59
- ENV PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512,roundup_power2_divisions:16
60
- ENV TORCH_BACKENDS_CUDNN_BENCHMARK=1
61
- ENV TORCH_BACKENDS_CUDA_MATMUL_ALLOW_TF32=1
62
- ENV TORCH_BACKENDS_CUDNN_ALLOW_TF32=1
63
 
64
- # Application paths
65
  ENV APP_HOME=/app
66
- ENV HF_HOME=/app/model_cache
67
- ENV HF_HUB_CACHE=/app/model_cache/hub
68
- ENV TRANSFORMERS_CACHE=/app/model_cache/transformers
69
- ENV TORCH_HOME=/app/model_cache/torch
70
- ENV TMPDIR=/app/tmp
71
- ENV OUTPUT_DIR=/app/outputs
72
-
73
- # CPU optimizations
74
- ENV OMP_NUM_THREADS=8
75
- ENV MKL_NUM_THREADS=8
76
- ENV NUMEXPR_NUM_THREADS=8
77
- ENV OPENBLAS_NUM_THREADS=8
78
-
79
- # =============================================================================
80
- # SYSTEM PACKAGE INSTALLATION
81
- # =============================================================================
82
-
83
- RUN apt-get update && apt-get install -y \
84
- build-essential \
85
- cmake \
86
- ninja-build \
87
- pkg-config \
88
- python3.11 \
89
- python3.11-dev \
90
- python3.11-distutils \
91
- python3-pip \
92
- python3.11-venv \
93
- git \
94
- git-lfs \
95
- curl \
96
- wget \
97
- rsync \
98
- unzip \
99
- zip \
100
- ffmpeg \
101
- libavcodec-dev \
102
- libavformat-dev \
103
- libavutil-dev \
104
- libswscale-dev \
105
- libgl1-mesa-glx \
106
- libgl1-mesa-dev \
107
- libglib2.0-0 \
108
- libsm6 \
109
- libxext6 \
110
- libxrender-dev \
111
- libgomp1 \
112
- libglu1-mesa \
113
- libglu1-mesa-dev \
114
- htop \
115
- nvtop \
116
- tree \
117
- vim \
118
- nano \
119
- tmux \
120
- screen \
121
- net-tools \
122
- iproute2 \
123
- iotop \
124
- && apt-get autoremove -y \
125
- && apt-get clean \
126
- && rm -rf /var/lib/apt/lists/* \
127
- && rm -rf /tmp/* \
128
- && rm -rf /var/tmp/*

  # =============================================================================
- # PYTHON SETUP AND OPTIMIZATION
  # =============================================================================

  RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
      ln -sf /usr/bin/python3.10 /usr/bin/python && \
- python3 -m pip install --upgrade pip==24.2 setuptools==70.0.0 wheel==0.43.0
-
- RUN pip install \
-     packaging \
-     ninja \
-     cmake \
-     pybind11 \
-     scikit-build \
-     cython \
-     numpy>=1.24.3

  # =============================================================================
- # PYTORCH AND CUDA LIBRARIES
  # =============================================================================

  RUN pip install \
-     torch>=2.8.0+cu128 \
-     torchvision \
-     torchaudio \
-     --index-url https://download.pytorch.org/whl/cu128

- RUN pip install torchao

- RUN python3 -c "import torch; print(f'PyTorch: {torch.__version__}'); print(f'CUDA available: {torch.cuda.is_available()}'); print(f'CUDA version: {torch.version.cuda}'); print(f'Device count: {torch.cuda.device_count()}')"

  # =============================================================================
- # AI/ML LIBRARIES INSTALLATION
  # =============================================================================

- WORKDIR $APP_HOME
- COPY . .
-
  RUN pip install -r requirements.txt

  # =============================================================================
- # APPLICATION STRUCTURE SETUP
  # =============================================================================
-
- RUN mkdir -p \
-     $APP_HOME/installer \
-     $APP_HOME/monitoring \
-     $APP_HOME/tools \
-     $APP_HOME/configs \
-     $APP_HOME/build_cache \
-     $APP_HOME/model_cache/hub \
-     $APP_HOME/model_cache/transformers \
-     $APP_HOME/model_cache/torch \
-     $APP_HOME/model_cache/ltx_models \
-     $APP_HOME/tmp \
-     $APP_HOME/outputs \
-     $APP_HOME/logs \
-     && chmod -R 755 $APP_HOME

  # =============================================================================
- # DOWNLOAD PREREQUISITE FILES
  # =============================================================================
-
  COPY . .

- COPY configs/ ./configs/
-
- RUN chmod +x start.sh && \
-     find . -name "*.sh" -exec chmod +x {} \; && \
-     find . -name "*.py" -exec chmod +x {} \;
-
- # =============================================================================
- # CREATE OPTIMIZATION PATCHES AND TOOLS (FIXED SYNTAX)
- # =============================================================================
-
- # =============================================================================
- # CONFIGURATION FILES
- # =============================================================================
-
- # Create default LTX FP8 configuration
- RUN cat <<'YAML_CONFIG' > $APP_HOME/configs/ltxv-13b-0.9.8-distilled-fp8.yaml
- # LTX Video FP8 Distilled Configuration
- # Optimized for 8x L40S GPUs (384GB VRAM)
- model:
-   target: "ltx_video.models.transformer_temporal.TransformerTemporalModel"
-   params:
-     transformer_additional_kwargs:
-       attention_mode: "sdpa"
-       enable_flash_attention: true
-       memory_efficient_attention: true
-     network_config:
-       model_name: "ltxv-13b-0.9.8-distilled-fp8"
-       fp8_optimization: true
-       quantization: "fp8"
-       ada_optimized: true
-       multi_gpu_support: true
-
- scheduler:
-   target: "diffusers.LTXVideoScheduler"
-   params:
-     num_train_timesteps: 1000
-     beta_start: 0.0001
-     beta_end: 0.02
-     beta_schedule: "scaled_linear"
-
- vae:
-   target: "diffusers.AutoencoderKLLTXVideo"
-   params:
-     force_upcast: false
-     enable_slicing: true
-     enable_tiling: true
-
- text_encoder:
-   target: "transformers.T5EncoderModel"
-   params:
-     torch_dtype: "bfloat16"
-
- pipeline:
-   target: "diffusers.LTXVideoPipeline"
-   params:
-     scheduler_type: "LTXVideoScheduler"
-     num_inference_steps: 4
-     guidance_scale: 1.0
-     height: 704
-     width: 1216
-     num_frames: 121
-     fps: 30
-     enable_memory_efficient_attention: true
-     enable_cpu_offload: false
-     enable_model_cpu_offload: false
-     max_batch_size: 4
-
- multi_gpu:
-   enabled: true
-   num_gpus: 8
-   distribution_strategy: "data_parallel"
-   load_balancing: "memory_aware"
-   synchronize_gpus: true
- YAML_CONFIG
-
- # Create multi-GPU optimization config
- RUN cat <<'GPU_CONFIG' > $APP_HOME/configs/multi_gpu_config.yaml
- # Multi-GPU Configuration for 8x L40S Setup
- system:
-   gpu_count: 8
-   total_vram: "384GB"
-   compute_capability: "8.9"
-   architecture: "ADA_LOVELACE"
-
- distributed_training:
-   backend: "nccl"
-   init_method: "env://"
-   world_size: 8
-   rank: 0
-
- memory_optimization:
-   gradient_checkpointing: true
-   mixed_precision: "bf16"
-   max_batch_size_per_gpu: 8
-   gradient_accumulation_steps: 4
-   memory_fraction: 0.95
-
- performance:
-   torch_compile: true
-   cuda_graphs: true
-   tensor_cores: true
-   flash_attention: true
-   memory_efficient_attention: true
-
- load_balancing:
-   strategy: "memory_aware"
-   rebalance_interval: 30
-   utilization_threshold: 0.8
-
- thermal_management:
-   max_temperature: 83
-   fan_curve: "aggressive"
-   throttle_threshold: 80
-   monitoring_interval: 10
-
- power_management:
-   max_power_limit: 300
-   efficiency_mode: false
-   power_monitoring: true
- GPU_CONFIG
-
- # =============================================================================
- # HEALTH CHECK SCRIPT
- # =============================================================================
-
- RUN cat <<'HEALTHCHECK_SCRIPT' > $APP_HOME/healthcheck.py
- #!/usr/bin/env python3
- """
- Health check script for Complete AI Video Suite
- """
- import sys
- import requests
- import torch
- import time
- import logging
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- def check_cuda():
-     """Check CUDA availability and GPU status"""
-     if not torch.cuda.is_available():
-         logger.error("CUDA not available")
-         return False
-
-     gpu_count = torch.cuda.device_count()
-     logger.info(f"CUDA available with {gpu_count} GPUs")
-
-     for i in range(gpu_count):
-         try:
-             torch.cuda.set_device(i)
-             props = torch.cuda.get_device_properties(i)
-             memory_allocated = torch.cuda.memory_allocated() / 1024**3
-             memory_total = props.total_memory / 1024**3
-
-             logger.info(f"GPU {i}: {props.name} ({memory_allocated:.2f}GB/{memory_total:.1f}GB)")
-
-             x = torch.randn(100, 100, device=f'cuda:{i}')
-             y = torch.matmul(x, x)
-             torch.cuda.synchronize()
-
-         except Exception as e:
-             logger.error(f"GPU {i} test failed: {e}")
-             return False
-
-     return True
-
- def check_web_service():
-     """Check if web service is responding"""
-     try:
-         response = requests.get("http://localhost:7860/", timeout=10)
-         if response.status_code == 200:
-             logger.info("Web service is responding")
-             return True
-         else:
-             logger.error(f"Web service returned status code: {response.status_code}")
-             return False
-     except requests.RequestException as e:
-         logger.error(f"Web service check failed: {e}")
-         return False
-
- def main():
-     """Main health check routine"""
-     logger.info("Starting health check...")
-
-     if not check_cuda():
-         sys.exit(1)
-
-     if not check_web_service():
-         sys.exit(1)
-
-     logger.info("All health checks passed")
-     sys.exit(0)
-
- if __name__ == "__main__":
-     main()
- HEALTHCHECK_SCRIPT
-
- RUN chmod +x $APP_HOME/healthcheck.py
-
- # =============================================================================
- # USER SETUP AND SECURITY
- # =============================================================================
-
- RUN mkdir -p /etc/sudoers.d && \
-     useradd -m -u 1000 -s /bin/bash appuser && \
-     usermod -aG sudo appuser && \
      chown -R appuser:appuser $APP_HOME && \
-     echo "appuser ALL=(ALL) NOPASSWD: /usr/bin/nvidia-smi, /usr/bin/nvidia-ml-py" > /etc/sudoers.d/appuser

  USER appuser

- WORKDIR $APP_HOME
-
- # =============================================================================
- # RUNTIME CONFIGURATION
- # =============================================================================
-
- EXPOSE 7860 8001 8002 6006
-
- VOLUME ["/app/model_cache", "/app/outputs", "/app/logs", "/app/build_cache"]
-
- HEALTHCHECK --interval=60s --timeout=30s --start-period=300s --retries=3 \
-     CMD python3 /app/healthcheck.py
-
- # =============================================================================
- # FINAL SETUP AND ENTRY POINT
- # =============================================================================
-
- RUN cat <<'ENTRYPOINT_SCRIPT' > $APP_HOME/docker-entrypoint.sh
- #!/bin/bash
- set -euo pipefail
-
- echo "🚀 Complete AI Suite - Docker Container Starting..."
- echo "🐳 Container: $(hostname)"
- echo "👤 User: $(whoami)"
- echo "🎮 GPUs: $(nvidia-smi --list-gpus | wc -l || echo '0')"
-
- if command -v nvidia-smi >/dev/null 2>&1; then
-     echo "💾 CUDA Memory:"
-     nvidia-smi --query-gpu=memory.total,memory.used --format=csv,noheader,nounits | nl
- fi
-
- echo "🔧 Applying optimization patches..."
- python3 /app/tools/optimization_patch.py
-
- echo "📁 Setting up permissions..."
- chmod -R 755 /app/installer
- chmod -R 755 /app/monitoring
- chmod +x /app/start.sh
-
- mkdir -p /app/logs /app/outputs /app/tmp
- chmod 777 /app/logs /app/outputs /app/tmp
-
- echo "✅ Docker container initialization complete"
- echo "🚀 Starting Complete AI Video Suite..."
-
- exec /app/start.sh "$@"
- ENTRYPOINT_SCRIPT
-
- RUN chmod +x $APP_HOME/docker-entrypoint.sh
-
- ENTRYPOINT ["/app/docker-entrypoint.sh"]
-
- CMD ["--listen", "--multi-gpu", "--optimize"]
-
  # =============================================================================
- # FINAL METADATA
  # =============================================================================

- RUN echo "Complete AI Video Suite v2.0.0" > /app/VERSION && \
-     echo "Build Date: 2025-09-18T$(date +%H:%M:%S)" >> /app/VERSION && \
-     echo "CUDA: 12.4.1" >> /app/VERSION && \
-     echo "PyTorch: $(python3 -c 'import torch; print(torch.__version__)')" >> /app/VERSION && \
-     echo "Optimized for: 8x NVIDIA L40S GPUs" >> /app/VERSION
-
- LABEL org.opencontainers.image.title="Complete AI Video Suite"
- LABEL org.opencontainers.image.description="Production-ready multi-GPU video generation with LTX FP8, Q8 Kernels, and more"
- LABEL org.opencontainers.image.version="2.0.0"
- LABEL org.opencontainers.image.created="2025-09-18T17:42:00Z"
- LABEL org.opencontainers.image.revision="main"
- LABEL org.opencontainers.image.licenses="MIT"
  # =============================================================================
+ # DOCKERFILE - ADUC-SDR: AI Video Suite v4.1.0
+ # Base: CUDA 12.8.0 | PyTorch 2.8.0+cu121
+ # Optimized for Hugging Face Spaces with 8x NVIDIA L40S GPUs
  # =============================================================================

+ # Base image: CUDA 12.8.0
  FROM nvidia/cuda:12.8.0-devel-ubuntu22.04

  # =============================================================================
+ # METADATA
  # =============================================================================
+ LABEL maintainer="Carlos Rodrigues dos Santos & Development Partner"
+ LABEL description="ADUC-SDR: Production-Ready Multi-GPU AI Video Generation Suite with Wan2.2, SeedVR, LTX, MMAudio"
+ LABEL version="4.1.0"
+ LABEL cuda_version="12.8.0"
  LABEL python_version="3.10"
+ # The PyTorch wheel is built against CUDA Toolkit 12.1, but it is fully compatible
+ # with the newer driver stack (12.8) shipped in the base image.
+ LABEL pytorch_version="2.8.0+cu121"
+ LABEL gpu_optimized_for="8x_NVIDIA_L40S"

  # =============================================================================
+ # GLOBAL ENVIRONMENT VARIABLES
  # =============================================================================
  ENV DEBIAN_FRONTEND=noninteractive
  ENV TZ=UTC
  ENV LANG=C.UTF-8
+ ENV LC_ALL=C.UTF-8
  ENV PYTHONUNBUFFERED=1
  ENV PYTHONDONTWRITEBYTECODE=1
  ENV PIP_NO_CACHE_DIR=1
+ ENV PIP_DISABLE_PIP_VERSION_CHECK=1

+ # CUDA and build optimizations
  ENV NVIDIA_VISIBLE_DEVICES=all
+ ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
  ENV TORCH_CUDA_ARCH_LIST="8.9"
+ ENV MAX_JOBS=90

+ # Application paths
  ENV APP_HOME=/app
+ WORKDIR $APP_HOME

  # =============================================================================
+ # SYSTEM PACKAGES AND PYTHON 3.10
  # =============================================================================
+ RUN apt-get update && \
+     apt-get install -y --no-install-recommends \
+     build-essential cmake git git-lfs curl wget ffmpeg ninja-build \
+     python3.10 python3.10-dev python3.10-distutils python3-pip \
+     && apt-get clean && rm -rf /var/lib/apt/lists/*

  RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
      ln -sf /usr/bin/python3.10 /usr/bin/python && \
+     python3 -m pip install --upgrade pip

  # =============================================================================
+ # HIGH-PERFORMANCE LIBRARY INSTALLATION
  # =============================================================================

+ # 1. Install PyTorch 2.8.0 and build tooling
  RUN pip install \
+     torch==2.8.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121 \
+     packaging wheel ninja setuptools
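The cu121 wheel is paired here with the CUDA 12.8 base image, as the labels above note. A minimal sanity check in the spirit of the `python3 -c` verification the previous Dockerfile ran after its PyTorch install could look like the sketch below; it assumes it is executed inside the freshly built image, where GPUs are usually not visible during `docker build`.

```python
# Sketch: build-time sanity check for the PyTorch install.
# During `docker build` no GPU is normally attached, so only the wheel metadata
# can be confirmed here; the CUDA checks become meaningful at container runtime.
import torch

print(f"PyTorch: {torch.__version__}")                  # expected: 2.8.0+cu121
print(f"CUDA build: {torch.version.cuda}")              # toolkit the wheel was built against
print(f"CUDA available: {torch.cuda.is_available()}")   # usually False at build time
print(f"Device count: {torch.cuda.device_count()}")
```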
 
 
+ # 2. Install the pre-built Flash Attention wheel
+ # This is the fastest and most stable way to install it, avoiding a lengthy compilation.
+ # Note: per its filename, this wheel was built against CUDA 12.2 and PyTorch 2.3.0.
+ RUN pip install https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.8/flash_attn-2.5.8+cu122torch2.3.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
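Because the wheel filename encodes the CUDA/PyTorch ABI it was built for, an import probe catches a mismatch with the installed torch early. This is only a sketch of such a probe, assuming both packages are already installed at this point in the build.

```python
# Sketch: verify that the pre-built flash-attn wheel loads against the installed torch.
# An ABI mismatch between the wheel (built for torch 2.3.0) and torch 2.8.0 would
# typically surface here as an ImportError.
import torch

try:
    import flash_attn
    print(f"flash-attn {flash_attn.__version__} imported with torch {torch.__version__}")
except ImportError as err:
    raise SystemExit(f"flash-attn failed to import: {err}")
```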

+ # 3. Build and install NVIDIA Apex
+ RUN git clone https://github.com/NVIDIA/apex.git && \
+     cd apex && \
+     pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ && \
+     cd .. && rm -rf apex
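Apex only delivers its fused kernels when the `--cpp_ext`/`--cuda_ext` build succeeds; a probe for one of the compiled extension modules shows whether the CUDA extensions were actually built rather than just the pure-Python package. The extension module name `fused_layer_norm_cuda` is an assumption about the cloned Apex revision.

```python
# Sketch: confirm Apex built its CUDA extensions, not just the pure-Python package.
import importlib

import apex  # base package, installed from the cloned repository above

print("apex imported from:", apex.__file__)

# fused_layer_norm_cuda is one of the extension modules produced by --cuda_ext
# (assumption: the extension name is unchanged in the cloned revision).
importlib.import_module("fused_layer_norm_cuda")
print("fused_layer_norm_cuda extension loads")
```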

+ # 4. Install the custom LTX Q8 kernels
+ RUN pip install --no-build-isolation git+https://github.com/Lightricks/LTX-Video-Q8-Kernels.git

  # =============================================================================
+ # INSTALL THE REMAINING DEPENDENCIES
  # =============================================================================
+ COPY requirements.txt .

+ # Install the remaining packages from requirements.txt.
+ # The flash-attention line in that file is redundant now that the wheel is installed above, so it is best removed from the file.
  RUN pip install -r requirements.txt

  # =============================================================================
+ # CLONE AND INSTALL THE APPLICATION REPOSITORIES
  # =============================================================================
+ RUN git clone https://github.com/Lightricks/LTX-Video.git && cd LTX-Video && pip install -e .[inference]
+ RUN git clone https://github.com/Wan-Video/Wan2.2.git && cd Wan2.2 && pip install -r requirements.txt && pip install -r requirements_s2v.txt
+ RUN git clone https://github.com/bytedance-seed/SeedVR.git && cd SeedVR && pip install -r requirements.txt
+ RUN git clone https://github.com/hkchengrex/MMAudio.git && cd MMAudio && pip install -e .
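Of the four repositories, only LTX-Video and MMAudio are installed as importable packages (`pip install -e`); Wan2.2 and SeedVR just pull in their requirements and are used from their clone directories. A small smoke test under that assumption is sketched below; the module names `ltx_video` and `mmaudio` are inferred from each repository's package layout.

```python
# Sketch: import smoke test for the repositories installed with `pip install -e`.
# Wan2.2 and SeedVR only install requirements, so they are not importable packages
# and are exercised from their clone directories instead.
for name in ("ltx_video", "mmaudio"):
    try:
        module = __import__(name)
        print(f"{name}: OK ({module.__file__})")
    except ImportError as err:
        print(f"{name}: MISSING ({err})")
```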
 

  # =============================================================================
+ # COPY THE APPLICATION CODE AND SET PERMISSIONS
  # =============================================================================
  COPY . .

+ RUN useradd -m -u 1000 -s /bin/bash appuser && \
      chown -R appuser:appuser $APP_HOME && \
+     mkdir -p /app/deformes_workspace && chown -R appuser:appuser /app/deformes_workspace

  USER appuser

  # =============================================================================
+ # ENTRY POINT
  # =============================================================================
+ RUN chmod +x start.sh

+ ENTRYPOINT ["./start.sh"]
+ CMD ["gradio"]

requirements.txt CHANGED
@@ -1,14 +1,10 @@
- torch==2.6.0
- torchvision==0.21.0
- torchaudio==2.6.0
- opencv-python==4.9.0.80
+ opencv-python>=4.9.0.80
  torchao
  transformers
  accelerate
  safetensors
  einops
  sentencepiece
- git+https://github.com/hkchengrex/MMAudio.git@main
  git+https://github.com/huggingface/diffusers.git@main
  gradio>=5.23.1
  gradio[oauth]
@@ -41,9 +37,7 @@ isort
  pre-commit
  expecttest
  hypothesis
- numpy<2
+ numpy
  ninja
  psutil
  packaging
- https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
- #https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl
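With the hard pins (torch==2.6.0, numpy<2, the flash-attn wheel URL) moved out of requirements.txt, the versions of these packages are now decided by the Dockerfile and by pip's resolver. A small sketch to report what actually got installed, assuming it is run inside the built image:

```python
# Sketch: report the versions pip actually resolved for the packages whose pins
# were removed from requirements.txt in this commit.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("torch", "torchvision", "torchaudio", "numpy", "opencv-python"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")
```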