Carlexxx committed on 2025-09-23 16:31:02
Commit 691e419 · 1 Parent(s): ed2855b

[Automatic Synchronization] - 2025-09-23 16:31:02

Files changed (1)
  1. Dockerfile +3 -3
Dockerfile CHANGED

@@ -60,7 +60,7 @@ RUN ln -sf /usr/bin/python3.10 /usr/bin/python3 && \
 # =============================================================================

 # 1. Install PyTorch 2.8.0 and build tools
-RUN pip install \
+RUN pip -v install \
     torch>=2.8.0+cu128 \
     torchvision \
     torchaudio \
@@ -82,7 +82,7 @@ RUN pip install \
 # This wheel is compatible with PyTorch 2.8.0 and CUDA 12.x
 RUN git clone https://github.com/Dao-AILab/flash-attention && \
     cd flash-attention/hopper && \
-    python setup.py install && \
+    pip -v setup.py install && \
     export PYTHONPATH=$PWD && \
     pytest -q -s test_flash_attn.py

@@ -94,7 +94,7 @@ RUN git clone https://github.com/NVIDIA/apex.git && \
     cd .. && rm -rf apex

 # 4. Install the custom LTX kernels
-RUN pip install --no-build-isolation git+https://github.com/Lightricks/LTX-Video-Q8-Kernels.git
+RUN pip -v install --no-build-isolation git+https://github.com/Lightricks/LTX-Video-Q8-Kernels.git

 # =============================================================================
 # INSTALLATION OF THE REMAINING DEPENDENCIES
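A note on the first hunk: RUN here uses the shell form, so the unquoted specifier torch>=2.8.0+cu128 is split at the ">" and treated as an output redirection; pip is effectively asked to install an unpinned torch while "=2.8.0+cu128" becomes a redirect target. Below is a minimal sketch of a quoted variant, not part of this commit; the cu128 wheel index URL is an assumption.

# Sketch only, not from this commit: quote the specifier so the shell does not
# treat ">" as a redirection, and point pip at a CUDA 12.8 wheel index (assumed URL).
RUN pip install -v \
    "torch>=2.8.0" \
    torchvision \
    torchaudio \
    --index-url https://download.pytorch.org/whl/cu128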
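On the second hunk: pip does not recognize setup.py as a subcommand, so the added line pip -v setup.py install would fail at build time, whereas the removed line invoked setuptools directly. A hedged sketch of that step, essentially the old side of the hunk:

# Sketch only, not from this commit: build the Hopper flash-attention kernels via setup.py,
# since "pip -v setup.py install" is not a valid pip invocation.
RUN git clone https://github.com/Dao-AILab/flash-attention && \
    cd flash-attention/hopper && \
    python setup.py install && \
    export PYTHONPATH=$PWD && \
    pytest -q -s test_flash_attn.py

In the third hunk, --no-build-isolation makes the LTX-Video-Q8-Kernels build compile against the torch already installed in the image rather than an isolated build environment, which is why the PyTorch step runs first.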