zhzluke96 committed on
Commit
10a102f
1 Parent(s): f367757
Files changed (2)
  1. requirements.txt +1 -1
  2. webui.py +8 -0
requirements.txt CHANGED
@@ -27,4 +27,4 @@ python-box
  ftfy
  librosa
  pyrubberband
- https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+ # https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
webui.py CHANGED
@@ -27,6 +27,14 @@ from modules.utils.torch_opt import configure_torch_optimizations
  from modules.webui import webui_config
  from modules.webui.app import create_interface, webui_init

+ import subprocess
+
+ subprocess.run(
+     "pip install flash-attn --no-build-isolation",
+     env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+     shell=True,
+ )
+
  dcls_patch()
  ignore_useless_warnings()
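Note on the added call: passing env= to subprocess.run gives the child process only the variables in that dict, so the pip invocation above runs without PATH or any CUDA-related settings from the parent environment. Below is a minimal sketch of the same runtime install that merges the flag into the existing environment instead; it is an illustrative variant, not part of the commit, and check=False is an assumption so a failed install does not abort startup.

import os
import subprocess

# Run the same flash-attn install, but keep the parent environment and
# only add the flag that skips compiling the CUDA kernels from source.
env = {**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"}
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env=env,
    shell=True,
    check=False,  # do not raise if the install fails; the webui can still start
)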