1inkusFace committed on
Commit d2a0206 · verified · 1 Parent(s): e44e0ab

Update app.py
Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -2,7 +2,7 @@ import subprocess
 subprocess.run(['sh', './spaces.sh'])
 
 import os
-# Environment variable setup
+
 os.environ['PYTORCH_NVML_BASED_CUDA_CHECK'] = '1'
 os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True,pinned_use_background_threads:True'
@@ -145,7 +145,7 @@ fa_processor = FlashAttentionProcessor()
 
 pipe, upscaler_2 = load_model()
 
-for name, module in pipeline.transformer.named_modules():
+for name, module in pipe.transformer.named_modules():
     # Only replace processors that are instances of AttnProcessor2_0
     # This automatically includes the self-attention and cross-attention within transformer blocks.
     if isinstance(module, AttnProcessor2_0):
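
The second hunk only shows the head of the processor-swapping loop; the body that actually installs fa_processor lies below the diff context. The fix itself replaces the undefined name pipeline with pipe, which is what load_model() returns, so the old line would have raised a NameError at startup. As a minimal sketch of the same swap using the standard diffusers attention-processor API (this assumes pipe.transformer exposes attn_processors / set_attn_processor and that FlashAttentionProcessor is a drop-in attention processor; neither detail is shown in this hunk):

from diffusers.models.attention_processor import AttnProcessor2_0

# fa_processor = FlashAttentionProcessor() is created earlier in app.py (see the hunk header above).
# Swap every default AttnProcessor2_0 on the transformer for the custom processor,
# leaving any other processor types untouched.
new_processors = {
    name: fa_processor if isinstance(proc, AttnProcessor2_0) else proc
    for name, proc in pipe.transformer.attn_processors.items()
}
pipe.transformer.set_attn_processor(new_processors)

This dictionary-based route avoids walking named_modules() by hand, but it is only an illustrative sketch; the file's own loop over named_modules() with the isinstance(module, AttnProcessor2_0) check is what the commit keeps.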