1inkusFace committed on
Commit 4018215 · verified · 1 Parent(s): 796eb4f

Update app.py

Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -11,16 +11,16 @@ os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'
 
 import torch
 
-#torch.backends.cuda.matmul.allow_tf32 = False # torch 2.8
-#torch.backends.cudnn.allow_tf32 = False # torch 2.8
+torch.backends.cuda.matmul.allow_tf32 = False # torch 2.8
+torch.backends.cudnn.allow_tf32 = False # torch 2.8
 
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
-torch.backends.fp32_precision = "ieee" torch 2.9
-torch.backends.cuda.matmul.fp32_precision = "ieee" torch 2.9
-torch.backends.cudnn.fp32_precision = "ieee" torch 2.9
-torch.backends.cudnn.conv.fp32_precision = "ieee" torch 2.9
-torch.backends.cudnn.rnn.fp32_precision = "ieee" torch 2.9
+#torch.backends.fp32_precision = "ieee" torch 2.9
+#torch.backends.cuda.matmul.fp32_precision = "ieee" torch 2.9
+#torch.backends.cudnn.fp32_precision = "ieee" torch 2.9
+#torch.backends.cudnn.conv.fp32_precision = "ieee" torch 2.9
+#torch.backends.cudnn.rnn.fp32_precision = "ieee" torch 2.9
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
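
For context, the two blocks toggled by this commit target different PyTorch releases: `allow_tf32` is the long-standing switch that disables TF32 kernels for matmul and cuDNN, while the `fp32_precision = "ieee"` attributes are the newer per-backend controls that the diff's comments associate with torch 2.9. A minimal sketch of gating the two styles on the installed version follows; the 2.9 boundary and the availability of the `fp32_precision` attributes are assumptions taken from the comments in the diff, not verified against release notes.

import torch

# Parse the major.minor part of the torch version, ignoring local suffixes like "+cu126".
_major, _minor = (int(x) for x in torch.__version__.split("+")[0].split(".")[:2])

if (_major, _minor) >= (2, 9):
    # Assumed per the diff's comments: per-backend fp32 precision controls in torch 2.9.
    torch.backends.fp32_precision = "ieee"
    torch.backends.cuda.matmul.fp32_precision = "ieee"
    torch.backends.cudnn.fp32_precision = "ieee"
    torch.backends.cudnn.conv.fp32_precision = "ieee"
    torch.backends.cudnn.rnn.fp32_precision = "ieee"
else:
    # Long-standing TF32 switches, disabled here to keep matmuls in full FP32.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False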