carlex3321 committed on
Commit
78f911c
·
verified ·
1 Parent(s): 6c2ab55

Update info.sh

Browse files
Files changed (1) hide show
  1. info.sh +19 -17
info.sh CHANGED
@@ -1,7 +1,7 @@
1
  #!/usr/bin/env bash
2
- echo "================= RUNTIME CAPABILITIES ================="
3
 
4
- echo "[GPU / Driver / CUDA]"
5
  nvidia-smi || true
6
  echo "CUDA_HOME: ${CUDA_HOME:-/usr/local/cuda}"
7
  echo "NVCC: $(nvcc --version 2>/dev/null | tail -n1 || echo 'N/A')"
@@ -9,19 +9,19 @@ echo
9
 
10
  echo "[PyTorch / CUDA backend]"
11
  python3 - <<'PY'
12
- import json, sys
13
  try:
14
  import torch
15
  info = {
16
- "torch": torch.__version__,
17
- "cuda_available": torch.cuda.is_available(),
18
- "cuda_device_count": torch.cuda.device_count(),
19
- "cuda_runtime_version": torch.version.cuda if hasattr(torch.version, "cuda") else None,
20
- "cudnn_version": (torch.backends.cudnn.version() if torch.cuda.is_available() else None),
21
- "tf32": (torch.backends.cuda.matmul.allow_tf32 if torch.cuda.is_available() else None),
22
- "flash_sdp": (torch.backends.cuda.flash_sdp_enabled() if hasattr(torch.backends.cuda, "flash_sdp_enabled") else None),
23
- "mem_efficient_sdp": (torch.backends.cuda.mem_efficient_sdp_enabled() if hasattr(torch.backends.cuda, "mem_efficient_sdp_enabled") else None),
24
- "math_sdp": (torch.backends.cuda.math_sdp_enabled() if hasattr(torch.backends.cuda, "math_sdp_enabled") else None),
25
  }
26
  print(json.dumps(info, indent=2))
27
  if torch.cuda.is_available():
@@ -65,9 +65,11 @@ try:
65
  print(f"triton: OK (version={getattr(triton,'__version__','unknown')})")
66
  try:
67
  import triton.ops
68
- print("triton.ops: OK")
 
 
69
  except Exception as e:
70
- print(f"triton.ops: ERR {type(e).__name__}: {e}")
71
  except Exception as e:
72
  print(f"triton: ERR {type(e).__name__}: {e}")
73
  PY
@@ -77,15 +79,15 @@ echo "[BitsAndBytes (Q8/Q4)]"
77
  python3 - <<'PY'
78
  try:
79
  import bitsandbytes as bnb
80
- import importlib
81
  v = getattr(bnb, "__version__", "unknown")
82
  print(f"bitsandbytes: OK (version={v})")
83
- # Teste de presença de kernels Triton/Q8
84
  try:
85
  import bitsandbytes.triton.int8_matmul_mixed_dequantize as q8
86
  print("bnb.triton.int8_matmul_mixed_dequantize: OK")
 
 
87
  except Exception as e:
88
- print(f"bnb.triton.int8_matmul_mixed_dequantize: ERR {type(e).__name__}: {e}")
89
  except Exception as e:
90
  print(f"bitsandbytes: ERR {type(e).__name__}: {e}")
91
  PY
 
1
  #!/usr/bin/env bash
2
+ set -euo pipefail
3
 
4
+ echo "================= RUNTIME CAPABILITIES ================="
5
  nvidia-smi || true
6
  echo "CUDA_HOME: ${CUDA_HOME:-/usr/local/cuda}"
7
  echo "NVCC: $(nvcc --version 2>/dev/null | tail -n1 || echo 'N/A')"
 
9
 
10
  echo "[PyTorch / CUDA backend]"
11
  python3 - <<'PY'
12
+ import json
13
  try:
14
  import torch
15
  info = {
16
+ "torch": torch.__version__,
17
+ "cuda_available": torch.cuda.is_available(),
18
+ "cuda_device_count": torch.cuda.device_count(),
19
+ "cuda_runtime_version": getattr(torch.version, "cuda", None),
20
+ "cudnn_version": (torch.backends.cudnn.version() if torch.cuda.is_available() else None),
21
+ "tf32": (torch.backends.cuda.matmul.allow_tf32 if torch.cuda.is_available() else None),
22
+ "flash_sdp": (torch.backends.cuda.flash_sdp_enabled() if hasattr(torch.backends.cuda,"flash_sdp_enabled") else None),
23
+ "mem_efficient_sdp": (torch.backends.cuda.mem_efficient_sdp_enabled() if hasattr(torch.backends.cuda,"mem_efficient_sdp_enabled") else None),
24
+ "math_sdp": (torch.backends.cuda.math_sdp_enabled() if hasattr(torch.backends.cuda,"math_sdp_enabled") else None),
25
  }
26
  print(json.dumps(info, indent=2))
27
  if torch.cuda.is_available():
 
65
  print(f"triton: OK (version={getattr(triton,'__version__','unknown')})")
66
  try:
67
  import triton.ops
68
+ print("triton.ops: legacy module present")
69
+ except ModuleNotFoundError:
70
+ print("triton.ops: not present (ok on Triton>=3.x)")
71
  except Exception as e:
72
+ print(f"triton.ops: WARN {type(e).__name__}: {e}")
73
  except Exception as e:
74
  print(f"triton: ERR {type(e).__name__}: {e}")
75
  PY
 
79
  python3 - <<'PY'
80
  try:
81
  import bitsandbytes as bnb
 
82
  v = getattr(bnb, "__version__", "unknown")
83
  print(f"bitsandbytes: OK (version={v})")
 
84
  try:
85
  import bitsandbytes.triton.int8_matmul_mixed_dequantize as q8
86
  print("bnb.triton.int8_matmul_mixed_dequantize: OK")
87
+ except ModuleNotFoundError:
88
+ print("bnb.q8.triton: not present (disabled or no GPU build)")
89
  except Exception as e:
90
+ print(f"bnb.q8.triton: WARN {type(e).__name__}: {e}")
91
  except Exception as e:
92
  print(f"bitsandbytes: ERR {type(e).__name__}: {e}")
93
  PY