ZTWHHH committed
Commit bf0eb3e · verified · 1 Parent(s): 9f91890

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +2 -0
  2. infer_4_33_0/lib/libpython3.10.so +3 -0
  3. infer_4_47_1/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc +3 -0
  4. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc +0 -0
  5. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc +0 -0
  6. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc +0 -0
  7. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc +0 -0
  8. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/functional.py +188 -0
  9. infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/impl.py +670 -0
  10. infer_4_47_1/lib/python3.10/site-packages/torch/_library/__init__.py +6 -0
  11. infer_4_47_1/lib/python3.10/site-packages/torch/_library/autograd.py +241 -0
  12. infer_4_47_1/lib/python3.10/site-packages/torch/_library/custom_ops.py +835 -0
  13. infer_4_47_1/lib/python3.10/site-packages/torch/_library/fake_impl.py +207 -0
  14. infer_4_47_1/lib/python3.10/site-packages/torch/_library/simple_registry.py +85 -0
  15. infer_4_47_1/lib/python3.10/site-packages/torch/_library/triton.py +233 -0
  16. infer_4_47_1/lib/python3.10/site-packages/torch/_library/utils.py +318 -0
  17. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__init__.py +17 -0
  18. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc +0 -0
  19. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc +0 -0
  20. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/scribe.cpython-310.pyc +0 -0
  21. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc +0 -0
  22. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/_internal.py +1162 -0
  23. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/_registrations.py +192 -0
  24. infer_4_47_1/lib/python3.10/site-packages/torch/_logging/structured.py +57 -0
  25. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc +0 -0
  26. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc +0 -0
  28. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes.cpython-310.pyc +0 -0
  29. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes_impl.cpython-310.pyc +0 -0
  30. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc +0 -0
  31. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc +0 -0
  33. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ndarray.cpython-310.pyc +0 -0
  34. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc +0 -0
  35. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc +0 -0
  36. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ufuncs.cpython-310.pyc +0 -0
  37. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc +0 -0
  38. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc +0 -0
  39. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/fft.cpython-310.pyc +0 -0
  40. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc +0 -0
  41. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc +0 -0
  42. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py +85 -0
  43. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py +1368 -0
  44. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_dtypes.py +453 -0
  45. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py +217 -0
  46. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_funcs.py +76 -0
  47. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py +2056 -0
  48. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_ndarray.py +592 -0
  49. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_normalizations.py +259 -0
  50. infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py +459 -0
.gitattributes CHANGED
@@ -1541,3 +1541,5 @@ infer_4_47_1/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3
  evalkit_tf446/lib/python3.10/site-packages/cusparselt/lib/libcusparseLt.so.0 filter=lfs diff=lfs merge=lfs -text
  infer_4_47_1/lib/python3.10/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  evalkit_cambrian/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ infer_4_33_0/lib/libpython3.10.so filter=lfs diff=lfs merge=lfs -text
+ infer_4_47_1/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
infer_4_33_0/lib/libpython3.10.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52417985e9a3a806afba85d5b82d2db467dda5522096df98aede1a754dd32d70
+ size 17434008
infer_4_47_1/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9fd63246772250f7e6e893a567be9621c38d09916352a8e89b995c17446b069
+ size 134921
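Both files above are stored as Git LFS pointers rather than as raw content: each pointer is a three-line stanza recording the spec version, the sha256 object id of the real blob, and its size in bytes. A minimal sketch of parsing such a stanza in Python (the helper name is hypothetical, not part of git-lfs):

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; the stanzas above carry version, oid, size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algorithm, digest = fields["oid"].split(":", 1)
    return {
        "version": fields["version"],
        "algorithm": algorithm,   # "sha256" for the pointers in this commit
        "digest": digest,
        "size": int(fields["size"]),
    }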
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes)
 
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/autograd.cpython-310.pyc ADDED
Binary file (8.88 kB)
 
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/functional.cpython-310.pyc ADDED
Binary file (5.96 kB)
 
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/__pycache__/impl.cpython-310.pyc ADDED
Binary file (21 kB)
 
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/functional.py ADDED
@@ -0,0 +1,188 @@
+ # mypy: allow-untyped-defs
+ import weakref
+
+ import torch
+ import torch.utils._pytree as pytree
+ from torch._C import _ExcludeDispatchKeyGuard, DispatchKey, DispatchKeySet
+ from torch._ops import OpOverload
+ from torch.library import Library
+ from torchgen.model import (
+     BaseTy,
+     BaseType,
+     FunctionSchema,
+     OperatorName,
+     OptionalType,
+     SchemaKind,
+ )
+
+ from .autograd import autograd_not_implemented
+
+
+ def register_functional_op(
+     lib: Library,
+     new_op_name: str,
+     mutable_op: OpOverload,
+ ) -> None:
+     """Given a mutable operator, registers the functional variant.
+
+     This API also correctly links the functional variant with the mutable
+     operator for the purposes of functionalization.
+
+     All of the new registrations are performed on the ``lib`` passed in.
+
+     Arguments:
+         lib (Library): Should be a torch.library.Library object that has
+             the same namespace as ``mutable_op``'s namespace.
+             lib will be used to register the new functional op as well
+             as a functionalization kernel for the ``mutable_op``.
+             If you don't have a library handy, use
+             ``torch.library.Library(ns, 'FRAGMENT')`` to construct one.
+         new_op_name (str): The name of the functional operator (without the
+             namespace). If no namespace, the new functional variant will be
+             accessible under ``torch.ops.{lib.ns}.new_op_name``.
+         mutable_op (OpOverload): The mutable custom operator. Note
+             that you may need to add a `.default` to it, like
+             `torch.ops.aten.abs_.default`.
+
+     """
+     validate(mutable_op)
+     schema = functional_schema(new_op_name, mutable_op)
+     lib.define(schema)
+
+     functional_impl = construct_functional_impl(mutable_op)
+     lib.impl(new_op_name, functional_impl, 'CompositeExplicitAutograd')
+
+     functional_op = getattr(getattr(torch.ops, lib.ns), new_op_name).default
+
+     # There's no easy way for us to generate the autograd kernel, so we
+     # use autograd_not_implemented. Also, this makes it so that the user
+     # is unable to register an autograd formula themselves. This shouldn't
+     # be a problem if the user doesn't use the functional op directly
+     # in their program, but we may need to revisit this in the future.
+     lib.impl(new_op_name, autograd_not_implemented(functional_op), 'Autograd')
+
+     f_kernel = construct_functionalization_kernel(weakref.proxy(mutable_op), functional_op)
+
+     lib.impl(mutable_op, f_kernel, 'Functionalize')
+
+
+ def construct_functional_impl(mutable_op):
+     def functional_impl(*args):
+         # Strategy:
+         # - clone args that would have been mutated
+         # - run mutable_op
+         # - return the cloned args as additional outputs
+         new_args = []
+         extra_rets = []
+         for is_write, arg in zip(mutable_args(mutable_op), args):
+             if is_write:
+                 cloned = arg.clone() if arg is not None else None
+                 new_args.append(cloned)
+                 extra_rets.append(cloned)
+             else:
+                 new_args.append(arg)
+         result = mutable_op(*new_args)
+         if result is None:
+             return tuple(extra_rets)
+         if isinstance(result, tuple):
+             return (*result, *extra_rets)
+         return (result, *extra_rets)
+     return functional_impl
+
+
+ def construct_functionalization_kernel(mutable_op, functional_op):
+     def kernel(*args):
+         # There's nothing to be functionalized!
+         # We can still end up here because DispatchKey::Functionalize is a mode key
+         if pytree.tree_all_only(torch.Tensor, lambda x: not torch._is_functional_tensor(x), args):
+             with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
+                 return mutable_op(*args)
+
+         # NB: This differs from the codegen -- codegen handles cases where there
+         # are mixed FunctionalTensorWrapper and non-FunctionalTensorWrapper.
+         # This only really matters for XLA (mixed CPU-XLA tensors) and
+         # running functionalization without the PT2 stack (which guarantees to us that
+         # all tensors are FunctionalTensorWrapper).
+         if not pytree.tree_all_only(torch.Tensor, torch._is_functional_tensor, args):
+             raise RuntimeError(f"{mutable_op}: expected all args to be FunctionalTensorWrapper")
+
+         unwrapped_args = []
+         for arg in args:
+             if isinstance(arg, torch.Tensor) and torch._is_functional_tensor(arg):
+                 torch._sync(arg)
+                 unwrapped = torch._from_functional_tensor(arg)
+                 unwrapped_args.append(unwrapped)
+             else:
+                 unwrapped_args.append(arg)
+
+         with _ExcludeDispatchKeyGuard(DispatchKeySet(DispatchKey.Functionalize)):
+             output = functional_op(*unwrapped_args)
+
+         num_actual_output = len(mutable_op._schema.returns)
+         actual_output = pytree.tree_map(
+             torch._to_functional_tensor, output[:num_actual_output])
+
+         new_values_to_propagate = output[num_actual_output:]
+         inputs_to_replace = [arg for is_write, arg in zip(mutable_args(mutable_op), args)
+                              if is_write]
+         assert len(new_values_to_propagate) == len(inputs_to_replace)
+         for new_value, arg in zip(new_values_to_propagate, inputs_to_replace):
+             if (arg is None and new_value is None) or (arg is not None and new_value is not None):
+                 continue
+             torch._C._propagate_xla_data(arg, new_value)
+             torch._C._replace_(arg, new_value)
+             torch._C._commit_update(arg)
+             torch._sync(arg)
+
+         if len(actual_output) == 1:
+             return actual_output[0]
+         elif len(actual_output) == 0:
+             return None
+         return actual_output
+
+     return kernel
+
+
+ def validate(mutable_op: OpOverload):
+     if not isinstance(mutable_op, OpOverload):
+         raise TypeError(
+             f"register_functional_op(mutable_op): expected mutable_op to be instance of "
+             f"OpOverload but got {type(mutable_op)}")
+
+     # There are generally three types of "in-place" or "mutable" ops.
+     # Each of them has its own conventions:
+     # - inplace (first input modified in-place and returned as only output)
+     # - out= (some args modified in-place and returned as outputs)
+     # - mutable (some args modified in-place but none of those returned as outputs)
+     # In theory we can support all three, but we'll just support the last
+     # option right now for simplicity.
+     schema = FunctionSchema.parse(str(mutable_op._schema))
+     if not schema.kind() == SchemaKind.mutable:
+         raise RuntimeError("Expected op to be mutable (as opposed to functional, inplace or out)")
+     for ret in schema.returns:
+         # construct_functionalization_kernel assumes this for simplicity
+         if ret.annotation is not None:
+             raise NotImplementedError(
+                 "NYI: register_functional_op(op) where op returns a mutated or aliased value. "
+                 "Please file an issue (and as a workaround, modify your operator to "
+                 "not return the mutated value or aliases)")
+     for arg in schema.arguments.flat_all:
+         # construct_functionalization_kernel assumes this for simplicity
+         if arg.type.is_tensor_like() and (
+             arg.type != BaseType(BaseTy.Tensor)
+             and arg.type != OptionalType(BaseType(BaseTy.Tensor))
+         ):
+             raise NotImplementedError(
+                 "NYI: register_functional_op(op) where op has a List[Tensor] input. "
+                 "Please file an issue.")
+
+
+ def functional_schema(new_op_name, op: OpOverload):
+     schema = FunctionSchema.parse(str(op._schema))
+     schema = schema.signature().with_name(OperatorName.parse(new_op_name))
+     return str(schema)
+
+
+ def mutable_args(op: OpOverload):
+     return tuple(False if arg.alias_info is None else arg.alias_info.is_write
+                  for arg in op._schema.arguments)
infer_4_47_1/lib/python3.10/site-packages/torch/_custom_op/impl.py ADDED
@@ -0,0 +1,670 @@
+ # mypy: allow-untyped-defs
+ import dataclasses
+ import functools
+ import inspect
+ import sys
+ import typing
+ import weakref
+ import warnings
+
+ from torchgen.model import FunctionSchema, OperatorName, SchemaKind, BaseType, ListType, BaseTy
+
+ import torch
+ import torch._C as _C
+ import torch.library as library
+ from torch.library import get_ctx
+
+ from .autograd import autograd_kernel_indirection, construct_autograd_kernel
+ import torch._library.infer_schema
+ from torch._library.infer_schema import infer_schema
+
+ """
+ torch._custom_op is deprecated. We shipped a production-ready version of it into torch.library.
+ Please use those APIs instead.
+ """
+
+ __all__ = ["custom_op", "CustomOp", "get_ctx"]
+
+
+ SUPPORTED_DEVICE_TYPE_TO_KEY = {
+     "cpu": "CPU",
+     "cuda": "CUDA",
+ }
+
+ # We will not let users register CustomOps with anything that could look like
+ # PyTorch internals to avoid confusion.
+ RESERVED_NS = {
+     "prim",
+     "prims",
+     "aten",
+     "at",
+     "torch",
+     "pytorch",
+ }
+
+ def warn_deprecated():
+     warnings.warn(
+         "torch._custom_op is deprecated and will be removed in PyTorch 2.6, please "
+         "use the equivalent torch.library API instead.", DeprecationWarning)
+
+
+ def custom_op(
+     qualname: str, manual_schema: typing.Optional[str] = None
+ ) -> typing.Callable:
+     r"""
+     This API is deprecated, please use torch.library.custom_op instead
+     """
+     warn_deprecated()
+
+     def inner(func):
+         if not inspect.isfunction(func):
+             raise ValueError(
+                 f"custom_op(...)(func): Expected `func` to be a Python "
+                 f"function, got: {type(func)}"
+             )
+
+         ns, name = parse_qualname(qualname)
+         validate_namespace(ns)
+         if func.__name__ != name:
+             raise ValueError(
+                 f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
+                 f"to have name '{name}' but got '{func.__name__}'. "
+                 f"Please either change the name of `func` or the qualname that "
+                 f"is passed to `custom_op`"
+             )
+
+         schema = infer_schema(func, mutates_args=()) if manual_schema is None else manual_schema
+         schema_str = f"{name}{schema}"
+         function_schema = FunctionSchema.parse(schema_str)
+         validate_schema(function_schema)
+         if manual_schema is not None:
+             validate_function_matches_schema(function_schema, func)
+
+         lib = library.Library(ns, "FRAGMENT")
+         lib.define(schema_str)
+         ophandle = find_ophandle_or_throw(ns, function_schema.name)
+         result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
+
+         result.__name__ = func.__name__
+         result.__module__ = func.__module__
+         result.__doc__ = func.__doc__
+
+         library.impl(lib, result._opname, "Autograd")(
+             autograd_kernel_indirection(weakref.proxy(result))
+         )
+
+         torch._C._dispatch_set_report_error_callback(
+             ophandle, functools.partial(report_error_callback, weakref.proxy(result))
+         )
+
+         return result
+
+     return inner
+
+
+ # Global dictionary holding references to all CustomOp objects
+ # Yes, it keeps all CustomOps alive (see NOTE [CustomOp lifetime])
+ # Used to query the CustomOp associated with a specific C++ dispatcher operator.
+ # An example usage is FakeTensor: FakeTensor checks if a specific operator
+ # has an implementation registered via the CustomOp API.
+ # Indexed by qualname (e.g. aten::foo)
+ global_registry: typing.Dict[str, "CustomOp"] = {}
+
+
+ class CustomOp:
+     r"""
+     This API is deprecated, please use torch.library.custom_op instead
+     """
+
+     def __init__(self, lib, cpp_ns, schema, operator_name, ophandle, *, _private_access=False):
+         super().__init__()
+         warn_deprecated()
+         if not _private_access:
+             raise RuntimeError(
+                 "The CustomOp constructor is private and we do not guarantee "
+                 "BC for it. Please use custom_op(...) to create a CustomOp object"
+             )
+         name = f"{cpp_ns}::{operator_name}"
+         self._schema = schema
+         self._cpp_ns = cpp_ns
+         self._lib: library.Library = lib
+         self._ophandle: _C._DispatchOperatorHandle = ophandle
+         # Has the name of the op, e.g. "foo". We cache here for convenience.
+         self._opname: str = operator_name
+         # this is _opname but with namespace. e.g. "custom::foo"
+         self._qualname: str = name
+         self.__name__ = None  # mypy requires this
+         # NB: Some of these impls are registered as kernels to DispatchKeys.
+         # Modifying the _impls dict directly won't do anything in that case.
+         self._impls: typing.Dict[str, typing.Optional[FuncAndLocation]] = {}
+         # See NOTE [CustomOp autograd kernel indirection]
+         self._registered_autograd_kernel_indirection = False
+
+         global_registry[self._qualname] = self
+
+     def _register_autograd_kernel_indirection(self):
+         assert not self._registered_autograd_kernel_indirection
+         self._lib.impl(self._opname, autograd_kernel_indirection(weakref.proxy(self)), "Autograd")
+         self._registered_autograd_kernel_indirection = True
+
+     # Records the impl and the source location in self._impls
+     # Note that this doesn't cause torch.library to use the impl, that
+     # needs to be done in a separate self._lib.impl call.
+     def _register_impl(self, kind, func, stacklevel=2):
+         if self._has_impl(kind):
+             func_and_location = self._impls[kind]
+             assert func_and_location is not None  # Pacify mypy
+             location = func_and_location.location
+             raise RuntimeError(
+                 f"Attempting to register a {kind} impl for operator {self._qualname} "
+                 f"that already has a {kind} impl registered from Python at "
+                 f"{location}. This is not supported."
+             )
+         frame = inspect.getframeinfo(sys._getframe(stacklevel))
+         location = f"{frame.filename}:{frame.lineno}"
+         self._impls[kind] = FuncAndLocation(func, location)
+
+     def _get_impl(self, kind):
+         return self._impls[kind]
+
+     def _has_impl(self, kind):
+         return kind in self._impls
+
+     def _destroy(self):
+         # NOTE: [CustomOp lifetime]
+         # A CustomOp, once created, lives forever. The mechanism is that the
+         # global registry holds a reference to it. However, to make testing
+         # easier, we want to be able to destroy CustomOp objects.
+         # CustomOp._destroy does the job, though it leaves the CustomOp
+         # in a garbage state.
+         del self._lib
+
+         opnamespace = getattr(torch.ops, self._cpp_ns)
+         if hasattr(opnamespace, self._opname):
+             delattr(opnamespace, self._opname)
+
+         del global_registry[self._qualname]
+
+     def __repr__(self):
+         return f'<CustomOp(op="{self._qualname}")>'
+
+     def __call__(self, *args, **kwargs):
+         # Bypass torch.ops.* and directly do OperatorHandle::callBoxed.
+         # Using torch.ops.* is a bit of a pain (it can be slow and it has lifetime
+         # issues from caching operators that make testing CustomOp difficult).
+         result = _C._dispatch_call_boxed(self._ophandle, *args, **kwargs)
+         return result
+
+     def impl(
+         self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2,
+     ) -> typing.Callable:
+         r"""
+         This API is deprecated, please use torch.library.custom_op instead
+         """
+         if isinstance(device_types, str):
+             device_types = [device_types]
+         for device_type in device_types:
+             validate_device_type(device_type)
+
+         def inner(f):
+             for device_type in set(device_types):
+                 self._check_doesnt_have_library_impl(device_type)
+                 self._register_impl(device_type, f, stacklevel=_stacklevel)
+                 dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
+                 library.impl(self._lib, self._opname, dispatch_key)(f)
+             return f
+
+         return inner
+
+     def _check_doesnt_have_library_impl(self, device_type):
+         if self._has_impl(device_type):
+             return
+         key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]
+         if _C._dispatch_has_computed_kernel_for_dispatch_key(self._qualname, key):
+             raise RuntimeError(
+                 f"impl(..., device_types={device_type}): the operator {self._qualname} "
+                 f"already has an implementation for this device type via a "
+                 f"pre-existing torch.library or TORCH_LIBRARY registration.")
+
+     def impl_factory(self) -> typing.Callable:
+         r"""Register an implementation for a factory function."""
+
+         def inner(f):
+             self._register_impl("factory", f)
+             library.impl(self._lib, self._opname, "BackendSelect")(f)
+             return f
+
+         return inner
+
+     def impl_abstract(self, _stacklevel=2) -> typing.Callable:
+         r"""
+         This API is deprecated, please use torch.library.custom_op instead
+         """
+
+         def inner(f):
+             self._check_doesnt_have_library_meta_impl()
+             self._register_impl("abstract", f, stacklevel=_stacklevel)
+             location = self._get_impl("abstract").location
+
+             qualname = self._qualname
+
+             # Handle DispatchKey.Meta registration
+             @functools.wraps(f)
+             def f_with_ctx(*args, **kwargs):
+                 def error_on_ctx():
+                     raise RuntimeError(
+                         f"Attempted to call get_ctx() for the meta implementation "
+                         f"for {qualname}. "
+                         f"You have presumably called get_ctx() because the operator "
+                         f"has a data-dependent output shape; if so, there is no "
+                         f"such meta implementation and this error is the correct "
+                         f"behavior. Otherwise, please remove the call to get_ctx() "
+                         f"in the implementation registered with impl_abstract "
+                         f"at {location}"
+                     )
+
+                 with torch._library.fake_impl.set_ctx_getter(error_on_ctx):
+                     return f(*args, **kwargs)
+
+             self._lib.impl(self._opname, f_with_ctx, "Meta")
+             return f
+
+         return inner
+
+     def _check_can_register_backward(self):
+         def error(detail):
+             raise RuntimeError(
+                 f"Cannot use torch._custom_ops APIs to register backward "
+                 f"formula for {detail}. Got operator "
+                 f"{self._qualname} with schema: {schema}"
+             )
+
+         schema = self._schema
+         if schema.kind() != SchemaKind.functional:
+             error("non-functional operator")
+
+         rets = schema.returns
+         if not schema.returns:
+             error("operator with no returns")
+
+         assert len(rets) > 0
+         is_non_mutating_view = any(
+             r.annotation is not None and not r.annotation.is_write for r in rets
+         )
+         if is_non_mutating_view:
+             error("operator that returns views")
+
+         # We make assumptions about the schema's return types.
+         allowed_return_types = {
+             BaseType(BaseTy.int): "int",
+             BaseType(BaseTy.SymInt): "SymInt",
+             BaseType(BaseTy.bool): "bool",
+             BaseType(BaseTy.float): "float",
+             BaseType(BaseTy.Tensor): "Tensor",
+             ListType(BaseType(BaseTy.Tensor), None): "List[Tensor]",
+         }
+         for ret in schema.returns:
+             if ret.type in allowed_return_types:
+                 continue
+             error(f"operator with return not in {list(allowed_return_types.values())} (got {ret.type})")
+
+     def _check_doesnt_have_library_autograd_impl(self):
+         if self._registered_autograd_kernel_indirection:
+             return
+
+         if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
+             raise RuntimeError(
+                 f"impl_backward/impl_save_for_backward: the operator {self._qualname} "
+                 f"already has an implementation for this device type via a "
+                 f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
+                 f"CompositeImplicitAutograd operators do not need an autograd formula; "
+                 f"instead, the operator will decompose into its constituents and those "
+                 f"can have autograd formulas defined on them.")
+
+         # We can improve this by adding "all Autograd<BACKEND> keys", but
+         # realistically people will just be using this API for CPU/CUDA for now.
+         for key in ["Autograd", "AutogradCPU", "AutogradCUDA"]:
+             if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, key):
+                 raise RuntimeError(
+                     f"impl_backward/impl_save_for_backward: "
+                     f"the operator {self._qualname} already has an Autograd kernel "
+                     f"registered to DispatchKey::{key} via a pre-existing "
+                     f"torch.library or TORCH_LIBRARY registration. Please either "
+                     f"remove those registrations or don't use the torch._custom_ops APIs")
+
+     def _check_doesnt_have_library_meta_impl(self):
+         if self._has_impl("abstract"):
+             return
+
+         # If the user's operator is CompositeExplicitAutograd,
+         # allow them to impl_abstract. This is being pragmatic
+         # (existing custom ops may have CompositeExplicitAutograd
+         # registrations that don't work with Meta kernels, so this
+         # gives them an escape hatch).
+         if (
+             _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeExplicitAutograd")
+             and not _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta")
+         ):
+             return
+
+         # Otherwise, if the user already has a Meta kernel or their
+         # op is CompositeImplicitAutograd or some other alias dispatch key,
+         # raise.
+
+         # Special case for CompositeImplicitAutograd
+         if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "CompositeImplicitAutograd"):
+             raise RuntimeError(
+                 f"impl_abstract(...): the operator {self._qualname} "
+                 f"already has an implementation for this device type via a "
+                 f"pre-existing registration to DispatchKey::CompositeImplicitAutograd. "
+                 f"CompositeImplicitAutograd operators do not need an abstract impl; "
+                 f"instead, the operator will decompose into its constituents and those "
+                 f"can have abstract impls defined on them.")
+
+         if _C._dispatch_has_kernel_for_dispatch_key(self._qualname, "Meta"):
+             raise RuntimeError(
+                 f"impl_abstract(...): the operator {self._qualname} "
+                 f"already has a DispatchKey::Meta implementation via a "
+                 f"pre-existing torch.library or TORCH_LIBRARY registration. "
+                 f"Please either remove that registration or don't call impl_abstract.")
+
+     # NOTE ["backward", "save_for_backward", and "autograd"]
+     # As a part of the explicit autograd API, a user must provide us
+     # a "save_for_backward" function and a "backward" function.
+     # When both of these have been provided, then we automatically
+     # construct the "autograd" kernel.
+     def _register_autograd_kernel(self):
+         assert self._has_impl("backward")
+         assert self._has_impl("save_for_backward")
+         kernel = construct_autograd_kernel(
+             self._schema,
+             self._output_differentiability,
+             self,
+             get_op(self._qualname),
+             self._get_impl("save_for_backward").func,
+             self._get_impl("backward").func)
+         self._register_impl("autograd", kernel)
+
+     def impl_save_for_backward(self, _stacklevel=2):
+         r"""Register a function that tells us what to save for backward.
+
+         Please see impl_backward for more details.
+         """
+         def inner(f):
+             self._check_can_register_backward()
+             self._check_doesnt_have_library_autograd_impl()
+             if not self._registered_autograd_kernel_indirection:
+                 self._register_autograd_kernel_indirection()
+             self._register_impl("save_for_backward", f, stacklevel=_stacklevel)
+             if self._has_impl("backward"):
+                 self._register_autograd_kernel()
+         return inner
+
+     def impl_backward(self, output_differentiability=None, _stacklevel=2):
+         r"""
+         This API is deprecated, please use torch.library.custom_op instead
+         """
+         if output_differentiability is not None:
+             def yell():
+                 raise RuntimeError(
+                     f"impl_backward(output_differentiability): expected "
+                     f"output_differentiability to be a list of bools with "
+                     f"length equal to the number of outputs of this CustomOp. "
+                     f"Got: {output_differentiability}")
+
+             if not isinstance(output_differentiability, list):
+                 yell()
+             for diff in output_differentiability:
+                 if not isinstance(diff, bool):
+                     yell()
+             if len(self._schema.returns) != len(output_differentiability):
+                 yell()
+
+         def inner(f):
+             self._check_can_register_backward()
+             self._check_doesnt_have_library_autograd_impl()
+             if not self._registered_autograd_kernel_indirection:
+                 self._register_autograd_kernel_indirection()
+             self._register_impl("backward", f, stacklevel=_stacklevel)
+             self._output_differentiability = output_differentiability
+             if self._has_impl("save_for_backward"):
+                 self._register_autograd_kernel()
+         return inner
+
+
+ @dataclasses.dataclass
+ class FuncAndLocation:
+     func: typing.Callable
+     location: str
+
+
+ def find_ophandle_or_throw(cpp_ns: str, operator_name: OperatorName):
+     overload_name = (
+         "" if operator_name.overload_name is None else operator_name.overload_name
+     )
+     return _C._dispatch_find_schema_or_throw(
+         f"{cpp_ns}::{str(operator_name.name)}", overload_name
+     )
+
+
+ def validate_namespace(ns: str) -> None:
+     if "." in ns:
+         raise ValueError(
+             f'custom_op(..., ns="{ns}"): expected ns to not contain any . (and be a '
+             f"valid variable name)"
+         )
+     if ns in RESERVED_NS:
+         raise ValueError(
+             f"custom_op(..., ns='{ns}'): '{ns}' is a reserved namespace, "
+             f"please choose something else."
+         )
+
+ def validate_schema(schema: FunctionSchema) -> None:
+     if not torch._library.utils.is_functional_schema(schema):
+         raise ValueError(
+             f"custom_op only supports functional operators "
+             f"(ops that do not mutate any inputs, do not return "
+             f"views of the inputs, and have at least one return). "
+             f"Got the following non-functional schema: {schema}"
+         )
+
+     # For simplicity: don't allow self arguments
+     if schema.arguments.self_arg is not None:
+         raise ValueError(
+             f"custom_op does not support arguments named 'self'. Please "
+             f"rename your argument. Got: {schema}"
+         )
+
+
+ def parse_qualname(qualname: str) -> typing.Tuple[str, str]:
+     names = qualname.split("::", 1)
+     if len(names) != 2:
+         raise ValueError(f"Expected there to be a namespace in {qualname}, i.e. the "
+                          f"operator name should look something like ns::foo")
+     if '.' in names[1]:
+         raise ValueError(f"The torch.custom_ops APIs do not handle overloads, "
+                          f"i.e. operator names with '.' in them. "
+                          f"Please name your operator something like ns::foo. "
+                          f"Got: {qualname}")
+     return names[0], names[1]
+
+
+ def validate_device_type(device_type: str) -> None:
+     if device_type not in SUPPORTED_DEVICE_TYPE_TO_KEY:
+         raise ValueError(
+             f"CustomOp.impl(device_types=[{device_type}, ...]): we only support device_type "
+             f"in {SUPPORTED_DEVICE_TYPE_TO_KEY.keys()}."
+         )
+
+
+ def supported_param(param: inspect.Parameter) -> bool:
+     return param.kind in (
+         inspect.Parameter.POSITIONAL_OR_KEYWORD,
+         inspect.Parameter.KEYWORD_ONLY,
+     )
+
+
+ def validate_function_matches_schema(
+     schema: FunctionSchema, func: typing.Callable
+ ) -> None:
+     sig = inspect.signature(func)
+
+     if not all(supported_param(p) for _, p in sig.parameters.items()):
+         raise ValueError(
+             f"custom_op(..., manual_schema)(func): positional-only args, "
+             f"varargs, and kwargs are not supported. Please rewrite `func` "
+             f"to not have them. Got `func` with signature: {sig}"
+         )
+
+     if (
+         any(
+             p.annotation is not inspect.Parameter.empty
+             for _, p in sig.parameters.items()
+         )
+         or sig.return_annotation is not inspect.Signature.empty
+     ):
+         raise ValueError(
+             f"custom_op(..., manual_schema)(func): When passing in a manual "
+             f"schema, we expect `func` to have no type annotations to avoid "
+             f"ambiguity. Got `func` with signature: {sig}"
+         )
+
+     positional = [
+         (name, param)
+         for name, param in sig.parameters.items()
+         if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
+     ]
+     kwargonly = [
+         (name, param)
+         for name, param in sig.parameters.items()
+         if param.kind == inspect.Parameter.KEYWORD_ONLY
+     ]
+
+     def error():
+         raise ValueError(
+             f"custom_op(..., manual_schema)(func): When passing in a manual "
+             f"schema, we expect `func`'s signature to match `manual_schema` "
+             f"(aside from type annotations). "
+             f"func's signature: {sig}, manual_schema: {schema}"
+         )
+
+     def error_default_args():
+         raise ValueError(
+             f"custom_op(..., manual_schema)(func): "
+             f"neither func nor manual_schema should have default "
+             f"arguments. Got "
+             f"func's signature: {sig}, manual_schema: {schema}"
+         )
+
+     def compare(sig_args, schema_args):
+         if len(sig_args) != len(schema_args):
+             error()
+         for (name, param), arg in zip(sig_args, schema_args):
+             if name != arg.name:
+                 error()
+             if param.default is not inspect.Parameter.empty or arg.default is not None:
+                 error_default_args()
+
+     compare(positional, schema.arguments.flat_positional)
+     compare(kwargonly, schema.arguments.flat_kwarg_only)
+
+
+ def report_error_callback(custom_op: typing.Any, key: str) -> None:
+     if key == "Undefined":
+         raise NotImplementedError(
+             f"{custom_op}: There were no Tensor inputs to this operator "
+             f"(e.g. you passed an empty list of Tensors). If your operator is a "
+             f"factory function (that is, it takes no Tensors and constructs "
+             f"a new one), then please use CustomOp.impl_factory to register "
+             f"an implementation for it"
+         )
+     if key == "Meta":
+         raise NotImplementedError(
+             f"{custom_op}: when running with device='Meta' tensors: there is no "
+             f"abstract impl registered for this CustomOp. Please register one via "
+             f"CustomOp.impl_abstract to get this CustomOp to work with Meta tensors"
+         )
+     if key in ("CPU", "CUDA"):
+         device = key.lower()
+         raise NotImplementedError(
+             f"{custom_op}: when running with device='{device}' tensors: there is no "
+             f"{device} impl registered for this CustomOp. Please register one via "
+             f"CustomOp.impl(device_type='{device}')"
+         )
+     raise NotImplementedError(
+         f"{custom_op}: No implementation for dispatch key {key}. It is likely "
+         f"that we have not added this functionality yet, please either open an "
+         f"issue or if you're feeling adventurous, use the low-level "
+         f"torch.library API"
+     )
+
+
+ def custom_op_from_existing(op):
+     ns = op.namespace
+     lib = torch.library.Library(ns, "FRAGMENT")
+     name = op.name().split("::")[-1]
+     schema_str = str(op._schema)
+     # CustomOp expects the schema string without the namespace
+     schema_str = schema_str.split("::")[-1]
+     schema = FunctionSchema.parse(schema_str)
+     return CustomOp(lib, ns, schema, name, op, _private_access=True)
+
+
+ def get_op(qualname):
+     def error_not_found():
+         raise ValueError(
+             f"Could not find the operator {qualname}. Please make sure you have "
+             f"already registered the operator and (if registered from C++) "
+             f"loaded it via torch.ops.load_library.")
+
+     ns, name = parse_qualname(qualname)
+     if not hasattr(torch.ops, ns):
+         error_not_found()
+     opnamespace = getattr(torch.ops, ns)
+     if not hasattr(opnamespace, name):
+         error_not_found()
+     packet = getattr(opnamespace, name)
+     if not hasattr(packet, 'default'):
+         error_not_found()
+     return packet.default
+
+
+ def _find_custom_op(qualname, also_check_torch_library=False):
+     if qualname in global_registry:
+         return global_registry[qualname]
+     if not also_check_torch_library:
+         raise RuntimeError(
+             f'Could not find custom op "{qualname}". Did you register it via '
+             f"the torch._custom_ops API?")
+     overload = get_op(qualname)
+     result = custom_op_from_existing(overload)
+     return result
+
+
+ def get_abstract_impl(qualname):
+     if qualname not in torch._custom_op.impl.global_registry:
+         return None
+     custom_op = torch._custom_op.impl.global_registry[qualname]
+     if custom_op is None:
+         return None
+     if not custom_op._has_impl("abstract"):
+         return None
+     return custom_op._get_impl("abstract").func
+
+
+ def _custom_op_with_schema(qualname, schema, needs_fixed_stride_order=True):
+     ns, name = qualname.split("::")
+     schema_str = f"{name}{schema}"
+     function_schema = FunctionSchema.parse(schema_str)
+     validate_schema(function_schema)
+     tags = [torch._C.Tag.needs_fixed_stride_order] if needs_fixed_stride_order else []
+     lib = library.Library(ns, "FRAGMENT")
+     lib.define(schema_str, tags=tags)
+     ophandle = find_ophandle_or_throw(ns, function_schema.name)
+     result = CustomOp(lib, ns, function_schema, name, ophandle, _private_access=True)
+     result._register_autograd_kernel_indirection()
+
+     torch._C._dispatch_set_report_error_callback(
+         ophandle, functools.partial(report_error_callback, weakref.proxy(result))
+     )
+     return get_op(qualname)
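A minimal sketch of the deprecated flow this file implements (the `mylib::numpy_add` op is hypothetical; every call here also emits the DeprecationWarning from warn_deprecated):

import numpy as np
import torch
from torch._custom_op.impl import custom_op

@custom_op("mylib::numpy_add")   # schema inferred from the type hints via infer_schema
def numpy_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    ...                          # stub; kernels are registered separately below

@numpy_add.impl(["cpu"])         # mapped to the CPU key via SUPPORTED_DEVICE_TYPE_TO_KEY
def numpy_add_cpu(x, y):
    return torch.from_numpy(x.numpy() + y.numpy())

@numpy_add.impl_abstract()       # Meta kernel so FakeTensor/torch.compile can infer shapes
def numpy_add_abstract(x, y):
    return torch.empty_like(x)

out = numpy_add(torch.randn(3), torch.randn(3))

The modern torch.library.custom_op API collapses this into a single decorator, which is why this module is deprecated.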
infer_4_47_1/lib/python3.10/site-packages/torch/_library/__init__.py ADDED
@@ -0,0 +1,6 @@
+ import torch._library.autograd
+ import torch._library.fake_impl
+ import torch._library.simple_registry
+ import torch._library.utils
+ from torch._library.fake_class_registry import register_fake_class
+ from torch._library.triton import capture_triton, triton_op
infer_4_47_1/lib/python3.10/site-packages/torch/_library/autograd.py ADDED
@@ -0,0 +1,241 @@
+ # mypy: allow-untyped-defs
+ import dataclasses
+ from dataclasses import dataclass
+ from typing import Any, Callable, Dict, Optional, Protocol
+
+ from torch import _C, _ops, autograd, Tensor
+ from torch.utils import _pytree
+
+ from . import utils
+
+
+ class InfoProtocol(Protocol):
+     _backward_fn: Optional[Callable]
+     _setup_context_fn: Optional[Callable]
+
+
+ @dataclasses.dataclass
+ class Info:
+     _backward_fn: Optional[Callable]
+     _setup_context_fn: Optional[Callable]
+
+
+ def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable:
+     name: str = f"GeneratedBackwardFor_{op._namespace}_{op._opname}_{op._overloadname}"
+
+     has_kwarg_only_args = utils.has_kwarg_only_args(op._schema)
+
+     @dataclass
+     class Metadata:
+         keyset: _C.DispatchKeySet
+         keyword_only_args: Dict[str, Any]
+
+     def forward_no_grad(*args):
+         metadata = args[-1]
+         args = args[:-1]
+
+         with _C._AutoDispatchBelowAutograd():
+             keyset = metadata.keyset
+             kwargs = metadata.keyword_only_args
+             result = op.redispatch(keyset & _C._after_autograd_keyset, *args, **kwargs)
+         return result
+
+     def forward(ctx, *args):
+         metadata = args[-1]
+         args = args[:-1]
+
+         with _C._AutoDispatchBelowAutograd():
+             keyset = metadata.keyset
+             kwargs = metadata.keyword_only_args
+             result = op.redispatch(keyset & _C._after_autograd_keyset, *args, **kwargs)
+         if info._setup_context_fn:
+             # The Dispatcher will remove args that are equal to their default
+             # values from (args, kwargs). We're going to add them back so that
+             # the user can access them.
+             #
+             # This is OK to do: The Dispatcher removed the args for serialization
+             # FC/BC reasons (that is, a graph will not store args that are equal
+             # to their default values), but that doesn't matter here. If the user
+             # adds a new default arg, then they must update
+             # their setup_context (along with the rest of their operator
+             # registrations)
+             args, kwargs = utils.fill_defaults(op._schema, args, kwargs)
+
+             if has_kwarg_only_args:
+                 info._setup_context_fn(
+                     ctx=ctx, inputs=args, keyword_only_inputs=kwargs, output=result
+                 )
+             else:
+                 info._setup_context_fn(ctx=ctx, inputs=args, output=result)
+         return result
+
+     def backward(ctx, *grads):
+         if info._backward_fn:
+             try:
+                 prev_needs_input_grad = ctx.needs_input_grad
+                 ctx.needs_input_grad = ctx.needs_input_grad[:-1]
+                 result = info._backward_fn(ctx, *grads)
+             finally:
+                 ctx.needs_input_grad = prev_needs_input_grad
+             if isinstance(result, tuple):
+                 return (*result, None)
+             return result, None
+         raise RuntimeError(
+             f"Trying to backward through {op} but no autograd "
+             f"formula was registered. "
+             f"Please use register_autograd to add one."
+         )
+
+     Generated = type(
+         name,
+         (autograd.Function,),
+         {
+             "forward": staticmethod(forward),
+             "backward": staticmethod(backward),
+         },
+     )
+
+     schema = op._schema
+     if any(
+         utils.is_tensorlist_like_type(a.type)
+         for a in (*schema.arguments, *schema.returns)
+     ):
+         Generated = supports_tensorlist(Generated)
+
+     # The dispatcher passes any keyword-only-args as kwargs and the
+     # rest of the args (even if specified as kwargs) as args.
+     def autograd_impl(keyset, *args, **keyword_only_args):
+         if _C.is_grad_enabled() and _pytree.tree_any_only(
+             Tensor, lambda x: x.requires_grad, args, not_list_of_tensor
+         ):
+             result = Generated.apply(*args, Metadata(keyset, keyword_only_args))  # type: ignore[attr-defined]
+         else:
+             result = forward_no_grad(*args, Metadata(keyset, keyword_only_args))
+         return result
+
+     return autograd_impl
+
+
+ def supports_tensorlist(cls: Any) -> Any:
+     """Allows a given autograd.Function class to support List[Tensor] inputs/outputs.
+
+     Regular autograd.Function has a constraint that it only directly supports autograd for
+     Tensors. Applying @supports_tensorlist enables an autograd.Function to support
+     autograd for List[Tensor] inputs and outputs.
+     """
+     orig_forward = cls.forward
+     orig_backward = cls.backward
+     orig_apply = cls.apply
+
+     @dataclass
+     class Metadata:
+         input_spec: spec_t
+         output_spec: Optional[spec_t] = None
+         result_is_tuple: Optional[bool] = None
+
+     def new_forward(ctx, *args):
+         metadata = args[-1]
+         args = args[:-1]
+         if not isinstance(metadata, Metadata):
+             raise NotImplementedError(
+                 "NYI: calling supports_tensorlist autograd.Function.forward directly. "
+                 "You should probably be calling .apply instead. "
+                 "Please file an issue if not."
+             )
+         args = unflatten(list(args), metadata.input_spec)
+         result = orig_forward(ctx, *args)
+         metadata.result_is_tuple = isinstance(result, tuple)
+         if not metadata.result_is_tuple:
+             result = (result,)
+         flat_result, output_spec = flatten(result, not_list_of_tensor)
+         metadata.output_spec = output_spec
+
+         if hasattr(ctx, "_pt_metadata"):
+             raise RuntimeError(
+                 "Please don't set ctx._pt_metadata; PyTorch uses it to store info"
+             )
+         ctx._pt_metadata = metadata
+
+         return tuple(flat_result)
+
+     def new_backward(ctx, *grads):
+         if not hasattr(ctx, "_pt_metadata"):
+             raise NotImplementedError(
+                 "NYI: calling supports_tensorlist autograd.Function.backward directly. "
+                 "This will automatically get called by PyTorch autograd. "
+                 "Please file an issue if you need this."
+             )
+
+         metadata = ctx._pt_metadata
+         grads = unflatten(list(grads), metadata.output_spec)
+
+         # If the user's input is ([x, y, z], w),
+         # then needs_input_grad is (bool, bool, bool, bool, bool).
+         # We need to
+         # 1. get rid of the additional bool (which comes from the extra
+         #    `metadata` input)
+         # 2. unflatten to get the right structure.
+         prev_needs_input_grad = ctx.needs_input_grad
+         try:
+             ctx.needs_input_grad = unflatten(
+                 list(ctx.needs_input_grad[:-1]), metadata.input_spec
+             )
+             grad_inputs = orig_backward(ctx, *grads)
+         finally:
+             ctx.needs_input_grad = prev_needs_input_grad
+
+         if not isinstance(grad_inputs, tuple):
+             grad_inputs = (grad_inputs,)
+         # Assume that any Nones in the backward are Tensors.
+         # If the forward has an arg that is [1, 2, 3], the backward should
+         # return None as the grad.
+         # If the forward has an arg that is [tensor, tensor], the backward
+         # may return [None, None], [grad, None], [None, grad], or [grad, grad].
+         flat_grad_inputs, grad_inputs_spec = flatten(
+             grad_inputs, not_list_of_optional_tensor
+         )
+         if grad_inputs_spec != metadata.input_spec:
+             raise RuntimeError(
+                 f"Expected the return from backward to be of the same structure "
+                 f"as the inputs. Got: {grad_inputs_spec} (return from backward), "
+                 f"{metadata.input_spec} (inputs)"
+             )
+         return tuple(flat_grad_inputs + [None])
+
+     def new_apply(*args):
+         flat_args, input_spec = flatten(args, is_leaf=not_list_of_tensor)
+         metadata = Metadata(input_spec)
+         result = orig_apply(*flat_args, metadata)  # type: ignore[misc]
+         assert metadata.output_spec is not None
+         result = unflatten(list(result), metadata.output_spec)
+         if not metadata.result_is_tuple:
+             assert isinstance(result, tuple)
+             assert len(result) == 1
+             return result[0]
+         return result
+
+     cls.forward = new_forward
+     cls.backward = new_backward
+     cls.apply = new_apply
+     return cls
+
+
+ def not_list_of_tensor(tree):
+     if isinstance(tree, tuple):
+         return False
+     if isinstance(tree, list):
+         return any(not isinstance(l, Tensor) for l in tree)
+     return True
+
+
+ def not_list_of_optional_tensor(tree):
+     if isinstance(tree, tuple):
+         return False
+     if isinstance(tree, list):
+         return any(l is not None and not isinstance(l, Tensor) for l in tree)
+     return True
+
+
+ flatten = _pytree.tree_flatten
+ unflatten = _pytree.tree_unflatten
+ spec_t = _pytree.TreeSpec
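supports_tensorlist is an internal helper, but its contract is easiest to see on a toy autograd.Function; in this sketch (class name hypothetical) a List[Tensor] flows straight through .apply, and backward returns one grad per list element:

import torch
from torch._library.autograd import supports_tensorlist

@supports_tensorlist
class StackSum(torch.autograd.Function):
    @staticmethod
    def forward(ctx, xs):  # xs arrives re-assembled as a real List[Tensor]
        ctx.num_inputs = len(xs)
        return torch.stack(xs).sum(dim=0)

    @staticmethod
    def backward(ctx, grad):
        # One grad per Tensor in the input list, mirroring its structure.
        return [grad.clone() for _ in range(ctx.num_inputs)]

xs = [torch.randn(3, requires_grad=True) for _ in range(4)]
StackSum.apply(xs).sum().backward()  # each xs[i].grad is now all ones

Under the hood, new_apply flattens the list into individual Tensors (so the autograd engine can track them), and new_backward checks that the grads returned mirror the input TreeSpec.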
infer_4_47_1/lib/python3.10/site-packages/torch/_library/custom_ops.py ADDED
@@ -0,0 +1,835 @@
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import inspect
4
+ import logging
5
+ import weakref
6
+ from contextlib import contextmanager
7
+ from typing import (
8
+ Any,
9
+ Callable,
10
+ Dict,
11
+ Iterable,
12
+ Iterator,
13
+ List,
14
+ Optional,
15
+ Sequence,
16
+ Set,
17
+ Tuple,
18
+ Union,
19
+ )
20
+
21
+ import torch
22
+ from torch import _C, _ops, Tensor
23
+ from torch.utils._exposed_in import exposed_in
24
+
25
+ from . import autograd, utils
26
+
27
+
28
+ device_types_t = Optional[Union[str, Sequence[str]]]
29
+ log = logging.getLogger(__name__)
30
+
31
+
32
+ @exposed_in("torch.library")
33
+ def custom_op(
34
+ name: str,
35
+ fn: Optional[Callable] = None,
36
+ /,
37
+ *,
38
+ mutates_args: Union[str, Iterable[str]],
39
+ device_types: device_types_t = None,
40
+ schema: Optional[str] = None,
41
+ ) -> Callable:
42
+ """Wraps a function into custom operator.
43
+
44
+ Reasons why you may want to create a custom op include:
45
+ - Wrapping a third-party library or custom kernel to work with PyTorch
46
+ subsystems like Autograd.
47
+ - Preventing torch.compile/export/FX tracing from peeking inside your function.
48
+
49
+ This API is used as a decorator around a function (please see examples).
50
+ The provided function must have type hints; these are needed to interface
51
+ with PyTorch's various subsystems.
52
+
53
+ Args:
54
+ name (str): A name for the custom op that looks like "{namespace}::{name}",
55
+ e.g. "mylib::my_linear". The name is used as the op's stable identifier
56
+ in PyTorch subsystems (e.g. torch.export, FX graphs).
57
+ To avoid name collisions, please use your project name as the namespace;
58
+ e.g. all custom ops in pytorch/fbgemm use "fbgemm" as the namespace.
59
+ mutates_args (Iterable[str] or "unknown"): The names of args that the function mutates.
60
+ This MUST be accurate, otherwise, the behavior is undefined. If "unknown",
61
+ it pessimistically assumes that all inputs to the operator are being mutated.
62
+ device_types (None | str | Sequence[str]): The device type(s) the function
63
+ is valid for. If no device type is provided, then the function
64
+ is used as the default implementation for all device types.
65
+ Examples: "cpu", "cuda".
66
+ When registering a device-specific implementation for an operator that accepts no Tensors,
67
+ we require the operator to have a "device: torch.device argument".
68
+ schema (None | str): A schema string for the operator. If None
69
+ (recommended) we'll infer a schema for the operator from its type
70
+ annotations. We recommend letting us infer a schema unless you
71
+ have a specific reason not to.
72
+ Example: "(Tensor x, int y) -> (Tensor, Tensor)".
73
+
74
+ .. note::
75
+ We recommend not passing in a ``schema`` arg and instead letting us infer
76
+ it from the type annotations. It is error-prone to write your own schema.
77
+ You may wish to provide your own schema if our interpretation of
78
+ the type annotation is not what you want.
79
+ For more info on how to write a schema string, see
80
+ `here <https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func>`_
81
+
82
+ Examples::
83
+ >>> import torch
84
+ >>> from torch import Tensor
85
+ >>> from torch.library import custom_op
86
+ >>> import numpy as np
87
+ >>>
88
+ >>> @custom_op("mylib::numpy_sin", mutates_args=())
89
+ >>> def numpy_sin(x: Tensor) -> Tensor:
90
+ >>> x_np = x.cpu().numpy()
91
+ >>> y_np = np.sin(x_np)
92
+ >>> return torch.from_numpy(y_np).to(device=x.device)
93
+ >>>
94
+ >>> x = torch.randn(3)
95
+ >>> y = numpy_sin(x)
96
+ >>> assert torch.allclose(y, x.sin())
97
+ >>>
98
+ >>> # Example of a custom op that only works for one device type.
99
+ >>> @custom_op("mylib::numpy_sin_cpu", mutates_args=(), device_types="cpu")
100
+ >>> def numpy_sin_cpu(x: Tensor) -> Tensor:
101
+ >>> x_np = x.numpy()
102
+ >>> y_np = np.sin(x_np)
103
+ >>> return torch.from_numpy(y_np)
104
+ >>>
105
+ >>> x = torch.randn(3)
106
+ >>> y = numpy_sin_cpu(x)
107
+ >>> assert torch.allclose(y, x.sin())
108
+ >>>
109
+ >>> # Example of a custom op that mutates an input
110
+ >>> @custom_op("mylib::numpy_sin_inplace", mutates_args={"x"}, device_types="cpu")
111
+ >>> def numpy_sin_inplace(x: Tensor) -> None:
112
+ >>> x_np = x.numpy()
113
+ >>> np.sin(x_np, out=x_np)
114
+ >>>
115
+ >>> x = torch.randn(3)
116
+ >>> expected = x.sin()
117
+ >>> numpy_sin_inplace(x)
118
+ >>> assert torch.allclose(x, expected)
119
+ >>>
120
+ >>> # Example of a factory function
121
+ >>> @torch.library.custom_op("mylib::bar", mutates_args={}, device_types="cpu")
122
+ >>> def bar(device: torch.device) -> Tensor:
123
+ >>> return torch.ones(3)
124
+ >>>
125
+ >>> bar("cpu")
126
+
127
+ """
128
+
129
+ def inner(fn):
130
+ import torch
131
+
132
+ if schema is None:
133
+ schema_str = torch.library.infer_schema(fn, mutates_args=mutates_args)
134
+ else:
135
+ schema_str = schema
136
+
137
+ namespace, opname = name.split("::")
138
+ result = CustomOpDef(namespace, opname, schema_str, fn)
139
+ if schema is not None:
140
+ # Check that schema's alias annotations match those of `mutates_args`.
141
+ expected = set()
142
+ for arg in result._opoverload._schema.arguments:
143
+ if arg.alias_info is not None and arg.alias_info.is_write:
144
+ expected.add(arg.name)
145
+ if expected != set(mutates_args):
146
+ raise ValueError(
147
+ f"Attempted to create a custom op with `mutates_args={mutates_args}` "
148
+ f"and `schema={schema}`. The schema suggests that the op mutates {expected}, "
149
+ f"which is different from what was provided to us in `mutates_args`. "
150
+ f"Please make these consistent."
151
+ )
152
+ result.register_kernel(device_types)(fn)
153
+ return result
154
+
155
+ if fn is None:
156
+ return inner
157
+ return inner(fn)
158
+
159
+
160
+ class CustomOpDef:
161
+ """CustomOpDef is a wrapper around a function that turns it into a custom op.
162
+
163
+ It has various methods for registering additional behavior for this
164
+ custom op.
165
+
166
+ You should not instantiate CustomOpDef directly; instead, use the
167
+ :func:`torch.library.custom_op` API.
168
+ """
169
+
170
+ def __init__(self, namespace: str, name: str, schema: str, fn: Callable) -> None:
171
+ # Fields used to interface with the PyTorch dispatcher
172
+ self._namespace = namespace
173
+ self._name = name
174
+ self._schema = schema
175
+
176
+ self._init_fn = fn
177
+
178
+ self._backend_fns: Dict[Union[str, None], Callable] = {}
179
+ self._abstract_fn: Optional[Callable] = None
180
+ self._setup_context_fn: Optional[Callable] = None
181
+ self._backward_fn: Optional[Callable] = None
182
+ self._torch_dispatch_fns: Dict[type, Callable] = {}
183
+ self._vmap_fn: Optional[Callable] = None
184
+
185
+ self._lib = get_library_allowing_overwrite(self._namespace, self._name)
186
+ self._register_to_dispatcher()
187
+ self._disabled_kernel: Set = set()
188
+ OPDEFS[self._qualname] = self
189
+
190
+ @property
191
+ def _qualname(self) -> str:
192
+ return f"{self._namespace}::{self._name}"
193
+
194
+ def __repr__(self) -> str:
195
+ return f"<CustomOpDef({self._qualname})>"
196
+
197
+ @contextmanager
198
+ def set_kernel_enabled(self, device_type: str, enabled: bool = True):
199
+ """
200
+ Disable or re-enable an already registered kernel for this custom operator.
201
+
202
+ If the kernel is already disabled/enabled, this is a no-op.
203
+
204
+ Note:
205
+ If a kernel is first disabled and then registered, it is disabled until enabled again.
206
+
207
+ Args:
208
+ device_type (str): The device type to disable/enable the kernel for.
209
+ enabled (bool): Whether to enable or disable the kernel.
210
+
211
+ Example:
212
+ >>> inp = torch.randn(1)
213
+ >>>
214
+ >>> # define custom op `f`.
215
+ >>> @custom_op("mylib::f", mutates_args=())
216
+ >>> def f(x: Tensor) -> Tensor:
217
+ >>> return torch.zeros(1)
218
+ >>>
219
+ >>> print(f(inp)) # tensor([0.]), default kernel
220
+ >>>
221
+ >>> @f.register_kernel("cpu")
222
+ >>> def _(x):
223
+ >>> return torch.ones(1)
224
+ >>>
225
+ >>> print(f(inp)) # tensor([1.]), CPU kernel
226
+ >>>
227
+ >>> # temporarily disable the CPU kernel
228
+ >>> with f.set_kernel_enabled("cpu", enabled=False):
229
+ >>> print(f(inp)) # tensor([0.]) with CPU kernel disabled
230
+
231
+ """
232
+ action = "enable" if enabled else "disable"
233
+ originally_disabled = device_type in self._disabled_kernel
234
+ if device_type not in self._backend_fns:
235
+ log.warning(
236
+ "Attempted to %s kernel for %s but no kernel was registered for this device type.",
237
+ action,
238
+ device_type,
239
+ )
240
+
241
+ if not enabled:
242
+ if originally_disabled:
243
+ log.warning(
244
+ "Attempted to disable kernel for %s but it was already disabled.",
245
+ device_type,
246
+ )
247
+ else:
248
+ self._disabled_kernel.add(device_type)
249
+ else: # enable the kernel
250
+ if not originally_disabled:
251
+ log.warning(
252
+ "Attempted to enable kernel for %s but it was already enabled.",
253
+ device_type,
254
+ )
255
+ else:
256
+ self._disabled_kernel.remove(device_type)
257
+
258
+ try:
259
+ yield
260
+ finally:
261
+ # restore original state
262
+ if originally_disabled:
263
+ self._disabled_kernel.add(device_type)
264
+ else:
265
+ self._disabled_kernel.discard(device_type)
266
+
267
+ def register_kernel(
268
+ self, device_types: device_types_t, fn: Optional[Callable] = None, /
269
+ ) -> Callable:
270
+ """Register an implementation for a device type for this operator.
271
+
272
+ Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu".
273
+ This API may be used as a decorator.
274
+
275
+ Args:
276
+ fn (Callable): The function to register as the implementation for
277
+ the given device types.
278
+ device_types (str | Sequence[str]): The device device_types to register an impl to.
279
+
280
+ Examples::
281
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
282
+ >>> import torch
283
+ >>> from torch import Tensor
284
+ >>> from torch.library import custom_op
285
+ >>> import numpy as np
286
+ >>>
287
+ >>> # Create a custom op that works on cpu
288
+ >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu")
289
+ >>> def numpy_sin(x: Tensor) -> Tensor:
290
+ >>> x_np = x.numpy()
291
+ >>> y_np = np.sin(x_np)
292
+ >>> return torch.from_numpy(y_np)
293
+ >>>
294
+ >>> # Add implementations for the cuda device
295
+ >>> @numpy_sin.register_kernel("cuda")
296
+ >>> def _(x):
297
+ >>> x_np = x.cpu().numpy()
298
+ >>> y_np = np.sin(x_np)
299
+ >>> return torch.from_numpy(y_np).to(device=x.device)
300
+ >>>
301
+ >>> x_cpu = torch.randn(3)
302
+ >>> x_cuda = x_cpu.cuda()
303
+ >>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin())
304
+ >>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin())
305
+
306
+ """
307
+
308
+ def inner(fn):
309
+ if device_types is None or isinstance(device_types, str):
310
+ dtypes: List[Union[str, None]] = [device_types]
311
+ else:
312
+ dtypes = list(device_types)
313
+ for device_type in dtypes:
314
+ if device_type not in self._backend_fns:
315
+
316
+ def backend_impl(*args, **kwargs):
317
+ # Checks the assumption that outputs cannot alias
318
+ # inputs or other outputs.
319
+ storages = {
320
+ id(tensor.untyped_storage())
321
+ for tensor in iter_tensors(args, kwargs)
322
+ }
323
+
324
+ result = self._backend_fns[device_type](*args, **kwargs)
325
+
326
+ tuple_result = result
327
+ if not isinstance(result, tuple):
328
+ tuple_result = (result,)
329
+ for tensor in iter_tensors(tuple_result, {}):
330
+ key = id(tensor.untyped_storage())
331
+ if key in storages:
332
+ fn = self._backend_fns[device_type]
333
+ module = inspect.getmodule(fn)
334
+ raise RuntimeError(
335
+ f"{self._name} (with implementation in {module}): "
336
+ f"The output of this custom operator (1) must not "
337
+ f"also be an input to this custom operator and "
338
+ f"(2) may not alias any inputs to this custom operator "
339
+ f"or other returns. "
340
+ f"The most common way to trigger this error is if "
341
+ f"we have y = custom_op(x) and y and x are the same Tensor. "
342
+ f"Please instead return a clone of the offending output "
343
+ f"tensor(s) (e.g. return x.clone()) or refactor the custom "
344
+ f"operator to not return y."
345
+ )
346
+ storages.add(key)
347
+ return result
348
+
349
+ if device_type is None:
350
+ self._lib.impl(
351
+ self._name, backend_impl, "CompositeExplicitAutograd"
352
+ )
353
+ else:
354
+ self._lib.impl(
355
+ self._name,
356
+ backend_impl,
357
+ _C._dispatch_key_for_device(device_type),
358
+ )
359
+
360
+ # Wrap function to choose between the default implementation or the device-specific
361
+ # implementation depending on if the kernel is disabled.
362
+ @torch._disable_dynamo
363
+ def wrapped_fn(*args, **kwargs):
364
+ if device_type in self._disabled_kernel:
365
+ return self._init_fn(*args, **kwargs)
366
+ else:
367
+ return fn(*args, **kwargs)
368
+
369
+ self._backend_fns[device_type] = wrapped_fn
370
+ return fn
371
+
372
+ if device_types is not None and not utils.has_tensor_arg(
373
+ self._opoverload._schema
374
+ ):
375
+ device_arg_index = utils.get_device_arg_index(self._opoverload._schema)
376
+ if device_arg_index is None:
377
+ raise ValueError(
378
+ "Functions without tensor inputs are required to have a `device: torch.device` argument"
379
+ )
380
+ self._register_backend_select_dispatcher(device_arg_index)
381
+
382
+ # See NOTE: [Supporting decorator and non-decorator usage]
383
+ if fn is None:
384
+ return inner
385
+ return inner(fn)
386
+
387
+ def register_fake(self, fn: Callable, /) -> Callable:
388
+ r"""Register a FakeTensor implementation for this custom op.
389
+
390
+ This is necessary to get the operator to work efficiently with torch.compile.
391
+
392
+ The Fake impl (sometimes also known as a meta kernel or abstract impl)
393
+ specifies the behavior of this operator on Tensors that carry no data.
394
+ Given some input Tensors with certain properties
395
+ (sizes/strides/storage_offset/device), it specifies what the properties of
396
+ the output Tensors are.
397
+
398
+ Please see :func:`torch.library.impl_abstract` for more details.
399
+
400
+ Args:
401
+ fn (Callable): The function to register as the FakeTensor
402
+ implementation.
403
+
404
+ Examples:
405
+ >>> import torch
406
+ >>> import numpy as np
407
+ >>> from torch import Tensor
408
+ >>>
409
+ >>> # Example 1: an operator without data-dependent output shape
410
+ >>> @torch.library.custom_op("mylib::linear", mutates_args=())
411
+ >>> def linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
412
+ >>> return (x @ weight.t()) + bias
413
+ >>>
414
+ >>> @linear.register_fake
415
+ >>> def _(x, weight, bias):
416
+ >>> assert x.dim() == 2
417
+ >>> assert weight.dim() == 2
418
+ >>> assert bias.dim() == 1
419
+ >>> assert x.shape[1] == weight.shape[1]
420
+ >>> assert weight.shape[0] == bias.shape[0]
421
+ >>> assert x.device == weight.device
422
+ >>> return x.new_empty(x.size(0), weight.size(0))
423
+ >>>
424
+ >>> x = torch.randn(2, 2)
425
+ >>> weight = torch.randn(2, 2)
426
+ >>> bias = torch.randn(2)
427
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
428
+ >>> out = torch.compile(linear, fullgraph=True)(x, weight, bias)
429
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
430
+ >>> assert torch.allclose(out, torch.nn.functional.linear(x, weight, bias))
431
+ >>>
432
+ >>> # Example 2: an operator with data-dependent output shape
433
+ >>> @torch.library.custom_op("mylib::nonzero", mutates_args=())
434
+ >>> def nonzero(x: Tensor) -> Tensor:
435
+ >>> x_np = x.cpu().numpy()
436
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
437
+ >>> return torch.tensor(res, device=x.device)
438
+ >>>
439
+ >>> @nonzero.register_fake
440
+ >>> def _(x):
441
+ >>> # Number of nonzero-elements is data-dependent.
442
+ >>> # Since we cannot peek at the data in an abstract impl,
443
+ >>> # we use the ctx object to construct a new symint that
444
+ >>> # represents the data-dependent size.
445
+ >>> ctx = torch.library.get_ctx()
446
+ >>> nnz = ctx.new_dynamic_size()
447
+ >>> shape = [nnz, x.dim()]
448
+ >>> result = x.new_empty(shape, dtype=torch.int64)
449
+ >>> return result
450
+ >>>
451
+ >>> x = torch.tensor([0, 1, 2, 0, 0, 1])
452
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
453
+ >>> out = torch.compile(nonzero, fullgraph=True)(x)
454
+ >>> # xdoctest: +SKIP("Requires Python <= 3.11")
455
+ >>> assert torch.allclose(out, x.nonzero())
456
+
457
+ """
458
+ self._abstract_fn = fn
459
+ return fn
460
+
461
+ def register_torch_dispatch(
462
+ self, torch_dispatch_class: Any, fn: Optional[Callable] = None, /
463
+ ) -> Callable:
464
+ r"""Registers a torch_dispatch rule for the given operator and ``torch_dispatch_class``.
465
+
466
+ This allows for open registration to specify the behavior between the operator
467
+ and the ``torch_dispatch_class`` without needing to modify the ``torch_dispatch_class``
468
+ or the operator directly.
469
+
470
+ Please see :func:`torch.library.register_torch_dispatch` for examples and more details.
471
+ """
472
+
473
+ def register(fn):
474
+ if torch_dispatch_class not in self._torch_dispatch_fns:
475
+
476
+ def inner(*args, **kwargs):
477
+ return self._torch_dispatch_fns[torch_dispatch_class](
478
+ *args, **kwargs
479
+ )
480
+
481
+ self._lib._register_torch_dispatch_rule(
482
+ self._name, torch_dispatch_class, inner
483
+ )
484
+ self._torch_dispatch_fns[torch_dispatch_class] = fn
485
+ return fn
486
+
487
+ if fn is None:
488
+ return register
489
+ else:
490
+ return register(fn)
491
+
492
+ def register_autograd(
493
+ self,
494
+ backward: Callable,
495
+ /,
496
+ *,
497
+ setup_context: Optional[Callable] = None,
498
+ ) -> None:
499
+ r"""Register a backward formula for this custom op.
500
+
501
+ In order for an operator to work with autograd, you need to register
502
+ a backward formula:
503
+ 1. You must tell us how to compute gradients during the backward pass
504
+ by providing us a "backward" function.
505
+ 2. If you need any values from the forward to compute gradients, you can
506
+ use `setup_context` to save values for backward.
507
+
508
+ ``backward_fn`` runs during the backward pass. It accepts ``(ctx, *grads)``:
509
+ - ``grads`` is one or more gradients. The number of gradients matches
510
+ the number of outputs of the operator.
511
+ The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by
512
+ :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the
513
+ same as :meth:`torch.autograd.Function.backward`.
514
+
515
+ ``setup_context(ctx, inputs, output)`` runs during the forward pass.
516
+ Please save quantities needed for backward onto the ``ctx`` object via
517
+ either :meth:`torch.autograd.function.FunctionCtx.save_for_backward`
518
+ or assigning them as attributes of ``ctx``. If your custom op has
519
+ kwarg-only arguments, we expect the signature of ``setup_context``
520
+ to be ``setup_context(ctx, inputs, keyword_only_inputs, output)``.
521
+
522
+ Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is,
523
+ they may not directly access :meth:`torch.Tensor.data_ptr` and they must
524
+ not depend on or mutate global state. If you need a non-traceable backward,
525
+ you can make it a separate custom_op that you call inside ``backward_fn``.
526
+
527
+ Examples:
528
+ >>> import torch
529
+ >>> import numpy as np
530
+ >>> from torch import Tensor
531
+ >>>
532
+ >>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=())
533
+ >>> def numpy_sin(x: Tensor) -> Tensor:
534
+ >>> x_np = x.cpu().numpy()
535
+ >>> y_np = np.sin(x_np)
536
+ >>> return torch.from_numpy(y_np).to(device=x.device)
537
+ >>>
538
+ >>> def setup_context(ctx, inputs, output) -> None:
539
+ >>> x, = inputs
540
+ >>> ctx.save_for_backward(x)
541
+ >>>
542
+ >>> def backward(ctx, grad):
543
+ >>> x, = ctx.saved_tensors
544
+ >>> return grad * x.cos()
545
+ >>>
546
+ >>> numpy_sin.register_autograd(backward, setup_context=setup_context)
547
+ >>>
548
+ >>> x = torch.randn(3, requires_grad=True)
549
+ >>> y = numpy_sin(x)
550
+ >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y))
551
+ >>> assert torch.allclose(grad_x, x.cos())
552
+ >>>
553
+ >>> # Example with a keyword-only arg
554
+ >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
555
+ >>> def numpy_mul(x: Tensor, *, val: float) -> Tensor:
556
+ >>> x_np = x.cpu().numpy()
557
+ >>> y_np = x_np * val
558
+ >>> return torch.from_numpy(y_np).to(device=x.device)
559
+ >>>
560
+ >>> def setup_context(ctx, inputs, keyword_only_inputs, output) -> None:
561
+ >>> ctx.val = keyword_only_inputs["val"]
562
+ >>>
563
+ >>> def backward(ctx, grad):
564
+ >>> return grad * ctx.val
565
+ >>>
566
+ >>> numpy_mul.register_autograd(backward, setup_context=setup_context)
567
+ >>>
568
+ >>> x = torch.randn(3, requires_grad=True)
569
+ >>> y = numpy_mul(x, val=3.14)
570
+ >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y))
571
+ >>> assert torch.allclose(grad_x, torch.full_like(x, 3.14))
572
+
573
+ """
574
+ schema = self._opoverload._schema
575
+ if not utils.is_functional_schema(schema):
576
+ raise RuntimeError(
577
+ f"Cannot register autograd formula for non-functional operator "
578
+ f"{self} with schema {schema}. Please create "
579
+ f"a functional operator and register an autograd formula for that."
580
+ )
581
+
582
+ self._backward_fn = backward
583
+ self._setup_context_fn = setup_context
584
+
585
+ def _register_to_dispatcher(self) -> None:
586
+ lib = self._lib
587
+ schema_str = self._name + self._schema
588
+ cpp_schema = _C.parse_schema(schema_str)
589
+ if utils.has_kwarg_only_tensors(cpp_schema):
590
+ # If you want to support this, the progression is:
591
+ # - supporting kwarg-only Tensors that are non-differentiable
592
+ # - supporting kwarg-only Tensors (regardless of differentiability)
593
+ raise NotImplementedError(
594
+ f"custom_op with kwarg-only Tensor args. Please make your "
595
+ f"tensors not kwarg-only. Got: {schema_str}"
596
+ )
597
+
598
+ lib.define(
599
+ schema_str,
600
+ tags=[_C.Tag.pt2_compliant_tag, _C.Tag.needs_fixed_stride_order],
601
+ )
602
+ self._opoverload = utils.lookup_op(self._qualname)
603
+
604
+ def fake_impl(*args, **kwargs):
605
+ if self._abstract_fn is None:
606
+ if utils.can_generate_trivial_fake_impl(self._opoverload):
607
+ return None
608
+ raise RuntimeError(
609
+ f"There was no fake impl registered for {self}. "
610
+ f"This is necessary for torch.compile/export/fx tracing to work. "
611
+ f"Please use `{self._init_fn.__name__}.register_fake` to add a "
612
+ f"fake impl."
613
+ )
614
+ return self._abstract_fn(*args, **kwargs)
615
+
616
+ lib._register_fake(self._name, fake_impl, _stacklevel=4)
617
+
618
+ autograd_impl = autograd.make_autograd_impl(self._opoverload, self)
619
+ lib.impl(self._name, autograd_impl, "Autograd", with_keyset=True)
620
+
621
+ schema = self._opoverload._schema
622
+ if schema.is_mutable:
623
+
624
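+ # For mutable ops, register an ADInplaceOrView kernel that bumps the
+ # autograd version counter of each mutated Tensor arg, then redispatches
+ # below the ADInplaceOrView key.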
+ def adinplaceorview_impl(keyset, *args, **kwargs):
625
+ for arg, val in utils.zip_schema(schema, args, kwargs):
626
+ if not arg.alias_info:
627
+ continue
628
+ if not arg.alias_info.is_write:
629
+ continue
630
+ if isinstance(val, Tensor):
631
+ torch.autograd.graph.increment_version(val)
632
+ elif isinstance(val, (tuple, list)):
633
+ for v in val:
634
+ if isinstance(v, Tensor):
635
+ torch.autograd.graph.increment_version(v)
636
+ with _C._AutoDispatchBelowADInplaceOrView():
637
+ return self._opoverload.redispatch(
638
+ keyset & _C._after_ADInplaceOrView_keyset, *args, **kwargs
639
+ )
640
+
641
+ lib.impl(
642
+ self._name,
643
+ adinplaceorview_impl,
644
+ "ADInplaceOrView",
645
+ with_keyset=True,
646
+ )
647
+
648
+ def _register_backend_select_dispatcher(self, device_arg_index: int):
649
+ """
650
+ Switch on the device argument to select the correct backend to dispatch to.
651
+ """
652
+
653
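+ # With no Tensor inputs, the dispatcher cannot infer a backend from the
+ # arguments, so we key off the explicit `device` argument instead.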
+ def backend_select(keyset, *args, **kwargs):
654
+ device = args[device_arg_index].type
655
+ if device not in self._backend_fns:
656
+ raise RuntimeError(
657
+ f"{self._name} does not have a kernel registered for {device}. "
658
+ "Please use register_kernel to do so."
659
+ )
660
+ dispatch_key = _C._dispatch_key_for_device(device)
661
+ dispatch_key = getattr(_C.DispatchKey, dispatch_key)
662
+ return self._opoverload.redispatch(
663
+ _C.DispatchKeySet(dispatch_key), *args, **kwargs
664
+ )
665
+
666
+ self._lib.impl(self._name, backend_select, "BackendSelect", with_keyset=True)
667
+
668
+ def __call__(self, *args, **kwargs):
669
+ return self._opoverload(*args, **kwargs)
670
+
671
+ def register_vmap(
672
+ self,
673
+ func: Optional[Callable] = None,
674
+ ):
675
+ r"""Register a vmap implementation to support :func:`torch.vmap` for this custom op.
676
+
677
+ This API may be used as a decorator.
678
+
679
+ In order for an operator to work with :func:`torch.vmap`, you may need to register a
680
+ vmap implementation in the following signature:
681
+
682
+ ``vmap_func(info, in_dims: Tuple[Optional[int]], *args, **kwargs)``,
683
+
684
+ where ``*args`` and ``**kwargs`` are the arguments and kwargs for ``op``.
685
+
686
+ It specifies how to compute the batched version of ``op`` given inputs with an additional
687
+ dimension (specified by ``in_dims``).
688
+
689
+ For each arg in ``args``, ``in_dims`` has a corresponding ``Optional[int]``. It is ``None``
690
+ if the arg is not a Tensor or if the arg is not being vmapped over, otherwise, it is an integer
691
+ specifying what dimension of the Tensor is being vmapped over.
692
+
693
+ ``info`` is a collection of additional metadata that may be helpful:
694
+ ``info.batch_size`` specifies the size of the dimension being vmapped over, while
695
+ ``info.randomness`` is the ``randomness`` option that was passed to :func:`torch.vmap`.
696
+
697
+ The return of the function ``func`` is a tuple of ``(output, out_dims)``. Similar to ``in_dims``,
698
+ ``out_dims`` should be of the same structure as ``output`` and contain one ``out_dim``
699
+ per output that specifies if the output has the vmapped dimension and what index it is in.
700
+
701
+ Examples:
702
+ >>> import torch
703
+ >>> import numpy as np
704
+ >>> from torch import Tensor
705
+ >>> from typing import Tuple
706
+ >>>
707
+ >>> def to_numpy(tensor):
708
+ >>> return tensor.cpu().numpy()
709
+ >>>
710
+ >>> lib = torch.library.Library("mylib", "FRAGMENT")
711
+ >>> @torch.library.custom_op("mylib::numpy_cube", mutates_args=())
712
+ >>> def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
713
+ >>> x_np = to_numpy(x)
714
+ >>> dx = torch.tensor(3 * x_np ** 2, device=x.device)
715
+ >>> return torch.tensor(x_np ** 3, device=x.device), dx
716
+ >>>
717
+ >>> def numpy_cube_vmap(info, in_dims, x):
718
+ >>> result = numpy_cube(x)
719
+ >>> return result, (in_dims[0], in_dims[0])
720
+ >>>
721
+ >>> numpy_cube.register_vmap(numpy_cube_vmap)
722
+ >>>
723
+ >>> x = torch.randn(3)
724
+ >>> torch.vmap(numpy_cube)(x)
725
+ >>>
726
+ >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
727
+ >>> def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
728
+ >>> return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
729
+ >>>
730
+ >>> @numpy_mul.register_vmap
731
+ >>> def numpy_mul_vmap(info, in_dims, x, y):
732
+ >>> x_bdim, y_bdim = in_dims
733
+ >>> x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
734
+ >>> y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
735
+ >>> result = x * y
736
+ >>> result = result.movedim(-1, 0)
737
+ >>> return result, 0
738
+ >>>
739
+ >>>
740
+ >>> x = torch.randn(3)
741
+ >>> y = torch.randn(3)
742
+ >>> torch.vmap(numpy_mul)(x, y)
743
+ """
744
+ from torch._functorch.autograd_function import custom_function_call_vmap_helper
745
+ from torch._functorch.pyfunctorch import retrieve_current_functorch_interpreter
746
+
747
+ def register(func):
748
+ need_register = self._vmap_fn is None
749
+ self._vmap_fn = func
750
+
751
+ if need_register:
752
+
753
+ def wrapped_func(keyset, *args, **kwargs):
754
+ interpreter = retrieve_current_functorch_interpreter()
755
+ return custom_function_call_vmap_helper(
756
+ interpreter, self._vmap_fn, self._opoverload, *args, **kwargs
757
+ )
758
+
759
+ self._lib.impl(
760
+ self._name, wrapped_func, "FuncTorchBatched", with_keyset=True
761
+ )
762
+
763
+ if func is None:
764
+ return register
765
+ else:
766
+ return register(func)
767
+
768
+
769
+ # NOTE: [Supporting decorator and non-decorator usage]
770
+ #
771
+ # Some APIs may be both used as a decorator and not as a decorator.
772
+ # For example:
773
+ #
774
+ # >>> def fn(x):
775
+ # >>> return x.sin()
776
+ # >>>
777
+ # >>> # Usage 1: not as a decorator
778
+ # >>> numpy_sin.register_kernel("cuda", fn)
779
+ # >>>
780
+ # >>> # Usage 2: as a decorator
781
+ # >>> @numpy_sin.register_kernel("cuda")
782
+ # >>> def fn2(x):
783
+ # >>> return x.sin()
784
+ #
785
+ # The way we support this is that `register_kernel` accepts an optional `fn`.
786
+ # If `fn` is provided (Usage 1), then we know that the user is using it not
787
+ # as a decorator.
788
+ # If `fn` is not provided (Usage 2), then `register_kernel` needs to return a
789
+ # decorator.
790
+
791
+
792
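+ # Module-level registries: OPDEF_TO_LIB owns the Library fragment backing
+ # each op (so redefinition can destroy it); OPDEFS weakly tracks live
+ # CustomOpDef objects by qualname.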
+ OPDEF_TO_LIB: Dict[str, "torch.library.Library"] = {}
793
+ OPDEFS: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
794
+
795
+
796
+ def get_library_allowing_overwrite(
797
+ namespace: str, name: str
798
+ ) -> "torch.library.Library":
799
+ qualname = f"{namespace}::{name}"
800
+
801
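+ # Destroy any previous Library fragment for this op so that re-running a
+ # definition (e.g. in a notebook) overwrites instead of erroring.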
+ if qualname in OPDEF_TO_LIB:
802
+ OPDEF_TO_LIB[qualname]._destroy()
803
+ del OPDEF_TO_LIB[qualname]
804
+
805
+ lib = torch.library.Library(namespace, "FRAGMENT") # noqa: TOR901
806
+ OPDEF_TO_LIB[qualname] = lib
807
+ return lib
808
+
809
+
810
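+ # Yields every Tensor in (args, kwargs), descending one level into
+ # tuples/lists by default (allowed_nesting controls the depth).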
+ def iter_tensors(
811
+ args: Tuple[Any], kwargs: Dict[str, Any], allowed_nesting: int = 1
812
+ ) -> Iterator[Tensor]:
813
+ def check(arg):
814
+ if isinstance(arg, Tensor):
815
+ yield arg
816
+ elif allowed_nesting > 0 and isinstance(arg, (tuple, list)):
817
+ yield from iter_tensors(tuple(arg), {}, allowed_nesting - 1)
818
+
819
+ for arg in args:
820
+ yield from check(arg)
821
+ for kwarg in kwargs.values():
822
+ yield from check(kwarg)
823
+
824
+
825
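+ # Resolves a CustomOpDef from a CustomOpDef, OpOverload, or qualname
+ # string; returns None for ops not defined via torch.library.custom_op.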
+ def _maybe_get_opdef(
826
+ op: Union[CustomOpDef, _ops.OpOverload, str]
827
+ ) -> Optional[CustomOpDef]:
828
+ if isinstance(op, CustomOpDef):
829
+ return op
830
+ if isinstance(op, _ops.OpOverload):
831
+ op = op._name
832
+ assert isinstance(op, str)
833
+ if op in OPDEFS:
834
+ return OPDEFS[op]
835
+ return None
infer_4_47_1/lib/python3.10/site-packages/torch/_library/fake_impl.py ADDED
@@ -0,0 +1,207 @@
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+ import functools
4
+ from typing import Callable, Optional
5
+ from typing_extensions import deprecated
6
+
7
+ import torch
8
+ from torch._library.utils import Kernel, RegistrationHandle
9
+
10
+
11
+ class FakeImplHolder:
12
+ """A holder where one can register a fake impl to."""
13
+
14
+ def __init__(self, qualname: str):
15
+ self.qualname: str = qualname
16
+ self.kernel: Optional[Kernel] = None
17
+ self.lib: Optional[torch.library.Library] = None
18
+
19
+ def register(self, func: Callable, source: str) -> RegistrationHandle:
20
+ """Register a fake impl.
21
+
22
+ Returns a RegistrationHandle that one can use to de-register this
23
+ fake impl.
24
+ """
25
+ if self.kernel is not None:
26
+ raise RuntimeError(
27
+ f"register_fake(...): the operator {self.qualname} "
28
+ f"already has a fake impl registered at "
29
+ f"{self.kernel.source}."
30
+ )
31
+ if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
32
+ raise RuntimeError(
33
+ f"register_fake(...): the operator {self.qualname} "
34
+ f"already has a DispatchKey::Meta implementation via a "
35
+ f"pre-existing torch.library or TORCH_LIBRARY registration. "
36
+ f"Please either remove that registration or don't call "
37
+ f"register_fake."
38
+ )
39
+
40
+ if torch._C._dispatch_has_kernel_for_dispatch_key(
41
+ self.qualname, "CompositeImplicitAutograd"
42
+ ):
43
+ raise RuntimeError(
44
+ f"register_fake(...): the operator {self.qualname} "
45
+ f"already has an implementation for this device type via a "
46
+ f"pre-existing registration to "
47
+ f"DispatchKey::CompositeImplicitAutograd. "
48
+ f"CompositeImplicitAutograd operators do not need a fake "
49
+ f"impl; "
50
+ f"instead, the operator will decompose into its constituents "
51
+ f"and those "
52
+ f"can have fake impls defined on them."
53
+ )
54
+
55
+ # Store the kernel in this holder
56
+ self.kernel = Kernel(func, source)
57
+
58
+ # Also register the fake impl to Meta key
59
+ if self.lib is None:
60
+ ns = self.qualname.split("::")[0]
61
+ self.lib = torch.library.Library(ns, "FRAGMENT") # noqa: TOR901
62
+ meta_kernel = construct_meta_kernel(self.qualname, self)
63
+ self.lib.impl(self.qualname, meta_kernel, "Meta")
64
+
65
+ def deregister_fake_class():
66
+ if self.lib:
67
+ self.lib._destroy()
68
+ self.lib = None
69
+ self.kernel = None
70
+
71
+ return RegistrationHandle(deregister_fake_class)
72
+
73
+
74
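+ # Adapts the registered fake impl into a Meta-key kernel. get_ctx() is
+ # rebound to raise, since data-dependent output shapes have no meta
+ # implementation.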
+ def construct_meta_kernel(qualname: str, fake_impl_holder: FakeImplHolder) -> Callable:
75
+ assert fake_impl_holder.kernel is not None
76
+
77
+ @functools.wraps(fake_impl_holder.kernel.func)
78
+ def meta_kernel(*args, **kwargs):
79
+ assert fake_impl_holder.kernel is not None
80
+ source = fake_impl_holder.kernel.source
81
+
82
+ def error_on_ctx():
83
+ raise RuntimeError(
84
+ f"Attempted to call get_ctx() for the meta implementation "
85
+ f"for {qualname} (implemented at {source}). "
86
+ f"You have presumably called get_ctx() because the operator "
87
+ f"has a data-dependent output shape; if so, there is no "
88
+ f"such meta implementation and this error is the correct "
89
+ f"behavior."
90
+ )
91
+
92
+ with set_ctx_getter(error_on_ctx):
93
+ return fake_impl_holder.kernel(*args, **kwargs)
94
+
95
+ return meta_kernel
96
+
97
+
98
+ def get_none():
99
+ return None
100
+
101
+
102
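+ # Module-level hook consulted by torch.library.get_ctx(); set_ctx_getter()
+ # temporarily swaps it in while a fake impl runs.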
+ global_ctx_getter: Callable = get_none
103
+
104
+
105
+ @contextlib.contextmanager
106
+ def set_ctx_getter(ctx_getter):
107
+ global global_ctx_getter
108
+ prev = global_ctx_getter
109
+ try:
110
+ global_ctx_getter = ctx_getter
111
+ yield
112
+ finally:
113
+ global_ctx_getter = prev
114
+
115
+
116
+ class FakeImplCtx:
117
+ """
118
+ Context object for writing fake implementations for custom operators.
119
+ """
120
+
121
+ def __init__(self, _fake_mode, _op):
122
+ self._fake_mode = _fake_mode
123
+ self._shape_env = _fake_mode.shape_env
124
+ self._op = _op
125
+
126
+ @deprecated(
127
+ "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
128
+ category=FutureWarning,
129
+ )
130
+ def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
131
+ return self.new_dynamic_size(min=min, max=max)
132
+
133
+ def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
134
+ """Constructs a new symint (symbolic int) representing a data-dependent value.
135
+
136
+ This is useful for writing the fake implementation (which is necessary
137
+ for torch.compile) for a CustomOp where an output Tensor has a size
138
+ that depends on the data of the input Tensors.
139
+
140
+ Args:
141
+ min (int): A statically known inclusive lower bound for this symint. Default: 0
142
+ max (Optional[int]): A statically known inclusive upper bound for this
143
+ symint. Default: None
144
+
145
+ .. warning::
146
+
147
+ It is important that the ``min`` and ``max`` (if not None) values are set
148
+ correctly, otherwise, there will be undefined behavior under
149
+ torch.compile. ``min`` defaults to 0 here; since torch.compile
151
+ specializes on 0/1 sizes, pass ``min=2`` to avoid that specialization.
151
+
152
+ You must also verify that your implementation on concrete Tensors
153
+ (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
154
+ to the symint also respects these constraints.
155
+ The easiest way to do this is to add an assertion in the CPU/CUDA/etc
156
+ implementation that the size follows these bounds.
157
+
158
+ Example::
159
+
160
+ >>> # An operator with data-dependent output shape
161
+ >>> lib = torch.library.Library("mymodule", "FRAGMENT")
162
+ >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
163
+ >>>
164
+ >>> @torch.library.register_fake("mymodule::custom_nonzero")
165
+ >>> def _(x):
166
+ >>> # Number of nonzero-elements is data-dependent.
167
+ >>> # Since we cannot peek at the data in an fake impl,
168
+ >>> # we use the ctx object to construct a new symint that
169
+ >>> # represents the data-dependent size.
170
+ >>> ctx = torch.library.get_ctx()
171
+ >>> nnz = ctx.new_dynamic_size()
172
+ >>> shape = [nnz, x.dim()]
173
+ >>> result = x.new_empty(shape, dtype=torch.int64)
174
+ >>> return result
175
+ >>>
176
+ >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
177
+ >>> def _(x):
178
+ >>> x_np = x.numpy()
179
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
180
+ >>> return torch.tensor(res, device=x.device)
181
+
182
+ """
183
+ if (
184
+ self._shape_env is None
185
+ or not self._shape_env.allow_dynamic_output_shape_ops
186
+ ):
187
+ raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)
188
+
189
+ if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
190
+ raise ValueError(
191
+ f"ctx.new_dynamic_size(min={min}, max={max}): expected "
192
+ f"min and max to be statically known ints but got SymInt. "
193
+ f"This is not supported."
194
+ )
195
+
196
+ if min < 0:
197
+ raise ValueError(
198
+ f"ctx.new_dynamic_size(min={min}, ...): expected min to be "
199
+ f"greater than or equal to 0: this API can only create "
200
+ f"non-negative sizes."
201
+ )
202
+
203
+ result = self._shape_env.create_unbacked_symint()
204
+ torch.fx.experimental.symbolic_shapes._constrain_range_for_size(
205
+ result, min=min, max=max
206
+ )
207
+ return result
infer_4_47_1/lib/python3.10/site-packages/torch/_library/simple_registry.py ADDED
@@ -0,0 +1,85 @@
1
+ # mypy: allow-untyped-defs
2
+ from typing import Callable, Optional
3
+
4
+ from .fake_impl import FakeImplHolder
5
+ from .utils import RegistrationHandle
6
+
7
+
8
+ __all__ = ["SimpleLibraryRegistry", "SimpleOperatorEntry", "singleton"]
9
+
10
+
11
+ class SimpleLibraryRegistry:
12
+ """Registry for the "simple" torch.library APIs
13
+
14
+ The "simple" torch.library APIs are a higher-level API on top of the
15
+ raw PyTorch DispatchKey registration APIs that includes:
16
+ - fake impl
17
+
18
+ Registrations for these APIs do not go into the PyTorch dispatcher's
19
+ table because they may not directly involve a DispatchKey. For example,
20
+ the fake impl is a Python function that gets invoked by FakeTensor.
21
+ Instead, we manage them here.
22
+
23
+ SimpleLibraryRegistry is a mapping from a fully qualified operator name
24
+ (including the overload) to SimpleOperatorEntry.
25
+ """
26
+
27
+ def __init__(self):
28
+ self._data = {}
29
+
30
+ def find(self, qualname: str) -> "SimpleOperatorEntry":
31
+ if qualname not in self._data:
32
+ self._data[qualname] = SimpleOperatorEntry(qualname)
33
+ return self._data[qualname]
34
+
35
+
36
+ singleton: SimpleLibraryRegistry = SimpleLibraryRegistry()
37
+
38
+
39
+ class SimpleOperatorEntry:
40
+ """This is 1:1 to an operator overload.
41
+
42
+ The fields of SimpleOperatorEntry are Holders where kernels can be
43
+ registered to.
44
+ """
45
+
46
+ def __init__(self, qualname: str):
47
+ self.qualname: str = qualname
48
+ self.fake_impl: FakeImplHolder = FakeImplHolder(qualname)
49
+ self.torch_dispatch_rules: GenericTorchDispatchRuleHolder = (
50
+ GenericTorchDispatchRuleHolder(qualname)
51
+ )
52
+
53
+ # For compatibility reasons. We can delete this soon.
54
+ @property
55
+ def abstract_impl(self):
56
+ return self.fake_impl
57
+
58
+
59
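+ # Maps a __torch_dispatch__ class to its registered rule for one operator.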
+ class GenericTorchDispatchRuleHolder:
60
+ def __init__(self, qualname):
61
+ self._data = {}
62
+ self.qualname = qualname
63
+
64
+ def register(
65
+ self, torch_dispatch_class: type, func: Callable
66
+ ) -> RegistrationHandle:
67
+ if self.find(torch_dispatch_class):
68
+ raise RuntimeError(
69
+ f"{torch_dispatch_class} already has a `__torch_dispatch__` rule registered for {self.qualname}"
70
+ )
71
+ self._data[torch_dispatch_class] = func
72
+
73
+ def deregister():
74
+ del self._data[torch_dispatch_class]
75
+
76
+ return RegistrationHandle(deregister)
77
+
78
+ def find(self, torch_dispatch_class):
79
+ return self._data.get(torch_dispatch_class, None)
80
+
81
+
82
+ def find_torch_dispatch_rule(op, torch_dispatch_class: type) -> Optional[Callable]:
83
+ return singleton.find(op.__qualname__).torch_dispatch_rules.find(
84
+ torch_dispatch_class
85
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/_library/triton.py ADDED
@@ -0,0 +1,233 @@
1
+ import contextlib
2
+ import threading
3
+ from typing import Callable, Generator, Iterable, Optional, Union
4
+
5
+ from .custom_ops import custom_op
6
+ from .infer_schema import infer_schema
7
+
8
+
9
+ def triton_op(
10
+ name: str,
11
+ fn: Optional[Callable] = None,
12
+ /,
13
+ *,
14
+ mutates_args: Union[str, Iterable[str]],
15
+ schema: Optional[str] = None,
16
+ ) -> Callable:
17
+ """Create a custom operator whose implementation is backed by 1+ triton kernels.
18
+
19
+ Use this instead of :func:`torch.library.custom_op` when the implementation
20
+ consists of 1+ triton kernels. :func:`torch.library.custom_op` treats
21
+ custom operators as opaque (:func:`torch.compile` and
22
+ :func:`torch.export.export` will never trace into them), but ``triton_op``
23
+ makes the implementation visible to these subsystems, allowing them
24
+ to optimize the triton kernel(s).
25
+
26
+ Note that ``fn`` must only consist of calls to PyTorch-understood
27
+ operators and triton kernels. Any triton kernels called inside ``fn``
28
+ must be wrapped in a call to :func:`torch._library.capture_triton`.
29
+
30
+ Args:
31
+ name (str): A name for the custom op that looks like "{namespace}::{name}",
32
+ e.g. "mylib::my_linear". The name is used as the op's stable identifier
33
+ in PyTorch subsystems (e.g. torch.export, FX graphs).
34
+ To avoid name collisions, please use your project name as the namespace;
35
+ e.g. all custom ops in pytorch/fbgemm use "fbgemm" as the namespace.
36
+ mutates_args (Iterable[str] or "unknown"): The names of args that the function mutates.
37
+ This MUST be accurate; otherwise, the behavior is undefined. If "unknown",
38
+ it pessimistically assumes that all inputs to the operator are being mutated.
39
+ schema (None | str): A schema string for the operator. If None
40
+ (recommended) we'll infer a schema for the operator from its type
41
+ annotations. We recommend letting us infer a schema unless you
42
+ have a specific reason not to.
43
+ Example: "(Tensor x, int y) -> (Tensor, Tensor)".
44
+
45
+ Example::
46
+
47
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
48
+ >>> import torch
49
+ >>> from torch._library import triton_op, capture_triton
50
+ >>>
51
+ >>> import triton
52
+ >>> from triton import language as tl
53
+ >>>
54
+ >>> @triton.jit
55
+ >>> def add_kernel(
56
+ >>> in_ptr0,
57
+ >>> in_ptr1,
58
+ >>> out_ptr,
59
+ >>> n_elements,
60
+ >>> BLOCK_SIZE: "tl.constexpr",
61
+ >>> ):
62
+ >>> pid = tl.program_id(axis=0)
63
+ >>> block_start = pid * BLOCK_SIZE
64
+ >>> offsets = block_start + tl.arange(0, BLOCK_SIZE)
65
+ >>> mask = offsets < n_elements
66
+ >>> x = tl.load(in_ptr0 + offsets, mask=mask)
67
+ >>> y = tl.load(in_ptr1 + offsets, mask=mask)
68
+ >>> output = x + y
69
+ >>> tl.store(out_ptr + offsets, output, mask=mask)
70
+ >>>
71
+ >>> @triton_op("mylib::add", mutates_args={})
72
+ >>> def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
73
+ >>> output = torch.empty_like(x)
74
+ >>> n_elements = output.numel()
75
+ >>>
76
+ >>> def grid(meta):
77
+ >>> return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
78
+ >>>
79
+ >>> # NB: we need to wrap the triton kernel in a call to capture_triton
80
+ >>> capture_triton(add_kernel)[grid](x, y, output, n_elements, 16)
81
+ >>> return output
82
+ >>>
83
+ >>> @torch.compile
84
+ >>> def f(x, y):
85
+ >>> return add(x, y)
86
+ >>>
87
+ >>> x = torch.randn(3, device="cuda")
88
+ >>> y = torch.randn(3, device="cuda")
89
+ >>>
90
+ >>> z = f(x, y)
91
+ >>> assert torch.allclose(z, x + y)
92
+
93
+ """
94
+
95
+ def dec(fn: Callable) -> Callable:
96
+ def backend_fn(*args, **kwargs): # type: ignore[no-untyped-def]
97
+ # Optimization: we're passing regular Tensors into the triton kernel, so
98
+ # no need to go through HOP dispatch
99
+ with set_capture_triton_enabled(False):
100
+ return fn(*args, **kwargs)
101
+
102
+ result = custom_op(
103
+ name,
104
+ backend_fn,
105
+ mutates_args=mutates_args,
106
+ schema=infer_schema(fn, mutates_args=mutates_args),
107
+ )
108
+ from .._subclasses.functional_tensor import FunctionalTensorMode
109
+
110
+ # We require that the user pass us a function that is make_fx traceable,
111
+ # so we can just register it as the Fake/meta kernel.
112
+ result.register_fake(fn)
113
+
114
+ # We decompose the operator when FunctionalTensorMode is active.
115
+ # The goal is to decompose the operator in AOTDispatcher.
116
+ # - With torch.compile, this means that the backend (usually Inductor)
117
+ # can see a call to the triton kernel(s) and so it can directly optimize
118
+ # them by inlining them into the lowering process.
119
+ # - With post-dispatch torch.export, this means that there will
120
+ # be a call(s) to the triton_kernel_wrapper_functional HOP in the
121
+ # graph (that we have yet to figure out how to serialize).
122
+ def functional_decomp( # type: ignore[no-untyped-def]
123
+ mode, _, types, args, kwargs
124
+ ):
125
+ with mode:
126
+ return fn(*args, **kwargs)
127
+
128
+ result.register_torch_dispatch(FunctionalTensorMode, functional_decomp)
129
+ return result
130
+
131
+ if fn is None:
132
+ return dec
133
+ else:
134
+ return dec(fn)
135
+
136
+
137
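+ # Thread-local toggle consulted by capture_triton(); defaults to enabled.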
+ capture_triton_enabled = threading.local()
138
+ capture_triton_enabled_default = True
139
+
140
+
141
+ @contextlib.contextmanager
142
+ def set_capture_triton_enabled(enabled: bool) -> Generator[None, None, None]:
143
+ """Whether triton kernels wrapped with capture_triton should dispatch via HOP
144
+ or go straight to the triton kernel execution.
145
+
146
+ We have this switch because eager-mode performance of HOP dispatch is slow
147
+ enough to matter (~1ms) and we know that capture_triton isn't necessary in
148
+ some situations (eager-mode with regular Tensors).
149
+ """
150
+ try:
151
+ prev = is_capture_triton_enabled()
152
+ capture_triton_enabled.value = enabled
153
+ yield
154
+ finally:
155
+ capture_triton_enabled.value = prev
156
+
157
+
158
+ def is_capture_triton_enabled() -> bool:
159
+ return getattr(capture_triton_enabled, "value", capture_triton_enabled_default)
160
+
161
+
162
+ def capture_triton(triton_kernel: Callable, /) -> Callable:
163
+ """Allows capture of a triton kernel into a graph via make_fx or
164
+ non-strict export (coming soon).
165
+
166
+ These technologies perform Dispatcher-based tracing (via
167
+ ``__torch_dispatch__``) and cannot see calls to raw triton kernels.
168
+ The ``capture_triton`` API returns a new callable that can actually
169
+ be traced into a graph.
170
+
171
+ Examples:
172
+
173
+ >>> # xdoctest: +SKIP
174
+ >>> import torch
175
+ >>> import triton
176
+ >>> from triton import language as tl
177
+ >>> from torch.fx.experimental.proxy_tensor import make_fx
178
+ >>> from torch._higher_order_ops.triton_kernel_wrap import capture_triton
179
+ >>>
180
+ >>> @triton.jit
181
+ >>> def add_kernel(
182
+ >>> in_ptr0,
183
+ >>> in_ptr1,
184
+ >>> out_ptr,
185
+ >>> n_elements,
186
+ >>> BLOCK_SIZE: "tl.constexpr",
187
+ >>> ):
188
+ >>> pid = tl.program_id(axis=0)
189
+ >>> block_start = pid * BLOCK_SIZE
190
+ >>> offsets = block_start + tl.arange(0, BLOCK_SIZE)
191
+ >>> mask = offsets < n_elements
192
+ >>> x = tl.load(in_ptr0 + offsets, mask=mask)
193
+ >>> y = tl.load(in_ptr1 + offsets, mask=mask)
194
+ >>> output = x + y
195
+ >>> tl.store(out_ptr + offsets, output, mask=mask)
196
+ >>>
197
+ >>> def add(x, y):
198
+ >>> output = torch.empty_like(x)
199
+ >>> n_elements = output.numel()
200
+ >>>
201
+ >>> def grid_fn(meta):
202
+ >>> return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
203
+ >>>
204
+ >>> capture_triton(add_kernel)[grid_fn](x, y, output, n_elements, 16)
205
+ >>> return output
206
+ >>>
207
+ >>> x = torch.randn(3, device="cuda")
208
+ >>> y = torch.randn(3, device="cuda")
209
+ >>> gm = make_fx(add)(x, y)
210
+ >>> print(gm.code)
211
+ >>> # def forward(self, x_1, y_1):
212
+ >>> # empty_like = torch.ops.aten.empty_like.default(x_1, pin_memory = False)
213
+ >>> # triton_kernel_wrapper_mutation_proxy = triton_kernel_wrapper_mutation(
214
+ >>> # kernel_idx = 0, constant_args_idx = 0,
215
+ >>> # grid = [(1, 1, 1)], kwargs = {
216
+ >>> # 'in_ptr0': x_1, 'in_ptr1': y_1, 'out_ptr': empty_like,
217
+ >>> # 'n_elements': 3, 'BLOCK_SIZE': 16
218
+ >>> # })
219
+ >>> # return empty_like
220
+
221
+ """
222
+ from triton.runtime.autotuner import Autotuner
223
+ from triton.runtime.jit import JITFunction
224
+
225
+ from torch._higher_order_ops.triton_kernel_wrap import TraceableTritonKernelWrapper
226
+
227
+ if not isinstance(triton_kernel, (JITFunction, Autotuner)):
228
+ raise RuntimeError(
229
+ "capture_triton only works on functions annotated with triton.jit or triton.autotune"
230
+ )
231
+ if not is_capture_triton_enabled():
232
+ return triton_kernel
233
+ return TraceableTritonKernelWrapper(triton_kernel, None, None)
infer_4_47_1/lib/python3.10/site-packages/torch/_library/utils.py ADDED
@@ -0,0 +1,318 @@
1
+ # mypy: allow-untyped-defs
2
+ import dataclasses
3
+ import inspect
4
+ import sys
5
+ from typing import Any, Callable, Dict, Iterable, Tuple, Union
6
+
7
+ import torch
8
+ from torch import _C, _utils_internal
9
+ from torch._ops import OpOverload
10
+
11
+
12
+ @dataclasses.dataclass
13
+ class Kernel:
14
+ """Models a (function, source location)"""
15
+
16
+ func: Callable
17
+ source: str
18
+
19
+ def __call__(self, *args, **kwargs):
20
+ return self.func(*args, **kwargs)
21
+
22
+
23
+ class RegistrationHandle:
24
+ """Invokes its on_destroy callback when someone calls .destroy() on it"""
25
+
26
+ def __init__(self, on_destroy: Callable):
27
+ self._on_destroy = on_destroy
28
+
29
+ def destroy(self) -> None:
30
+ self._on_destroy()
31
+
32
+
33
+ def get_source(stacklevel: int) -> str:
34
+ """Get a string that represents the caller.
35
+
36
+ Example: "/path/to/foo.py:42"
37
+
38
+ Use stacklevel=1 to get the caller's source
39
+ Use stacklevel=2 to get the caller's caller's source
40
+ etc.
41
+ """
42
+ frame = inspect.getframeinfo(sys._getframe(stacklevel))
43
+ source = f"{frame.filename}:{frame.lineno}"
44
+ return source
45
+
46
+
47
+ def parse_namespace(qualname: str) -> Tuple[str, str]:
48
+ splits = qualname.split("::")
49
+ if len(splits) != 2:
50
+ raise ValueError(
51
+ f"Expected `qualname` to be of the form "
52
+ f'"namespace::name", but got {qualname}. '
53
+ f"The qualname passed to the torch.library APIs must consist "
54
+ f"of a namespace and a name, e.g. aten::sin"
55
+ )
56
+ return splits[0], splits[1]
57
+
58
+
59
+ def lookup_op(qualname: str) -> OpOverload:
60
+ namespace, name = parse_namespace(qualname)
61
+ if "." in name:
62
+ name, overload = name.split(".")
63
+ else:
64
+ overload = "default"
65
+ ns = getattr(torch.ops, namespace)
66
+ packet = getattr(ns, name)
67
+ return getattr(packet, overload)
68
+
69
+
70
+ def is_builtin(op: OpOverload) -> bool:
71
+ assert isinstance(op, OpOverload)
72
+ return op.namespace in {"aten", "prim", "prims"}
73
+
74
+
75
+ def is_functional_schema(schema: Any) -> bool:
76
+ """Check if the schema is functional.
77
+
78
+ An operator is functional if:
79
+ - it does not mutate any of its inputs
80
+ - it does not return a view on any of its inputs
81
+ - it has at least one return
82
+ """
83
+
84
+ def is_functional(schema):
85
+ if schema.is_mutable:
86
+ return False
87
+ rets = schema.returns
88
+ is_non_mutating_view = len(rets) > 0 and any(
89
+ r.alias_info is not None and not r.alias_info.is_write for r in rets
90
+ )
91
+ if is_non_mutating_view:
92
+ return False
93
+ if not schema.returns:
94
+ return False
95
+ return True
96
+
97
+ if isinstance(schema, torch._C.FunctionSchema):
98
+ return is_functional(schema)
99
+
100
+ # Lazy import because not all PyTorch builds have torchgen
101
+ from torchgen.model import FunctionSchema
102
+
103
+ if isinstance(schema, str):
104
+ schema = FunctionSchema.parse(schema)
105
+ assert isinstance(schema, FunctionSchema)
106
+ return is_functional(schema)
107
+
108
+
109
+ # should be torch._C.JitType but that annotation is busted
110
+ def is_tensorlist_like_type(typ: Any) -> bool:
111
+ return (
112
+ typ == _C.ListType(_C.TensorType.get())
113
+ or typ == _C.ListType(_C.OptionalType(_C.TensorType.get()))
114
+ or typ == _C.OptionalType(_C.ListType(_C.TensorType.get()))
115
+ or typ == _C.OptionalType(_C.ListType(_C.OptionalType(_C.TensorType.get())))
116
+ )
117
+
118
+
119
+ # should be torch._C.JitType but that annotation is busted
120
+ def is_tensor_like_type(typ: Any) -> bool:
121
+ return typ == _C.TensorType.get() or typ == _C.OptionalType(_C.TensorType.get())
122
+
123
+
124
+ def mutates_and_returns_first_arg(op: OpOverload):
125
+ """Check if an op is an inplace aten op, i.e. it mutates and returns the first arg.
126
+
127
+ TODO: torchgen/model.py's FunctionSchema.parse is the source of truth for this,
128
+ but not all PyTorch builds have torchgen (due to the yaml dependency being weird).
129
+ Figure this out.
130
+
131
+ Example: add_(Tensor(a!) x, Tensor y) -> Tensor(a)
132
+ """
133
+ if op.namespace != "aten":
134
+ return False
135
+ schema = op._schema
136
+ if not len(schema.returns) == 1:
137
+ return False
138
+ if schema.returns[0].alias_info is None:
139
+ return False
140
+ alias_set = schema.returns[0].alias_info.after_set
141
+ if len(alias_set) != 1:
142
+ return False
143
+ loc = next(iter(alias_set))
144
+ if len(schema.arguments) < 1:
145
+ return False
146
+ first_arg = schema.arguments[0]
147
+ if first_arg.alias_info is None:
148
+ return False
149
+ if not first_arg.alias_info.is_write:
150
+ return False
151
+ alias_set = first_arg.alias_info.after_set
152
+ if len(alias_set) != 1:
153
+ return False
154
+ if loc != next(iter(alias_set)):
155
+ return False
156
+ for arg in schema.arguments[1:]:
157
+ if arg.alias_info is not None:
158
+ return False
159
+ return True
160
+
161
+
162
+ def fill_defaults(schema, args, kwargs):
163
+ new_args = []
164
+ new_kwargs = {}
165
+ for i in range(len(schema.arguments)):
166
+ info = schema.arguments[i]
167
+ if info.kwarg_only:
168
+ if info.name in kwargs:
169
+ new_kwargs[info.name] = kwargs[info.name]
170
+ else:
171
+ new_kwargs[info.name] = info.default_value
172
+ else:
173
+ if i < len(args):
174
+ new_args.append(args[i])
175
+ else:
176
+ new_args.append(info.default_value)
177
+ return tuple(new_args), new_kwargs
178
+
179
+
180
+ def zip_schema(
181
+ schema: _C.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any]
182
+ ) -> Iterable[Tuple[_C.Argument, Any]]:
183
+ """zips schema.arguments and (args, kwargs) together.
184
+
185
+ Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload:
186
+ that is, kwargs must be keyword-only arguments and default values may be omitted.
187
+ """
188
+ assert len(schema.arguments) >= len(args) + len(kwargs)
189
+ for i in range(len(schema.arguments)):
190
+ info = schema.arguments[i]
191
+ if info.kwarg_only:
192
+ if info.name in kwargs:
193
+ yield info, kwargs[info.name]
194
+ continue
195
+ if i >= len(args):
196
+ # args that are equal to their default values are not populated
197
+ # if they are followed by args that are equal to their defaults.
198
+ # Skip these.
199
+ continue
200
+ yield info, args[i]
201
+ return
202
+
203
+
204
+ def hop_schema_from_fx_node(node):
205
+ from torchgen.gen_schema_utils import FunctionSchemaGen
206
+
207
+ hop = node.target
208
+ if not isinstance(hop, torch._ops.HigherOrderOperator):
209
+ raise RuntimeError("fx_node's target must be a hop.")
210
+
211
+ def _collect_example_val(node):
212
+ meta_val = node.meta.get("val", None)
213
+ if meta_val is None:
214
+ assert node.op == "get_attr"
215
+ meta_val = getattr(node.graph.owning_module, node.target)
216
+ return meta_val
217
+
218
+ example_inputs = []
219
+ for arg in node.args:
220
+ if isinstance(arg, (torch.fx.Node, torch.fx.node.Node)):
221
+ example_inputs.append(_collect_example_val(arg))
222
+ elif isinstance(
223
+ arg, (torch.fx.immutable_collections.immutable_list, list, tuple)
224
+ ):
225
+ example_inputs.append([_collect_example_val(x) for x in arg])
226
+ else:
227
+ raise RuntimeError(f"Unsupported arg type {type(arg)}")
228
+
229
+ # Bound the arguments to make sure number of inputs are correct
230
+ bound_args: inspect.BoundArguments = inspect.signature(hop.__call__).bind(
231
+ *example_inputs
232
+ )
233
+
234
+ # We treat example_output as a single value in return. This is to differentiate 1. return a single val
235
+ # vs 2. return a tuple with one element.
236
+ example_output = _collect_example_val(node)
237
+ return FunctionSchemaGen.from_example(
238
+ hop._name, tuple(bound_args.arguments.items()), (list(example_output),)
239
+ )
240
+
241
+
242
+ def can_generate_trivial_fake_impl(op: OpOverload) -> bool:
243
+ assert isinstance(op, OpOverload)
244
+ if is_builtin(op):
245
+ # We control the built-ins. These may (in rare cases)
246
+ # do input metadata mutation (which we have banned on custom ops)
247
+ return False
248
+ schema = op._schema
249
+ # It's suspicious if the op is not mutable but returns nothing, so we return False out of an abundance of caution
250
+ if not schema.is_mutable:
251
+ return False
252
+ if len(schema.returns) > 0:
253
+ return False
254
+ # If the op returns nothing, then it has a trivial fake impl.
255
+ return True
256
+
257
+
258
+ def requires_set_python_module() -> bool:
259
+ """If an op was defined in C++ and extended from Python using the
260
+ torch.library APIs, returns if we require that there have been a
261
+ m.set_python_module("mylib.ops") call from C++ that associates
262
+ the C++ op with a python module.
263
+ """
264
+ return getattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE", True)
265
+
266
+
267
+ def handle_dispatch_mode(curr_mode, op_overload, *args, **kwargs):
268
+ assert isinstance(curr_mode, torch.utils._python_dispatch.TorchDispatchMode)
269
+ overload_types = []
270
+ args_flattened, _ = torch.utils._pytree.tree_flatten((args, kwargs.values()))
271
+ for a in args_flattened:
272
+ # TODO: need to double check the semantics of the "types" argument to torch_dispatch.
273
+ # It's generated in PyInterpreter.cpp, but seems to be generated in two places,
274
+ # where in one case we only include tensors with the python key, and in another
275
+ # we include **all** tensors.
276
+ if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(a).has(
277
+ torch._C.DispatchKey.Python
278
+ ):
279
+ overload_types.append(type(a))
280
+ # TODO: check that I got these args correct (in C++, we pass in "0000"??)
281
+
282
+ return curr_mode.__torch_dispatch__(op_overload, overload_types, args, kwargs)
283
+
284
+
285
+ def has_kwarg_only_args(schema: _C.FunctionSchema):
286
+ return any(a.kwarg_only for a in schema.arguments)
287
+
288
+
289
+ def has_kwarg_only_tensors(schema: _C.FunctionSchema):
290
+ for a in schema.arguments:
291
+ if not (is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type)):
292
+ continue
293
+ if not a.kwarg_only:
294
+ continue
295
+ return True
296
+ return False
297
+
298
+
299
+ def has_tensor_arg(schema: _C.FunctionSchema) -> bool:
300
+ """
301
+ Given a schema, returns True if the schema has a Tensor arg.
302
+ A Tensor arg is any arg with a type annotation that might involve Tensor.
303
+ """
304
+ return any(
305
+ (is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type))
306
+ for a in schema.arguments
307
+ )
308
+
309
+
310
+ def get_device_arg_index(schema: _C.FunctionSchema) -> Union[int, None]:
311
+ """
312
+ Given a schema, returns the id of the `device: torch.device` argument.
313
+ If it does not exist, returns None.
314
+ """
315
+ for index, arg in enumerate(schema.arguments):
316
+ if arg.type is _C.DeviceObjType.get() and arg.name == "device":
317
+ return index
318
+ return None
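
A small sketch of the lookup (the `mylib::f`/`mylib::g` schemas are hypothetical; `torch._C.parse_schema` parses a schema string):

    import torch
    from torch._library.utils import get_device_arg_index

    schema = torch._C.parse_schema("mylib::f(Tensor x, Device device) -> Tensor")
    assert get_device_arg_index(schema) == 1  # `device` is the second argument
    schema2 = torch._C.parse_schema("mylib::g(Tensor x) -> Tensor")
    assert get_device_arg_index(schema2) is None
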
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__init__.py ADDED
@@ -0,0 +1,17 @@
1
+ # Top level logging module for torch logging
2
+ # Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
3
+ # Simple setup for onboarding (see above doc for more detail):
4
+ # 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
5
+ # 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
6
+ # a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
7
+ import torch._logging._registrations
8
+
9
+ from ._internal import (
10
+ _init_logs,
11
+ DEFAULT_LOGGING,
12
+ getArtifactLogger,
13
+ LazyString,
14
+ set_logs,
15
+ trace_structured,
16
+ warning_once,
17
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc ADDED
Binary file (33.5 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc ADDED
Binary file (5.49 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/scribe.cpython-310.pyc ADDED
Binary file (2.86 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc ADDED
Binary file (1.8 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/_internal.py ADDED
@@ -0,0 +1,1162 @@
1
+ # mypy: allow-untyped-defs
2
+ import functools
3
+ import hashlib
4
+ import itertools
5
+ import json
6
+ import logging
7
+ import os
8
+ import os.path
9
+ import pathlib
10
+ import re
11
+ import sys
12
+ import tempfile
13
+ from dataclasses import dataclass, field
14
+ from importlib import __import__
15
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
16
+ from weakref import WeakSet
17
+
18
+ import torch._logging.structured
19
+ from torch._utils_internal import log_trace_structured_event
20
+ from torch.utils._traceback import CapturedTraceback
21
+
22
+
23
+ log = logging.getLogger(__name__)
24
+
25
+ # This is a synthetic logger which doesn't correspond to an actual logger,
26
+ # but handles all of our "tracing" logging, which is structured and doesn't go
27
+ # to stderr but always goes to a dedicated log file. We don't put these
28
+ # loggers in the classic module hierarchy, because we don't want a suppression
29
+ # of logs to also cause a trace to get suppressed (traces typically are not
30
+ # collected, unless we are in prod, in which case they always are collected.)
31
+ #
32
+ # TODO: Maybe we should allow for some sub-hierarchy so you can control which
33
+ # traces you want to collect, for performance reasons.
34
+ #
35
+ # See https://docs.google.com/document/d/1CX_hJ0PNy9f3R1y8TJrfkSeLkvGjjjLU84BSXgS2AZ8/edit
36
+ trace_log = logging.getLogger("torch.__trace")
37
+
38
+ DEFAULT_LOG_LEVEL = logging.WARNING
39
+ LOG_ENV_VAR = "TORCH_LOGS"
40
+ LOG_OUT_ENV_VAR = "TORCH_LOGS_OUT"
41
+ LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT"
42
+ TRACE_ENV_VAR = "TORCH_TRACE"
43
+
44
+
45
+ @dataclass
46
+ class LogRegistry:
47
+ # shorthand name to log qualified name
48
+ # Note: this only contains loggers registered
49
+ # from register_log
50
+ # e.g. "dynamo" -> "torch._dynamo"
51
+ log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict)
52
+
53
+ # artifact logger qualified names,
54
+ # this is populated lazily, as calls to getArtifactLogger
55
+ # currently formatted as <module>.__<artifact_name>
56
+ # e.g. "torch._dynamo.convert_frame.__guards"
57
+ artifact_log_qnames: Set[str] = field(default_factory=set)
58
+
59
+ # child logs of registered logs if specified via open
60
+ # registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
61
+ # these need to be tracked so their levels can be reset properly
62
+ # e.g. "torch._dynamo.output_graph"
63
+ child_log_qnames: Set[str] = field(default_factory=set)
64
+
65
+ # artifact names, populated by register_artifact
66
+ # e.g. "guards"
67
+ artifact_names: Set[str] = field(default_factory=set)
68
+
69
+ # Artifacts that should be visible by default in the error message
70
+ visible_artifacts: Set[str] = field(default_factory=set)
71
+
72
+ # A short description of each artifact
73
+ artifact_descriptions: Dict[str, str] = field(default_factory=dict)
74
+
75
+ # artifacts which are not displayed unless explicitly named in the
76
+ # settings. Ex. output_code is NOT displayed even if the inductor
77
+ # log level is set to DEBUG. It must be explicitly named in the settings
78
+ off_by_default_artifact_names: Set[str] = field(default_factory=set)
79
+
80
+ # logging format string for artifacts
81
+ artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict)
82
+
83
+ def is_artifact(self, name):
84
+ return name in self.artifact_names
85
+
86
+ def is_log(self, alias):
87
+ return alias in self.log_alias_to_log_qnames
88
+
89
+ # register a log with an alias
90
+ def register_log(self, alias, log_qnames: Union[str, List[str]]):
91
+ if isinstance(log_qnames, str):
92
+ log_qnames = [log_qnames]
93
+ self.log_alias_to_log_qnames[alias] = log_qnames
94
+
95
+ # register an artifact name
96
+ def register_artifact_name(
97
+ self, name, description, visible, off_by_default, log_format
98
+ ):
99
+ self.artifact_names.add(name)
100
+ if visible:
101
+ self.visible_artifacts.add(name)
102
+ self.artifact_descriptions[name] = description
103
+
104
+ # if off by default, don't enable it
105
+ # when log_name's log_level is set to DEBUG
106
+ if off_by_default:
107
+ self.off_by_default_artifact_names.add(name)
108
+
109
+ if log_format is not None:
110
+ self.artifact_log_formatters[name] = logging.Formatter(log_format)
111
+
112
+ # register the qualified name of an artifact log
113
+ # this is needed to know which logs need to be reset
114
+ # whenever the log_state is changed
115
+ def register_artifact_log(self, artifact_log_qname):
116
+ self.artifact_log_qnames.add(artifact_log_qname)
117
+
118
+ def register_child_log(self, log_qname):
119
+ self.child_log_qnames.add(log_qname)
120
+
121
+ # flattens all the qnames together (TODO: consider memoizing?)
122
+ def get_log_qnames(self) -> Set[str]:
123
+ return {
124
+ qname
125
+ for qnames in self.log_alias_to_log_qnames.values()
126
+ for qname in qnames
127
+ }
128
+
129
+ def get_artifact_log_qnames(self):
130
+ return set(self.artifact_log_qnames)
131
+
132
+ def get_child_log_qnames(self):
133
+ return set(self.child_log_qnames)
134
+
135
+ def is_off_by_default(self, artifact_qname):
136
+ return artifact_qname in self.off_by_default_artifact_names
137
+
138
+
139
+ @dataclass
140
+ class LogState:
141
+ # qualified log names -> currently set log level
142
+ log_qname_to_level: Dict[str, str] = field(default_factory=dict)
143
+
144
+ # the set of currently enabled artifacts
145
+ artifact_names: Set[str] = field(default_factory=set)
146
+
147
+ def enable_artifact(self, artifact_name):
148
+ self.artifact_names.add(artifact_name)
149
+
150
+ def is_artifact_enabled(self, name):
151
+ return name in self.artifact_names
152
+
153
+ def enable_log(self, log_qnames, log_level):
154
+ if isinstance(log_qnames, str):
155
+ log_qnames = [log_qnames]
156
+ for log_qname in log_qnames:
157
+ self.log_qname_to_level[log_qname] = log_level
158
+
159
+ def get_log_level_pairs(self):
160
+ """Returns all qualified module names for which the user requested
161
+ explicit logging settings.
162
+
163
+ .. warning::
164
+
165
+ This function used to return all loggers, regardless of whether
166
+ the user specified them; it now only returns logs
167
+ which were explicitly mentioned by the user (and torch, which
168
+ is always implicitly requested when we initialize our logging
169
+ subsystem.)
170
+ """
171
+ return self.log_qname_to_level.items()
172
+
173
+ def clear(self):
174
+ self.log_qname_to_level.clear()
175
+ self.artifact_names.clear()
176
+
177
+
178
+ log_registry = LogRegistry()
179
+ log_state = LogState()
180
+
181
+ # sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
182
+ DEFAULT_LOGGING = {
183
+ "dynamo": logging.INFO,
184
+ "aot": logging.INFO,
185
+ "inductor": logging.INFO,
186
+ "fsdp": logging.INFO,
187
+ "ddp_graphs": True,
188
+ "graph_breaks": True,
189
+ "guards": True,
190
+ "recompiles": True,
191
+ "dynamic": logging.INFO,
192
+ }
193
+
194
+
195
+ def set_logs(
196
+ *,
197
+ all: Optional[int] = None,
198
+ dynamo: Optional[int] = None,
199
+ aot: Optional[int] = None,
200
+ autograd: Optional[int] = None,
201
+ dynamic: Optional[int] = None,
202
+ inductor: Optional[int] = None,
203
+ distributed: Optional[int] = None,
204
+ c10d: Optional[int] = None,
205
+ ddp: Optional[int] = None,
206
+ fsdp: Optional[int] = None,
207
+ dtensor: Optional[int] = None,
208
+ onnx: Optional[int] = None,
209
+ bytecode: bool = False,
210
+ aot_graphs: bool = False,
211
+ aot_joint_graph: bool = False,
212
+ ddp_graphs: bool = False,
213
+ graph: bool = False,
214
+ graph_code: bool = False,
215
+ graph_breaks: bool = False,
216
+ graph_sizes: bool = False,
217
+ guards: bool = False,
218
+ recompiles: bool = False,
219
+ recompiles_verbose: bool = False,
220
+ trace_source: bool = False,
221
+ trace_call: bool = False,
222
+ trace_bytecode: bool = False,
223
+ output_code: bool = False,
224
+ kernel_code: bool = False,
225
+ schedule: bool = False,
226
+ perf_hints: bool = False,
227
+ post_grad_graphs: bool = False,
228
+ onnx_diagnostics: bool = False,
229
+ fusion: bool = False,
230
+ overlap: bool = False,
231
+ export: Optional[int] = None,
232
+ modules: Optional[Dict[str, Union[int, bool]]] = None,
233
+ cudagraphs: bool = False,
234
+ sym_node: bool = False,
235
+ compiled_autograd: bool = False,
236
+ compiled_autograd_verbose: bool = False,
237
+ cudagraph_static_inputs: bool = False,
238
+ benchmarking: bool = False,
239
+ ):
240
+ """
241
+ Sets the log level for individual components and toggles individual log
242
+ artifact types.
243
+
244
+ .. warning:: This feature is a prototype and may have compatibility
245
+ breaking changes in the future.
246
+
247
+ .. note:: The ``TORCH_LOGS`` environment variable has complete precedence
248
+ over this function, so if it was set, this function does nothing.
249
+
250
+ A component is a set of related features in PyTorch. All of the log
251
+ messages emitted from a given component have their own log levels. If the
252
+ log level of a particular message has priority greater than or equal to its
253
+ component's log level setting, it is emitted. Otherwise, it is suppressed.
254
+ This allows you to, for instance, silence large groups of log messages that
255
+ are not relevant to you and increase verbosity of logs for components that
256
+ are relevant. The expected log level values, ordered from highest to lowest
257
+ priority, are:
258
+
259
+ * ``logging.CRITICAL``
260
+ * ``logging.ERROR``
261
+ * ``logging.WARNING``
262
+ * ``logging.INFO``
263
+ * ``logging.DEBUG``
264
+ * ``logging.NOTSET``
265
+
266
+ See documentation for the Python ``logging`` module for more information on
267
+ log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
268
+
269
+ An artifact is a particular type of log message. Each artifact is assigned
270
+ to a parent component. A component can emit many different kinds of
271
+ artifacts. In general, an artifact is emitted if either its corresponding
272
+ setting in the argument list below is turned on or if its parent component
273
+ is set to a log level less than or equal to the log level of the artifact.
274
+
275
+ Keyword args:
276
+ all (:class:`Optional[int]`):
277
+ The default log level for all components. Default: ``logging.WARN``
278
+
279
+ dynamo (:class:`Optional[int]`):
280
+ The log level for the TorchDynamo component. Default: ``logging.WARN``
281
+
282
+ aot (:class:`Optional[int]`):
283
+ The log level for the AOTAutograd component. Default: ``logging.WARN``
284
+
285
+ autograd (:class:`Optional[int]`):
286
+ The log level for autograd. Default: ``logging.WARN``
287
+
288
+ inductor (:class:`Optional[int]`):
289
+ The log level for the TorchInductor component. Default: ``logging.WARN``
290
+
291
+ dynamic (:class:`Optional[int]`):
292
+ The log level for dynamic shapes. Default: ``logging.WARN``
293
+
294
+ distributed (:class:`Optional[int]`):
295
+ Whether to log c10d communication operations and other debug info from PyTorch Distributed components.
296
+ Default: ``logging.WARN``
297
+
298
+ c10d (:class:`Optional[int]`):
299
+ Whether to log c10d communication operations related debug info in PyTorch Distributed components.
300
+ Default: ``logging.WARN``
301
+
302
+ ddp (:class:`Optional[int]`):
303
+ Whether to log debug info related to ``DistributedDataParallel`` (DDP) from PyTorch Distributed components.
304
+ Default: ``logging.WARN``
305
+
306
+ fsdp (:class:`Optional[int]`):
307
+ Whether to log debug info related to ``FullyShardedDataParallel`` (FSDP) in PyTorch Distributed components.
308
+ Default: ``logging.WARN``
309
+
310
+ dtensor (:class:`Optional[int]`):
311
+ Whether to log debug info related to ``DTensor`` in PyTorch Distributed components.
312
+ Default: ``logging.WARN``
313
+
314
+ onnx (:class:`Optional[int]`):
315
+ The log level for the ONNX exporter component. Default: ``logging.WARN``
316
+
317
+ bytecode (:class:`bool`):
318
+ Whether to emit the original and generated bytecode from TorchDynamo.
319
+ Default: ``False``
320
+
321
+ aot_graphs (:class:`bool`):
322
+ Whether to emit the graphs generated by AOTAutograd. Default: ``False``
323
+
324
+ aot_joint_graph (:class:`bool`):
325
+ Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
326
+
327
+ ddp_graphs (:class:`bool`):
328
+ Whether to emit graphs generated by DDPOptimizer. Default: ``False``
329
+
330
+ graph (:class:`bool`):
331
+ Whether to emit the graph captured by TorchDynamo in tabular format.
332
+ Default: ``False``
333
+
334
+ graph_code (:class:`bool`):
335
+ Whether to emit the python source of the graph captured by TorchDynamo.
336
+ Default: ``False``
337
+
338
+ graph_breaks (:class:`bool`):
339
+ Whether to emit the graph breaks encountered by TorchDynamo.
340
+ Default: ``False``
341
+
342
+ graph_sizes (:class:`bool`):
343
+ Whether to emit tensor sizes of the graph captured by TorchDynamo.
344
+ Default: ``False``
345
+
346
+ guards (:class:`bool`):
347
+ Whether to emit the guards generated by TorchDynamo for each compiled
348
+ function. Default: ``False``
349
+
350
+ recompiles (:class:`bool`):
351
+ Whether to emit a guard failure reason and message every time
352
+ TorchDynamo recompiles a function. Default: ``False``
353
+
354
+ recompiles_verbose (:class:`bool`):
355
+ Whether to emit all guard failure reasons when TorchDynamo recompiles
356
+ a function, even those that are not actually run. Default: ``False``
357
+
358
+ trace_source (:class:`bool`):
359
+ Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
360
+
361
+ trace_call (:class:`bool`):
362
+ Whether to emit detailed line location when TorchDynamo creates an FX node
363
+ corresponding to function call. Python 3.11+ only. Default: ``False``
364
+
365
+ trace_bytecode (:class:`bool`):
366
+ Whether to emit bytecode instructions and traced stack state as TorchDynamo
367
+ traces bytecode. Default: ``False``
368
+
369
+ output_code (:class:`bool`):
370
+ Whether to emit the TorchInductor output code on a per-graph basis. Default: ``False``
371
+
372
+ kernel_code (:class:`bool`):
373
+ Whether to emit the TorchInductor output code on a per-kernel basis. Default: ``False``
374
+
375
+ schedule (:class:`bool`):
376
+ Whether to emit the TorchInductor schedule. Default: ``False``
377
+
378
+ perf_hints (:class:`bool`):
379
+ Whether to emit the TorchInductor perf hints. Default: ``False``
380
+
381
+ post_grad_graphs (:class:`bool`):
382
+ Whether to emit the graphs generated after post grad passes. Default: ``False``
383
+
384
+ onnx_diagnostics (:class:`bool`):
385
+ Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
386
+
387
+ fusion (:class:`bool`):
388
+ Whether to emit detailed Inductor fusion decisions. Default: ``False``
389
+
390
+ overlap (:class:`bool`):
391
+ Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False``
392
+
393
+ sym_node (:class:`bool`):
394
+ Whether to emit debug info for various SymNode operations. Default: ``False``
395
+
396
+ export (:class:`Optional[int]`):
397
+ The log level for export. Default: ``logging.WARN``
398
+
399
+ benchmarking (:class:`bool`):
400
+ Whether to emit detailed Inductor benchmarking information. Default: ``False``
401
+
402
+ modules (dict):
403
+ This argument provides an alternate way to specify the above log
404
+ component and artifact settings, in the format of a keyword args
405
+ dictionary given as a single argument. There are two cases
406
+ where this is useful (1) if a new log component or artifact has
407
+ been registered but a keyword argument for it has not been added
408
+ to this function and (2) if the log level for an unregistered module
409
+ needs to be set. This can be done by providing the fully-qualified module
410
+ name as the key, with the log level as the value. Default: ``None``
411
+
412
+ cudagraph_static_inputs (:class:`bool`):
413
+ Whether to emit debug info for cudagraph static input detection. Default: ``False``
414
+
415
+
416
+ Example::
417
+
418
+ >>> # xdoctest: +SKIP
419
+ >>> import logging
420
+
421
+ # The following changes the "dynamo" component to emit DEBUG-level
422
+ # logs, and to emit "graph_code" artifacts.
423
+
424
+ >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
425
+
426
+ # The following enables the logs for a different module
427
+
428
+ >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
429
+ """
430
+ # ignore if env var is set
431
+ if LOG_ENV_VAR in os.environ:
432
+ log.warning(
433
+ "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
434
+ )
435
+ return
436
+
437
+ log_state.clear()
438
+
439
+ modules = modules or {}
440
+
441
+ def _set_logs(**kwargs):
442
+ for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr]
443
+ if val is None:
444
+ continue
445
+
446
+ if log_registry.is_artifact(alias):
447
+ if not isinstance(val, bool):
448
+ raise ValueError(
449
+ f"Expected bool to enable artifact {alias}, received {val}"
450
+ )
451
+
452
+ if val:
453
+ log_state.enable_artifact(alias)
454
+ elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
455
+ if val not in logging._levelToName:
456
+ raise ValueError(
457
+ f"Unrecognized log level for log {alias}: {val}, valid level values "
458
+ f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
459
+ )
460
+
461
+ log_state.enable_log(
462
+ log_registry.log_alias_to_log_qnames.get(alias, alias), val
463
+ )
464
+ else:
465
+ raise ValueError(
466
+ f"Unrecognized log or artifact name passed to set_logs: {alias}"
467
+ )
468
+
469
+ _init_logs()
470
+
471
+ _set_logs(
472
+ torch=all,
473
+ dynamo=dynamo,
474
+ aot=aot,
475
+ autograd=autograd,
476
+ inductor=inductor,
477
+ dynamic=dynamic,
478
+ bytecode=bytecode,
479
+ aot_graphs=aot_graphs,
480
+ aot_joint_graph=aot_joint_graph,
481
+ ddp_graphs=ddp_graphs,
482
+ distributed=distributed,
483
+ c10d=c10d,
484
+ ddp=ddp,
485
+ fsdp=fsdp,
486
+ dtensor=dtensor,
487
+ graph=graph,
488
+ graph_code=graph_code,
489
+ graph_breaks=graph_breaks,
490
+ graph_sizes=graph_sizes,
491
+ guards=guards,
492
+ recompiles=recompiles,
493
+ recompiles_verbose=recompiles_verbose,
494
+ trace_source=trace_source,
495
+ trace_call=trace_call,
496
+ trace_bytecode=trace_bytecode,
497
+ output_code=output_code,
498
+ kernel_code=kernel_code,
499
+ schedule=schedule,
500
+ perf_hints=perf_hints,
501
+ post_grad_graphs=post_grad_graphs,
502
+ onnx=onnx,
503
+ onnx_diagnostics=onnx_diagnostics,
504
+ fusion=fusion,
505
+ overlap=overlap,
506
+ sym_node=sym_node,
507
+ export=export,
508
+ cudagraphs=cudagraphs,
509
+ compiled_autograd=compiled_autograd,
510
+ compiled_autograd_verbose=compiled_autograd_verbose,
511
+ cudagraph_static_inputs=cudagraph_static_inputs,
512
+ benchmarking=benchmarking,
513
+ )
514
+
515
+
516
+ def get_loggers():
517
+ """
518
+ Returns: a list of all registered loggers
519
+ """
520
+ return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
521
+
522
+
523
+ def register_log(setting_name, log_name):
524
+ """
525
+ Enables a log to be controlled by the env var and user API with the setting_name
526
+ Args:
527
+ setting_name: the shorthand name used in the env var and user API
528
+ log_name: the log name that the setting_name is associated with
529
+ """
530
+ log_registry.register_log(setting_name, log_name)
531
+
532
+
533
+ def register_artifact(
534
+ setting_name, description, visible=False, off_by_default=False, log_format=None
535
+ ):
536
+ """
537
+ Enables an artifact to be controlled by the env var and user API with name
538
+ Args:
539
+ setting_name: the shorthand name used in the env var and user API
540
+ description: A description of what this outputs
541
+ visible: Whether it gets suggested to users by default
542
+ off_by_default: whether this artifact should be logged when the ancestor loggers
543
+ are enabled at level DEBUG
544
+ """
545
+ log_registry.register_artifact_name(
546
+ setting_name, description, visible, off_by_default, log_format
547
+ )
548
+
549
+
550
+ def getArtifactLogger(module_qname, artifact_name):
551
+ if artifact_name not in log_registry.artifact_names:
552
+ raise ValueError(
553
+ f"Artifact name: {repr(artifact_name)} not registered,"
554
+ f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations."
555
+ )
556
+ qname = module_qname + f".__{artifact_name}"
557
+ log = logging.getLogger(qname)
558
+ log.artifact_name = artifact_name # type: ignore[attr-defined]
559
+ log_registry.register_artifact_log(qname)
560
+ configure_artifact_log(log)
561
+ return log
562
+
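
A minimal usage sketch of the two calls above (the artifact name here is hypothetical; real artifacts are registered in torch._logging._registrations):

    from torch._logging._internal import getArtifactLogger, register_artifact

    register_artifact("my_artifact", "Hypothetical artifact, for illustration")
    artifact_log = getArtifactLogger(__name__, "my_artifact")
    # Emitted only when enabled, e.g. TORCH_LOGS="my_artifact"
    artifact_log.debug("something worth tracing")
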
563
+
564
+ INCR_VERBOSITY_CHAR = "+"
565
+ DECR_VERBOSITY_CHAR = "-"
566
+ VERBOSITY_REGEX = (
567
+ "("
568
+ + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
569
+ + "?)"
570
+ )
571
+
572
+
573
+ def configure_artifact_log(log):
574
+ # If the artifact is off by default, then it should only be logged when explicitly
575
+ # enabled; set propagate to False so that this artifact is not propagated
576
+ # to its ancestor logger
577
+ if log_registry.is_off_by_default(log.artifact_name):
578
+ log.propagate = False
579
+
580
+ # enable artifact logging when explicitly enabled
581
+ if log_state.is_artifact_enabled(log.artifact_name):
582
+ log.setLevel(logging.DEBUG)
583
+ log.propagate = True
584
+
585
+
586
+ # match a comma separated list of loggable names (whitespace allowed after commas)
587
+ def _gen_settings_regex():
588
+ return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
589
+
590
+
591
+ def _validate_settings(settings):
592
+ return re.fullmatch(_gen_settings_regex(), settings) is not None
593
+
594
+
595
+ def help_message(verbose=False):
596
+ def pad_to(s, length=30):
597
+ assert len(s) <= length
598
+ return s + " " * (length - len(s))
599
+
600
+ if verbose:
601
+ printed_artifacts = log_registry.artifact_names
602
+ else:
603
+ printed_artifacts = log_registry.visible_artifacts
604
+
605
+ if verbose:
606
+ heading = "All registered names"
607
+ else:
608
+ heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
609
+ lines = (
610
+ ["all"]
611
+ + sorted(log_registry.log_alias_to_log_qnames.keys())
612
+ + sorted(
613
+ [
614
+ f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
615
+ for name in printed_artifacts
616
+ ]
617
+ )
618
+ )
619
+ setting_info = " " + "\n ".join(lines)
620
+ examples = """
621
+ Examples:
622
+ TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
623
+ logging.DEBUG and AOT to logging.INFO
624
+
625
+ TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
626
+ logging.ERROR and TorchInductor to logging.DEBUG
627
+
628
+ TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
629
+
630
+ TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo
631
+ to logging.DEBUG and enable the schedule artifact
632
+
633
+ TORCH_LOGS="+some.random.module,schedule" will set the log level of
634
+ some.random.module to logging.DEBUG and enable the schedule artifact
635
+
636
+ TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format
637
+ string will set the output format
638
+ Valid keys are "levelname", "message", "pathname", "levelno", "lineno",
639
+ "filename" and "name".
640
+
641
+ TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
642
+ well. This is useful when the output is long.
643
+ """ # flake8: noqa: B950
644
+ msg = f"""
645
+ TORCH_LOGS Info
646
+ {examples}
647
+
648
+ {heading}
649
+ {setting_info}
650
+ """
651
+ return msg
652
+
653
+
654
+ def _invalid_settings_err_msg(settings, verbose=False):
655
+ valid_settings = ", ".join(
656
+ ["all"]
657
+ + list(log_registry.log_alias_to_log_qnames.keys())
658
+ + list(log_registry.artifact_names)
659
+ )
660
+ msg = f"""
661
+ Invalid log settings: {settings}, must be a comma separated list of fully
662
+ qualified module names, registered log names or registered artifact names.
663
+ For more info on various settings, try TORCH_LOGS="help"
664
+ Valid settings:
665
+ {valid_settings}
666
+ """
667
+ return msg
668
+
669
+
670
+ @functools.lru_cache
671
+ def _parse_log_settings(settings):
672
+ if settings == "":
673
+ return {}
674
+
675
+ if settings == "help":
676
+ raise ValueError(help_message(verbose=False))
677
+ elif settings == "+help":
678
+ raise ValueError(help_message(verbose=True))
679
+ if not _validate_settings(settings):
680
+ raise ValueError(_invalid_settings_err_msg(settings))
681
+
682
+ settings = re.sub(r"\s+", "", settings)
683
+ log_names = settings.split(",")
684
+
685
+ def get_name_level_pair(name):
686
+ clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
687
+ clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
688
+
689
+ if name[0] == INCR_VERBOSITY_CHAR:
690
+ level = logging.DEBUG
691
+ elif name[0] == DECR_VERBOSITY_CHAR:
692
+ level = logging.ERROR
693
+ else:
694
+ level = logging.INFO
695
+
696
+ return clean_name, level
697
+
698
+ log_state = LogState()
699
+
700
+ for name in log_names:
701
+ name, level = get_name_level_pair(name)
702
+
703
+ if name == "all":
704
+ name = "torch"
705
+
706
+ if log_registry.is_log(name):
707
+ assert level is not None
708
+ log_qnames = log_registry.log_alias_to_log_qnames[name]
709
+ log_state.enable_log(log_qnames, level)
710
+ elif log_registry.is_artifact(name):
711
+ log_state.enable_artifact(name)
712
+ elif _is_valid_module(name):
713
+ if not _has_registered_parent(name):
714
+ log_registry.register_log(name, name)
715
+ else:
716
+ log_registry.register_child_log(name)
717
+ log_state.enable_log(name, level)
718
+ else:
719
+ raise ValueError(_invalid_settings_err_msg(settings))
720
+
721
+ return log_state
722
+
723
+
724
+ def _is_valid_module(qname):
725
+ try:
726
+ __import__(qname)
727
+ return True
728
+ except ImportError:
729
+ return False
730
+
731
+
732
+ def _update_log_state_from_env():
733
+ global log_state
734
+ log_setting = os.environ.get(LOG_ENV_VAR, None)
735
+ if log_setting is not None:
736
+ log_state = _parse_log_settings(log_setting)
737
+
738
+
739
+ def _has_registered_parent(log_qname):
740
+ cur_log = logging.getLogger(log_qname)
741
+
742
+ registered_log_qnames = log_registry.get_log_qnames()
743
+
744
+ while cur_log.parent:
745
+ if cur_log.name in registered_log_qnames:
746
+ return True
747
+ cur_log = cur_log.parent
748
+
749
+ return False
750
+
751
+
752
+ def make_module_path_relative(abs_path):
753
+ """
754
+ Given an absolute filepath corresponding to a Python module which was
755
+ loaded via normal import mechanisms using sys.path, convert it into
756
+ a relative path relative to one of the Python search paths.
757
+ """
758
+
759
+ abs_path = pathlib.Path(abs_path).resolve()
760
+
761
+ for path in sys.path:
762
+ try:
763
+ rel_path = abs_path.relative_to(path)
764
+ except ValueError:
765
+ continue
766
+ else:
767
+ return str(rel_path)
768
+
769
+ return str(abs_path)
770
+
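
For example (assuming site-packages is on sys.path):

    # make_module_path_relative("/usr/lib/python3.10/site-packages/torch/_dynamo/utils.py")
    #   -> "torch/_dynamo/utils.py"
    # A path outside every sys.path entry is returned unchanged (absolute).
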
771
+
772
+ # apply custom formats to artifacts when necessary
773
+ class TorchLogsFormatter(logging.Formatter):
774
+ def __init__(self, *, trace: bool = False):
775
+ super().__init__()
776
+ self._is_trace = trace
777
+
778
+ def format(self, record):
779
+ artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None)
780
+ if artifact_name is not None:
781
+ artifact_formatter = log_registry.artifact_log_formatters.get(
782
+ artifact_name, None
783
+ )
784
+ if artifact_formatter is not None:
785
+ return artifact_formatter.format(record)
786
+
787
+ record.message = record.getMessage()
788
+ record.asctime = self.formatTime(record, "%m%d %H:%M:%S")
789
+
790
+ # exception handling - copied from logging.Formatter.format
791
+ s = record.message
792
+ if record.exc_info:
793
+ # Cache the traceback text to avoid converting it multiple times
794
+ # (it's constant anyway)
795
+ if not record.exc_text:
796
+ record.exc_text = self.formatException(record.exc_info)
797
+ if record.exc_text:
798
+ if s[-1:] != "\n":
799
+ s = s + "\n"
800
+ s = s + record.exc_text
801
+ if record.stack_info:
802
+ if s[-1:] != "\n":
803
+ s = s + "\n"
804
+ s = s + self.formatStack(record.stack_info)
805
+
806
+ record.rankprefix = ""
807
+ if not self._is_trace and dist.is_available() and dist.is_initialized():
808
+ record.rankprefix = f"[rank{dist.get_rank()}]:"
809
+
810
+ record.traceid = ""
811
+ if (
812
+ not self._is_trace
813
+ and (trace_id := torch._guards.CompileContext.current_trace_id())
814
+ is not None
815
+ ):
816
+ record.traceid = f" [{trace_id}]"
817
+
818
+ glog_level_to_abbr = {
819
+ "DEBUG": "V", # V is for VERBOSE in glog
820
+ "INFO": "I",
821
+ "WARNING": "W",
822
+ "ERROR": "E",
823
+ "CRITICAL": "C",
824
+ }
825
+
826
+ shortlevel = glog_level_to_abbr.get(record.levelname, record.levelname)
827
+
828
+ record.artifactprefix = ""
829
+ if artifact_name is not None:
830
+ record.artifactprefix = f" [__{artifact_name}]"
831
+
832
+ filepath = make_module_path_relative(record.pathname)
833
+
834
+ prefix = (
835
+ f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.process} "
836
+ f"{filepath}:"
837
+ f"{record.lineno}]{record.traceid}{record.artifactprefix}"
838
+ )
839
+ if self._is_trace:
840
+ assert s == ""
841
+ try:
842
+ r = f"{prefix} {json.dumps(record.metadata)}"
843
+ except TypeError:
844
+ log.warning("failing metadata: %r", record.metadata)
845
+ raise
846
+ if record.payload is not None:
847
+ r += "".join(f"\n\t{l}" for l in record.payload.split("\n"))
848
+ return r
849
+ else:
850
+ lines = s.split("\n")
851
+ return "\n".join(f"{prefix} {l}" for l in lines)
852
+
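
For reference, a non-trace line produced by this formatter looks roughly like (values illustrative): `[rank0]:I1024 12:00:01.000123 4021 torch/_dynamo/convert_frame.py:123] [0/0] message`.
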
853
+
854
+ def _default_formatter():
855
+ fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None)
856
+ if fmt is None:
857
+ return TorchLogsFormatter()
858
+ else:
859
+ if fmt in ("short", "basic"):
860
+ fmt = logging.BASIC_FORMAT
861
+ return logging.Formatter(fmt)
862
+
863
+
864
+ DEFAULT_FORMATTER = _default_formatter()
865
+
866
+
867
+ def _setup_handlers(create_handler_fn, log):
868
+ debug_handler = _track_handler(create_handler_fn())
869
+ debug_handler.setFormatter(DEFAULT_FORMATTER)
870
+ debug_handler.setLevel(logging.DEBUG)
871
+ log.addHandler(debug_handler)
872
+
873
+
874
+ handlers = WeakSet() # type: ignore[var-annotated]
875
+
876
+
877
+ # mark handlers that we've created
878
+ # so we don't modify user handlers
879
+ def _track_handler(handler):
880
+ handlers.add(handler)
881
+ return handler
882
+
883
+
884
+ def _is_torch_handler(handler):
885
+ return handler in handlers
886
+
887
+
888
+ # clears all torch handlers on specified loggers
889
+ def _clear_handlers(log):
890
+ to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)]
891
+ for handler in to_remove:
892
+ log.removeHandler(handler)
893
+
894
+
895
+ def _reset_logs():
896
+ # reset all registered logs
897
+ for log_qname in log_registry.get_log_qnames():
898
+ log = logging.getLogger(log_qname)
899
+ log.setLevel(logging.WARNING)
900
+ log.propagate = False
901
+ _clear_handlers(log)
902
+
903
+ # reset all artifact and child logs
904
+ for artifact_log_qname in itertools.chain(
905
+ log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames()
906
+ ):
907
+ log = logging.getLogger(artifact_log_qname)
908
+ log.setLevel(logging.NOTSET)
909
+ log.propagate = True
910
+
911
+ trace_log.propagate = False
912
+ _clear_handlers(trace_log)
913
+
914
+
915
+ def _get_log_state():
916
+ return log_state
917
+
918
+
919
+ def _set_log_state(state):
920
+ global log_state
921
+ log_state = state
922
+
923
+
924
+ def _init_logs(log_file_name=None):
925
+ _reset_logs()
926
+ _update_log_state_from_env()
927
+
928
+ out = os.environ.get(LOG_OUT_ENV_VAR, None)
929
+ if out is not None:
930
+ log_file_name = out
931
+
932
+ # First, reset all known (registered) loggers to NOTSET, so that they
933
+ # respect their parent log level
934
+ for log_qname in log_registry.get_log_qnames():
935
+ # But not the top level torch level: this defaults to WARNING so
936
+ # that our log messages don't leak to the lower levels
937
+ if log_qname == "torch":
938
+ continue
939
+ log = logging.getLogger(log_qname)
940
+ log.setLevel(logging.NOTSET)
941
+
942
+ # Now, for all loggers which the user requested to have non-standard
943
+ # logging behavior, modify their log levels
944
+ for log_qname, level in log_state.get_log_level_pairs():
945
+ log = logging.getLogger(log_qname)
946
+ log.setLevel(level)
947
+
948
+ # Finally, setup handlers for all registered loggers
949
+ for log_qname in log_registry.get_log_qnames():
950
+ log = logging.getLogger(log_qname)
951
+ _setup_handlers(
952
+ logging.StreamHandler,
953
+ log,
954
+ )
955
+
956
+ if log_file_name is not None:
957
+ _setup_handlers(
958
+ lambda: logging.FileHandler(log_file_name),
959
+ log,
960
+ )
961
+
962
+ # configure artifact loggers, note: this must happen last
963
+ # since the levels of ancestor loggers are taken into account
964
+ for artifact_log_qname in log_registry.get_artifact_log_qnames():
965
+ log = logging.getLogger(artifact_log_qname)
966
+ configure_artifact_log(log)
967
+
968
+ # Setup handler for the special trace_log, with different default
969
+ # configuration
970
+ trace_dir_name = os.environ.get(TRACE_ENV_VAR, None)
971
+ # This handler may remove itself if trace_dir_name is None and we are not
972
+ # actually in an FB environment. This allows us to defer actually
973
+ # initializing it until we actually need to log anything. This is
974
+ # important because JK initializes a C++ singleton, which will pork our
975
+ # process if we subsequently fork.
976
+ handler = LazyTraceHandler(trace_dir_name)
977
+ # This log is ALWAYS at debug level. We will additionally test if there
978
+ # are any handlers before deciding to actually call logging on this. Do
979
+ # not manually call
980
+ trace_log.setLevel(logging.DEBUG)
981
+ trace_log_handler = _track_handler(handler)
982
+ trace_log_handler.setFormatter(TorchLogsFormatter(trace=True))
983
+ trace_log.addHandler(trace_log_handler)
984
+
985
+
986
+ class LazyTraceHandler(logging.StreamHandler):
987
+ """Like FileHandler, but the file is allocated lazily only upon the first log message"""
988
+
989
+ def __init__(self, root_dir: Optional[str]):
990
+ # This is implemented in the same way that delay is implemented on
991
+ # FileHandler
992
+ self.root_dir = root_dir
993
+ logging.Handler.__init__(self)
994
+ self.stream = None
995
+ self._builtin_open = open
996
+
997
+ # cloned from FileHandler in cpython
998
+ def close(self):
999
+ self.acquire()
1000
+ try:
1001
+ try:
1002
+ if self.stream:
1003
+ try:
1004
+ self.flush()
1005
+ finally:
1006
+ stream = self.stream
1007
+ self.stream = None
1008
+ if hasattr(stream, "close"):
1009
+ stream.close()
1010
+ finally:
1011
+ # Issue #19523: call unconditionally to
1012
+ # prevent a handler leak when delay is set
1013
+ # Also see Issue #42378: we also rely on
1014
+ # self._closed being set to True there
1015
+ logging.StreamHandler.close(self)
1016
+ finally:
1017
+ self.release()
1018
+
1019
+ def emit(self, record):
1020
+ if self.stream is None:
1021
+ ok = False
1022
+ if self.root_dir is None:
1023
+ TRACE_LOG_DIR = "/logs"
1024
+ open_func = self._builtin_open
1025
+
1026
+ import torch.version as torch_version
1027
+
1028
+ if (
1029
+ hasattr(torch_version, "git_version")
1030
+ and os.getenv("MAST_HPC_JOB_NAME") is None
1031
+ ):
1032
+ log.info(
1033
+ "LazyTraceHandler: disabled because not fbcode or conda on mast"
1034
+ )
1035
+ elif not torch._utils_internal.justknobs_check("pytorch/trace:enable"):
1036
+ log.info(
1037
+ "LazyTraceHandler: disabled because justknobs_check('pytorch/trace:enable') returned False"
1038
+ )
1039
+ elif not os.path.exists(TRACE_LOG_DIR):
1040
+ log.info(
1041
+ "LazyTraceHandler: disabled because %s does not exist",
1042
+ TRACE_LOG_DIR,
1043
+ )
1044
+ elif not os.access(TRACE_LOG_DIR, os.W_OK):
1045
+ log.info(
1046
+ "LazyTraceHandler: disabled because %s is not writeable",
1047
+ TRACE_LOG_DIR,
1048
+ )
1049
+ else:
1050
+ self.root_dir = TRACE_LOG_DIR
1051
+
1052
+ if self.root_dir is not None:
1053
+ os.makedirs(self.root_dir, exist_ok=True)
1054
+ ranksuffix = ""
1055
+ if dist.is_available() and dist.is_initialized():
1056
+ ranksuffix = f"rank_{dist.get_rank()}_"
1057
+ self.stream = tempfile.NamedTemporaryFile(
1058
+ mode="w+",
1059
+ suffix=".log",
1060
+ prefix=f"dedicated_log_torch_trace_{ranksuffix}",
1061
+ dir=self.root_dir,
1062
+ delete=False,
1063
+ )
1064
+ log.info("LazyTraceHandler: logging to %s", self.stream.name)
1065
+ else:
1066
+ # We go poof, remove and no-op
1067
+ trace_log.removeHandler(self)
1068
+ return
1069
+ if self.stream:
1070
+ super().emit(record)
1071
+
1072
+
1073
+ @functools.lru_cache(None)
1074
+ def warning_once(logger_obj, *args, **kwargs):
1075
+ """
1076
+ This function is similar to `logger.warning()`, but will emit the warning with the same message only once
1077
+ Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
1078
+ The assumption here is that all warning messages are unique across the code. If they aren't, we need to switch to
1079
+ another type of cache that includes the caller frame information in the hashing function.
1080
+ """
1081
+ logger_obj.warning(*args, **kwargs)
1082
+
1083
+
1084
+ class LazyString:
1085
+ def __init__(self, func, *args, **kwargs):
1086
+ self.func = func
1087
+ self.args = args
1088
+ self.kwargs = kwargs
1089
+
1090
+ def __str__(self):
1091
+ return self.func(*self.args, **self.kwargs)
1092
+
1093
+
1094
+ def trace_structured(
1095
+ name: str,
1096
+ # NB: metadata expected to be dict so adding more info is forward compatible
1097
+ # Tuple[str, int] is a special case for string interning
1098
+ metadata_fn: Callable[[], Union[Dict[str, Any], Tuple[str, int]]] = dict,
1099
+ *,
1100
+ payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
1101
+ suppress_context: bool = False,
1102
+ expect_trace_id: bool = True, # Whether or not we expect to have a current trace id
1103
+ ):
1104
+ """
1105
+ metadata is an arbitrary JSON compatible struct, but it's expected to not be
1106
+ too long (e.g., less than 1MB)
1107
+
1108
+ payload is an arbitrary string, which can be arbitrarily long (but expected to have
1109
+ newlines so no lines are too long)
1110
+ """
1111
+ assert "name" not in ["rank", "frame_id", "frame_compile_id", "attempt"]
1112
+ assert callable(
1113
+ metadata_fn
1114
+ ), f"metadata_fn should be callable, but got {type(metadata_fn)}"
1115
+ assert callable(
1116
+ payload_fn
1117
+ ), f"payload_fn should be callable, but got {type(payload_fn)}"
1118
+ # trace_log never propagates and is ALWAYS DEBUG, so also check that there
1119
+ # are handlers instead of checking the log level
1120
+ if trace_log.handlers:
1121
+ record: Dict[str, object] = {}
1122
+ record[name] = metadata_fn()
1123
+ if not suppress_context:
1124
+ # TODO: Actually, the rank probably should just be emitted once at
1125
+ # the top, and not repeatedly spammed in all the logs, since it
1126
+ # never changes and we assume no interleaving
1127
+ if dist.is_available() and dist.is_initialized():
1128
+ record["rank"] = dist.get_rank()
1129
+ if (
1130
+ trace_id := torch._guards.CompileContext.current_trace_id()
1131
+ ) is not None:
1132
+ record["frame_id"] = trace_id.compile_id.frame_id
1133
+ record["frame_compile_id"] = trace_id.compile_id.frame_compile_id
1134
+ record["attempt"] = trace_id.attempt
1135
+ else:
1136
+ if expect_trace_id:
1137
+ # Record the stack of the log call to better diagnose why we
1138
+ # don't have a frame id for it
1139
+ record["stack"] = torch._logging.structured.from_traceback(
1140
+ CapturedTraceback.extract(skip=1).summary()
1141
+ )
1142
+ payload = payload_fn()
1143
+ if payload is not None:
1144
+ if not isinstance(payload, str):
1145
+ if isinstance(payload, list):
1146
+ # special case to look better
1147
+ payload = "[\n" + ",\n".join(json.dumps(i) for i in payload) + "\n]"
1148
+ else:
1149
+ # force newlines so we are unlikely to overflow line limit
1150
+ payload = json.dumps(payload, indent=0)
1151
+ h = hashlib.md5()
1152
+ h.update(payload.encode("utf-8"))
1153
+ record["has_payload"] = h.hexdigest()
1154
+ trace_log.debug(
1155
+ "", extra={"metadata": record, "payload": payload}, stacklevel=2
1156
+ )
1157
+ log_trace_structured_event(name, record)
1158
+
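
A hedged usage sketch (record type and metadata keys are illustrative):

    trace_structured(
        "artifact",
        metadata_fn=lambda: {"name": "example_artifact", "encoding": "string"},
        payload_fn=lambda: "payload line 1\npayload line 2",
    )
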
1159
+
1160
+ import torch._guards
1161
+ import torch._utils_internal
1162
+ import torch.distributed as dist
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/_registrations.py ADDED
@@ -0,0 +1,192 @@
1
+ # flake8: noqa: B950
2
+ from ._internal import register_artifact, register_log
3
+
4
+
5
+ DYNAMIC = [
6
+ "torch.fx.experimental.symbolic_shapes",
7
+ "torch.fx.experimental.sym_node",
8
+ "torch.fx.experimental.recording",
9
+ ]
10
+ DISTRIBUTED = [
11
+ "torch.distributed",
12
+ "torch._dynamo.backends.distributed",
13
+ "torch.nn.parallel.distributed",
14
+ ]
15
+
16
+ register_log("dynamo", ["torch._dynamo", *DYNAMIC])
17
+ register_log("fake_tensor", ["torch._subclasses.fake_tensor"])
18
+ register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"])
19
+ register_log("autograd", "torch.autograd")
20
+ register_log("inductor", ["torch._inductor", "torch._inductor.cudagraph_trees"])
21
+
22
+ register_artifact(
23
+ "cudagraphs",
24
+ "Logs information from wrapping inductor generated code with cudagraphs.",
25
+ )
26
+
27
+ register_log("dynamic", DYNAMIC)
28
+ register_log("torch", "torch")
29
+ register_log("distributed", DISTRIBUTED)
30
+ register_log(
31
+ "c10d", ["torch.distributed.distributed_c10d", "torch.distributed.rendezvous"]
32
+ )
33
+ register_log(
34
+ "ddp", ["torch.nn.parallel.distributed", "torch._dynamo.backends.distributed"]
35
+ )
36
+ register_log("pp", ["torch.distributed.pipelining"])
37
+ register_log("fsdp", ["torch.distributed.fsdp", "torch.distributed._composable.fsdp"])
38
+ register_log("dtensor", ["torch.distributed._tensor", "torch.distributed.tensor"])
39
+ register_log("onnx", "torch.onnx")
40
+ register_log(
41
+ "export",
42
+ [
43
+ "torch._dynamo",
44
+ "torch.export",
45
+ "torch.export.dynamic_shapes",
46
+ *DYNAMIC,
47
+ "torch._export.converter",
48
+ "torch._export.non_strict_utils",
49
+ ],
50
+ )
51
+
52
+ register_artifact(
53
+ "guards",
54
+ "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
55
+ visible=True,
56
+ )
57
+ register_artifact("verbose_guards", "", off_by_default=True)
58
+ register_artifact(
59
+ "bytecode",
60
+ "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
61
+ off_by_default=True,
62
+ )
63
+ register_artifact(
64
+ "graph",
65
+ "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ",
66
+ )
67
+ register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
68
+ register_artifact(
69
+ "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
70
+ )
71
+ register_artifact(
72
+ "trace_source",
73
+ "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
74
+ )
75
+ register_artifact(
76
+ "trace_call",
77
+ "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
78
+ )
79
+ register_artifact(
80
+ "trace_bytecode",
81
+ "As we trace bytecode, prints the instruction and the current stack.",
82
+ )
83
+ register_artifact(
84
+ "aot_graphs",
85
+ "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
86
+ visible=True,
87
+ )
88
+ register_artifact(
89
+ "aot_joint_graph",
90
+ "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
91
+ )
92
+ register_artifact(
93
+ "aot_graphs_effects",
94
+ "Prints the FX forward and backward graph generated by AOTDispatch, useful for debugging effects processing.",
95
+ visible=True,
96
+ )
97
+ register_artifact(
98
+ "post_grad_graphs",
99
+ "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes",
100
+ )
101
+ register_artifact(
102
+ "compiled_autograd",
103
+ "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
104
+ visible=True,
105
+ )
106
+ register_artifact(
107
+ "compiled_autograd_verbose",
108
+ "Will affect performance. Prints compiled_autograd logs with C++ info e.g. autograd node -> fx node mapping",
109
+ off_by_default=True,
110
+ )
111
+ register_artifact(
112
+ "ddp_graphs",
113
+ "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
114
+ )
115
+ register_artifact(
116
+ "recompiles",
117
+ "Prints the reason why we recompiled a graph. Very, very useful.",
118
+ visible=True,
119
+ )
120
+ register_artifact(
121
+ "recompiles_verbose",
122
+ "Prints all guard checks that fail during a recompilation. "
123
+ "At runtime, Dynamo will stop at the first failed check for each failing guard. "
124
+ "So not all logged failing checks are actually ran by Dynamo.",
125
+ visible=True,
126
+ off_by_default=True,
127
+ )
128
+ register_artifact(
129
+ "graph_breaks",
130
+ "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
131
+ visible=True,
132
+ )
133
+ register_artifact(
134
+ "not_implemented",
135
+ "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
136
+ )
137
+ register_artifact(
138
+ "output_code",
139
+ "Prints the code that Inductor generates (either Triton or C++)",
140
+ off_by_default=True,
141
+ visible=True,
142
+ )
143
+ register_artifact(
144
+ "kernel_code",
145
+ "Prints the code that Inductor generates (on a per-kernel basis)",
146
+ off_by_default=True,
147
+ visible=True,
148
+ )
149
+ register_artifact(
150
+ "schedule",
151
+ "Inductor scheduler information. Useful if working on Inductor fusion algo",
152
+ off_by_default=True,
153
+ )
154
+ register_artifact("perf_hints", "", off_by_default=True)
155
+ register_artifact("onnx_diagnostics", "", off_by_default=True)
156
+ register_artifact(
157
+ "fusion",
158
+ "Detailed Inductor fusion decisions. More detailed than 'schedule'",
159
+ off_by_default=True,
160
+ )
161
+ register_artifact(
162
+ "loop_ordering",
163
+ "Logs related to loop ordering",
164
+ off_by_default=True,
165
+ )
166
+ register_artifact(
167
+ "overlap",
168
+ "Detailed Inductor compute/comm overlap decisions",
169
+ off_by_default=True,
170
+ )
171
+ register_artifact(
172
+ "sym_node",
173
+ "Logs extra info for various SymNode operations",
174
+ off_by_default=True,
175
+ )
176
+ register_artifact(
177
+ "trace_shape_events",
178
+ "Logs traces for every ShapeEnv operation that we record for replay",
179
+ off_by_default=True,
180
+ )
181
+ register_artifact(
182
+ "cudagraph_static_inputs",
183
+ "Logs static inputs handling in dynamo, AOT, and cudagraphs",
184
+ off_by_default=True,
185
+ )
186
+ register_artifact(
187
+ "benchmarking",
188
+ "Detailed Inductor benchmarking information.",
189
+ off_by_default=True,
190
+ )
191
+
192
+ register_artifact("custom_format_test_artifact", "Testing only", log_format="")
infer_4_47_1/lib/python3.10/site-packages/torch/_logging/structured.py ADDED
@@ -0,0 +1,57 @@
1
+ """
2
+ Utilities for converting data types into structured JSON for dumping.
3
+ """
4
+
5
+ import traceback
6
+ from typing import Any, Dict, List, Sequence, Set
7
+
8
+ import torch._logging._internal
9
+
10
+
11
+ INTERN_TABLE: Dict[str, int] = {}
12
+
13
+
14
+ DUMPED_FILES: Set[str] = set()
15
+
16
+
17
+ def intern_string(s: str) -> int:
18
+ r = INTERN_TABLE.get(s, None)
19
+ if r is None:
20
+ r = len(INTERN_TABLE)
21
+ INTERN_TABLE[s] = r
22
+ torch._logging._internal.trace_structured(
23
+ "str", lambda: (s, r), suppress_context=True
24
+ )
25
+ return r
26
+
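
The interning behavior, by example:

    intern_string("torch/_dynamo/utils.py")  # -> 0; also emits a "str" trace record (s, 0)
    intern_string("torch/_dynamo/utils.py")  # -> 0 (already interned, no new record)
    intern_string("torch/_dynamo/convert_frame.py")  # -> 1
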
27
+
28
+ def dump_file(filename: str) -> None:
29
+ if "eval_with_key" not in filename:
30
+ return
31
+ if filename in DUMPED_FILES:
32
+ return
33
+ DUMPED_FILES.add(filename)
34
+ from torch.fx.graph_module import _loader
35
+
36
+ torch._logging._internal.trace_structured(
37
+ "dump_file",
38
+ metadata_fn=lambda: {
39
+ "name": filename,
40
+ },
41
+ payload_fn=lambda: _loader.get_source(filename),
42
+ )
43
+
44
+
45
+ def from_traceback(tb: Sequence[traceback.FrameSummary]) -> List[Dict[str, Any]]:
46
+ r = []
47
+ for frame in tb:
48
+ # dict naming convention here coincides with
49
+ # python/combined_traceback.cpp
50
+ r.append(
51
+ {
52
+ "line": frame.lineno,
53
+ "name": frame.name,
54
+ "filename": intern_string(frame.filename),
55
+ }
56
+ )
57
+ return r
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (773 Bytes)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.77 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc ADDED
Binary file (11.1 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes.cpython-310.pyc ADDED
Binary file (10.2 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes_impl.cpython-310.pyc ADDED
Binary file (4.54 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc ADDED
Binary file (1.65 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc ADDED
Binary file (42.7 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc ADDED
Binary file (504 Bytes)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ndarray.cpython-310.pyc ADDED
Binary file (16.3 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc ADDED
Binary file (6.68 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc ADDED
Binary file (7.97 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ufuncs.cpython-310.pyc ADDED
Binary file (6.19 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.51 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc ADDED
Binary file (7.35 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/fft.cpython-310.pyc ADDED
Binary file (3.29 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc ADDED
Binary file (5.59 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc ADDED
Binary file (4.35 kB)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py ADDED
@@ -0,0 +1,85 @@
1
+ # mypy: ignore-errors
2
+
3
+ """Export torch work functions for binary ufuncs, rename/tweak to match numpy.
4
+ This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module.
5
+ """
6
+
7
+ import torch
8
+ from torch import ( # noqa: F401
9
+ add,
10
+ arctan2,
11
+ bitwise_and,
12
+ bitwise_left_shift as left_shift,
13
+ bitwise_or,
14
+ bitwise_right_shift as right_shift,
15
+ bitwise_xor,
16
+ copysign,
17
+ divide,
18
+ eq as equal,
19
+ float_power,
20
+ floor_divide,
21
+ fmax,
22
+ fmin,
23
+ fmod,
24
+ gcd,
25
+ greater,
26
+ greater_equal,
27
+ heaviside,
28
+ hypot,
29
+ lcm,
30
+ ldexp,
31
+ less,
32
+ less_equal,
33
+ logaddexp,
34
+ logaddexp2,
35
+ logical_and,
36
+ logical_or,
37
+ logical_xor,
38
+ maximum,
39
+ minimum,
40
+ multiply,
41
+ nextafter,
42
+ not_equal,
43
+ pow as power,
44
+ remainder,
45
+ remainder as mod,
46
+ subtract,
47
+ true_divide,
48
+ )
49
+
50
+ from . import _dtypes_impl, _util
51
+
52
+
53
+ # work around torch limitations w.r.t. numpy
54
+ def matmul(x, y):
55
+ # work around:
56
+ # - RuntimeError: expected scalar type Int but found Double
57
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool'
58
+ # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
59
+ dtype = _dtypes_impl.result_type_impl(x, y)
60
+ is_bool = dtype == torch.bool
61
+ is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and (
62
+ x.is_cpu or y.is_cpu
63
+ )
64
+
65
+ work_dtype = dtype
66
+ if is_bool:
67
+ work_dtype = torch.uint8
68
+ if is_half:
69
+ work_dtype = torch.float32
70
+
71
+ x = _util.cast_if_needed(x, work_dtype)
72
+ y = _util.cast_if_needed(y, work_dtype)
73
+
74
+ result = torch.matmul(x, y)
75
+
76
+ if work_dtype != dtype:
77
+ result = result.to(dtype)
78
+
79
+ return result
80
+
81
+
82
+ # a stub implementation of divmod, should be improved after
83
+ # https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch
84
+ def divmod(x, y):
85
+ return x // y, x % y
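The matmul wrapper above exists because torch.matmul lacks bool kernels (and CPU half kernels), so it routes the computation through a wider work dtype and casts back. Illustrated with plain torch calls, which is all the wrapper does under the hood:

import torch

a = torch.tensor([[True, False], [True, True]])
b = torch.tensor([[True, True], [False, True]])

# Calling torch.matmul(a, b) directly raises:
#   RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool'
out = torch.matmul(a.to(torch.uint8), b.to(torch.uint8)).to(torch.bool)
print(out)  # tensor([[True, True], [True, True]])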
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py ADDED
@@ -0,0 +1,1368 @@
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+
5
+
6
+ # These two dicts are autogenerated with autogen/gen_dtypes.py,
7
+ # using numpy version 1.24.3.
8
+
9
+ _can_cast_dict = {
10
+ "no": {
11
+ torch.float16: {
12
+ torch.float16: True,
13
+ torch.float32: False,
14
+ torch.float64: False,
15
+ torch.complex64: False,
16
+ torch.complex128: False,
17
+ torch.uint8: False,
18
+ torch.uint16: False,
19
+ torch.uint32: False,
20
+ torch.uint64: False,
21
+ torch.int8: False,
22
+ torch.int16: False,
23
+ torch.int32: False,
24
+ torch.int64: False,
25
+ torch.bool: False,
26
+ },
27
+ torch.float32: {
28
+ torch.float16: False,
29
+ torch.float32: True,
30
+ torch.float64: False,
31
+ torch.complex64: False,
32
+ torch.complex128: False,
33
+ torch.uint8: False,
34
+ torch.uint16: False,
35
+ torch.uint32: False,
36
+ torch.uint64: False,
37
+ torch.int8: False,
38
+ torch.int16: False,
39
+ torch.int32: False,
40
+ torch.int64: False,
41
+ torch.bool: False,
42
+ },
43
+ torch.float64: {
44
+ torch.float16: False,
45
+ torch.float32: False,
46
+ torch.float64: True,
47
+ torch.complex64: False,
48
+ torch.complex128: False,
49
+ torch.uint8: False,
50
+ torch.uint16: False,
51
+ torch.uint32: False,
52
+ torch.uint64: False,
53
+ torch.int8: False,
54
+ torch.int16: False,
55
+ torch.int32: False,
56
+ torch.int64: False,
57
+ torch.bool: False,
58
+ },
59
+ torch.complex64: {
60
+ torch.float16: False,
61
+ torch.float32: False,
62
+ torch.float64: False,
63
+ torch.complex64: True,
64
+ torch.complex128: False,
65
+ torch.uint8: False,
66
+ torch.uint16: False,
67
+ torch.uint32: False,
68
+ torch.uint64: False,
69
+ torch.int8: False,
70
+ torch.int16: False,
71
+ torch.int32: False,
72
+ torch.int64: False,
73
+ torch.bool: False,
74
+ },
75
+ torch.complex128: {
76
+ torch.float16: False,
77
+ torch.float32: False,
78
+ torch.float64: False,
79
+ torch.complex64: False,
80
+ torch.complex128: True,
81
+ torch.uint8: False,
82
+ torch.uint16: False,
83
+ torch.uint32: False,
84
+ torch.uint64: False,
85
+ torch.int8: False,
86
+ torch.int16: False,
87
+ torch.int32: False,
88
+ torch.int64: False,
89
+ torch.bool: False,
90
+ },
91
+ torch.uint8: {
92
+ torch.float16: False,
93
+ torch.float32: False,
94
+ torch.float64: False,
95
+ torch.complex64: False,
96
+ torch.complex128: False,
97
+ torch.uint8: True,
98
+ torch.uint16: False,
99
+ torch.uint32: False,
100
+ torch.uint64: False,
101
+ torch.int8: False,
102
+ torch.int16: False,
103
+ torch.int32: False,
104
+ torch.int64: False,
105
+ torch.bool: False,
106
+ },
107
+ torch.uint16: {
108
+ torch.float16: False,
109
+ torch.float32: False,
110
+ torch.float64: False,
111
+ torch.complex64: False,
112
+ torch.complex128: False,
113
+ torch.uint8: False,
114
+ torch.uint16: True,
115
+ torch.uint32: False,
116
+ torch.uint64: False,
117
+ torch.int8: False,
118
+ torch.int16: False,
119
+ torch.int32: False,
120
+ torch.int64: False,
121
+ torch.bool: False,
122
+ },
123
+ torch.uint32: {
124
+ torch.float16: False,
125
+ torch.float32: False,
126
+ torch.float64: False,
127
+ torch.complex64: False,
128
+ torch.complex128: False,
129
+ torch.uint8: False,
130
+ torch.uint16: False,
131
+ torch.uint32: True,
132
+ torch.uint64: False,
133
+ torch.int8: False,
134
+ torch.int16: False,
135
+ torch.int32: False,
136
+ torch.int64: False,
137
+ torch.bool: False,
138
+ },
139
+ torch.uint64: {
140
+ torch.float16: False,
141
+ torch.float32: False,
142
+ torch.float64: False,
143
+ torch.complex64: False,
144
+ torch.complex128: False,
145
+ torch.uint8: False,
146
+ torch.uint16: False,
147
+ torch.uint32: False,
148
+ torch.uint64: True,
149
+ torch.int8: False,
150
+ torch.int16: False,
151
+ torch.int32: False,
152
+ torch.int64: False,
153
+ torch.bool: False,
154
+ },
155
+ torch.int8: {
156
+ torch.float16: False,
157
+ torch.float32: False,
158
+ torch.float64: False,
159
+ torch.complex64: False,
160
+ torch.complex128: False,
161
+ torch.uint8: False,
162
+ torch.uint16: False,
163
+ torch.uint32: False,
164
+ torch.uint64: False,
165
+ torch.int8: True,
166
+ torch.int16: False,
167
+ torch.int32: False,
168
+ torch.int64: False,
169
+ torch.bool: False,
170
+ },
171
+ torch.int16: {
172
+ torch.float16: False,
173
+ torch.float32: False,
174
+ torch.float64: False,
175
+ torch.complex64: False,
176
+ torch.complex128: False,
177
+ torch.uint8: False,
178
+ torch.uint16: False,
179
+ torch.uint32: False,
180
+ torch.uint64: False,
181
+ torch.int8: False,
182
+ torch.int16: True,
183
+ torch.int32: False,
184
+ torch.int64: False,
185
+ torch.bool: False,
186
+ },
187
+ torch.int32: {
188
+ torch.float16: False,
189
+ torch.float32: False,
190
+ torch.float64: False,
191
+ torch.complex64: False,
192
+ torch.complex128: False,
193
+ torch.uint8: False,
194
+ torch.uint16: False,
195
+ torch.uint32: False,
196
+ torch.uint64: False,
197
+ torch.int8: False,
198
+ torch.int16: False,
199
+ torch.int32: True,
200
+ torch.int64: False,
201
+ torch.bool: False,
202
+ },
203
+ torch.int64: {
204
+ torch.float16: False,
205
+ torch.float32: False,
206
+ torch.float64: False,
207
+ torch.complex64: False,
208
+ torch.complex128: False,
209
+ torch.uint8: False,
210
+ torch.uint16: False,
211
+ torch.uint32: False,
212
+ torch.uint64: False,
213
+ torch.int8: False,
214
+ torch.int16: False,
215
+ torch.int32: False,
216
+ torch.int64: True,
217
+ torch.bool: False,
218
+ },
219
+ torch.bool: {
220
+ torch.float16: False,
221
+ torch.float32: False,
222
+ torch.float64: False,
223
+ torch.complex64: False,
224
+ torch.complex128: False,
225
+ torch.uint8: False,
226
+ torch.uint16: False,
227
+ torch.uint32: False,
228
+ torch.uint64: False,
229
+ torch.int8: False,
230
+ torch.int16: False,
231
+ torch.int32: False,
232
+ torch.int64: False,
233
+ torch.bool: True,
234
+ },
235
+ },
236
+ "equiv": {
237
+ torch.float16: {
238
+ torch.float16: True,
239
+ torch.float32: False,
240
+ torch.float64: False,
241
+ torch.complex64: False,
242
+ torch.complex128: False,
243
+ torch.uint8: False,
244
+ torch.uint16: False,
245
+ torch.uint32: False,
246
+ torch.uint64: False,
247
+ torch.int8: False,
248
+ torch.int16: False,
249
+ torch.int32: False,
250
+ torch.int64: False,
251
+ torch.bool: False,
252
+ },
253
+ torch.float32: {
254
+ torch.float16: False,
255
+ torch.float32: True,
256
+ torch.float64: False,
257
+ torch.complex64: False,
258
+ torch.complex128: False,
259
+ torch.uint8: False,
260
+ torch.uint16: False,
261
+ torch.uint32: False,
262
+ torch.uint64: False,
263
+ torch.int8: False,
264
+ torch.int16: False,
265
+ torch.int32: False,
266
+ torch.int64: False,
267
+ torch.bool: False,
268
+ },
269
+ torch.float64: {
270
+ torch.float16: False,
271
+ torch.float32: False,
272
+ torch.float64: True,
273
+ torch.complex64: False,
274
+ torch.complex128: False,
275
+ torch.uint8: False,
276
+ torch.uint16: False,
277
+ torch.uint32: False,
278
+ torch.uint64: False,
279
+ torch.int8: False,
280
+ torch.int16: False,
281
+ torch.int32: False,
282
+ torch.int64: False,
283
+ torch.bool: False,
284
+ },
285
+ torch.complex64: {
286
+ torch.float16: False,
287
+ torch.float32: False,
288
+ torch.float64: False,
289
+ torch.complex64: True,
290
+ torch.complex128: False,
291
+ torch.uint8: False,
292
+ torch.uint16: False,
293
+ torch.uint32: False,
294
+ torch.uint64: False,
295
+ torch.int8: False,
296
+ torch.int16: False,
297
+ torch.int32: False,
298
+ torch.int64: False,
299
+ torch.bool: False,
300
+ },
301
+ torch.complex128: {
302
+ torch.float16: False,
303
+ torch.float32: False,
304
+ torch.float64: False,
305
+ torch.complex64: False,
306
+ torch.complex128: True,
307
+ torch.uint8: False,
308
+ torch.uint16: False,
309
+ torch.uint32: False,
310
+ torch.uint64: False,
311
+ torch.int8: False,
312
+ torch.int16: False,
313
+ torch.int32: False,
314
+ torch.int64: False,
315
+ torch.bool: False,
316
+ },
317
+ torch.uint8: {
318
+ torch.float16: False,
319
+ torch.float32: False,
320
+ torch.float64: False,
321
+ torch.complex64: False,
322
+ torch.complex128: False,
323
+ torch.uint8: True,
324
+ torch.uint16: False,
325
+ torch.uint32: False,
326
+ torch.uint64: False,
327
+ torch.int8: False,
328
+ torch.int16: False,
329
+ torch.int32: False,
330
+ torch.int64: False,
331
+ torch.bool: False,
332
+ },
333
+ torch.uint16: {
334
+ torch.float16: False,
335
+ torch.float32: False,
336
+ torch.float64: False,
337
+ torch.complex64: False,
338
+ torch.complex128: False,
339
+ torch.uint8: False,
340
+ torch.uint16: True,
341
+ torch.uint32: False,
342
+ torch.uint64: False,
343
+ torch.int8: False,
344
+ torch.int16: False,
345
+ torch.int32: False,
346
+ torch.int64: False,
347
+ torch.bool: False,
348
+ },
349
+ torch.uint32: {
350
+ torch.float16: False,
351
+ torch.float32: False,
352
+ torch.float64: False,
353
+ torch.complex64: False,
354
+ torch.complex128: False,
355
+ torch.uint8: False,
356
+ torch.uint16: False,
357
+ torch.uint32: True,
358
+ torch.uint64: False,
359
+ torch.int8: False,
360
+ torch.int16: False,
361
+ torch.int32: False,
362
+ torch.int64: False,
363
+ torch.bool: False,
364
+ },
365
+ torch.uint64: {
366
+ torch.float16: False,
367
+ torch.float32: False,
368
+ torch.float64: False,
369
+ torch.complex64: False,
370
+ torch.complex128: False,
371
+ torch.uint8: False,
372
+ torch.uint16: False,
373
+ torch.uint32: False,
374
+ torch.uint64: True,
375
+ torch.int8: False,
376
+ torch.int16: False,
377
+ torch.int32: False,
378
+ torch.int64: False,
379
+ torch.bool: False,
380
+ },
381
+ torch.int8: {
382
+ torch.float16: False,
383
+ torch.float32: False,
384
+ torch.float64: False,
385
+ torch.complex64: False,
386
+ torch.complex128: False,
387
+ torch.uint8: False,
388
+ torch.uint16: False,
389
+ torch.uint32: False,
390
+ torch.uint64: False,
391
+ torch.int8: True,
392
+ torch.int16: False,
393
+ torch.int32: False,
394
+ torch.int64: False,
395
+ torch.bool: False,
396
+ },
397
+ torch.int16: {
398
+ torch.float16: False,
399
+ torch.float32: False,
400
+ torch.float64: False,
401
+ torch.complex64: False,
402
+ torch.complex128: False,
403
+ torch.uint8: False,
404
+ torch.uint16: False,
405
+ torch.uint32: False,
406
+ torch.uint64: False,
407
+ torch.int8: False,
408
+ torch.int16: True,
409
+ torch.int32: False,
410
+ torch.int64: False,
411
+ torch.bool: False,
412
+ },
413
+ torch.int32: {
414
+ torch.float16: False,
415
+ torch.float32: False,
416
+ torch.float64: False,
417
+ torch.complex64: False,
418
+ torch.complex128: False,
419
+ torch.uint8: False,
420
+ torch.uint16: False,
421
+ torch.uint32: False,
422
+ torch.uint64: False,
423
+ torch.int8: False,
424
+ torch.int16: False,
425
+ torch.int32: True,
426
+ torch.int64: False,
427
+ torch.bool: False,
428
+ },
429
+ torch.int64: {
430
+ torch.float16: False,
431
+ torch.float32: False,
432
+ torch.float64: False,
433
+ torch.complex64: False,
434
+ torch.complex128: False,
435
+ torch.uint8: False,
436
+ torch.uint16: False,
437
+ torch.uint32: False,
438
+ torch.uint64: False,
439
+ torch.int8: False,
440
+ torch.int16: False,
441
+ torch.int32: False,
442
+ torch.int64: True,
443
+ torch.bool: False,
444
+ },
445
+ torch.bool: {
446
+ torch.float16: False,
447
+ torch.float32: False,
448
+ torch.float64: False,
449
+ torch.complex64: False,
450
+ torch.complex128: False,
451
+ torch.uint8: False,
452
+ torch.uint16: False,
453
+ torch.uint32: False,
454
+ torch.uint64: False,
455
+ torch.int8: False,
456
+ torch.int16: False,
457
+ torch.int32: False,
458
+ torch.int64: False,
459
+ torch.bool: True,
460
+ },
461
+ },
462
+ "safe": {
463
+ torch.float16: {
464
+ torch.float16: True,
465
+ torch.float32: True,
466
+ torch.float64: True,
467
+ torch.complex64: True,
468
+ torch.complex128: True,
469
+ torch.uint8: False,
470
+ torch.uint16: False,
471
+ torch.uint32: False,
472
+ torch.uint64: False,
473
+ torch.int8: False,
474
+ torch.int16: False,
475
+ torch.int32: False,
476
+ torch.int64: False,
477
+ torch.bool: False,
478
+ },
479
+ torch.float32: {
480
+ torch.float16: False,
481
+ torch.float32: True,
482
+ torch.float64: True,
483
+ torch.complex64: True,
484
+ torch.complex128: True,
485
+ torch.uint8: False,
486
+ torch.uint16: False,
487
+ torch.uint32: False,
488
+ torch.uint64: False,
489
+ torch.int8: False,
490
+ torch.int16: False,
491
+ torch.int32: False,
492
+ torch.int64: False,
493
+ torch.bool: False,
494
+ },
495
+ torch.float64: {
496
+ torch.float16: False,
497
+ torch.float32: False,
498
+ torch.float64: True,
499
+ torch.complex64: False,
500
+ torch.complex128: True,
501
+ torch.uint8: False,
502
+ torch.uint16: False,
503
+ torch.uint32: False,
504
+ torch.uint64: False,
505
+ torch.int8: False,
506
+ torch.int16: False,
507
+ torch.int32: False,
508
+ torch.int64: False,
509
+ torch.bool: False,
510
+ },
511
+ torch.complex64: {
512
+ torch.float16: False,
513
+ torch.float32: False,
514
+ torch.float64: False,
515
+ torch.complex64: True,
516
+ torch.complex128: True,
517
+ torch.uint8: False,
518
+ torch.uint16: False,
519
+ torch.uint32: False,
520
+ torch.uint64: False,
521
+ torch.int8: False,
522
+ torch.int16: False,
523
+ torch.int32: False,
524
+ torch.int64: False,
525
+ torch.bool: False,
526
+ },
527
+ torch.complex128: {
528
+ torch.float16: False,
529
+ torch.float32: False,
530
+ torch.float64: False,
531
+ torch.complex64: False,
532
+ torch.complex128: True,
533
+ torch.uint8: False,
534
+ torch.uint16: False,
535
+ torch.uint32: False,
536
+ torch.uint64: False,
537
+ torch.int8: False,
538
+ torch.int16: False,
539
+ torch.int32: False,
540
+ torch.int64: False,
541
+ torch.bool: False,
542
+ },
543
+ torch.uint8: {
544
+ torch.float16: True,
545
+ torch.float32: True,
546
+ torch.float64: True,
547
+ torch.complex64: True,
548
+ torch.complex128: True,
549
+ torch.uint8: True,
550
+ torch.uint16: True,
551
+ torch.uint32: True,
552
+ torch.uint64: True,
553
+ torch.int8: False,
554
+ torch.int16: True,
555
+ torch.int32: True,
556
+ torch.int64: True,
557
+ torch.bool: False,
558
+ },
559
+ torch.uint16: {
560
+ torch.float16: False,
561
+ torch.float32: True,
562
+ torch.float64: True,
563
+ torch.complex64: True,
564
+ torch.complex128: True,
565
+ torch.uint8: False,
566
+ torch.uint16: True,
567
+ torch.uint32: True,
568
+ torch.uint64: True,
569
+ torch.int8: False,
570
+ torch.int16: False,
571
+ torch.int32: True,
572
+ torch.int64: True,
573
+ torch.bool: False,
574
+ },
575
+ torch.uint32: {
576
+ torch.float16: False,
577
+ torch.float32: False,
578
+ torch.float64: True,
579
+ torch.complex64: False,
580
+ torch.complex128: True,
581
+ torch.uint8: False,
582
+ torch.uint16: False,
583
+ torch.uint32: True,
584
+ torch.uint64: True,
585
+ torch.int8: False,
586
+ torch.int16: False,
587
+ torch.int32: False,
588
+ torch.int64: True,
589
+ torch.bool: False,
590
+ },
591
+ torch.uint64: {
592
+ torch.float16: False,
593
+ torch.float32: False,
594
+ torch.float64: True,
595
+ torch.complex64: False,
596
+ torch.complex128: True,
597
+ torch.uint8: False,
598
+ torch.uint16: False,
599
+ torch.uint32: False,
600
+ torch.uint64: True,
601
+ torch.int8: False,
602
+ torch.int16: False,
603
+ torch.int32: False,
604
+ torch.int64: False,
605
+ torch.bool: False,
606
+ },
607
+ torch.int8: {
608
+ torch.float16: True,
609
+ torch.float32: True,
610
+ torch.float64: True,
611
+ torch.complex64: True,
612
+ torch.complex128: True,
613
+ torch.uint8: False,
614
+ torch.uint16: False,
615
+ torch.uint32: False,
616
+ torch.uint64: False,
617
+ torch.int8: True,
618
+ torch.int16: True,
619
+ torch.int32: True,
620
+ torch.int64: True,
621
+ torch.bool: False,
622
+ },
623
+ torch.int16: {
624
+ torch.float16: False,
625
+ torch.float32: True,
626
+ torch.float64: True,
627
+ torch.complex64: True,
628
+ torch.complex128: True,
629
+ torch.uint8: False,
630
+ torch.uint16: False,
631
+ torch.uint32: False,
632
+ torch.uint64: False,
633
+ torch.int8: False,
634
+ torch.int16: True,
635
+ torch.int32: True,
636
+ torch.int64: True,
637
+ torch.bool: False,
638
+ },
639
+ torch.int32: {
640
+ torch.float16: False,
641
+ torch.float32: False,
642
+ torch.float64: True,
643
+ torch.complex64: False,
644
+ torch.complex128: True,
645
+ torch.uint8: False,
646
+ torch.uint16: False,
647
+ torch.uint32: False,
648
+ torch.uint64: False,
649
+ torch.int8: False,
650
+ torch.int16: False,
651
+ torch.int32: True,
652
+ torch.int64: True,
653
+ torch.bool: False,
654
+ },
655
+ torch.int64: {
656
+ torch.float16: False,
657
+ torch.float32: False,
658
+ torch.float64: True,
659
+ torch.complex64: False,
660
+ torch.complex128: True,
661
+ torch.uint8: False,
662
+ torch.uint16: False,
663
+ torch.uint32: False,
664
+ torch.uint64: False,
665
+ torch.int8: False,
666
+ torch.int16: False,
667
+ torch.int32: False,
668
+ torch.int64: True,
669
+ torch.bool: False,
670
+ },
671
+ torch.bool: {
672
+ torch.float16: True,
673
+ torch.float32: True,
674
+ torch.float64: True,
675
+ torch.complex64: True,
676
+ torch.complex128: True,
677
+ torch.uint8: True,
678
+ torch.uint16: True,
679
+ torch.uint32: True,
680
+ torch.uint64: True,
681
+ torch.int8: True,
682
+ torch.int16: True,
683
+ torch.int32: True,
684
+ torch.int64: True,
685
+ torch.bool: True,
686
+ },
687
+ },
688
+ "same_kind": {
689
+ torch.float16: {
690
+ torch.float16: True,
691
+ torch.float32: True,
692
+ torch.float64: True,
693
+ torch.complex64: True,
694
+ torch.complex128: True,
695
+ torch.uint8: False,
696
+ torch.uint16: False,
697
+ torch.uint32: False,
698
+ torch.uint64: False,
699
+ torch.int8: False,
700
+ torch.int16: False,
701
+ torch.int32: False,
702
+ torch.int64: False,
703
+ torch.bool: False,
704
+ },
705
+ torch.float32: {
706
+ torch.float16: True,
707
+ torch.float32: True,
708
+ torch.float64: True,
709
+ torch.complex64: True,
710
+ torch.complex128: True,
711
+ torch.uint8: False,
712
+ torch.uint16: False,
713
+ torch.uint32: False,
714
+ torch.uint64: False,
715
+ torch.int8: False,
716
+ torch.int16: False,
717
+ torch.int32: False,
718
+ torch.int64: False,
719
+ torch.bool: False,
720
+ },
721
+ torch.float64: {
722
+ torch.float16: True,
723
+ torch.float32: True,
724
+ torch.float64: True,
725
+ torch.complex64: True,
726
+ torch.complex128: True,
727
+ torch.uint8: False,
728
+ torch.uint16: False,
729
+ torch.uint32: False,
730
+ torch.uint64: False,
731
+ torch.int8: False,
732
+ torch.int16: False,
733
+ torch.int32: False,
734
+ torch.int64: False,
735
+ torch.bool: False,
736
+ },
737
+ torch.complex64: {
738
+ torch.float16: False,
739
+ torch.float32: False,
740
+ torch.float64: False,
741
+ torch.complex64: True,
742
+ torch.complex128: True,
743
+ torch.uint8: False,
744
+ torch.uint16: False,
745
+ torch.uint32: False,
746
+ torch.uint64: False,
747
+ torch.int8: False,
748
+ torch.int16: False,
749
+ torch.int32: False,
750
+ torch.int64: False,
751
+ torch.bool: False,
752
+ },
753
+ torch.complex128: {
754
+ torch.float16: False,
755
+ torch.float32: False,
756
+ torch.float64: False,
757
+ torch.complex64: True,
758
+ torch.complex128: True,
759
+ torch.uint8: False,
760
+ torch.uint16: False,
761
+ torch.uint32: False,
762
+ torch.uint64: False,
763
+ torch.int8: False,
764
+ torch.int16: False,
765
+ torch.int32: False,
766
+ torch.int64: False,
767
+ torch.bool: False,
768
+ },
769
+ torch.uint8: {
770
+ torch.float16: True,
771
+ torch.float32: True,
772
+ torch.float64: True,
773
+ torch.complex64: True,
774
+ torch.complex128: True,
775
+ torch.uint8: True,
776
+ torch.uint16: True,
777
+ torch.uint32: True,
778
+ torch.uint64: True,
779
+ torch.int8: True,
780
+ torch.int16: True,
781
+ torch.int32: True,
782
+ torch.int64: True,
783
+ torch.bool: False,
784
+ },
785
+ torch.uint16: {
786
+ torch.float16: True,
787
+ torch.float32: True,
788
+ torch.float64: True,
789
+ torch.complex64: True,
790
+ torch.complex128: True,
791
+ torch.uint8: True,
792
+ torch.uint16: True,
793
+ torch.uint32: True,
794
+ torch.uint64: True,
795
+ torch.int8: True,
796
+ torch.int16: True,
797
+ torch.int32: True,
798
+ torch.int64: True,
799
+ torch.bool: False,
800
+ },
801
+ torch.uint32: {
802
+ torch.float16: True,
803
+ torch.float32: True,
804
+ torch.float64: True,
805
+ torch.complex64: True,
806
+ torch.complex128: True,
807
+ torch.uint8: True,
808
+ torch.uint16: True,
809
+ torch.uint32: True,
810
+ torch.uint64: True,
811
+ torch.int8: True,
812
+ torch.int16: True,
813
+ torch.int32: True,
814
+ torch.int64: True,
815
+ torch.bool: False,
816
+ },
817
+ torch.uint64: {
818
+ torch.float16: True,
819
+ torch.float32: True,
820
+ torch.float64: True,
821
+ torch.complex64: True,
822
+ torch.complex128: True,
823
+ torch.uint8: True,
824
+ torch.uint16: True,
825
+ torch.uint32: True,
826
+ torch.uint64: True,
827
+ torch.int8: True,
828
+ torch.int16: True,
829
+ torch.int32: True,
830
+ torch.int64: True,
831
+ torch.bool: False,
832
+ },
833
+ torch.int8: {
834
+ torch.float16: True,
835
+ torch.float32: True,
836
+ torch.float64: True,
837
+ torch.complex64: True,
838
+ torch.complex128: True,
839
+ torch.uint8: False,
840
+ torch.uint16: False,
841
+ torch.uint32: False,
842
+ torch.uint64: False,
843
+ torch.int8: True,
844
+ torch.int16: True,
845
+ torch.int32: True,
846
+ torch.int64: True,
847
+ torch.bool: False,
848
+ },
849
+ torch.int16: {
850
+ torch.float16: True,
851
+ torch.float32: True,
852
+ torch.float64: True,
853
+ torch.complex64: True,
854
+ torch.complex128: True,
855
+ torch.uint8: False,
856
+ torch.uint16: False,
857
+ torch.uint32: False,
858
+ torch.uint64: False,
859
+ torch.int8: True,
860
+ torch.int16: True,
861
+ torch.int32: True,
862
+ torch.int64: True,
863
+ torch.bool: False,
864
+ },
865
+ torch.int32: {
866
+ torch.float16: True,
867
+ torch.float32: True,
868
+ torch.float64: True,
869
+ torch.complex64: True,
870
+ torch.complex128: True,
871
+ torch.uint8: False,
872
+ torch.uint16: False,
873
+ torch.uint32: False,
874
+ torch.uint64: False,
875
+ torch.int8: True,
876
+ torch.int16: True,
877
+ torch.int32: True,
878
+ torch.int64: True,
879
+ torch.bool: False,
880
+ },
881
+ torch.int64: {
882
+ torch.float16: True,
883
+ torch.float32: True,
884
+ torch.float64: True,
885
+ torch.complex64: True,
886
+ torch.complex128: True,
887
+ torch.uint8: False,
888
+ torch.uint16: False,
889
+ torch.uint32: False,
890
+ torch.uint64: False,
891
+ torch.int8: True,
892
+ torch.int16: True,
893
+ torch.int32: True,
894
+ torch.int64: True,
895
+ torch.bool: False,
896
+ },
897
+ torch.bool: {
898
+ torch.float16: True,
899
+ torch.float32: True,
900
+ torch.float64: True,
901
+ torch.complex64: True,
902
+ torch.complex128: True,
903
+ torch.uint8: True,
904
+ torch.uint16: True,
905
+ torch.uint32: True,
906
+ torch.uint64: True,
907
+ torch.int8: True,
908
+ torch.int16: True,
909
+ torch.int32: True,
910
+ torch.int64: True,
911
+ torch.bool: True,
912
+ },
913
+ },
914
+ "unsafe": {
915
+ torch.float16: {
916
+ torch.float16: True,
917
+ torch.float32: True,
918
+ torch.float64: True,
919
+ torch.complex64: True,
920
+ torch.complex128: True,
921
+ torch.uint8: True,
922
+ torch.uint16: True,
923
+ torch.uint32: True,
924
+ torch.uint64: True,
925
+ torch.int8: True,
926
+ torch.int16: True,
927
+ torch.int32: True,
928
+ torch.int64: True,
929
+ torch.bool: True,
930
+ },
931
+ torch.float32: {
932
+ torch.float16: True,
933
+ torch.float32: True,
934
+ torch.float64: True,
935
+ torch.complex64: True,
936
+ torch.complex128: True,
937
+ torch.uint8: True,
938
+ torch.uint16: True,
939
+ torch.uint32: True,
940
+ torch.uint64: True,
941
+ torch.int8: True,
942
+ torch.int16: True,
943
+ torch.int32: True,
944
+ torch.int64: True,
945
+ torch.bool: True,
946
+ },
947
+ torch.float64: {
948
+ torch.float16: True,
949
+ torch.float32: True,
950
+ torch.float64: True,
951
+ torch.complex64: True,
952
+ torch.complex128: True,
953
+ torch.uint8: True,
954
+ torch.uint16: True,
955
+ torch.uint32: True,
956
+ torch.uint64: True,
957
+ torch.int8: True,
958
+ torch.int16: True,
959
+ torch.int32: True,
960
+ torch.int64: True,
961
+ torch.bool: True,
962
+ },
963
+ torch.complex64: {
964
+ torch.float16: True,
965
+ torch.float32: True,
966
+ torch.float64: True,
967
+ torch.complex64: True,
968
+ torch.complex128: True,
969
+ torch.uint8: True,
970
+ torch.uint16: True,
971
+ torch.uint32: True,
972
+ torch.uint64: True,
973
+ torch.int8: True,
974
+ torch.int16: True,
975
+ torch.int32: True,
976
+ torch.int64: True,
977
+ torch.bool: True,
978
+ },
979
+ torch.complex128: {
980
+ torch.float16: True,
981
+ torch.float32: True,
982
+ torch.float64: True,
983
+ torch.complex64: True,
984
+ torch.complex128: True,
985
+ torch.uint8: True,
986
+ torch.uint16: True,
987
+ torch.uint32: True,
988
+ torch.uint64: True,
989
+ torch.int8: True,
990
+ torch.int16: True,
991
+ torch.int32: True,
992
+ torch.int64: True,
993
+ torch.bool: True,
994
+ },
995
+ torch.uint8: {
996
+ torch.float16: True,
997
+ torch.float32: True,
998
+ torch.float64: True,
999
+ torch.complex64: True,
1000
+ torch.complex128: True,
1001
+ torch.uint8: True,
1002
+ torch.uint16: True,
1003
+ torch.uint32: True,
1004
+ torch.uint64: True,
1005
+ torch.int8: True,
1006
+ torch.int16: True,
1007
+ torch.int32: True,
1008
+ torch.int64: True,
1009
+ torch.bool: True,
1010
+ },
1011
+ torch.uint16: {
1012
+ torch.float16: True,
1013
+ torch.float32: True,
1014
+ torch.float64: True,
1015
+ torch.complex64: True,
1016
+ torch.complex128: True,
1017
+ torch.uint8: True,
1018
+ torch.uint16: True,
1019
+ torch.uint32: True,
1020
+ torch.uint64: True,
1021
+ torch.int8: True,
1022
+ torch.int16: True,
1023
+ torch.int32: True,
1024
+ torch.int64: True,
1025
+ torch.bool: True,
1026
+ },
1027
+ torch.uint32: {
1028
+ torch.float16: True,
1029
+ torch.float32: True,
1030
+ torch.float64: True,
1031
+ torch.complex64: True,
1032
+ torch.complex128: True,
1033
+ torch.uint8: True,
1034
+ torch.uint16: True,
1035
+ torch.uint32: True,
1036
+ torch.uint64: True,
1037
+ torch.int8: True,
1038
+ torch.int16: True,
1039
+ torch.int32: True,
1040
+ torch.int64: True,
1041
+ torch.bool: True,
1042
+ },
1043
+ torch.uint64: {
1044
+ torch.float16: True,
1045
+ torch.float32: True,
1046
+ torch.float64: True,
1047
+ torch.complex64: True,
1048
+ torch.complex128: True,
1049
+ torch.uint8: True,
1050
+ torch.uint16: True,
1051
+ torch.uint32: True,
1052
+ torch.uint64: True,
1053
+ torch.int8: True,
1054
+ torch.int16: True,
1055
+ torch.int32: True,
1056
+ torch.int64: True,
1057
+ torch.bool: True,
1058
+ },
1059
+ torch.int8: {
1060
+ torch.float16: True,
1061
+ torch.float32: True,
1062
+ torch.float64: True,
1063
+ torch.complex64: True,
1064
+ torch.complex128: True,
1065
+ torch.uint8: True,
1066
+ torch.uint16: True,
1067
+ torch.uint32: True,
1068
+ torch.uint64: True,
1069
+ torch.int8: True,
1070
+ torch.int16: True,
1071
+ torch.int32: True,
1072
+ torch.int64: True,
1073
+ torch.bool: True,
1074
+ },
1075
+ torch.int16: {
1076
+ torch.float16: True,
1077
+ torch.float32: True,
1078
+ torch.float64: True,
1079
+ torch.complex64: True,
1080
+ torch.complex128: True,
1081
+ torch.uint8: True,
1082
+ torch.uint16: True,
1083
+ torch.uint32: True,
1084
+ torch.uint64: True,
1085
+ torch.int8: True,
1086
+ torch.int16: True,
1087
+ torch.int32: True,
1088
+ torch.int64: True,
1089
+ torch.bool: True,
1090
+ },
1091
+ torch.int32: {
1092
+ torch.float16: True,
1093
+ torch.float32: True,
1094
+ torch.float64: True,
1095
+ torch.complex64: True,
1096
+ torch.complex128: True,
1097
+ torch.uint8: True,
1098
+ torch.uint16: True,
1099
+ torch.uint32: True,
1100
+ torch.uint64: True,
1101
+ torch.int8: True,
1102
+ torch.int16: True,
1103
+ torch.int32: True,
1104
+ torch.int64: True,
1105
+ torch.bool: True,
1106
+ },
1107
+ torch.int64: {
1108
+ torch.float16: True,
1109
+ torch.float32: True,
1110
+ torch.float64: True,
1111
+ torch.complex64: True,
1112
+ torch.complex128: True,
1113
+ torch.uint8: True,
1114
+ torch.uint16: True,
1115
+ torch.uint32: True,
1116
+ torch.uint64: True,
1117
+ torch.int8: True,
1118
+ torch.int16: True,
1119
+ torch.int32: True,
1120
+ torch.int64: True,
1121
+ torch.bool: True,
1122
+ },
1123
+ torch.bool: {
1124
+ torch.float16: True,
1125
+ torch.float32: True,
1126
+ torch.float64: True,
1127
+ torch.complex64: True,
1128
+ torch.complex128: True,
1129
+ torch.uint8: True,
1130
+ torch.uint16: True,
1131
+ torch.uint32: True,
1132
+ torch.uint64: True,
1133
+ torch.int8: True,
1134
+ torch.int16: True,
1135
+ torch.int32: True,
1136
+ torch.int64: True,
1137
+ torch.bool: True,
1138
+ },
1139
+ },
1140
+ }
1141
+
1142
+
1143
+ _result_type_dict = {
1144
+ torch.float16: {
1145
+ torch.float16: torch.float16,
1146
+ torch.float32: torch.float32,
1147
+ torch.float64: torch.float64,
1148
+ torch.complex64: torch.complex64,
1149
+ torch.complex128: torch.complex128,
1150
+ torch.uint8: torch.float16,
1151
+ torch.uint16: torch.float32,
1152
+ torch.uint32: torch.float64,
1153
+ torch.uint64: torch.float64,
1154
+ torch.int8: torch.float16,
1155
+ torch.int16: torch.float32,
1156
+ torch.int32: torch.float64,
1157
+ torch.int64: torch.float64,
1158
+ torch.bool: torch.float16,
1159
+ },
1160
+ torch.float32: {
1161
+ torch.float16: torch.float32,
1162
+ torch.float32: torch.float32,
1163
+ torch.float64: torch.float64,
1164
+ torch.complex64: torch.complex64,
1165
+ torch.complex128: torch.complex128,
1166
+ torch.uint8: torch.float32,
1167
+ torch.uint16: torch.float32,
1168
+ torch.uint32: torch.float64,
1169
+ torch.uint64: torch.float64,
1170
+ torch.int8: torch.float32,
1171
+ torch.int16: torch.float32,
1172
+ torch.int32: torch.float64,
1173
+ torch.int64: torch.float64,
1174
+ torch.bool: torch.float32,
1175
+ },
1176
+ torch.float64: {
1177
+ torch.float16: torch.float64,
1178
+ torch.float32: torch.float64,
1179
+ torch.float64: torch.float64,
1180
+ torch.complex64: torch.complex128,
1181
+ torch.complex128: torch.complex128,
1182
+ torch.uint8: torch.float64,
1183
+ torch.uint16: torch.float64,
1184
+ torch.uint32: torch.float64,
1185
+ torch.uint64: torch.float64,
1186
+ torch.int8: torch.float64,
1187
+ torch.int16: torch.float64,
1188
+ torch.int32: torch.float64,
1189
+ torch.int64: torch.float64,
1190
+ torch.bool: torch.float64,
1191
+ },
1192
+ torch.complex64: {
1193
+ torch.float16: torch.complex64,
1194
+ torch.float32: torch.complex64,
1195
+ torch.float64: torch.complex128,
1196
+ torch.complex64: torch.complex64,
1197
+ torch.complex128: torch.complex128,
1198
+ torch.uint8: torch.complex64,
1199
+ torch.uint16: torch.complex64,
1200
+ torch.uint32: torch.complex128,
1201
+ torch.uint64: torch.complex128,
1202
+ torch.int8: torch.complex64,
1203
+ torch.int16: torch.complex64,
1204
+ torch.int32: torch.complex128,
1205
+ torch.int64: torch.complex128,
1206
+ torch.bool: torch.complex64,
1207
+ },
1208
+ torch.complex128: {
1209
+ torch.float16: torch.complex128,
1210
+ torch.float32: torch.complex128,
1211
+ torch.float64: torch.complex128,
1212
+ torch.complex64: torch.complex128,
1213
+ torch.complex128: torch.complex128,
1214
+ torch.uint8: torch.complex128,
1215
+ torch.uint16: torch.complex128,
1216
+ torch.uint32: torch.complex128,
1217
+ torch.uint64: torch.complex128,
1218
+ torch.int8: torch.complex128,
1219
+ torch.int16: torch.complex128,
1220
+ torch.int32: torch.complex128,
1221
+ torch.int64: torch.complex128,
1222
+ torch.bool: torch.complex128,
1223
+ },
1224
+ torch.uint8: {
1225
+ torch.float16: torch.float16,
1226
+ torch.float32: torch.float32,
1227
+ torch.float64: torch.float64,
1228
+ torch.complex64: torch.complex64,
1229
+ torch.complex128: torch.complex128,
1230
+ torch.uint8: torch.uint8,
1231
+ torch.uint16: torch.uint16,
1232
+ torch.uint32: torch.uint32,
1233
+ torch.uint64: torch.uint64,
1234
+ torch.int8: torch.int16,
1235
+ torch.int16: torch.int16,
1236
+ torch.int32: torch.int32,
1237
+ torch.int64: torch.int64,
1238
+ torch.bool: torch.uint8,
1239
+ },
1240
+ torch.uint16: {
1241
+ torch.float16: torch.float32,
1242
+ torch.float32: torch.float32,
1243
+ torch.float64: torch.float64,
1244
+ torch.complex64: torch.complex64,
1245
+ torch.complex128: torch.complex128,
1246
+ torch.uint8: torch.uint16,
1247
+ torch.uint16: torch.uint16,
1248
+ torch.uint32: torch.uint32,
1249
+ torch.uint64: torch.uint64,
1250
+ torch.int8: torch.int32,
1251
+ torch.int16: torch.int32,
1252
+ torch.int32: torch.int32,
1253
+ torch.int64: torch.int64,
1254
+ torch.bool: torch.uint16,
1255
+ },
1256
+ torch.uint32: {
1257
+ torch.float16: torch.float64,
1258
+ torch.float32: torch.float64,
1259
+ torch.float64: torch.float64,
1260
+ torch.complex64: torch.complex128,
1261
+ torch.complex128: torch.complex128,
1262
+ torch.uint8: torch.uint32,
1263
+ torch.uint16: torch.uint32,
1264
+ torch.uint32: torch.uint32,
1265
+ torch.uint64: torch.uint64,
1266
+ torch.int8: torch.int64,
1267
+ torch.int16: torch.int64,
1268
+ torch.int32: torch.int64,
1269
+ torch.int64: torch.int64,
1270
+ torch.bool: torch.uint32,
1271
+ },
1272
+ torch.uint64: {
1273
+ torch.float16: torch.float64,
1274
+ torch.float32: torch.float64,
1275
+ torch.float64: torch.float64,
1276
+ torch.complex64: torch.complex128,
1277
+ torch.complex128: torch.complex128,
1278
+ torch.uint8: torch.uint64,
1279
+ torch.uint16: torch.uint64,
1280
+ torch.uint32: torch.uint64,
1281
+ torch.uint64: torch.uint64,
1282
+ torch.int8: torch.float64,
1283
+ torch.int16: torch.float64,
1284
+ torch.int32: torch.float64,
1285
+ torch.int64: torch.float64,
1286
+ torch.bool: torch.uint64,
1287
+ },
1288
+ torch.int8: {
1289
+ torch.float16: torch.float16,
1290
+ torch.float32: torch.float32,
1291
+ torch.float64: torch.float64,
1292
+ torch.complex64: torch.complex64,
1293
+ torch.complex128: torch.complex128,
1294
+ torch.uint8: torch.int16,
1295
+ torch.uint16: torch.int32,
1296
+ torch.uint32: torch.int64,
1297
+ torch.uint64: torch.float64,
1298
+ torch.int8: torch.int8,
1299
+ torch.int16: torch.int16,
1300
+ torch.int32: torch.int32,
1301
+ torch.int64: torch.int64,
1302
+ torch.bool: torch.int8,
1303
+ },
1304
+ torch.int16: {
1305
+ torch.float16: torch.float32,
1306
+ torch.float32: torch.float32,
1307
+ torch.float64: torch.float64,
1308
+ torch.complex64: torch.complex64,
1309
+ torch.complex128: torch.complex128,
1310
+ torch.uint8: torch.int16,
1311
+ torch.uint16: torch.int32,
1312
+ torch.uint32: torch.int64,
1313
+ torch.uint64: torch.float64,
1314
+ torch.int8: torch.int16,
1315
+ torch.int16: torch.int16,
1316
+ torch.int32: torch.int32,
1317
+ torch.int64: torch.int64,
1318
+ torch.bool: torch.int16,
1319
+ },
1320
+ torch.int32: {
1321
+ torch.float16: torch.float64,
1322
+ torch.float32: torch.float64,
1323
+ torch.float64: torch.float64,
1324
+ torch.complex64: torch.complex128,
1325
+ torch.complex128: torch.complex128,
1326
+ torch.uint8: torch.int32,
1327
+ torch.uint16: torch.int32,
1328
+ torch.uint32: torch.int64,
1329
+ torch.uint64: torch.float64,
1330
+ torch.int8: torch.int32,
1331
+ torch.int16: torch.int32,
1332
+ torch.int32: torch.int32,
1333
+ torch.int64: torch.int64,
1334
+ torch.bool: torch.int32,
1335
+ },
1336
+ torch.int64: {
1337
+ torch.float16: torch.float64,
1338
+ torch.float32: torch.float64,
1339
+ torch.float64: torch.float64,
1340
+ torch.complex64: torch.complex128,
1341
+ torch.complex128: torch.complex128,
1342
+ torch.uint8: torch.int64,
1343
+ torch.uint16: torch.int64,
1344
+ torch.uint32: torch.int64,
1345
+ torch.uint64: torch.float64,
1346
+ torch.int8: torch.int64,
1347
+ torch.int16: torch.int64,
1348
+ torch.int32: torch.int64,
1349
+ torch.int64: torch.int64,
1350
+ torch.bool: torch.int64,
1351
+ },
1352
+ torch.bool: {
1353
+ torch.float16: torch.float16,
1354
+ torch.float32: torch.float32,
1355
+ torch.float64: torch.float64,
1356
+ torch.complex64: torch.complex64,
1357
+ torch.complex128: torch.complex128,
1358
+ torch.uint8: torch.uint8,
1359
+ torch.uint16: torch.uint16,
1360
+ torch.uint32: torch.uint32,
1361
+ torch.uint64: torch.uint64,
1362
+ torch.int8: torch.int8,
1363
+ torch.int16: torch.int16,
1364
+ torch.int32: torch.int32,
1365
+ torch.int64: torch.int64,
1366
+ torch.bool: torch.bool,
1367
+ },
1368
+ }
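These autogenerated tables are plain nested dict lookups that mirror np.can_cast and np.result_type for torch dtypes. Sample lookups, with the expected values taken from the tables above (the module path is the one added in this diff):

import torch
from torch._numpy._casting_dicts import _can_cast_dict, _result_type_dict

print(_can_cast_dict["safe"][torch.int8][torch.float32])        # True
print(_can_cast_dict["same_kind"][torch.float64][torch.int32])  # False
print(_result_type_dict[torch.uint8][torch.int8])               # torch.int16, as in NumPy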
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_dtypes.py ADDED
@@ -0,0 +1,453 @@
1
+ # mypy: ignore-errors
2
+
3
+ """ Define analogs of numpy dtypes supported by pytorch.
4
+ Define the scalar types and supported dtypes and numpy <--> torch dtype mappings.
5
+ """
6
+ import builtins
7
+
8
+ import torch
9
+
10
+ from . import _dtypes_impl
11
+
12
+
13
+ # ### Scalar types ###
14
+
15
+
16
+ class generic:
17
+ name = "generic"
18
+
19
+ def __new__(cls, value):
20
+ # NumPy scalars are modelled as 0-D arrays
21
+ # so a call to np.float32(4) produces a 0-D array.
22
+
23
+ from ._ndarray import asarray, ndarray
24
+
25
+ if isinstance(value, str) and value in ["inf", "nan"]:
26
+ value = {"inf": torch.inf, "nan": torch.nan}[value]
27
+
28
+ if isinstance(value, ndarray):
29
+ return value.astype(cls)
30
+ else:
31
+ return asarray(value, dtype=cls)
32
+
33
+
34
+ ##################
35
+ # abstract types #
36
+ ##################
37
+
38
+
39
+ class number(generic):
40
+ name = "number"
41
+
42
+
43
+ class integer(number):
44
+ name = "integer"
45
+
46
+
47
+ class inexact(number):
48
+ name = "inexact"
49
+
50
+
51
+ class signedinteger(integer):
52
+ name = "signedinteger"
53
+
54
+
55
+ class unsignedinteger(integer):
56
+ name = "unsignedinteger"
57
+
58
+
59
+ class floating(inexact):
60
+ name = "floating"
61
+
62
+
63
+ class complexfloating(inexact):
64
+ name = "complexfloating"
65
+
66
+
67
+ _abstract_dtypes = [
68
+ "generic",
69
+ "number",
70
+ "integer",
71
+ "signedinteger",
72
+ "unsignedinteger",
73
+ "inexact",
74
+ "floating",
75
+ "complexfloating",
76
+ ]
77
+
78
+ # ##### concrete types
79
+
80
+ # signed integers
81
+
82
+
83
+ class int8(signedinteger):
84
+ name = "int8"
85
+ typecode = "b"
86
+ torch_dtype = torch.int8
87
+
88
+
89
+ class int16(signedinteger):
90
+ name = "int16"
91
+ typecode = "h"
92
+ torch_dtype = torch.int16
93
+
94
+
95
+ class int32(signedinteger):
96
+ name = "int32"
97
+ typecode = "i"
98
+ torch_dtype = torch.int32
99
+
100
+
101
+ class int64(signedinteger):
102
+ name = "int64"
103
+ typecode = "l"
104
+ torch_dtype = torch.int64
105
+
106
+
107
+ # unsigned integers
108
+
109
+
110
+ class uint8(unsignedinteger):
111
+ name = "uint8"
112
+ typecode = "B"
113
+ torch_dtype = torch.uint8
114
+
115
+
116
+ class uint16(unsignedinteger):
117
+ name = "uint16"
118
+ typecode = "H"
119
+ torch_dtype = torch.uint16
120
+
121
+
122
+ class uint32(unsignedinteger):
123
+ name = "uint32"
124
+ typecode = "I"
125
+ torch_dtype = torch.uint32
126
+
127
+
128
+ class uint64(unsignedinteger):
129
+ name = "uint64"
130
+ typecode = "L"
131
+ torch_dtype = torch.uint64
132
+
133
+
134
+ # floating point
135
+
136
+
137
+ class float16(floating):
138
+ name = "float16"
139
+ typecode = "e"
140
+ torch_dtype = torch.float16
141
+
142
+
143
+ class float32(floating):
144
+ name = "float32"
145
+ typecode = "f"
146
+ torch_dtype = torch.float32
147
+
148
+
149
+ class float64(floating):
150
+ name = "float64"
151
+ typecode = "d"
152
+ torch_dtype = torch.float64
153
+
154
+
155
+ class complex64(complexfloating):
156
+ name = "complex64"
157
+ typecode = "F"
158
+ torch_dtype = torch.complex64
159
+
160
+
161
+ class complex128(complexfloating):
162
+ name = "complex128"
163
+ typecode = "D"
164
+ torch_dtype = torch.complex128
165
+
166
+
167
+ class bool_(generic):
168
+ name = "bool_"
169
+ typecode = "?"
170
+ torch_dtype = torch.bool
171
+
172
+
173
+ # name aliases
174
+ _name_aliases = {
175
+ "intp": int64,
176
+ "int_": int64,
177
+ "intc": int32,
178
+ "byte": int8,
179
+ "short": int16,
180
+ "longlong": int64, # XXX: is this correct?
181
+ "ulonglong": uint64,
182
+ "ubyte": uint8,
183
+ "half": float16,
184
+ "single": float32,
185
+ "double": float64,
186
+ "float_": float64,
187
+ "csingle": complex64,
188
+ "singlecomplex": complex64,
189
+ "cdouble": complex128,
190
+ "cfloat": complex128,
191
+ "complex_": complex128,
192
+ }
193
+ # We register float_ = float64 and so on
194
+ for name, obj in _name_aliases.items():
195
+ vars()[name] = obj
196
+
197
+
198
+ # Replicate this NumPy-defined way of grouping scalar types,
199
+ # cf tests/core/test_scalar_methods.py
200
+ sctypes = {
201
+ "int": [int8, int16, int32, int64],
202
+ "uint": [uint8, uint16, uint32, uint64],
203
+ "float": [float16, float32, float64],
204
+ "complex": [complex64, complex128],
205
+ "others": [bool_],
206
+ }
207
+
208
+
209
+ # Support mappings/functions
210
+
211
+ _names = {st.name: st for cat in sctypes for st in sctypes[cat]}
212
+ _typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
213
+ _torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}
214
+
215
+
216
+ _aliases = {
217
+ "u1": uint8,
218
+ "i1": int8,
219
+ "i2": int16,
220
+ "i4": int32,
221
+ "i8": int64,
222
+ "b": int8, # XXX: srsly?
223
+ "f2": float16,
224
+ "f4": float32,
225
+ "f8": float64,
226
+ "c8": complex64,
227
+ "c16": complex128,
228
+ # numpy-specific trailing underscore
229
+ "bool_": bool_,
230
+ }
231
+
232
+
233
+ _python_types = {
234
+ int: int64,
235
+ float: float64,
236
+ complex: complex128,
237
+ builtins.bool: bool_,
238
+ # also allow stringified names of python types
239
+ int.__name__: int64,
240
+ float.__name__: float64,
241
+ complex.__name__: complex128,
242
+ builtins.bool.__name__: bool_,
243
+ }
244
+
245
+
246
+ def sctype_from_string(s):
247
+ """Normalize a string value: a type 'name' or a typecode or a width alias."""
248
+ if s in _names:
249
+ return _names[s]
250
+ if s in _name_aliases.keys():
251
+ return _name_aliases[s]
252
+ if s in _typecodes:
253
+ return _typecodes[s]
254
+ if s in _aliases:
255
+ return _aliases[s]
256
+ if s in _python_types:
257
+ return _python_types[s]
258
+ raise TypeError(f"data type {s!r} not understood")
259
+
260
+
261
+ def sctype_from_torch_dtype(torch_dtype):
262
+ return _torch_dtypes[torch_dtype]
263
+
264
+
265
+ # ### DTypes. ###
266
+
267
+
268
+ def dtype(arg):
269
+ if arg is None:
270
+ arg = _dtypes_impl.default_dtypes().float_dtype
271
+ return DType(arg)
272
+
273
+
274
+ class DType:
275
+ def __init__(self, arg):
276
+ # a pytorch object?
277
+ if isinstance(arg, torch.dtype):
278
+ sctype = _torch_dtypes[arg]
279
+ elif isinstance(arg, torch.Tensor):
280
+ sctype = _torch_dtypes[arg.dtype]
281
+ # a scalar type?
282
+ elif issubclass_(arg, generic):
283
+ sctype = arg
284
+ # a dtype already?
285
+ elif isinstance(arg, DType):
286
+ sctype = arg._scalar_type
287
+ # has a dtype attribute?
288
+ elif hasattr(arg, "dtype"):
289
+ sctype = arg.dtype._scalar_type
290
+ else:
291
+ sctype = sctype_from_string(arg)
292
+ self._scalar_type = sctype
293
+
294
+ @property
295
+ def name(self):
296
+ return self._scalar_type.name
297
+
298
+ @property
299
+ def type(self):
300
+ return self._scalar_type
301
+
302
+ @property
303
+ def kind(self):
304
+ # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
305
+ return _torch_dtypes[self.torch_dtype].name[0]
306
+
307
+ @property
308
+ def typecode(self):
309
+ return self._scalar_type.typecode
310
+
311
+ def __eq__(self, other):
312
+ if isinstance(other, DType):
313
+ return self._scalar_type == other._scalar_type
314
+ try:
315
+ other_instance = DType(other)
316
+ except TypeError:
317
+ return False
318
+ return self._scalar_type == other_instance._scalar_type
319
+
320
+ @property
321
+ def torch_dtype(self):
322
+ return self._scalar_type.torch_dtype
323
+
324
+ def __hash__(self):
325
+ return hash(self._scalar_type.name)
326
+
327
+ def __repr__(self):
328
+ return f'dtype("{self.name}")'
329
+
330
+ __str__ = __repr__
331
+
332
+ @property
333
+ def itemsize(self):
334
+ elem = self.type(1)
335
+ return elem.tensor.element_size()
336
+
337
+ def __getstate__(self):
338
+ return self._scalar_type
339
+
340
+ def __setstate__(self, value):
341
+ self._scalar_type = value
342
+
343
+
344
+ typecodes = {
345
+ "All": "efdFDBbhil?",
346
+ "AllFloat": "efdFD",
347
+ "AllInteger": "Bbhil",
348
+ "Integer": "bhil",
349
+ "UnsignedInteger": "B",
350
+ "Float": "efd",
351
+ "Complex": "FD",
352
+ }
353
+
354
+
355
+ # ### Defaults and dtype discovery
356
+
357
+
358
+ def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
359
+ """Set the (global) defaults for fp, complex, and int dtypes.
360
+
361
+ The complex dtype is inferred from the float (fp) dtype. It has
362
+ a width at least twice the width of the float dtype,
363
+ i.e., it's complex128 for float64 and complex64 for float32.
364
+
365
+ Parameters
366
+ ----------
367
+ fp_dtype
368
+ Allowed values are "numpy", "pytorch" or dtype_like things which
369
+ can be converted into a DType instance.
370
+ Default is "numpy" (i.e. float64).
371
+ int_dtype
372
+ Allowed values are "numpy", "pytorch" or dtype_like things which
373
+ can be converted into a DType instance.
374
+ Default is "numpy" (i.e. int64).
375
+
376
+ Returns
377
+ -------
378
+ The old default dtype state: a namedtuple with attributes ``float_dtype``,
379
+ ``complex_dtype`` and ``int_dtype``. These attributes store *pytorch*
380
+ dtypes.
381
+
382
+ Notes
383
+ -----
384
+ This function has a side effect: it sets the global state with the provided dtypes.
385
+
386
+ The complex dtype has bit width of at least twice the width of the float
387
+ dtype, i.e. it's complex128 for float64 and complex64 for float32.
388
+
389
+ """
390
+ if fp_dtype not in ["numpy", "pytorch"]:
391
+ fp_dtype = dtype(fp_dtype).torch_dtype
392
+ if int_dtype not in ["numpy", "pytorch"]:
393
+ int_dtype = dtype(int_dtype).torch_dtype
394
+
395
+ if fp_dtype == "numpy":
396
+ float_dtype = torch.float64
397
+ elif fp_dtype == "pytorch":
398
+ float_dtype = torch.float32
399
+ else:
400
+ float_dtype = fp_dtype
401
+
402
+ complex_dtype = {
403
+ torch.float64: torch.complex128,
404
+ torch.float32: torch.complex64,
405
+ torch.float16: torch.complex64,
406
+ }[float_dtype]
407
+
408
+ if int_dtype in ["numpy", "pytorch"]:
409
+ int_dtype = torch.int64
410
+ else:
411
+ int_dtype = int_dtype
412
+
413
+ new_defaults = _dtypes_impl.DefaultDTypes(
414
+ float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
415
+ )
416
+
417
+ # set the new global state and return the old state
418
+ old_defaults = _dtypes_impl.default_dtypes()
419
+ _dtypes_impl._default_dtypes = new_defaults
420
+ return old_defaults
421
+
422
+
423
+ def issubclass_(arg, klass):
424
+ try:
425
+ return issubclass(arg, klass)
426
+ except TypeError:
427
+ return False
428
+
429
+
430
+ def issubdtype(arg1, arg2):
431
+ # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
432
+
433
+ # We also accept strings even if NumPy doesn't as dtypes are serialized as their
434
+ # string representation in dynamo's graph
435
+ def str_to_abstract(t):
436
+ if isinstance(t, str) and t in _abstract_dtypes:
437
+ return globals()[t]
438
+ return t
439
+
440
+ arg1 = str_to_abstract(arg1)
441
+ arg2 = str_to_abstract(arg2)
442
+
443
+ if not issubclass_(arg1, generic):
444
+ arg1 = dtype(arg1).type
445
+ if not issubclass_(arg2, generic):
446
+ arg2 = dtype(arg2).type
447
+ return issubclass(arg1, arg2)
448
+
449
+
450
+ __all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"]
451
+ __all__ += list(_names.keys()) # noqa: PLE0605
452
+ __all__ += list(_name_aliases.keys()) # noqa: PLE0605
453
+ __all__ += _abstract_dtypes # noqa: PLE0605
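A short usage sketch of the DType machinery above, assuming a PyTorch build that ships torch._numpy:

from torch._numpy import _dtypes as dt

d = dt.dtype("f4")                    # width alias resolves to float32
print(d)                              # dtype("float32")
print(d.torch_dtype)                  # torch.float32
print(d.kind, d.typecode)             # f f
print(dt.issubdtype(d, dt.floating))  # True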
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py ADDED
@@ -0,0 +1,217 @@
1
+ # mypy: ignore-errors
2
+
3
+ """Dtypes/scalar type implementations with torch dtypes.
4
+
5
+ Here `dtype` is always a torch.dtype, this module knows nothing about
6
+ scalar types, wrapper dtypes or anything like that. PyTorch only.
7
+ """
8
+ from collections import namedtuple
9
+
10
+ import torch
11
+
12
+
13
+ # defaults : mimic NumPy, allow user control
14
+ DefaultDTypes = namedtuple(
15
+ "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
16
+ )
17
+
18
+ # a global state
19
+ # We set it the first time we call default_dtypes() to avoid importing
20
+ # torch._dynamo.config and create a circular reference
21
+ _default_dtypes = None
22
+
23
+
24
+ def default_dtypes():
25
+ global _default_dtypes
26
+ if _default_dtypes is None:
27
+ import torch._dynamo.config as config
28
+
29
+ _default_dtypes = DefaultDTypes(
30
+ float_dtype=getattr(torch, config.numpy_default_float),
31
+ complex_dtype=getattr(torch, config.numpy_default_complex),
32
+ int_dtype=getattr(torch, config.numpy_default_int),
33
+ )
34
+ assert isinstance(_default_dtypes.float_dtype, torch.dtype)
35
+ assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
36
+ assert isinstance(_default_dtypes.int_dtype, torch.dtype)
37
+ return _default_dtypes
38
+
39
+
40
+ def get_default_dtype_for(dtype):
41
+ """Default scalar type given sctype category."""
42
+ if dtype == torch.bool:
43
+ return dtype
44
+ if dtype.is_complex:
45
+ return default_dtypes().complex_dtype
46
+ if dtype.is_floating_point:
47
+ return default_dtypes().float_dtype
48
+ # else, it must be (some) integer
49
+ return default_dtypes().int_dtype
50
+
51
+
52
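
A quick sanity sketch of the mapping: `bool` passes through, everything else collapses to the configured default of its category (float32 / complex64 / int64 under stock dynamo settings):

```python
import torch
from torch._numpy import _dtypes_impl

assert _dtypes_impl.get_default_dtype_for(torch.bool) == torch.bool
assert (
    _dtypes_impl.get_default_dtype_for(torch.float16)
    == _dtypes_impl.default_dtypes().float_dtype
)
assert (
    _dtypes_impl.get_default_dtype_for(torch.int8)
    == _dtypes_impl.default_dtypes().int_dtype
)
```
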
+ from . import _casting_dicts as _cd
53
+
54
+
55
+ def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
56
+ return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]
57
+
58
+
59
+ def result_type_impl(*tensors):
60
+ # NB: torch dtypes here
61
+ dtyp = tensors[0].dtype
62
+ if len(tensors) == 1:
63
+ return dtyp
64
+
65
+ for curr in tensors[1:]:
66
+ dtyp = _cd._result_type_dict[dtyp][curr.dtype]
67
+
68
+ return dtyp
69
+
70
+
71
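
`result_type_impl` folds the pairwise promotion table over its arguments left to right. A small check, assuming `_casting_dicts` encodes the usual NumPy promotion lattice:

```python
import torch
from torch._numpy import _dtypes_impl

a = torch.ones(2, dtype=torch.int8)
b = torch.ones(2, dtype=torch.float32)
c = torch.ones(2, dtype=torch.float64)

assert _dtypes_impl.result_type_impl(a, b) == torch.float32  # int8 + float32
assert _dtypes_impl.result_type_impl(a, b, c) == torch.float64  # widened further
```
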
+ def python_type_for_torch(dtyp):
72
+ """Get a python scalar type a torch dtype"""
73
+ if dtyp.is_floating_point:
74
+ typ = float
75
+ elif dtyp.is_complex:
76
+ typ = complex
77
+ elif dtyp == torch.bool:
78
+ typ = bool
79
+ else:
80
+ typ = int
81
+ return typ
82
+
83
+
84
+ # ### NEP 50 helpers ###
85
+
86
+ _SCALAR_TYPES = (int, bool, float, complex)
87
+
88
+ _SCALAR_AND_SYMBOLIC_TYPES = (
89
+ *_SCALAR_TYPES,
90
+ torch.SymInt,
91
+ torch.SymFloat,
92
+ torch.SymBool,
93
+ )
94
+
95
+ _NEP50_FUNCS_TENSOR_ONLY = (
96
+ "minimum",
97
+ "maximum",
98
+ "logaddexp",
99
+ "logaddexp2",
100
+ "lcm",
101
+ "gcd",
102
+ "hypot",
103
+ "heaviside",
104
+ "fmod",
105
+ "fmin",
106
+ "fmax",
107
+ "copysign",
108
+ "arctan2",
109
+ )
110
+
111
+
112
+ def is_scalar(x):
113
+ return isinstance(x, _SCALAR_TYPES)
114
+
115
+
116
+ def is_scalar_or_symbolic(x):
117
+ return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES)
118
+
119
+
120
+ def _dtype_for_scalar(py_type):
121
+ return {
122
+ bool: torch.bool,
123
+ torch.SymBool: torch.bool,
124
+ int: torch.int64,
125
+ torch.SymInt: torch.int64,
126
+ float: torch.float64,
127
+ torch.SymFloat: torch.float64,
128
+ complex: torch.complex128,
129
+ }[py_type]
130
+
131
+
132
+ def _dtype_for_scalar_or_tensor(x):
133
+ return x.dtype if isinstance(x, torch.Tensor) else _dtype_for_scalar(type(x))
134
+
135
+
136
+ def is_float_or_fp_tensor(x):
137
+ return _dtype_for_scalar_or_tensor(x).is_floating_point
138
+
139
+
140
+ def is_complex_or_complex_tensor(x):
141
+ return _dtype_for_scalar_or_tensor(x).is_complex
142
+
143
+
144
+ def _category(dtype):
145
+ return {
146
+ torch.bool: 0,
147
+ torch.SymBool: 0,
148
+ # int
149
+ torch.uint8: 1,
150
+ torch.int8: 1,
151
+ torch.int16: 1,
152
+ torch.int32: 1,
153
+ torch.int64: 1,
154
+ torch.SymInt: 1,
155
+ # float
156
+ torch.float16: 2,
157
+ torch.float32: 2,
158
+ torch.float64: 2,
159
+ torch.SymFloat: 2,
160
+ # complex
161
+ torch.complex64: 3,
162
+ torch.complex128: 3,
163
+ }[dtype]
164
+
165
+
166
+ def nep50_to_tensors(x1, x2, handle_weaks, function_name):
167
+ """If either of inputs is a python scalar, type-promote with NEP 50."""
168
+
169
+ def to_tensor(scalar, dtype=None):
170
+ if dtype is None:
171
+ dtype = _dtype_for_scalar(type(scalar))
172
+ dtype = get_default_dtype_for(dtype)
173
+ return torch.as_tensor(scalar, dtype=dtype)
174
+
175
+ x1_is_weak = not isinstance(x1, torch.Tensor)
176
+ x2_is_weak = not isinstance(x2, torch.Tensor)
177
+ if not handle_weaks or (x1_is_weak and x2_is_weak):
178
+ x1 = to_tensor(x1) if x1_is_weak else x1
179
+ x2 = to_tensor(x2) if x2_is_weak else x2
180
+ return x1, x2
181
+
182
+ # scalar <op> tensor: NEP 50
183
+ assert x1_is_weak != x2_is_weak
184
+
185
+ weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)
186
+
187
+ # find the dtype for the weak's type
188
+ weak_dtype = _dtype_for_scalar(type(weak))
189
+
190
+ cat_weak = _category(weak_dtype)
191
+ cat_not_weak = _category(not_weak.dtype)
192
+
193
+ dt = not_weak.dtype if cat_weak <= cat_not_weak else None
194
+
195
+ # special-case complex + float32
196
+ if weak_dtype.is_complex and not_weak.dtype == torch.float32:
197
+ dt = torch.complex64
198
+
199
+ # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
200
+ # while NEP50 mandates an exception.
201
+ #
202
+ # Note that we only check if each element of the binop overflows,
203
+ # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK
204
+ # in uint8, but the result overflows and wraps around past 255.
205
+ # NumPy emits a RuntimeWarning; PyTorch does not, and neither do we.
206
+ if cat_weak == 1 and cat_not_weak == 1:
207
+ # integers
208
+ iinfo = torch.iinfo(not_weak.dtype)
209
+ if not (iinfo.min <= weak <= iinfo.max):
210
+ raise OverflowError(
211
+ f"Python integer {weak} out of bounds for {not_weak.dtype}"
212
+ )
213
+ if weak_dtype != dt or function_name in _NEP50_FUNCS_TENSOR_ONLY:
214
+ # finally, make `weak` into a 0D tensor if both parameters are required to be tensors.
215
+ weak = to_tensor(weak, dt)
216
+
217
+ return (weak, not_weak) if x1_is_weak else (not_weak, weak)
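
The net effect of the NEP 50 path: a weak Python scalar adopts the tensor operand's dtype when its category allows, and an out-of-range Python int raises instead of wrapping. A sketch exercising both branches directly:

```python
import torch
from torch._numpy import _dtypes_impl

t = torch.ones(3, dtype=torch.uint8)

# the weak int scalar is cast down to the tensor's uint8 dtype
x1, x2 = _dtypes_impl.nep50_to_tensors(t, 1, True, "add")
assert x2.dtype == torch.uint8

# a Python int outside uint8's range raises instead of wrapping around
try:
    _dtypes_impl.nep50_to_tensors(t, 300, True, "add")
except OverflowError as e:
    print(e)  # Python integer 300 out of bounds for torch.uint8
```
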
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_funcs.py ADDED
@@ -0,0 +1,76 @@
1
+ # mypy: ignore-errors
2
+
3
+ import inspect
4
+ import itertools
5
+
6
+ from . import _funcs_impl, _reductions_impl
7
+ from ._normalizations import normalizer
8
+
9
+
10
+ # _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents,
11
+ # and consume/return PyTorch tensors/dtypes.
12
+ # They are also type annotated.
13
+ # Pull these functions from _funcs_impl and decorate them with @normalizer, which
14
+ # - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`.
15
+ # - Maps NumPy dtypes to PyTorch dtypes
16
+ # - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple
17
+ # - Implements the semantics for the `out=` arg
18
+ # - Wraps back the outputs into `torch._numpy.ndarrays`
19
+
20
+
21
+ def _public_functions(mod):
22
+ def is_public_function(f):
23
+ return inspect.isfunction(f) and not f.__name__.startswith("_")
24
+
25
+ return inspect.getmembers(mod, is_public_function)
26
+
27
+
28
+ # We fill in __all__ in the loop below
29
+ __all__ = []
30
+
31
+ # decorate implementer functions with argument normalizers and export to the top namespace
32
+ for name, func in itertools.chain(
33
+ _public_functions(_funcs_impl), _public_functions(_reductions_impl)
34
+ ):
35
+ if name in ["percentile", "quantile", "median"]:
36
+ decorated = normalizer(func, promote_scalar_result=True)
37
+ elif name == "einsum":
38
+ # normalized manually
39
+ decorated = func
40
+ else:
41
+ decorated = normalizer(func)
42
+
43
+ decorated.__qualname__ = name
44
+ decorated.__name__ = name
45
+ vars()[name] = decorated
46
+ __all__.append(name)
47
+
48
+
49
+ """
50
+ Vendored objects from numpy.lib.index_tricks
51
+ """
52
+
53
+
54
+ class IndexExpression:
55
+ """
56
+ Written by Konrad Hinsen <[email protected]>
57
+ last revision: 1999-7-23
58
+
59
+ Cosmetic changes by T. Oliphant 2001
60
+ """
61
+
62
+ def __init__(self, maketuple):
63
+ self.maketuple = maketuple
64
+
65
+ def __getitem__(self, item):
66
+ if self.maketuple and not isinstance(item, tuple):
67
+ return (item,)
68
+ else:
69
+ return item
70
+
71
+
72
+ index_exp = IndexExpression(maketuple=True)
73
+ s_ = IndexExpression(maketuple=False)
74
+
75
+
76
+ __all__ += ["index_exp", "s_"]
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py ADDED
@@ -0,0 +1,2056 @@
1
+ # mypy: ignore-errors
2
+
3
+ """A thin pytorch / numpy compat layer.
4
+
5
+ Things imported from here have numpy-compatible signatures but operate on
6
+ pytorch tensors.
7
+ """
8
+ # Contents of this module end up in the main namespace via _funcs.py
9
+ # where type annotations are used in conjunction with the @normalizer decorator.
10
+ from __future__ import annotations
11
+
12
+ import builtins
13
+ import itertools
14
+ import operator
15
+ from typing import Optional, Sequence, TYPE_CHECKING
16
+
17
+ import torch
18
+
19
+ from . import _dtypes_impl, _util
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ._normalizations import (
24
+ ArrayLike,
25
+ ArrayLikeOrScalar,
26
+ CastingModes,
27
+ DTypeLike,
28
+ NDArray,
29
+ NotImplementedType,
30
+ OutArray,
31
+ )
32
+
33
+
34
+ def copy(
35
+ a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False
36
+ ):
37
+ return a.clone()
38
+
39
+
40
+ def copyto(
41
+ dst: NDArray,
42
+ src: ArrayLike,
43
+ casting: Optional[CastingModes] = "same_kind",
44
+ where: NotImplementedType = None,
45
+ ):
46
+ (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting)
47
+ dst.copy_(src)
48
+
49
+
50
+ def atleast_1d(*arys: ArrayLike):
51
+ res = torch.atleast_1d(*arys)
52
+ if isinstance(res, tuple):
53
+ return list(res)
54
+ else:
55
+ return res
56
+
57
+
58
+ def atleast_2d(*arys: ArrayLike):
59
+ res = torch.atleast_2d(*arys)
60
+ if isinstance(res, tuple):
61
+ return list(res)
62
+ else:
63
+ return res
64
+
65
+
66
+ def atleast_3d(*arys: ArrayLike):
67
+ res = torch.atleast_3d(*arys)
68
+ if isinstance(res, tuple):
69
+ return list(res)
70
+ else:
71
+ return res
72
+
73
+
74
+ def _concat_check(tup, dtype, out):
75
+ """Check inputs in concatenate et al."""
76
+ if tup == ():
77
+ raise ValueError("need at least one array to concatenate")
78
+
79
+ if out is not None and dtype is not None:
80
+ # mimic numpy
81
+ raise TypeError(
82
+ "concatenate() only takes `out` or `dtype` as an "
83
+ "argument, but both were provided."
84
+ )
85
+
86
+
87
+ def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"):
88
+ """Figure out dtypes, cast if necessary."""
89
+
90
+ if out is not None or dtype is not None:
91
+ # figure out the type of the inputs and outputs
92
+ out_dtype = out.dtype.torch_dtype if dtype is None else dtype
93
+ else:
94
+ out_dtype = _dtypes_impl.result_type_impl(*tensors)
95
+
96
+ # cast input arrays if necessary; do not broadcast them against `out`
97
+ tensors = _util.typecast_tensors(tensors, out_dtype, casting)
98
+
99
+ return tensors
100
+
101
+
102
+ def _concatenate(
103
+ tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind"
104
+ ):
105
+ # pure torch implementation, used below and in cov/corrcoef below
106
+ tensors, axis = _util.axis_none_flatten(*tensors, axis=axis)
107
+ tensors = _concat_cast_helper(tensors, out, dtype, casting)
108
+ return torch.cat(tensors, axis)
109
+
110
+
111
+ def concatenate(
112
+ ar_tuple: Sequence[ArrayLike],
113
+ axis=0,
114
+ out: Optional[OutArray] = None,
115
+ dtype: Optional[DTypeLike] = None,
116
+ casting: Optional[CastingModes] = "same_kind",
117
+ ):
118
+ _concat_check(ar_tuple, dtype, out=out)
119
+ result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting)
120
+ return result
121
+
122
+
123
+ def vstack(
124
+ tup: Sequence[ArrayLike],
125
+ *,
126
+ dtype: Optional[DTypeLike] = None,
127
+ casting: Optional[CastingModes] = "same_kind",
128
+ ):
129
+ _concat_check(tup, dtype, out=None)
130
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
131
+ return torch.vstack(tensors)
132
+
133
+
134
+ row_stack = vstack
135
+
136
+
137
+ def hstack(
138
+ tup: Sequence[ArrayLike],
139
+ *,
140
+ dtype: Optional[DTypeLike] = None,
141
+ casting: Optional[CastingModes] = "same_kind",
142
+ ):
143
+ _concat_check(tup, dtype, out=None)
144
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
145
+ return torch.hstack(tensors)
146
+
147
+
148
+ def dstack(
149
+ tup: Sequence[ArrayLike],
150
+ *,
151
+ dtype: Optional[DTypeLike] = None,
152
+ casting: Optional[CastingModes] = "same_kind",
153
+ ):
154
+ # XXX: in numpy 1.24 dstack does not have dtype and casting keywords
155
+ # but {h,v}stack do. Hence add them here for consistency.
156
+ _concat_check(tup, dtype, out=None)
157
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
158
+ return torch.dstack(tensors)
159
+
160
+
161
+ def column_stack(
162
+ tup: Sequence[ArrayLike],
163
+ *,
164
+ dtype: Optional[DTypeLike] = None,
165
+ casting: Optional[CastingModes] = "same_kind",
166
+ ):
167
+ # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
168
+ # but row_stack does. (because row_stack is an alias for vstack, really).
169
+ # Hence add these keywords here for consistency.
170
+ _concat_check(tup, dtype, out=None)
171
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
172
+ return torch.column_stack(tensors)
173
+
174
+
175
+ def stack(
176
+ arrays: Sequence[ArrayLike],
177
+ axis=0,
178
+ out: Optional[OutArray] = None,
179
+ *,
180
+ dtype: Optional[DTypeLike] = None,
181
+ casting: Optional[CastingModes] = "same_kind",
182
+ ):
183
+ _concat_check(arrays, dtype, out=out)
184
+
185
+ tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting)
186
+ result_ndim = tensors[0].ndim + 1
187
+ axis = _util.normalize_axis_index(axis, result_ndim)
188
+ return torch.stack(tensors, axis=axis)
189
+
190
+
191
+ def append(arr: ArrayLike, values: ArrayLike, axis=None):
192
+ if axis is None:
193
+ if arr.ndim != 1:
194
+ arr = arr.flatten()
195
+ values = values.flatten()
196
+ axis = arr.ndim - 1
197
+ return _concatenate((arr, values), axis=axis)
198
+
199
+
200
+ # ### split ###
201
+
202
+
203
+ def _split_helper(tensor, indices_or_sections, axis, strict=False):
204
+ if isinstance(indices_or_sections, int):
205
+ return _split_helper_int(tensor, indices_or_sections, axis, strict)
206
+ elif isinstance(indices_or_sections, (list, tuple)):
207
+ # NB: drop split=..., it only applies to split_helper_int
208
+ return _split_helper_list(tensor, list(indices_or_sections), axis)
209
+ else:
210
+ raise TypeError("split_helper: ", type(indices_or_sections))
211
+
212
+
213
+ def _split_helper_int(tensor, indices_or_sections, axis, strict=False):
214
+ if not isinstance(indices_or_sections, int):
215
+ raise NotImplementedError("split: indices_or_sections")
216
+
217
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
218
+
219
+ # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n
220
+ l, n = tensor.shape[axis], indices_or_sections
221
+
222
+ if n <= 0:
223
+ raise ValueError
224
+
225
+ if l % n == 0:
226
+ num, sz = n, l // n
227
+ lst = [sz] * num
228
+ else:
229
+ if strict:
230
+ raise ValueError("array split does not result in an equal division")
231
+
232
+ num, sz = l % n, l // n + 1
233
+ lst = [sz] * num
234
+
235
+ lst += [sz - 1] * (n - num)
236
+
237
+ return torch.split(tensor, lst, axis)
238
+
239
+
240
+ def _split_helper_list(tensor, indices_or_sections, axis):
241
+ if not isinstance(indices_or_sections, list):
242
+ raise NotImplementedError("split: indices_or_sections: list")
243
+ # numpy expects indices, while torch expects lengths of sections
244
+ # also, numpy appends zero-size arrays for indices above the shape[axis]
245
+ lst = [x for x in indices_or_sections if x <= tensor.shape[axis]]
246
+ num_extra = len(indices_or_sections) - len(lst)
247
+
248
+ lst.append(tensor.shape[axis])
249
+ lst = [
250
+ lst[0],
251
+ ] + [a - b for a, b in zip(lst[1:], lst[:-1])]
252
+ lst += [0] * num_extra
253
+
254
+ return torch.split(tensor, lst, axis)
255
+
256
+
257
+ def array_split(ary: ArrayLike, indices_or_sections, axis=0):
258
+ return _split_helper(ary, indices_or_sections, axis)
259
+
260
+
261
+ def split(ary: ArrayLike, indices_or_sections, axis=0):
262
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
263
+
264
+
265
+ def hsplit(ary: ArrayLike, indices_or_sections):
266
+ if ary.ndim == 0:
267
+ raise ValueError("hsplit only works on arrays of 1 or more dimensions")
268
+ axis = 1 if ary.ndim > 1 else 0
269
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
270
+
271
+
272
+ def vsplit(ary: ArrayLike, indices_or_sections):
273
+ if ary.ndim < 2:
274
+ raise ValueError("vsplit only works on arrays of 2 or more dimensions")
275
+ return _split_helper(ary, indices_or_sections, 0, strict=True)
276
+
277
+
278
+ def dsplit(ary: ArrayLike, indices_or_sections):
279
+ if ary.ndim < 3:
280
+ raise ValueError("dsplit only works on arrays of 3 or more dimensions")
281
+ return _split_helper(ary, indices_or_sections, 2, strict=True)
282
+
283
+
284
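
The `strict` flag is what separates `array_split` from `split`: for length 7 in three sections, `array_split` yields sizes 3, 2, 2 while `split` raises. A sketch against the implementer functions, which consume plain tensors:

```python
import torch
from torch._numpy import _funcs_impl

t = torch.arange(7)

parts = _funcs_impl.array_split(t, 3)
assert [p.shape[0] for p in parts] == [3, 2, 2]

try:
    _funcs_impl.split(t, 3)  # strict: 7 is not divisible by 3
except ValueError as e:
    print(e)  # array split does not result in an equal division
```
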
+ def kron(a: ArrayLike, b: ArrayLike):
285
+ return torch.kron(a, b)
286
+
287
+
288
+ def vander(x: ArrayLike, N=None, increasing=False):
289
+ return torch.vander(x, N, increasing)
290
+
291
+
292
+ # ### linspace, geomspace, logspace and arange ###
293
+
294
+
295
+ def linspace(
296
+ start: ArrayLike,
297
+ stop: ArrayLike,
298
+ num=50,
299
+ endpoint=True,
300
+ retstep=False,
301
+ dtype: Optional[DTypeLike] = None,
302
+ axis=0,
303
+ ):
304
+ if axis != 0 or retstep or not endpoint:
305
+ raise NotImplementedError
306
+ if dtype is None:
307
+ dtype = _dtypes_impl.default_dtypes().float_dtype
308
+ # XXX: raises TypeError if start or stop are not scalars
309
+ return torch.linspace(start, stop, num, dtype=dtype)
310
+
311
+
312
+ def geomspace(
313
+ start: ArrayLike,
314
+ stop: ArrayLike,
315
+ num=50,
316
+ endpoint=True,
317
+ dtype: Optional[DTypeLike] = None,
318
+ axis=0,
319
+ ):
320
+ if axis != 0 or not endpoint:
321
+ raise NotImplementedError
322
+ base = torch.pow(stop / start, 1.0 / (num - 1))
323
+ logbase = torch.log(base)
324
+ return torch.logspace(
325
+ torch.log(start) / logbase,
326
+ torch.log(stop) / logbase,
327
+ num,
328
+ base=base,
329
+ )
330
+
331
+
332
+ def logspace(
333
+ start,
334
+ stop,
335
+ num=50,
336
+ endpoint=True,
337
+ base=10.0,
338
+ dtype: Optional[DTypeLike] = None,
339
+ axis=0,
340
+ ):
341
+ if axis != 0 or not endpoint:
342
+ raise NotImplementedError
343
+ return torch.logspace(start, stop, num, base=base, dtype=dtype)
344
+
345
+
346
+ def arange(
347
+ start: Optional[ArrayLikeOrScalar] = None,
348
+ stop: Optional[ArrayLikeOrScalar] = None,
349
+ step: Optional[ArrayLikeOrScalar] = 1,
350
+ dtype: Optional[DTypeLike] = None,
351
+ *,
352
+ like: NotImplementedType = None,
353
+ ):
354
+ if step == 0:
355
+ raise ZeroDivisionError
356
+ if stop is None and start is None:
357
+ raise TypeError
358
+ if stop is None:
359
+ # XXX: this breaks if start is passed as a kwarg:
360
+ # arange(start=4) should raise (no stop) but doesn't
361
+ start, stop = 0, start
362
+ if start is None:
363
+ start = 0
364
+
365
+ # the dtype of the result
366
+ if dtype is None:
367
+ dtype = (
368
+ _dtypes_impl.default_dtypes().float_dtype
369
+ if any(_dtypes_impl.is_float_or_fp_tensor(x) for x in (start, stop, step))
370
+ else _dtypes_impl.default_dtypes().int_dtype
371
+ )
372
+ work_dtype = torch.float64 if dtype.is_complex else dtype
373
+
374
+ # RuntimeError: "lt_cpu" not implemented for 'ComplexFloat'. Fall back to eager.
375
+ if any(_dtypes_impl.is_complex_or_complex_tensor(x) for x in (start, stop, step)):
376
+ raise NotImplementedError
377
+
378
+ if (step > 0 and start > stop) or (step < 0 and start < stop):
379
+ # empty range
380
+ return torch.empty(0, dtype=dtype)
381
+
382
+ result = torch.arange(start, stop, step, dtype=work_dtype)
383
+ result = _util.cast_if_needed(result, dtype)
384
+ return result
385
+
386
+
387
+ # ### zeros/ones/empty/full ###
388
+
389
+
390
+ def empty(
391
+ shape,
392
+ dtype: Optional[DTypeLike] = None,
393
+ order: NotImplementedType = "C",
394
+ *,
395
+ like: NotImplementedType = None,
396
+ ):
397
+ if dtype is None:
398
+ dtype = _dtypes_impl.default_dtypes().float_dtype
399
+ return torch.empty(shape, dtype=dtype)
400
+
401
+
402
+ # NB: *_like functions deliberately deviate from numpy: it has subok=True
403
+ # as the default; we set subok=False and raise on anything else.
404
+
405
+
406
+ def empty_like(
407
+ prototype: ArrayLike,
408
+ dtype: Optional[DTypeLike] = None,
409
+ order: NotImplementedType = "K",
410
+ subok: NotImplementedType = False,
411
+ shape=None,
412
+ ):
413
+ result = torch.empty_like(prototype, dtype=dtype)
414
+ if shape is not None:
415
+ result = result.reshape(shape)
416
+ return result
417
+
418
+
419
+ def full(
420
+ shape,
421
+ fill_value: ArrayLike,
422
+ dtype: Optional[DTypeLike] = None,
423
+ order: NotImplementedType = "C",
424
+ *,
425
+ like: NotImplementedType = None,
426
+ ):
427
+ if isinstance(shape, int):
428
+ shape = (shape,)
429
+ if dtype is None:
430
+ dtype = fill_value.dtype
431
+ if not isinstance(shape, (tuple, list)):
432
+ shape = (shape,)
433
+ return torch.full(shape, fill_value, dtype=dtype)
434
+
435
+
436
+ def full_like(
437
+ a: ArrayLike,
438
+ fill_value,
439
+ dtype: Optional[DTypeLike] = None,
440
+ order: NotImplementedType = "K",
441
+ subok: NotImplementedType = False,
442
+ shape=None,
443
+ ):
444
+ # XXX: fill_value broadcasts
445
+ result = torch.full_like(a, fill_value, dtype=dtype)
446
+ if shape is not None:
447
+ result = result.reshape(shape)
448
+ return result
449
+
450
+
451
+ def ones(
452
+ shape,
453
+ dtype: Optional[DTypeLike] = None,
454
+ order: NotImplementedType = "C",
455
+ *,
456
+ like: NotImplementedType = None,
457
+ ):
458
+ if dtype is None:
459
+ dtype = _dtypes_impl.default_dtypes().float_dtype
460
+ return torch.ones(shape, dtype=dtype)
461
+
462
+
463
+ def ones_like(
464
+ a: ArrayLike,
465
+ dtype: Optional[DTypeLike] = None,
466
+ order: NotImplementedType = "K",
467
+ subok: NotImplementedType = False,
468
+ shape=None,
469
+ ):
470
+ result = torch.ones_like(a, dtype=dtype)
471
+ if shape is not None:
472
+ result = result.reshape(shape)
473
+ return result
474
+
475
+
476
+ def zeros(
477
+ shape,
478
+ dtype: Optional[DTypeLike] = None,
479
+ order: NotImplementedType = "C",
480
+ *,
481
+ like: NotImplementedType = None,
482
+ ):
483
+ if dtype is None:
484
+ dtype = _dtypes_impl.default_dtypes().float_dtype
485
+ return torch.zeros(shape, dtype=dtype)
486
+
487
+
488
+ def zeros_like(
489
+ a: ArrayLike,
490
+ dtype: Optional[DTypeLike] = None,
491
+ order: NotImplementedType = "K",
492
+ subok: NotImplementedType = False,
493
+ shape=None,
494
+ ):
495
+ result = torch.zeros_like(a, dtype=dtype)
496
+ if shape is not None:
497
+ result = result.reshape(shape)
498
+ return result
499
+
500
+
501
+ # ### cov & corrcoef ###
502
+
503
+
504
+ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True):
505
+ """Prepare inputs for cov and corrcoef."""
506
+
507
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636
508
+ if y_tensor is not None:
509
+ # make sure x and y are at least 2D
510
+ ndim_extra = 2 - x_tensor.ndim
511
+ if ndim_extra > 0:
512
+ x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape)
513
+ if not rowvar and x_tensor.shape[0] != 1:
514
+ x_tensor = x_tensor.mT
515
+ x_tensor = x_tensor.clone()
516
+
517
+ ndim_extra = 2 - y_tensor.ndim
518
+ if ndim_extra > 0:
519
+ y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape)
520
+ if not rowvar and y_tensor.shape[0] != 1:
521
+ y_tensor = y_tensor.mT
522
+ y_tensor = y_tensor.clone()
523
+
524
+ x_tensor = _concatenate((x_tensor, y_tensor), axis=0)
525
+
526
+ return x_tensor
527
+
528
+
529
+ def corrcoef(
530
+ x: ArrayLike,
531
+ y: Optional[ArrayLike] = None,
532
+ rowvar=True,
533
+ bias=None,
534
+ ddof=None,
535
+ *,
536
+ dtype: Optional[DTypeLike] = None,
537
+ ):
538
+ if bias is not None or ddof is not None:
539
+ # deprecated in NumPy
540
+ raise NotImplementedError
541
+ xy_tensor = _xy_helper_corrcoef(x, y, rowvar)
542
+
543
+ is_half = (xy_tensor.dtype == torch.float16) and xy_tensor.is_cpu
544
+ if is_half:
545
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
546
+ dtype = torch.float32
547
+
548
+ xy_tensor = _util.cast_if_needed(xy_tensor, dtype)
549
+ result = torch.corrcoef(xy_tensor)
550
+
551
+ if is_half:
552
+ result = result.to(torch.float16)
553
+
554
+ return result
555
+
556
+
557
+ def cov(
558
+ m: ArrayLike,
559
+ y: Optional[ArrayLike] = None,
560
+ rowvar=True,
561
+ bias=False,
562
+ ddof=None,
563
+ fweights: Optional[ArrayLike] = None,
564
+ aweights: Optional[ArrayLike] = None,
565
+ *,
566
+ dtype: Optional[DTypeLike] = None,
567
+ ):
568
+ m = _xy_helper_corrcoef(m, y, rowvar)
569
+
570
+ if ddof is None:
571
+ ddof = 1 if bias == 0 else 0
572
+
573
+ is_half = (m.dtype == torch.float16) and m.is_cpu
574
+ if is_half:
575
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
576
+ dtype = torch.float32
577
+
578
+ m = _util.cast_if_needed(m, dtype)
579
+ result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights)
580
+
581
+ if is_half:
582
+ result = result.to(torch.float16)
583
+
584
+ return result
585
+
586
+
587
+ def _conv_corr_impl(a, v, mode):
588
+ dt = _dtypes_impl.result_type_impl(a, v)
589
+ a = _util.cast_if_needed(a, dt)
590
+ v = _util.cast_if_needed(v, dt)
591
+
592
+ padding = v.shape[0] - 1 if mode == "full" else mode
593
+
594
+ if padding == "same" and v.shape[0] % 2 == 0:
595
+ # UserWarning: Using padding='same' with even kernel lengths and odd
596
+ # dilation may require a zero-padded copy of the input be created
597
+ # (Triggered internally at pytorch/aten/src/ATen/native/Convolution.cpp:1010.)
598
+ raise NotImplementedError("mode='same' and even-length weights")
599
+
600
+ # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights
601
+ aa = a[None, :]
602
+ vv = v[None, None, :]
603
+
604
+ result = torch.nn.functional.conv1d(aa, vv, padding=padding)
605
+
606
+ # torch returns a 2D result, numpy returns a 1D array
607
+ return result[0, :]
608
+
609
+
610
+ def convolve(a: ArrayLike, v: ArrayLike, mode="full"):
611
+ # NumPy: if v is longer than a, the arrays are swapped before computation
612
+ if a.shape[0] < v.shape[0]:
613
+ a, v = v, a
614
+
615
+ # flip the weights since numpy does and torch does not
616
+ v = torch.flip(v, (0,))
617
+
618
+ return _conv_corr_impl(a, v, mode)
619
+
620
+
621
+ def correlate(a: ArrayLike, v: ArrayLike, mode="valid"):
622
+ v = torch.conj_physical(v)
623
+ return _conv_corr_impl(a, v, mode)
624
+
625
+
626
+ # ### logic & element selection ###
627
+
628
+
629
+ def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0):
630
+ if x.numel() == 0:
631
+ # edge case allowed by numpy
632
+ x = x.new_empty(0, dtype=int)
633
+
634
+ int_dtype = _dtypes_impl.default_dtypes().int_dtype
635
+ (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe")
636
+
637
+ return torch.bincount(x, weights, minlength)
638
+
639
+
640
+ def where(
641
+ condition: ArrayLike,
642
+ x: Optional[ArrayLikeOrScalar] = None,
643
+ y: Optional[ArrayLikeOrScalar] = None,
644
+ /,
645
+ ):
646
+ if (x is None) != (y is None):
647
+ raise ValueError("either both or neither of x and y should be given")
648
+
649
+ if condition.dtype != torch.bool:
650
+ condition = condition.to(torch.bool)
651
+
652
+ if x is None and y is None:
653
+ result = torch.where(condition)
654
+ else:
655
+ result = torch.where(condition, x, y)
656
+ return result
657
+
658
+
659
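
As in numpy, the one-argument form returns indices while the three-argument form selects elements, with a non-bool condition coerced first:

```python
import torch
from torch._numpy import _funcs_impl

cond = torch.tensor([1, 0, 2])  # non-bool condition is coerced to bool
picked = _funcs_impl.where(cond, torch.tensor(1), torch.tensor(-1))
assert picked.tolist() == [1, -1, 1]

(idx,) = _funcs_impl.where(torch.tensor([True, False, True]))
assert idx.tolist() == [0, 2]
```
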
+ # ###### module-level queries of object properties
660
+
661
+
662
+ def ndim(a: ArrayLike):
663
+ return a.ndim
664
+
665
+
666
+ def shape(a: ArrayLike):
667
+ return tuple(a.shape)
668
+
669
+
670
+ def size(a: ArrayLike, axis=None):
671
+ if axis is None:
672
+ return a.numel()
673
+ else:
674
+ return a.shape[axis]
675
+
676
+
677
+ # ###### shape manipulations and indexing
678
+
679
+
680
+ def expand_dims(a: ArrayLike, axis):
681
+ shape = _util.expand_shape(a.shape, axis)
682
+ return a.view(shape) # never copies
683
+
684
+
685
+ def flip(m: ArrayLike, axis=None):
686
+ # XXX: semantic difference: np.flip returns a view, torch.flip copies
687
+ if axis is None:
688
+ axis = tuple(range(m.ndim))
689
+ else:
690
+ axis = _util.normalize_axis_tuple(axis, m.ndim)
691
+ return torch.flip(m, axis)
692
+
693
+
694
+ def flipud(m: ArrayLike):
695
+ return torch.flipud(m)
696
+
697
+
698
+ def fliplr(m: ArrayLike):
699
+ return torch.fliplr(m)
700
+
701
+
702
+ def rot90(m: ArrayLike, k=1, axes=(0, 1)):
703
+ axes = _util.normalize_axis_tuple(axes, m.ndim)
704
+ return torch.rot90(m, k, axes)
705
+
706
+
707
+ # ### broadcasting and indices ###
708
+
709
+
710
+ def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False):
711
+ return torch.broadcast_to(array, size=shape)
712
+
713
+
714
+ # This is a function from tuples to tuples, so we just reuse it
715
+ from torch import broadcast_shapes
716
+
717
+
718
+ def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False):
719
+ return torch.broadcast_tensors(*args)
720
+
721
+
722
+ def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
723
+ ndim = len(xi)
724
+
725
+ if indexing not in ["xy", "ij"]:
726
+ raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
727
+
728
+ s0 = (1,) * ndim
729
+ output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)]
730
+
731
+ if indexing == "xy" and ndim > 1:
732
+ # switch first and second axis
733
+ output[0] = output[0].reshape((1, -1) + s0[2:])
734
+ output[1] = output[1].reshape((-1, 1) + s0[2:])
735
+
736
+ if not sparse:
737
+ # Return the full N-D matrix (not only the 1-D vector)
738
+ output = torch.broadcast_tensors(*output)
739
+
740
+ if copy:
741
+ output = [x.clone() for x in output]
742
+
743
+ return list(output) # match numpy, return a list
744
+
745
+
746
+ def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False):
747
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791
748
+ dimensions = tuple(dimensions)
749
+ N = len(dimensions)
750
+ shape = (1,) * N
751
+ if sparse:
752
+ res = ()
753
+ else:
754
+ res = torch.empty((N,) + dimensions, dtype=dtype)
755
+ for i, dim in enumerate(dimensions):
756
+ idx = torch.arange(dim, dtype=dtype).reshape(
757
+ shape[:i] + (dim,) + shape[i + 1 :]
758
+ )
759
+ if sparse:
760
+ res = res + (idx,)
761
+ else:
762
+ res[i] = idx
763
+ return res
764
+
765
+
766
+ # ### tri*-something ###
767
+
768
+
769
+ def tril(m: ArrayLike, k=0):
770
+ return torch.tril(m, k)
771
+
772
+
773
+ def triu(m: ArrayLike, k=0):
774
+ return torch.triu(m, k)
775
+
776
+
777
+ def tril_indices(n, k=0, m=None):
778
+ if m is None:
779
+ m = n
780
+ return torch.tril_indices(n, m, offset=k)
781
+
782
+
783
+ def triu_indices(n, k=0, m=None):
784
+ if m is None:
785
+ m = n
786
+ return torch.triu_indices(n, m, offset=k)
787
+
788
+
789
+ def tril_indices_from(arr: ArrayLike, k=0):
790
+ if arr.ndim != 2:
791
+ raise ValueError("input array must be 2-d")
792
+ # Return a tensor rather than a tuple to avoid a graphbreak
793
+ return torch.tril_indices(arr.shape[0], arr.shape[1], offset=k)
794
+
795
+
796
+ def triu_indices_from(arr: ArrayLike, k=0):
797
+ if arr.ndim != 2:
798
+ raise ValueError("input array must be 2-d")
799
+ # Return a tensor rather than a tuple to avoid a graphbreak
800
+ return torch.triu_indices(arr.shape[0], arr.shape[1], offset=k)
801
+
802
+
803
+ def tri(
804
+ N,
805
+ M=None,
806
+ k=0,
807
+ dtype: Optional[DTypeLike] = None,
808
+ *,
809
+ like: NotImplementedType = None,
810
+ ):
811
+ if M is None:
812
+ M = N
813
+ tensor = torch.ones((N, M), dtype=dtype)
814
+ return torch.tril(tensor, diagonal=k)
815
+
816
+
817
+ # ### equality, equivalence, allclose ###
818
+
819
+
820
+ def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
821
+ dtype = _dtypes_impl.result_type_impl(a, b)
822
+ a = _util.cast_if_needed(a, dtype)
823
+ b = _util.cast_if_needed(b, dtype)
824
+ return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
825
+
826
+
827
+ def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
828
+ dtype = _dtypes_impl.result_type_impl(a, b)
829
+ a = _util.cast_if_needed(a, dtype)
830
+ b = _util.cast_if_needed(b, dtype)
831
+ return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
832
+
833
+
834
+ def _tensor_equal(a1, a2, equal_nan=False):
835
+ # Implementation of array_equal/array_equiv.
836
+ if a1.shape != a2.shape:
837
+ return False
838
+ cond = a1 == a2
839
+ if equal_nan:
840
+ cond = cond | (torch.isnan(a1) & torch.isnan(a2))
841
+ return cond.all().item()
842
+
843
+
844
+ def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
845
+ return _tensor_equal(a1, a2, equal_nan=equal_nan)
846
+
847
+
848
+ def array_equiv(a1: ArrayLike, a2: ArrayLike):
849
+ # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not
850
+ try:
851
+ a1_t, a2_t = torch.broadcast_tensors(a1, a2)
852
+ except RuntimeError:
853
+ # failed to broadcast => not equivalent
854
+ return False
855
+ return _tensor_equal(a1_t, a2_t)
856
+
857
+
858
+ def nan_to_num(
859
+ x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None
860
+ ):
861
+ # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble'
862
+ if x.is_complex():
863
+ re = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf)
864
+ im = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf)
865
+ return re + 1j * im
866
+ else:
867
+ return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
868
+
869
+
870
+ # ### put/take_along_axis ###
871
+
872
+
873
+ def take(
874
+ a: ArrayLike,
875
+ indices: ArrayLike,
876
+ axis=None,
877
+ out: Optional[OutArray] = None,
878
+ mode: NotImplementedType = "raise",
879
+ ):
880
+ (a,), axis = _util.axis_none_flatten(a, axis=axis)
881
+ axis = _util.normalize_axis_index(axis, a.ndim)
882
+ idx = (slice(None),) * axis + (indices, ...)
883
+ result = a[idx]
884
+ return result
885
+
886
+
887
+ def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
888
+ (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
889
+ axis = _util.normalize_axis_index(axis, arr.ndim)
890
+ return torch.take_along_dim(arr, indices, axis)
891
+
892
+
893
+ def put(
894
+ a: NDArray,
895
+ indices: ArrayLike,
896
+ values: ArrayLike,
897
+ mode: NotImplementedType = "raise",
898
+ ):
899
+ v = values.type(a.dtype)
900
+ # If indices is larger than v, expand v to at least the size of indices. Any
901
+ # unnecessary trailing elements are then trimmed.
902
+ if indices.numel() > v.numel():
903
+ ratio = (indices.numel() + v.numel() - 1) // v.numel()
904
+ v = v.unsqueeze(0).expand((ratio,) + v.shape)
905
+ # Trim unnecessary elements, regardless of whether v was expanded or not. Note
906
+ # np.put() trims v to match indices by default too.
907
+ if indices.numel() < v.numel():
908
+ v = v.flatten()
909
+ v = v[: indices.numel()]
910
+ a.put_(indices, v)
911
+ return None
912
+
913
+
914
+ def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis):
915
+ (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
916
+ axis = _util.normalize_axis_index(axis, arr.ndim)
917
+
918
+ indices, values = torch.broadcast_tensors(indices, values)
919
+ values = _util.cast_if_needed(values, arr.dtype)
920
+ result = torch.scatter(arr, axis, indices, values)
921
+ arr.copy_(result.reshape(arr.shape))
922
+ return None
923
+
924
+
925
+ def choose(
926
+ a: ArrayLike,
927
+ choices: Sequence[ArrayLike],
928
+ out: Optional[OutArray] = None,
929
+ mode: NotImplementedType = "raise",
930
+ ):
931
+ # First, broadcast elements of `choices`
932
+ choices = torch.stack(torch.broadcast_tensors(*choices))
933
+
934
+ # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`:
935
+ # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939)
936
+ idx_list = [
937
+ torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1))
938
+ for i, dim in enumerate(choices.shape)
939
+ ]
940
+
941
+ idx_list[0] = a
942
+ return choices[idx_list].squeeze(0)
943
+
944
+
945
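
The index-list trick implements a broadcasting `gather(choices, 0, a)`: output element `i` comes from `choices[a[i]][i]`. For instance:

```python
import torch
from torch._numpy import _funcs_impl

choices = [torch.tensor([10, 20, 30]), torch.tensor([-1, -2, -3])]
a = torch.tensor([0, 1, 0])

# picks choices[a[i]][i] -> [10, -2, 30]
print(_funcs_impl.choose(a, choices))
```
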
+ # ### unique et al. ###
946
+
947
+
948
+ def unique(
949
+ ar: ArrayLike,
950
+ return_index: NotImplementedType = False,
951
+ return_inverse=False,
952
+ return_counts=False,
953
+ axis=None,
954
+ *,
955
+ equal_nan: NotImplementedType = True,
956
+ ):
957
+ (ar,), axis = _util.axis_none_flatten(ar, axis=axis)
958
+ axis = _util.normalize_axis_index(axis, ar.ndim)
959
+
960
+ result = torch.unique(
961
+ ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis
962
+ )
963
+
964
+ return result
965
+
966
+
967
+ def nonzero(a: ArrayLike):
968
+ return torch.nonzero(a, as_tuple=True)
969
+
970
+
971
+ def argwhere(a: ArrayLike):
972
+ return torch.argwhere(a)
973
+
974
+
975
+ def flatnonzero(a: ArrayLike):
976
+ return torch.flatten(a).nonzero(as_tuple=True)[0]
977
+
978
+
979
+ def clip(
980
+ a: ArrayLike,
981
+ min: Optional[ArrayLike] = None,
982
+ max: Optional[ArrayLike] = None,
983
+ out: Optional[OutArray] = None,
984
+ ):
985
+ return torch.clamp(a, min, max)
986
+
987
+
988
+ def repeat(a: ArrayLike, repeats: ArrayLikeOrScalar, axis=None):
989
+ return torch.repeat_interleave(a, repeats, axis)
990
+
991
+
992
+ def tile(A: ArrayLike, reps):
993
+ if isinstance(reps, int):
994
+ reps = (reps,)
995
+ return torch.tile(A, reps)
996
+
997
+
998
+ def resize(a: ArrayLike, new_shape=None):
999
+ # implementation vendored from
1000
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497
1001
+ if new_shape is None:
1002
+ return a
1003
+
1004
+ if isinstance(new_shape, int):
1005
+ new_shape = (new_shape,)
1006
+
1007
+ a = a.flatten()
1008
+
1009
+ new_size = 1
1010
+ for dim_length in new_shape:
1011
+ new_size *= dim_length
1012
+ if dim_length < 0:
1013
+ raise ValueError("all elements of `new_shape` must be non-negative")
1014
+
1015
+ if a.numel() == 0 or new_size == 0:
1016
+ # First case must zero fill. The second would have repeats == 0.
1017
+ return torch.zeros(new_shape, dtype=a.dtype)
1018
+
1019
+ repeats = -(-new_size // a.numel()) # ceil division
1020
+ a = concatenate((a,) * repeats)[:new_size]
1021
+
1022
+ return reshape(a, new_shape)
1023
+
1024
+
1025
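
`resize` therefore cycles the flattened input until the requested size is filled, matching `np.resize`:

```python
import torch
from torch._numpy import _funcs_impl

t = torch.tensor([1, 2, 3])
out = _funcs_impl.resize(t, (2, 4))
# the flattened input is repeated, then truncated to 8 elements
assert out.tolist() == [[1, 2, 3, 1], [2, 3, 1, 2]]
```
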
+ # ### diag et al. ###
1026
+
1027
+
1028
+ def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
1029
+ axis1 = _util.normalize_axis_index(axis1, a.ndim)
1030
+ axis2 = _util.normalize_axis_index(axis2, a.ndim)
1031
+ return torch.diagonal(a, offset, axis1, axis2)
1032
+
1033
+
1034
+ def trace(
1035
+ a: ArrayLike,
1036
+ offset=0,
1037
+ axis1=0,
1038
+ axis2=1,
1039
+ dtype: Optional[DTypeLike] = None,
1040
+ out: Optional[OutArray] = None,
1041
+ ):
1042
+ result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype)
1043
+ return result
1044
+
1045
+
1046
+ def eye(
1047
+ N,
1048
+ M=None,
1049
+ k=0,
1050
+ dtype: Optional[DTypeLike] = None,
1051
+ order: NotImplementedType = "C",
1052
+ *,
1053
+ like: NotImplementedType = None,
1054
+ ):
1055
+ if dtype is None:
1056
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1057
+ if M is None:
1058
+ M = N
1059
+ z = torch.zeros(N, M, dtype=dtype)
1060
+ z.diagonal(k).fill_(1)
1061
+ return z
1062
+
1063
+
1064
+ def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None):
1065
+ return torch.eye(n, dtype=dtype)
1066
+
1067
+
1068
+ def diag(v: ArrayLike, k=0):
1069
+ return torch.diag(v, k)
1070
+
1071
+
1072
+ def diagflat(v: ArrayLike, k=0):
1073
+ return torch.diagflat(v, k)
1074
+
1075
+
1076
+ def diag_indices(n, ndim=2):
1077
+ idx = torch.arange(n)
1078
+ return (idx,) * ndim
1079
+
1080
+
1081
+ def diag_indices_from(arr: ArrayLike):
1082
+ if not arr.ndim >= 2:
1083
+ raise ValueError("input array must be at least 2-d")
1084
+ # For more than d=2, the strided formula is only valid for arrays with
1085
+ # all dimensions equal, so we check first.
1086
+ s = arr.shape
1087
+ if s[1:] != s[:-1]:
1088
+ raise ValueError("All dimensions of input must be of equal length")
1089
+ return diag_indices(s[0], arr.ndim)
1090
+
1091
+
1092
+ def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False):
1093
+ if a.ndim < 2:
1094
+ raise ValueError("array must be at least 2-d")
1095
+ if val.numel() == 0 and not wrap:
1096
+ a.fill_diagonal_(val)
1097
+ return a
1098
+
1099
+ if val.ndim == 0:
1100
+ val = val.unsqueeze(0)
1101
+
1102
+ # torch.Tensor.fill_diagonal_ only accepts scalars
1103
+ # If the size of val is too large, then val is trimmed
1104
+ if a.ndim == 2:
1105
+ tall = a.shape[0] > a.shape[1]
1106
+ # wrap does nothing for wide matrices...
1107
+ if not wrap or not tall:
1108
+ # Never wraps
1109
+ diag = a.diagonal()
1110
+ diag.copy_(val[: diag.numel()])
1111
+ else:
1112
+ # wraps and tall... leaving one empty line between diagonals?!
1113
+ max_, min_ = a.shape
1114
+ idx = torch.arange(max_ - max_ // (min_ + 1))
1115
+ mod = idx % min_
1116
+ div = idx // min_
1117
+ a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()]
1118
+ else:
1119
+ idx = diag_indices_from(a)
1120
+ # a.shape = (n, n, ..., n)
1121
+ a[idx] = val[: a.shape[0]]
1122
+
1123
+ return a
1124
+
1125
+
1126
+ def vdot(a: ArrayLike, b: ArrayLike, /):
1127
+ # 1. torch only accepts 1D arrays, numpy flattens
1128
+ # 2. torch requires matching dtype, while numpy casts (?)
1129
+ t_a, t_b = torch.atleast_1d(a, b)
1130
+ if t_a.ndim > 1:
1131
+ t_a = t_a.flatten()
1132
+ if t_b.ndim > 1:
1133
+ t_b = t_b.flatten()
1134
+
1135
+ dtype = _dtypes_impl.result_type_impl(t_a, t_b)
1136
+ is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu)
1137
+ is_bool = dtype == torch.bool
1138
+
1139
+ # work around torch's "dot" not implemented for 'Half', 'Bool'
1140
+ if is_half:
1141
+ dtype = torch.float32
1142
+ elif is_bool:
1143
+ dtype = torch.uint8
1144
+
1145
+ t_a = _util.cast_if_needed(t_a, dtype)
1146
+ t_b = _util.cast_if_needed(t_b, dtype)
1147
+
1148
+ result = torch.vdot(t_a, t_b)
1149
+
1150
+ if is_half:
1151
+ result = result.to(torch.float16)
1152
+ elif is_bool:
1153
+ result = result.to(torch.bool)
1154
+
1155
+ return result
1156
+
1157
+
1158
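
The cast-compute-cast-back dance lets `vdot` cover dtypes torch's kernel rejects, and the numpy-style flattening accepts multi-dimensional inputs. A sketch for the bool path:

```python
import torch
from torch._numpy import _funcs_impl

a = torch.tensor([[True, False], [True, True]])
b = torch.tensor([[True, True], [False, True]])

# bool inputs are routed through uint8 and the result is cast back to bool
out = _funcs_impl.vdot(a, b)
assert out.dtype == torch.bool and bool(out) is True
```
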
+ def tensordot(a: ArrayLike, b: ArrayLike, axes=2):
1159
+ if isinstance(axes, (list, tuple)):
1160
+ axes = [[ax] if isinstance(ax, int) else ax for ax in axes]
1161
+
1162
+ target_dtype = _dtypes_impl.result_type_impl(a, b)
1163
+ a = _util.cast_if_needed(a, target_dtype)
1164
+ b = _util.cast_if_needed(b, target_dtype)
1165
+
1166
+ return torch.tensordot(a, b, dims=axes)
1167
+
1168
+
1169
+ def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
1170
+ dtype = _dtypes_impl.result_type_impl(a, b)
1171
+ is_bool = dtype == torch.bool
1172
+ if is_bool:
1173
+ dtype = torch.uint8
1174
+
1175
+ a = _util.cast_if_needed(a, dtype)
1176
+ b = _util.cast_if_needed(b, dtype)
1177
+
1178
+ if a.ndim == 0 or b.ndim == 0:
1179
+ result = a * b
1180
+ else:
1181
+ result = torch.matmul(a, b)
1182
+
1183
+ if is_bool:
1184
+ result = result.to(torch.bool)
1185
+
1186
+ return result
1187
+
1188
+
1189
+ def inner(a: ArrayLike, b: ArrayLike, /):
1190
+ dtype = _dtypes_impl.result_type_impl(a, b)
1191
+ is_half = dtype == torch.float16 and (a.is_cpu or b.is_cpu)
1192
+ is_bool = dtype == torch.bool
1193
+
1194
+ if is_half:
1195
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'"
1196
+ dtype = torch.float32
1197
+ elif is_bool:
1198
+ dtype = torch.uint8
1199
+
1200
+ a = _util.cast_if_needed(a, dtype)
1201
+ b = _util.cast_if_needed(b, dtype)
1202
+
1203
+ result = torch.inner(a, b)
1204
+
1205
+ if is_half:
1206
+ result = result.to(torch.float16)
1207
+ elif is_bool:
1208
+ result = result.to(torch.bool)
1209
+ return result
1210
+
1211
+
1212
+ def outer(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
1213
+ return torch.outer(a, b)
1214
+
1215
+
1216
+ def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None):
1217
+ # implementation vendored from
1218
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685
1219
+ if axis is not None:
1220
+ axisa, axisb, axisc = (axis,) * 3
1221
+
1222
+ # Check axisa and axisb are within bounds
1223
+ axisa = _util.normalize_axis_index(axisa, a.ndim)
1224
+ axisb = _util.normalize_axis_index(axisb, b.ndim)
1225
+
1226
+ # Move working axis to the end of the shape
1227
+ a = torch.moveaxis(a, axisa, -1)
1228
+ b = torch.moveaxis(b, axisb, -1)
1229
+ msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)"
1230
+ if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
1231
+ raise ValueError(msg)
1232
+
1233
+ # Create the output array
1234
+ shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape)
1235
+ if a.shape[-1] == 3 or b.shape[-1] == 3:
1236
+ shape += (3,)
1237
+ # Check axisc is within bounds
1238
+ axisc = _util.normalize_axis_index(axisc, len(shape))
1239
+ dtype = _dtypes_impl.result_type_impl(a, b)
1240
+ cp = torch.empty(shape, dtype=dtype)
1241
+
1242
+ # recast arrays as dtype
1243
+ a = _util.cast_if_needed(a, dtype)
1244
+ b = _util.cast_if_needed(b, dtype)
1245
+
1246
+ # create local aliases for readability
1247
+ a0 = a[..., 0]
1248
+ a1 = a[..., 1]
1249
+ if a.shape[-1] == 3:
1250
+ a2 = a[..., 2]
1251
+ b0 = b[..., 0]
1252
+ b1 = b[..., 1]
1253
+ if b.shape[-1] == 3:
1254
+ b2 = b[..., 2]
1255
+ if cp.ndim != 0 and cp.shape[-1] == 3:
1256
+ cp0 = cp[..., 0]
1257
+ cp1 = cp[..., 1]
1258
+ cp2 = cp[..., 2]
1259
+
1260
+ if a.shape[-1] == 2:
1261
+ if b.shape[-1] == 2:
1262
+ # a0 * b1 - a1 * b0
1263
+ cp[...] = a0 * b1 - a1 * b0
1264
+ return cp
1265
+ else:
1266
+ assert b.shape[-1] == 3
1267
+ # cp0 = a1 * b2 - 0 (a2 = 0)
1268
+ # cp1 = 0 - a0 * b2 (a2 = 0)
1269
+ # cp2 = a0 * b1 - a1 * b0
1270
+ cp0[...] = a1 * b2
1271
+ cp1[...] = -a0 * b2
1272
+ cp2[...] = a0 * b1 - a1 * b0
1273
+ else:
1274
+ assert a.shape[-1] == 3
1275
+ if b.shape[-1] == 3:
1276
+ cp0[...] = a1 * b2 - a2 * b1
1277
+ cp1[...] = a2 * b0 - a0 * b2
1278
+ cp2[...] = a0 * b1 - a1 * b0
1279
+ else:
1280
+ assert b.shape[-1] == 2
1281
+ cp0[...] = -a2 * b1
1282
+ cp1[...] = a2 * b0
1283
+ cp2[...] = a0 * b1 - a1 * b0
1284
+
1285
+ return torch.moveaxis(cp, -1, axisc)
1286
+
1287
+
1288
+ def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False):
1289
+ # Have to manually normalize *operands and **kwargs, following the NumPy signature
1290
+ # We have a local import to avoid polluting the global space, as it would then
1291
+ # be exported in _funcs.py
1292
+ from ._ndarray import ndarray
1293
+ from ._normalizations import (
1294
+ maybe_copy_to,
1295
+ normalize_array_like,
1296
+ normalize_casting,
1297
+ normalize_dtype,
1298
+ wrap_tensors,
1299
+ )
1300
+
1301
+ dtype = normalize_dtype(dtype)
1302
+ casting = normalize_casting(casting)
1303
+ if out is not None and not isinstance(out, ndarray):
1304
+ raise TypeError("'out' must be an array")
1305
+ if order != "K":
1306
+ raise NotImplementedError("'order' parameter is not supported.")
1307
+
1308
+ # parse arrays and normalize them
1309
+ sublist_format = not isinstance(operands[0], str)
1310
+ if sublist_format:
1311
+ # op, str, op, str ... [sublistout] format: normalize every other argument
1312
+
1313
+ # - if sublistout is not given, the length of operands is even, and we pick
1314
+ # odd-numbered elements, which are arrays.
1315
+ # - if sublistout is given, the length of operands is odd, we peel off
1316
+ # the last one, and pick odd-numbered elements, which are arrays.
1317
+ # Without [:-1], we would have picked sublistout, too.
1318
+ array_operands = operands[:-1][::2]
1319
+ else:
1320
+ # ("ij->", arrays) format
1321
+ subscripts, array_operands = operands[0], operands[1:]
1322
+
1323
+ tensors = [normalize_array_like(op) for op in array_operands]
1324
+ target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype
1325
+
1326
+ # work around 'bmm' not implemented for 'Half' etc
1327
+ is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors)
1328
+ if is_half:
1329
+ target_dtype = torch.float32
1330
+
1331
+ is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32]
1332
+ if is_short_int:
1333
+ target_dtype = torch.int64
1334
+
1335
+ tensors = _util.typecast_tensors(tensors, target_dtype, casting)
1336
+
1337
+ from torch.backends import opt_einsum
1338
+
1339
+ try:
1340
+ # set the global state to handle the optimize=... argument, restore on exit
1341
+ if opt_einsum.is_available():
1342
+ old_strategy = torch.backends.opt_einsum.strategy
1343
+ old_enabled = torch.backends.opt_einsum.enabled
1344
+
1345
+ # torch.einsum calls opt_einsum.contract_path, which runs into
1346
+ # https://github.com/dgasmith/opt_einsum/issues/219
1347
+ # for strategy={True, False}
1348
+ if optimize is True:
1349
+ optimize = "auto"
1350
+ elif optimize is False:
1351
+ torch.backends.opt_einsum.enabled = False
1352
+
1353
+ torch.backends.opt_einsum.strategy = optimize
1354
+
1355
+ if sublist_format:
1356
+ # recombine operands
1357
+ sublists = operands[1::2]
1358
+ has_sublistout = len(operands) % 2 == 1
1359
+ if has_sublistout:
1360
+ sublistout = operands[-1]
1361
+ operands = list(itertools.chain.from_iterable(zip(tensors, sublists)))
1362
+ if has_sublistout:
1363
+ operands.append(sublistout)
1364
+
1365
+ result = torch.einsum(*operands)
1366
+ else:
1367
+ result = torch.einsum(subscripts, *tensors)
1368
+
1369
+ finally:
1370
+ if opt_einsum.is_available():
1371
+ torch.backends.opt_einsum.strategy = old_strategy
1372
+ torch.backends.opt_einsum.enabled = old_enabled
1373
+
1374
+ result = maybe_copy_to(out, result)
1375
+ return wrap_tensors(result)
1376
+
1377
+
1378
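
Both calling conventions routed through the `sublist_format` branch work. A sketch using the public wrapper, which (unlike its siblings) normalizes its own arguments; it assumes `tnp.arange` and ndarray `.reshape` behave as in numpy:

```python
import torch._numpy as tnp

a = tnp.arange(6).reshape(2, 3)

# subscript form and sublist form compute the same full reduction
total = tnp.einsum("ij->", a)
total_sublist = tnp.einsum(a, [0, 1], [])
assert total == 15
assert total_sublist == 15
```
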
+ # ### sort and partition ###
1379
+
1380
+
1381
+ def _sort_helper(tensor, axis, kind, order):
1382
+ if tensor.dtype.is_complex:
1383
+ raise NotImplementedError(f"sorting {tensor.dtype} is not supported")
1384
+ (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis)
1385
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
1386
+
1387
+ stable = kind == "stable"
1388
+
1389
+ return tensor, axis, stable
1390
+
1391
+
1392
+ def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
1393
+ # `order` keyword arg is only relevant for structured dtypes; so not supported here.
1394
+ a, axis, stable = _sort_helper(a, axis, kind, order)
1395
+ result = torch.sort(a, dim=axis, stable=stable)
1396
+ return result.values
1397
+
1398
+
1399
+ def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
1400
+ a, axis, stable = _sort_helper(a, axis, kind, order)
1401
+ return torch.argsort(a, dim=axis, stable=stable)
1402
+
1403
+
1404
+ def searchsorted(
1405
+ a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None
1406
+ ):
1407
+ if a.dtype.is_complex:
1408
+ raise NotImplementedError(f"searchsorted with dtype={a.dtype}")
1409
+
1410
+ return torch.searchsorted(a, v, side=side, sorter=sorter)
1411
+
1412
+
1413
+ # ### swap/move/roll axis ###
1414
+
1415
+
1416
+ def moveaxis(a: ArrayLike, source, destination):
1417
+ source = _util.normalize_axis_tuple(source, a.ndim, "source")
1418
+ destination = _util.normalize_axis_tuple(destination, a.ndim, "destination")
1419
+ return torch.moveaxis(a, source, destination)
1420
+
1421
+
1422
+ def swapaxes(a: ArrayLike, axis1, axis2):
1423
+ axis1 = _util.normalize_axis_index(axis1, a.ndim)
1424
+ axis2 = _util.normalize_axis_index(axis2, a.ndim)
1425
+ return torch.swapaxes(a, axis1, axis2)
1426
+
1427
+
1428
+ def rollaxis(a: ArrayLike, axis, start=0):
1429
+ # Straight vendor from:
1430
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259
1431
+ #
1432
+ # Also note this function in NumPy is mostly retained for backwards compat
1433
+ # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing)
1434
+ # so let's not touch it unless hard pressed.
1435
+ n = a.ndim
1436
+ axis = _util.normalize_axis_index(axis, n)
1437
+ if start < 0:
1438
+ start += n
1439
+ msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
1440
+ if not (0 <= start < n + 1):
1441
+ raise _util.AxisError(msg % ("start", -n, "start", n + 1, start))
1442
+ if axis < start:
1443
+ # it's been removed
1444
+ start -= 1
1445
+ if axis == start:
1446
+ # numpy returns a view, here we try returning the tensor itself
1447
+ # return tensor[...]
1448
+ return a
1449
+ axes = list(range(0, n))
1450
+ axes.remove(axis)
1451
+ axes.insert(start, axis)
1452
+ return a.permute(axes)  # numpy's a.transpose(axes); tensor.view would reshape, not permute
1453
+
1454
+
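+ # Orientation sketch (numpy semantics, not a test): rolling axis 2 of a
+ # (3, 4, 5) tensor to the front applies the permutation [2, 0, 1]:
+ #
+ # >>> import torch._numpy as np
+ # >>> np.rollaxis(np.ones((3, 4, 5)), 2).shape
+ # (5, 3, 4)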
1455
+ def roll(a: ArrayLike, shift, axis=None):
1456
+ if axis is not None:
1457
+ axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
1458
+ if not isinstance(shift, tuple):
1459
+ shift = (shift,) * len(axis)
1460
+ return torch.roll(a, shift, axis)
1461
+
1462
+
1463
+ # ### shape manipulations ###
1464
+
1465
+
1466
+ def squeeze(a: ArrayLike, axis=None):
1467
+ if axis == ():
1468
+ result = a
1469
+ elif axis is None:
1470
+ result = a.squeeze()
1471
+ else:
1472
+ if isinstance(axis, tuple):
1473
+ result = a
+ # squeeze the axes one at a time, from the back, so that removing an
+ # axis does not shift the indices of the axes still to be squeezed
+ for ax in sorted(axis, reverse=True):
+ result = result.squeeze(ax)
1476
+ else:
1477
+ result = a.squeeze(axis)
1478
+ return result
1479
+
1480
+
1481
+ def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"):
1482
+ # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh)
1483
+ newshape = newshape[0] if len(newshape) == 1 else newshape
1484
+ return a.reshape(newshape)
1485
+
1486
+
1487
+ # NB: cannot use torch.reshape(a, newshape) above, because of
1488
+ # (Pdb) torch.reshape(torch.as_tensor([1]), 1)
1489
+ # *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int
1490
+
1491
+
1492
+ def transpose(a: ArrayLike, axes=None):
1493
+ # numpy allows both .transpose(sh) and .transpose(*sh)
1494
+ # also older code uses axes being a list
1495
+ if axes in [(), None, (None,)]:
1496
+ axes = tuple(reversed(range(a.ndim)))
1497
+ elif len(axes) == 1:
1498
+ axes = axes[0]
1499
+ return a.permute(axes)
1500
+
1501
+
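+ # Sketch of the accepted spellings handled above:
+ #
+ # >>> import torch._numpy as np
+ # >>> a = np.ones((2, 3, 4))
+ # >>> np.transpose(a).shape        # axes=None reverses all dims
+ # (4, 3, 2)
+ # >>> a.transpose(1, 0, 2).shape   # method form unpacks *axes
+ # (3, 2, 4)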
1502
+ def ravel(a: ArrayLike, order: NotImplementedType = "C"):
1503
+ return torch.flatten(a)
1504
+
1505
+
1506
+ def diff(
1507
+ a: ArrayLike,
1508
+ n=1,
1509
+ axis=-1,
1510
+ prepend: Optional[ArrayLike] = None,
1511
+ append: Optional[ArrayLike] = None,
1512
+ ):
1513
+ axis = _util.normalize_axis_index(axis, a.ndim)
1514
+
1515
+ if n < 0:
1516
+ raise ValueError(f"order must be non-negative but got {n}")
1517
+
1518
+ if n == 0:
1519
+ # match numpy and return the input immediately
1520
+ return a
1521
+
1522
+ if prepend is not None:
1523
+ shape = list(a.shape)
1524
+ shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1
1525
+ prepend = torch.broadcast_to(prepend, shape)
1526
+
1527
+ if append is not None:
1528
+ shape = list(a.shape)
1529
+ shape[axis] = append.shape[axis] if append.ndim > 0 else 1
1530
+ append = torch.broadcast_to(append, shape)
1531
+
1532
+ return torch.diff(a, n, axis=axis, prepend=prepend, append=append)
1533
+
1534
+
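+ # The broadcasts above let a scalar prepend/append stand in for one full
+ # slice along `axis`, matching numpy. Sketch:
+ #
+ # >>> import torch._numpy as np
+ # >>> np.diff(np.asarray([1, 2, 4, 7]), prepend=0).tolist()
+ # [1, 1, 2, 3]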
1535
+ # ### math functions ###
1536
+
1537
+
1538
+ def angle(z: ArrayLike, deg=False):
1539
+ result = torch.angle(z)
1540
+ if deg:
1541
+ result = result * (180 / torch.pi)
1542
+ return result
1543
+
1544
+
1545
+ def sinc(x: ArrayLike):
1546
+ return torch.sinc(x)
1547
+
1548
+
1549
+ # NB: have to normalize *varargs manually
1550
+ def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1):
1551
+ N = f.ndim # number of dimensions
1552
+
1553
+ varargs = _util.ndarrays_to_tensors(varargs)
1554
+
1555
+ if axis is None:
1556
+ axes = tuple(range(N))
1557
+ else:
1558
+ axes = _util.normalize_axis_tuple(axis, N)
1559
+
1560
+ len_axes = len(axes)
1561
+ n = len(varargs)
1562
+ if n == 0:
1563
+ # no spacing argument - use 1 in all axes
1564
+ dx = [1.0] * len_axes
1565
+ elif n == 1 and (_dtypes_impl.is_scalar(varargs[0]) or varargs[0].ndim == 0):
1566
+ # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0)
1567
+ dx = varargs * len_axes
1568
+ elif n == len_axes:
1569
+ # scalar or 1d array for each axis
1570
+ dx = list(varargs)
1571
+ for i, distances in enumerate(dx):
1572
+ distances = torch.as_tensor(distances)
1573
+ if distances.ndim == 0:
1574
+ continue
1575
+ elif distances.ndim != 1:
1576
+ raise ValueError("distances must be either scalars or 1d")
1577
+ if len(distances) != f.shape[axes[i]]:
1578
+ raise ValueError(
1579
+ "when 1d, distances must match "
1580
+ "the length of the corresponding dimension"
1581
+ )
1582
+ if not (distances.dtype.is_floating_point or distances.dtype.is_complex):
1583
+ distances = distances.double()
1584
+
1585
+ diffx = torch.diff(distances)
1586
+ # if distances are constant reduce to the scalar case
1587
+ # since it brings a consistent speedup
1588
+ if (diffx == diffx[0]).all():
1589
+ diffx = diffx[0]
1590
+ dx[i] = diffx
1591
+ else:
1592
+ raise TypeError("invalid number of arguments")
1593
+
1594
+ if edge_order > 2:
1595
+ raise ValueError("'edge_order' greater than 2 not supported")
1596
+
1597
+ # use central differences on interior and one-sided differences on the
1598
+ # endpoints. This preserves second-order accuracy over the full domain.
1599
+
1600
+ outvals = []
1601
+
1602
+ # create slice objects --- initially all are [:, :, ..., :]
1603
+ slice1 = [slice(None)] * N
1604
+ slice2 = [slice(None)] * N
1605
+ slice3 = [slice(None)] * N
1606
+ slice4 = [slice(None)] * N
1607
+
1608
+ otype = f.dtype
1609
+ if _dtypes_impl.python_type_for_torch(otype) in (int, bool):
1610
+ # Convert to floating point.
1611
+ # First check if f is a numpy integer type; if so, convert f to float64
1612
+ # to avoid modular arithmetic when computing the changes in f.
1613
+ f = f.double()
1614
+ otype = torch.float64
1615
+
1616
+ for axis, ax_dx in zip(axes, dx):
1617
+ if f.shape[axis] < edge_order + 1:
1618
+ raise ValueError(
1619
+ "Shape of array too small to calculate a numerical gradient, "
1620
+ "at least (edge_order + 1) elements are required."
1621
+ )
1622
+ # result allocation
1623
+ out = torch.empty_like(f, dtype=otype)
1624
+
1625
+ # spacing for the current axis (NB: np.ndim(ax_dx) == 0)
1626
+ uniform_spacing = _dtypes_impl.is_scalar(ax_dx) or ax_dx.ndim == 0
1627
+
1628
+ # Numerical differentiation: 2nd order interior
1629
+ slice1[axis] = slice(1, -1)
1630
+ slice2[axis] = slice(None, -2)
1631
+ slice3[axis] = slice(1, -1)
1632
+ slice4[axis] = slice(2, None)
1633
+
1634
+ if uniform_spacing:
1635
+ out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx)
1636
+ else:
1637
+ dx1 = ax_dx[0:-1]
1638
+ dx2 = ax_dx[1:]
1639
+ a = -(dx2) / (dx1 * (dx1 + dx2))
1640
+ b = (dx2 - dx1) / (dx1 * dx2)
1641
+ c = dx1 / (dx2 * (dx1 + dx2))
1642
+ # fix the shape for broadcasting
1643
+ shape = [1] * N
1644
+ shape[axis] = -1
1645
+ a = a.reshape(shape)
1646
+ b = b.reshape(shape)
1647
+ c = c.reshape(shape)
1648
+ # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
1649
+ out[tuple(slice1)] = (
1650
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1651
+ )
1652
+
1653
+ # Numerical differentiation: 1st order edges
1654
+ if edge_order == 1:
1655
+ slice1[axis] = 0
1656
+ slice2[axis] = 1
1657
+ slice3[axis] = 0
1658
+ dx_0 = ax_dx if uniform_spacing else ax_dx[0]
1659
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
1660
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
1661
+
1662
+ slice1[axis] = -1
1663
+ slice2[axis] = -1
1664
+ slice3[axis] = -2
1665
+ dx_n = ax_dx if uniform_spacing else ax_dx[-1]
1666
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
1667
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
1668
+
1669
+ # Numerical differentiation: 2nd order edges
1670
+ else:
1671
+ slice1[axis] = 0
1672
+ slice2[axis] = 0
1673
+ slice3[axis] = 1
1674
+ slice4[axis] = 2
1675
+ if uniform_spacing:
1676
+ a = -1.5 / ax_dx
1677
+ b = 2.0 / ax_dx
1678
+ c = -0.5 / ax_dx
1679
+ else:
1680
+ dx1 = ax_dx[0]
1681
+ dx2 = ax_dx[1]
1682
+ a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
1683
+ b = (dx1 + dx2) / (dx1 * dx2)
1684
+ c = -dx1 / (dx2 * (dx1 + dx2))
1685
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
1686
+ out[tuple(slice1)] = (
1687
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1688
+ )
1689
+
1690
+ slice1[axis] = -1
1691
+ slice2[axis] = -3
1692
+ slice3[axis] = -2
1693
+ slice4[axis] = -1
1694
+ if uniform_spacing:
1695
+ a = 0.5 / ax_dx
1696
+ b = -2.0 / ax_dx
1697
+ c = 1.5 / ax_dx
1698
+ else:
1699
+ dx1 = ax_dx[-2]
1700
+ dx2 = ax_dx[-1]
1701
+ a = (dx2) / (dx1 * (dx1 + dx2))
1702
+ b = -(dx2 + dx1) / (dx1 * dx2)
1703
+ c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
1704
+ # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
1705
+ out[tuple(slice1)] = (
1706
+ a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
1707
+ )
1708
+
1709
+ outvals.append(out)
1710
+
1711
+ # reset the slice object in this dimension to ":"
1712
+ slice1[axis] = slice(None)
1713
+ slice2[axis] = slice(None)
1714
+ slice3[axis] = slice(None)
1715
+ slice4[axis] = slice(None)
1716
+
1717
+ if len_axes == 1:
1718
+ return outvals[0]
1719
+ else:
1720
+ return outvals
1721
+
1722
+
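+ # Quick sanity sketch (illustrative): for f = x**2 sampled at unit spacing,
+ # the central differences in the interior are exact and the default
+ # edge_order=1 endpoints are one-sided:
+ #
+ # >>> import torch._numpy as np
+ # >>> np.gradient(np.asarray([0.0, 1.0, 4.0, 9.0])).tolist()
+ # [1.0, 2.0, 4.0, 5.0]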
1723
+ # ### Type/shape etc queries ###
1724
+
1725
+
1726
+ def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None):
1727
+ if a.is_floating_point():
1728
+ result = torch.round(a, decimals=decimals)
1729
+ elif a.is_complex():
1730
+ # RuntimeError: "round_cpu" not implemented for 'ComplexFloat'
1731
+ result = torch.complex(
1732
+ torch.round(a.real, decimals=decimals),
1733
+ torch.round(a.imag, decimals=decimals),
1734
+ )
1735
+ else:
1736
+ # RuntimeError: "round_cpu" not implemented for 'int'
1737
+ result = a
1738
+ return result
1739
+
1740
+
1741
+ around = round
1742
+ round_ = round
1743
+
1744
+
1745
+ def real_if_close(a: ArrayLike, tol=100):
1746
+ if not torch.is_complex(a):
1747
+ return a
1748
+ if tol > 1:
1749
+ # Undocumented in numpy: if tol < 1, it's an absolute tolerance!
1750
+ # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon
1751
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577
1752
+ tol = tol * torch.finfo(a.dtype).eps
1753
+
1754
+ mask = torch.abs(a.imag) < tol
1755
+ return a.real if mask.all() else a
1756
+
1757
+
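+ # Sketch of the tolerance rule above: tol > 1 is measured in units of the
+ # dtype epsilon, tol <= 1 is taken as an absolute tolerance.
+ #
+ # >>> import torch._numpy as np
+ # >>> np.real_if_close(np.asarray([2.1 + 4e-15j])).tolist()  # |imag| < 100*eps
+ # [2.1]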
1758
+ def real(a: ArrayLike):
1759
+ return torch.real(a)
1760
+
1761
+
1762
+ def imag(a: ArrayLike):
1763
+ if a.is_complex():
1764
+ return a.imag
1765
+ return torch.zeros_like(a)
1766
+
1767
+
1768
+ def iscomplex(x: ArrayLike):
1769
+ if torch.is_complex(x):
1770
+ return x.imag != 0
1771
+ return torch.zeros_like(x, dtype=torch.bool)
1772
+
1773
+
1774
+ def isreal(x: ArrayLike):
1775
+ if torch.is_complex(x):
1776
+ return x.imag == 0
1777
+ return torch.ones_like(x, dtype=torch.bool)
1778
+
1779
+
1780
+ def iscomplexobj(x: ArrayLike):
1781
+ return torch.is_complex(x)
1782
+
1783
+
1784
+ def isrealobj(x: ArrayLike):
1785
+ return not torch.is_complex(x)
1786
+
1787
+
1788
+ def isneginf(x: ArrayLike, out: Optional[OutArray] = None):
1789
+ return torch.isneginf(x)
1790
+
1791
+
1792
+ def isposinf(x: ArrayLike, out: Optional[OutArray] = None):
1793
+ return torch.isposinf(x)
1794
+
1795
+
1796
+ def i0(x: ArrayLike):
1797
+ return torch.special.i0(x)
1798
+
1799
+
1800
+ def isscalar(a):
1801
+ # We need to use normalize_array_like, but we don't want to export it in funcs.py
1802
+ from ._normalizations import normalize_array_like
1803
+
1804
+ try:
1805
+ t = normalize_array_like(a)
1806
+ return t.numel() == 1
1807
+ except Exception:
1808
+ return False
1809
+
1810
+
1811
+ # ### Filter windows ###
1812
+
1813
+
1814
+ def hamming(M):
1815
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1816
+ return torch.hamming_window(M, periodic=False, dtype=dtype)
1817
+
1818
+
1819
+ def hanning(M):
1820
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1821
+ return torch.hann_window(M, periodic=False, dtype=dtype)
1822
+
1823
+
1824
+ def kaiser(M, beta):
1825
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1826
+ return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype)
1827
+
1828
+
1829
+ def blackman(M):
1830
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1831
+ return torch.blackman_window(M, periodic=False, dtype=dtype)
1832
+
1833
+
1834
+ def bartlett(M):
1835
+ dtype = _dtypes_impl.default_dtypes().float_dtype
1836
+ return torch.bartlett_window(M, periodic=False, dtype=dtype)
1837
+
1838
+
1839
+ # ### Dtype routines ###
1840
+
1841
+ # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666
1842
+
1843
+
1844
+ array_type = [
1845
+ [torch.float16, torch.float32, torch.float64],
1846
+ [None, torch.complex64, torch.complex128],
1847
+ ]
1848
+ array_precision = {
1849
+ torch.float16: 0,
1850
+ torch.float32: 1,
1851
+ torch.float64: 2,
1852
+ torch.complex64: 1,
1853
+ torch.complex128: 2,
1854
+ }
1855
+
1856
+
1857
+ def common_type(*tensors: ArrayLike):
1858
+ is_complex = False
1859
+ precision = 0
1860
+ for a in tensors:
1861
+ t = a.dtype
1862
+ if iscomplexobj(a):
1863
+ is_complex = True
1864
+ if not (t.is_floating_point or t.is_complex):
1865
+ p = 2 # array_precision[_nx.double]
1866
+ else:
1867
+ p = array_precision.get(t, None)
1868
+ if p is None:
1869
+ raise TypeError("can't get common type for non-numeric array")
1870
+ precision = builtins.max(precision, p)
1871
+ if is_complex:
1872
+ return array_type[1][precision]
1873
+ else:
1874
+ return array_type[0][precision]
1875
+
1876
+
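+ # The two tables above encode numpy's promotion lattice: row 0 of
+ # `array_type` holds the real dtypes and row 1 their complex counterparts,
+ # indexed by precision. Impl-level sketch (tensor inputs):
+ #
+ # >>> import torch
+ # >>> common_type(torch.ones(2, dtype=torch.float32),
+ # ...             torch.ones(2, dtype=torch.complex64))
+ # torch.complex64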
1877
+ # ### histograms ###
1878
+
1879
+
1880
+ def histogram(
1881
+ a: ArrayLike,
1882
+ bins: ArrayLike = 10,
1883
+ range=None,
1884
+ normed=None,
1885
+ weights: Optional[ArrayLike] = None,
1886
+ density=None,
1887
+ ):
1888
+ if normed is not None:
1889
+ raise ValueError("normed argument is deprecated, use density= instead")
1890
+
1891
+ if weights is not None and weights.dtype.is_complex:
1892
+ raise NotImplementedError("complex weights histogram.")
1893
+
1894
+ is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex)
1895
+ is_w_int = weights is None or not weights.dtype.is_floating_point
1896
+ if is_a_int:
1897
+ a = a.double()
1898
+
1899
+ if weights is not None:
1900
+ weights = _util.cast_if_needed(weights, a.dtype)
1901
+
1902
+ if isinstance(bins, torch.Tensor):
1903
+ if bins.ndim == 0:
1904
+ # bins was a single int
1905
+ bins = operator.index(bins)
1906
+ else:
1907
+ bins = _util.cast_if_needed(bins, a.dtype)
1908
+
1909
+ if range is None:
1910
+ h, b = torch.histogram(a, bins, weight=weights, density=bool(density))
1911
+ else:
1912
+ h, b = torch.histogram(
1913
+ a, bins, range=range, weight=weights, density=bool(density)
1914
+ )
1915
+
1916
+ if not density and is_w_int:
1917
+ h = h.long()
1918
+ if is_a_int:
1919
+ b = b.long()
1920
+
1921
+ return h, b
1922
+
1923
+
1924
+ def histogram2d(
1925
+ x,
1926
+ y,
1927
+ bins=10,
1928
+ range: Optional[ArrayLike] = None,
1929
+ normed=None,
1930
+ weights: Optional[ArrayLike] = None,
1931
+ density=None,
1932
+ ):
1933
+ # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821
1934
+ if len(x) != len(y):
1935
+ raise ValueError("x and y must have the same length.")
1936
+
1937
+ try:
1938
+ N = len(bins)
1939
+ except TypeError:
1940
+ N = 1
1941
+
1942
+ if N != 1 and N != 2:
1943
+ bins = [bins, bins]
1944
+
1945
+ h, e = histogramdd((x, y), bins, range, normed, weights, density)
1946
+
1947
+ return h, e[0], e[1]
1948
+
1949
+
1950
+ def histogramdd(
1951
+ sample,
1952
+ bins=10,
1953
+ range: Optional[ArrayLike] = None,
1954
+ normed=None,
1955
+ weights: Optional[ArrayLike] = None,
1956
+ density=None,
1957
+ ):
1958
+ # have to normalize manually because `sample` interpretation differs
1959
+ # for a list of lists and a 2D array
1960
+ if normed is not None:
1961
+ raise ValueError("normed argument is deprecated, use density= instead")
1962
+
1963
+ from ._normalizations import normalize_array_like, normalize_seq_array_like
1964
+
1965
+ if isinstance(sample, (list, tuple)):
1966
+ sample = normalize_array_like(sample).T
1967
+ else:
1968
+ sample = normalize_array_like(sample)
1969
+
1970
+ sample = torch.atleast_2d(sample)
1971
+
1972
+ if not (sample.dtype.is_floating_point or sample.dtype.is_complex):
1973
+ sample = sample.double()
1974
+
1975
+ # bins is either an int, or a sequence of ints or a sequence of arrays
1976
+ bins_is_array = not (
1977
+ isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins)
1978
+ )
1979
+ if bins_is_array:
1980
+ bins = normalize_seq_array_like(bins)
1981
+ bins_dtypes = [b.dtype for b in bins]
1982
+ bins = [_util.cast_if_needed(b, sample.dtype) for b in bins]
1983
+
1984
+ if range is not None:
1985
+ range = range.flatten().tolist()
1986
+
1987
+ if weights is not None:
1988
+ # range=... is required: interleave min and max values per dimension
1989
+ mm = sample.aminmax(dim=0)
1990
+ range = torch.cat(mm).reshape(2, -1).T.flatten()
1991
+ range = tuple(range.tolist())
1992
+ weights = _util.cast_if_needed(weights, sample.dtype)
1993
+ w_kwd = {"weight": weights}
1994
+ else:
1995
+ w_kwd = {}
1996
+
1997
+ h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd)
1998
+
1999
+ if bins_is_array:
2000
+ b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)]
2001
+
2002
+ return h, b
2003
+
2004
+
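+ # Note on the manual normalization above (sketch): numpy reads a sequence
+ # like (x, y) as D coordinate arrays, one per dimension, while a 2D array is
+ # N rows of D-dimensional points; the transpose makes both cases feed an
+ # (N, D) sample to torch.histogramdd.
+ #
+ # >>> import torch._numpy as np
+ # >>> h, edges = np.histogramdd([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]], bins=2)
+ # >>> h.shape
+ # (2, 2)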
2005
+ # ### odds and ends
2006
+
2007
+
2008
+ def min_scalar_type(a: ArrayLike, /):
2009
+ # https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288
2010
+
2011
+ from ._dtypes import DType
2012
+
2013
+ if a.numel() > 1:
2014
+ # numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
2015
+ return DType(a.dtype)
2016
+
2017
+ if a.dtype == torch.bool:
2018
+ dtype = torch.bool
2019
+
2020
+ elif a.dtype.is_complex:
2021
+ fi = torch.finfo(torch.float32)
2022
+ fits_in_single = a.dtype == torch.complex64 or (
2023
+ fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max
2024
+ )
2025
+ dtype = torch.complex64 if fits_in_single else torch.complex128
2026
+
2027
+ elif a.dtype.is_floating_point:
2028
+ for dt in [torch.float16, torch.float32, torch.float64]:
2029
+ fi = torch.finfo(dt)
2030
+ if fi.min <= a <= fi.max:
2031
+ dtype = dt
2032
+ break
2033
+ else:
2034
+ # must be integer
2035
+ for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
2036
+ # Prefer unsigned int where possible, as numpy does.
2037
+ ii = torch.iinfo(dt)
2038
+ if ii.min <= a <= ii.max:
2039
+ dtype = dt
2040
+ break
2041
+
2042
+ return DType(dtype)
2043
+
2044
+
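+ # Selection sketch: the first dtype whose range contains the scalar wins,
+ # checking uint8 before the signed ints as numpy does: a 0-d array holding
+ # 255 maps to uint8, while -1 maps to int8 and 70000 to int32.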
2045
+ def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs):
2046
+ if mode != "constant":
2047
+ raise NotImplementedError
2048
+ value = kwargs.get("constant_values", 0)
2049
+ # `value` must be a python scalar for torch.nn.functional.pad
2050
+ typ = _dtypes_impl.python_type_for_torch(array.dtype)
2051
+ value = typ(value)
2052
+
2053
+ pad_width = torch.broadcast_to(pad_width, (array.ndim, 2))
2054
+ pad_width = torch.flip(pad_width, (0,)).flatten()
2055
+
2056
+ return torch.nn.functional.pad(array, tuple(pad_width), value=value)
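+ # Why the flip above (sketch): numpy orders pad_width from the first axis
+ # outwards, while torch.nn.functional.pad expects (left, right) pairs
+ # starting from the *last* axis, so the per-axis pairs are reversed before
+ # flattening.
+ #
+ # >>> import torch._numpy as np
+ # >>> np.pad(np.ones((2, 3)), ((1, 1), (0, 0))).shape
+ # (4, 3)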
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_ndarray.py ADDED
@@ -0,0 +1,592 @@
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ import builtins
6
+ import math
7
+ import operator
8
+ from typing import Sequence
9
+
10
+ import torch
11
+
12
+ from . import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util
13
+ from ._normalizations import (
14
+ ArrayLike,
15
+ normalize_array_like,
16
+ normalizer,
17
+ NotImplementedType,
18
+ )
19
+
20
+
21
+ newaxis = None
22
+
23
+ FLAGS = [
24
+ "C_CONTIGUOUS",
25
+ "F_CONTIGUOUS",
26
+ "OWNDATA",
27
+ "WRITEABLE",
28
+ "ALIGNED",
29
+ "WRITEBACKIFCOPY",
30
+ "FNC",
31
+ "FORC",
32
+ "BEHAVED",
33
+ "CARRAY",
34
+ "FARRAY",
35
+ ]
36
+
37
+ SHORTHAND_TO_FLAGS = {
38
+ "C": "C_CONTIGUOUS",
39
+ "F": "F_CONTIGUOUS",
40
+ "O": "OWNDATA",
41
+ "W": "WRITEABLE",
42
+ "A": "ALIGNED",
43
+ "X": "WRITEBACKIFCOPY",
44
+ "B": "BEHAVED",
45
+ "CA": "CARRAY",
46
+ "FA": "FARRAY",
47
+ }
48
+
49
+
50
+ class Flags:
51
+ def __init__(self, flag_to_value: dict):
52
+ assert all(k in FLAGS for k in flag_to_value.keys()) # sanity check
53
+ self._flag_to_value = flag_to_value
54
+
55
+ def __getattr__(self, attr: str):
56
+ if attr.islower() and attr.upper() in FLAGS:
57
+ return self[attr.upper()]
58
+ else:
59
+ raise AttributeError(f"No flag attribute '{attr}'")
60
+
61
+ def __getitem__(self, key):
62
+ if key in SHORTHAND_TO_FLAGS.keys():
63
+ key = SHORTHAND_TO_FLAGS[key]
64
+ if key in FLAGS:
65
+ try:
66
+ return self._flag_to_value[key]
67
+ except KeyError as e:
68
+ raise NotImplementedError(f"{key=}") from e
69
+ else:
70
+ raise KeyError(f"No flag key '{key}'")
71
+
72
+ def __setattr__(self, attr, value):
73
+ if attr.islower() and attr.upper() in FLAGS:
74
+ self[attr.upper()] = value
75
+ else:
76
+ super().__setattr__(attr, value)
77
+
78
+ def __setitem__(self, key, value):
79
+ if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys():
80
+ raise NotImplementedError("Modifying flags is not implemented")
81
+ else:
82
+ raise KeyError(f"No flag key '{key}'")
83
+
84
+
85
+ def create_method(fn, name=None):
86
+ name = name or fn.__name__
87
+
88
+ def f(*args, **kwargs):
89
+ return fn(*args, **kwargs)
90
+
91
+ f.__name__ = name
92
+ f.__qualname__ = f"ndarray.{name}"
93
+ return f
94
+
95
+
96
+ # Map ndarray.name_method -> np.name_func
97
+ # If name_func == None, it means that name_method == name_func
98
+ methods = {
99
+ "clip": None,
100
+ "nonzero": None,
101
+ "repeat": None,
102
+ "round": None,
103
+ "squeeze": None,
104
+ "swapaxes": None,
105
+ "ravel": None,
106
+ # linalg
107
+ "diagonal": None,
108
+ "dot": None,
109
+ "trace": None,
110
+ # sorting
111
+ "argsort": None,
112
+ "searchsorted": None,
113
+ # reductions
114
+ "argmax": None,
115
+ "argmin": None,
116
+ "any": None,
117
+ "all": None,
118
+ "max": None,
119
+ "min": None,
120
+ "ptp": None,
121
+ "sum": None,
122
+ "prod": None,
123
+ "mean": None,
124
+ "var": None,
125
+ "std": None,
126
+ # scans
127
+ "cumsum": None,
128
+ "cumprod": None,
129
+ # advanced indexing
130
+ "take": None,
131
+ "choose": None,
132
+ }
133
+
134
+ dunder = {
135
+ "abs": "absolute",
136
+ "invert": None,
137
+ "pos": "positive",
138
+ "neg": "negative",
139
+ "gt": "greater",
140
+ "lt": "less",
141
+ "ge": "greater_equal",
142
+ "le": "less_equal",
143
+ }
144
+
145
+ # dunder methods with right-looking and in-place variants
146
+ ri_dunder = {
147
+ "add": None,
148
+ "sub": "subtract",
149
+ "mul": "multiply",
150
+ "truediv": "divide",
151
+ "floordiv": "floor_divide",
152
+ "pow": "power",
153
+ "mod": "remainder",
154
+ "and": "bitwise_and",
155
+ "or": "bitwise_or",
156
+ "xor": "bitwise_xor",
157
+ "lshift": "left_shift",
158
+ "rshift": "right_shift",
159
+ "matmul": None,
160
+ }
161
+
162
+
163
+ def _upcast_int_indices(index):
164
+ if isinstance(index, torch.Tensor):
165
+ if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
166
+ return index.to(torch.int64)
167
+ elif isinstance(index, tuple):
168
+ return tuple(_upcast_int_indices(i) for i in index)
169
+ return index
170
+
171
+
172
+ # Used to indicate that a parameter is unspecified (as opposed to explicitly
173
+ # `None`)
174
+ class _Unspecified:
175
+ pass
176
+
177
+
178
+ _Unspecified.unspecified = _Unspecified()
179
+
180
+ ###############################################################
181
+ # ndarray class #
182
+ ###############################################################
183
+
184
+
185
+ class ndarray:
186
+ def __init__(self, t=None):
187
+ if t is None:
188
+ self.tensor = torch.Tensor()
189
+ elif isinstance(t, torch.Tensor):
190
+ self.tensor = t
191
+ else:
192
+ raise ValueError(
193
+ "ndarray constructor is not recommended; prefer"
194
+ "either array(...) or zeros/empty(...)"
195
+ )
196
+
197
+ # Register NumPy functions as methods
198
+ for method, name in methods.items():
199
+ fn = getattr(_funcs, name or method)
200
+ vars()[method] = create_method(fn, method)
201
+
202
+ # Regular methods but coming from ufuncs
203
+ conj = create_method(_ufuncs.conjugate, "conj")
204
+ conjugate = create_method(_ufuncs.conjugate)
205
+
206
+ for method, name in dunder.items():
207
+ fn = getattr(_ufuncs, name or method)
208
+ method = f"__{method}__"
209
+ vars()[method] = create_method(fn, method)
210
+
211
+ for method, name in ri_dunder.items():
212
+ fn = getattr(_ufuncs, name or method)
213
+ plain = f"__{method}__"
214
+ vars()[plain] = create_method(fn, plain)
215
+ rvar = f"__r{method}__"
216
+ vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
217
+ ivar = f"__i{method}__"
218
+ vars()[ivar] = create_method(
219
+ lambda self, other, fn=fn: fn(self, other, out=self), ivar
220
+ )
221
+
222
+ # There's no __idivmod__
223
+ __divmod__ = create_method(_ufuncs.divmod, "__divmod__")
224
+ __rdivmod__ = create_method(
225
+ lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
226
+ )
227
+
228
+ # prevent loop variables leaking into the ndarray class namespace
229
+ del ivar, rvar, name, plain, fn, method
230
+
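+ # Registration sketch: `vars()` inside a class body is the class namespace,
+ # so the loops above install e.g. ndarray.__add__, ndarray.__radd__ (with
+ # the operands swapped) and ndarray.__iadd__ (out=self) from the _ufuncs
+ # implementations without spelling each method out by hand.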
231
+ @property
232
+ def shape(self):
233
+ return tuple(self.tensor.shape)
234
+
235
+ @property
236
+ def size(self):
237
+ return self.tensor.numel()
238
+
239
+ @property
240
+ def ndim(self):
241
+ return self.tensor.ndim
242
+
243
+ @property
244
+ def dtype(self):
245
+ return _dtypes.dtype(self.tensor.dtype)
246
+
247
+ @property
248
+ def strides(self):
249
+ elsize = self.tensor.element_size()
250
+ return tuple(stride * elsize for stride in self.tensor.stride())
251
+
252
+ @property
253
+ def itemsize(self):
254
+ return self.tensor.element_size()
255
+
256
+ @property
257
+ def flags(self):
258
+ # Note: "contiguous" in torch means C-contiguous
259
+ return Flags(
260
+ {
261
+ "C_CONTIGUOUS": self.tensor.is_contiguous(),
262
+ "F_CONTIGUOUS": self.T.tensor.is_contiguous(),
263
+ "OWNDATA": self.tensor._base is None,
264
+ "WRITEABLE": True, # pytorch does not have readonly tensors
265
+ }
266
+ )
267
+
268
+ @property
269
+ def data(self):
270
+ return self.tensor.data_ptr()
271
+
272
+ @property
273
+ def nbytes(self):
274
+ return self.tensor.storage().nbytes()
275
+
276
+ @property
277
+ def T(self):
278
+ return self.transpose()
279
+
280
+ @property
281
+ def real(self):
282
+ return _funcs.real(self)
283
+
284
+ @real.setter
285
+ def real(self, value):
286
+ self.tensor.real = asarray(value).tensor
287
+
288
+ @property
289
+ def imag(self):
290
+ return _funcs.imag(self)
291
+
292
+ @imag.setter
293
+ def imag(self, value):
294
+ self.tensor.imag = asarray(value).tensor
295
+
296
+ # ctors
297
+ def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
298
+ if order != "K":
299
+ raise NotImplementedError(f"astype(..., order={order} is not implemented.")
300
+ if casting != "unsafe":
301
+ raise NotImplementedError(
302
+ f"astype(..., casting={casting} is not implemented."
303
+ )
304
+ if not subok:
305
+ raise NotImplementedError(f"astype(..., subok={subok} is not implemented.")
306
+ if not copy:
307
+ raise NotImplementedError(f"astype(..., copy={copy} is not implemented.")
308
+ torch_dtype = _dtypes.dtype(dtype).torch_dtype
309
+ t = self.tensor.to(torch_dtype)
310
+ return ndarray(t)
311
+
312
+ @normalizer
313
+ def copy(self: ArrayLike, order: NotImplementedType = "C"):
314
+ return self.clone()
315
+
316
+ @normalizer
317
+ def flatten(self: ArrayLike, order: NotImplementedType = "C"):
318
+ return torch.flatten(self)
319
+
320
+ def resize(self, *new_shape, refcheck=False):
321
+ # NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
322
+ if refcheck:
323
+ raise NotImplementedError(
324
+ f"resize(..., refcheck={refcheck} is not implemented."
325
+ )
326
+ if new_shape in [(), (None,)]:
327
+ return
328
+
329
+ # support both x.resize((2, 2)) and x.resize(2, 2)
330
+ if len(new_shape) == 1:
331
+ new_shape = new_shape[0]
332
+ if isinstance(new_shape, int):
333
+ new_shape = (new_shape,)
334
+
335
+ if builtins.any(x < 0 for x in new_shape):
336
+ raise ValueError("all elements of `new_shape` must be non-negative")
337
+
338
+ new_numel, old_numel = math.prod(new_shape), self.tensor.numel()
339
+
340
+ self.tensor.resize_(new_shape)
341
+
342
+ if new_numel >= old_numel:
343
+ # zero-fill new elements
344
+ assert self.tensor.is_contiguous()
345
+ b = self.tensor.flatten() # does not copy
346
+ b[old_numel:].zero_()
347
+
348
+ def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified):
349
+ if dtype is _Unspecified.unspecified:
350
+ dtype = self.dtype
351
+ if type is not _Unspecified.unspecified:
352
+ raise NotImplementedError(f"view(..., type={type} is not implemented.")
353
+ torch_dtype = _dtypes.dtype(dtype).torch_dtype
354
+ tview = self.tensor.view(torch_dtype)
355
+ return ndarray(tview)
356
+
357
+ @normalizer
358
+ def fill(self, value: ArrayLike):
359
+ # Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and
360
+ # error out on D > 0 arrays
361
+ self.tensor.fill_(value)
362
+
363
+ def tolist(self):
364
+ return self.tensor.tolist()
365
+
366
+ def __iter__(self):
367
+ return (ndarray(x) for x in self.tensor.__iter__())
368
+
369
+ def __str__(self):
370
+ return (
371
+ str(self.tensor)
372
+ .replace("tensor", "torch.ndarray")
373
+ .replace("dtype=torch.", "dtype=")
374
+ )
375
+
376
+ __repr__ = create_method(__str__)
377
+
378
+ def __eq__(self, other):
379
+ try:
380
+ return _ufuncs.equal(self, other)
381
+ except (RuntimeError, TypeError):
382
+ # Failed to convert other to array: definitely not equal.
383
+ falsy = torch.full(self.shape, fill_value=False, dtype=bool)
384
+ return asarray(falsy)
385
+
386
+ def __ne__(self, other):
387
+ return ~(self == other)
388
+
389
+ def __index__(self):
390
+ try:
391
+ return operator.index(self.tensor.item())
392
+ except Exception as exc:
393
+ raise TypeError(
394
+ "only integer scalar arrays can be converted to a scalar index"
395
+ ) from exc
396
+
397
+ def __bool__(self):
398
+ return bool(self.tensor)
399
+
400
+ def __int__(self):
401
+ return int(self.tensor)
402
+
403
+ def __float__(self):
404
+ return float(self.tensor)
405
+
406
+ def __complex__(self):
407
+ return complex(self.tensor)
408
+
409
+ def is_integer(self):
410
+ try:
411
+ v = self.tensor.item()
412
+ result = int(v) == v
413
+ except Exception:
414
+ result = False
415
+ return result
416
+
417
+ def __len__(self):
418
+ return self.tensor.shape[0]
419
+
420
+ def __contains__(self, x):
421
+ return self.tensor.__contains__(x)
422
+
423
+ def transpose(self, *axes):
424
+ # np.transpose(arr, axis=None) but arr.transpose(*axes)
425
+ return _funcs.transpose(self, axes)
426
+
427
+ def reshape(self, *shape, order="C"):
428
+ # arr.reshape(shape) and arr.reshape(*shape)
429
+ return _funcs.reshape(self, shape, order=order)
430
+
431
+ def sort(self, axis=-1, kind=None, order=None):
432
+ # ndarray.sort works in-place
433
+ _funcs.copyto(self, _funcs.sort(self, axis, kind, order))
434
+
435
+ def item(self, *args):
436
+ # Mimic NumPy's implementation with three special cases (no arguments,
437
+ # a flat index and a multi-index):
438
+ # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702
439
+ if args == ():
440
+ return self.tensor.item()
441
+ elif len(args) == 1:
442
+ # int argument
443
+ return self.ravel()[args[0]]
444
+ else:
445
+ return self.__getitem__(args)
446
+
447
+ def __getitem__(self, index):
448
+ tensor = self.tensor
449
+
450
+ def neg_step(i, s):
451
+ if not (isinstance(s, slice) and s.step is not None and s.step < 0):
452
+ return s
453
+
454
+ nonlocal tensor
455
+ tensor = torch.flip(tensor, (i,))
456
+
457
+ # Account for the fact that a slice includes the start but not the end
458
+ assert isinstance(s.start, int) or s.start is None
459
+ assert isinstance(s.stop, int) or s.stop is None
460
+ start = s.stop + 1 if s.stop else None
461
+ stop = s.start + 1 if s.start else None
462
+
463
+ return slice(start, stop, -s.step)
464
+
465
+ if isinstance(index, Sequence):
466
+ index = type(index)(neg_step(i, s) for i, s in enumerate(index))
467
+ else:
468
+ index = neg_step(0, index)
469
+ index = _util.ndarrays_to_tensors(index)
470
+ index = _upcast_int_indices(index)
471
+ return ndarray(tensor.__getitem__(index))
472
+
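+ # Negative-step note: torch basic indexing rejects step < 0, so `neg_step`
+ # above flips the tensor along that dimension and rewrites the slice with
+ # the positive step -s.step; the common a[::-1] becomes a plain flip.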
473
+ def __setitem__(self, index, value):
474
+ index = _util.ndarrays_to_tensors(index)
475
+ index = _upcast_int_indices(index)
476
+
477
+ if not _dtypes_impl.is_scalar(value):
478
+ value = normalize_array_like(value)
479
+ value = _util.cast_if_needed(value, self.tensor.dtype)
480
+
481
+ return self.tensor.__setitem__(index, value)
482
+
483
+ take = _funcs.take
484
+ put = _funcs.put
485
+
486
+ def __dlpack__(self, *, stream=None):
487
+ return self.tensor.__dlpack__(stream=stream)
488
+
489
+ def __dlpack_device__(self):
490
+ return self.tensor.__dlpack_device__()
491
+
492
+
493
+ def _tolist(obj):
494
+ """Recursively convert tensors into lists."""
495
+ a1 = []
496
+ for elem in obj:
497
+ if isinstance(elem, (list, tuple)):
498
+ elem = _tolist(elem)
499
+ if isinstance(elem, ndarray):
500
+ a1.append(elem.tensor.tolist())
501
+ else:
502
+ a1.append(elem)
503
+ return a1
504
+
505
+
506
+ # This is ideally the only place that talks to ndarray directly.
507
+ # The rest goes through asarray (preferred) or array.
508
+
509
+
510
+ def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
511
+ if subok is not False:
512
+ raise NotImplementedError("'subok' parameter is not supported.")
513
+ if like is not None:
514
+ raise NotImplementedError("'like' parameter is not supported.")
515
+ if order != "K":
516
+ raise NotImplementedError
517
+
518
+ # a happy path
519
+ if (
520
+ isinstance(obj, ndarray)
521
+ and copy is False
522
+ and dtype is None
523
+ and ndmin <= obj.ndim
524
+ ):
525
+ return obj
526
+
527
+ if isinstance(obj, (list, tuple)):
528
+ # FIXME and they have the same dtype, device, etc
529
+ if obj and all(isinstance(x, torch.Tensor) for x in obj):
530
+ # list of arrays: *under torch.Dynamo* these are FakeTensors
531
+ obj = torch.stack(obj)
532
+ else:
533
+ # XXX: remove tolist
534
+ # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
535
+ obj = _tolist(obj)
536
+
537
+ # is obj an ndarray already?
538
+ if isinstance(obj, ndarray):
539
+ obj = obj.tensor
540
+
541
+ # is a specific dtype requested?
542
+ torch_dtype = None
543
+ if dtype is not None:
544
+ torch_dtype = _dtypes.dtype(dtype).torch_dtype
545
+
546
+ tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
547
+ return ndarray(tensor)
548
+
549
+
550
+ def asarray(a, dtype=None, order="K", *, like=None):
551
+ return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)
552
+
553
+
554
+ def ascontiguousarray(a, dtype=None, *, like=None):
555
+ arr = asarray(a, dtype=dtype, like=like)
556
+ if not arr.tensor.is_contiguous():
557
+ arr.tensor = arr.tensor.contiguous()
558
+ return arr
559
+
560
+
561
+ def from_dlpack(x, /):
562
+ t = torch.from_dlpack(x)
563
+ return ndarray(t)
564
+
565
+
566
+ def _extract_dtype(entry):
567
+ try:
568
+ dty = _dtypes.dtype(entry)
569
+ except Exception:
570
+ dty = asarray(entry).dtype
571
+ return dty
572
+
573
+
574
+ def can_cast(from_, to, casting="safe"):
575
+ from_ = _extract_dtype(from_)
576
+ to_ = _extract_dtype(to)
577
+
578
+ return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting)
579
+
580
+
581
+ def result_type(*arrays_and_dtypes):
582
+ tensors = []
583
+ for entry in arrays_and_dtypes:
584
+ try:
585
+ t = asarray(entry).tensor
586
+ except (RuntimeError, ValueError, TypeError):
587
+ dty = _dtypes.dtype(entry)
588
+ t = torch.empty(1, dtype=dty.torch_dtype)
589
+ tensors.append(t)
590
+
591
+ torch_dtype = _dtypes_impl.result_type_impl(*tensors)
592
+ return _dtypes.dtype(torch_dtype)
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_normalizations.py ADDED
@@ -0,0 +1,259 @@
1
+ # mypy: ignore-errors
2
+
3
+ """ "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import functools
8
+ import inspect
9
+ import operator
10
+ import typing
11
+
12
+ import torch
13
+
14
+ from . import _dtypes, _dtypes_impl, _util
15
+
16
+
17
+ ArrayLike = typing.TypeVar("ArrayLike")
18
+ Scalar = typing.Union[int, float, complex, bool]
19
+ ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar]
20
+
21
+ DTypeLike = typing.TypeVar("DTypeLike")
22
+ AxisLike = typing.TypeVar("AxisLike")
23
+ NDArray = typing.TypeVar("NDArray")
24
+ CastingModes = typing.TypeVar("CastingModes")
25
+ KeepDims = typing.TypeVar("KeepDims")
26
+
27
+ # OutArray is to annotate the out= array argument.
28
+ #
29
+ # This one is special in several respects:
30
+ # First, it needs to be an NDArray, and we need to preserve the `result is out`
31
+ # semantics. Therefore, we cannot just extract the Tensor from the out array.
32
+ # So we never pass the out array to implementer functions and handle it in the
33
+ # `normalizer` below.
34
+ # Second, the out= argument can be either keyword or positional argument, and
35
+ # as a positional arg, it can be anywhere in the signature.
36
+ # To handle all this, we define a special `OutArray` annotation and dispatch on it.
37
+ #
38
+ OutArray = typing.TypeVar("OutArray")
39
+
40
+ try:
41
+ from typing import NotImplementedType
42
+ except ImportError:
43
+ NotImplementedType = typing.TypeVar("NotImplementedType")
44
+
45
+
46
+ def normalize_array_like(x, parm=None):
47
+ from ._ndarray import asarray
48
+
49
+ return asarray(x).tensor
50
+
51
+
52
+ def normalize_array_like_or_scalar(x, parm=None):
53
+ if _dtypes_impl.is_scalar_or_symbolic(x):
54
+ return x
55
+ return normalize_array_like(x, parm)
56
+
57
+
58
+ def normalize_optional_array_like_or_scalar(x, parm=None):
59
+ if x is None:
60
+ return None
61
+ return normalize_array_like_or_scalar(x, parm)
62
+
63
+
64
+ def normalize_optional_array_like(x, parm=None):
65
+ # This explicit normalizer is needed because otherwise normalize_array_like
66
+ # does not run for a parameter annotated as Optional[ArrayLike]
67
+ return None if x is None else normalize_array_like(x, parm)
68
+
69
+
70
+ def normalize_seq_array_like(x, parm=None):
71
+ return tuple(normalize_array_like(value) for value in x)
72
+
73
+
74
+ def normalize_dtype(dtype, parm=None):
75
+ # cf _decorators.dtype_to_torch
76
+ torch_dtype = None
77
+ if dtype is not None:
78
+ dtype = _dtypes.dtype(dtype)
79
+ torch_dtype = dtype.torch_dtype
80
+ return torch_dtype
81
+
82
+
83
+ def normalize_not_implemented(arg, parm):
84
+ if arg != parm.default:
85
+ raise NotImplementedError(f"'{parm.name}' parameter is not supported.")
86
+
87
+
88
+ def normalize_axis_like(arg, parm=None):
89
+ from ._ndarray import ndarray
90
+
91
+ if isinstance(arg, ndarray):
92
+ arg = operator.index(arg)
93
+ return arg
94
+
95
+
96
+ def normalize_ndarray(arg, parm=None):
97
+ # check the arg is an ndarray, extract its tensor attribute
98
+ if arg is None:
99
+ return arg
100
+
101
+ from ._ndarray import ndarray
102
+
103
+ if not isinstance(arg, ndarray):
104
+ raise TypeError(f"'{parm.name}' must be an array")
105
+ return arg.tensor
106
+
107
+
108
+ def normalize_outarray(arg, parm=None):
109
+ # almost normalize_ndarray, only return the array, not its tensor
110
+ if arg is None:
111
+ return arg
112
+ from ._ndarray import ndarray
113
+
114
+ # Dynamo can pass torch tensors as out arguments,
115
+ # wrap it in an ndarray before processing
116
+ if isinstance(arg, torch.Tensor):
117
+ arg = ndarray(arg)
118
+
119
+ if not isinstance(arg, ndarray):
120
+ raise TypeError(f"'{parm.name}' must be an array")
121
+ return arg
122
+
123
+
124
+ def normalize_casting(arg, parm=None):
125
+ if arg not in ["no", "equiv", "safe", "same_kind", "unsafe"]:
126
+ raise ValueError(
127
+ f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')"
128
+ )
129
+ return arg
130
+
131
+
132
+ normalizers = {
133
+ "ArrayLike": normalize_array_like,
134
+ "ArrayLikeOrScalar": normalize_array_like_or_scalar,
135
+ "Optional[ArrayLike]": normalize_optional_array_like,
136
+ "Sequence[ArrayLike]": normalize_seq_array_like,
137
+ "Optional[ArrayLikeOrScalar]": normalize_optional_array_like_or_scalar,
138
+ "Optional[NDArray]": normalize_ndarray,
139
+ "Optional[OutArray]": normalize_outarray,
140
+ "NDArray": normalize_ndarray,
141
+ "Optional[DTypeLike]": normalize_dtype,
142
+ "AxisLike": normalize_axis_like,
143
+ "NotImplementedType": normalize_not_implemented,
144
+ "Optional[CastingModes]": normalize_casting,
145
+ }
146
+
147
+
148
+ def maybe_normalize(arg, parm):
149
+ """Normalize arg if a normalizer is registered."""
150
+ normalizer = normalizers.get(parm.annotation, None)
151
+ return normalizer(arg, parm) if normalizer else arg
152
+
153
+
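+ # Dispatch sketch (`f` below is a made-up function): normalization is keyed
+ # on the *string* form of each annotation, which is why this module uses
+ # `from __future__ import annotations`.
+ #
+ # >>> import inspect
+ # >>> def f(a: "ArrayLike", axis: "AxisLike" = None): ...
+ # >>> inspect.signature(f).parameters["a"].annotation  # key into normalizers
+ # 'ArrayLike'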
154
+ # ### Return value helpers ###
155
+
156
+
157
+ def maybe_copy_to(out, result, promote_scalar_result=False):
158
+ # NB: here out is either an ndarray or None
159
+ if out is None:
160
+ return result
161
+ elif isinstance(result, torch.Tensor):
162
+ if result.shape != out.shape:
163
+ can_fit = result.numel() == 1 and out.ndim == 0
164
+ if promote_scalar_result and can_fit:
165
+ result = result.squeeze()
166
+ else:
167
+ raise ValueError(
168
+ f"Bad size of the out array: out.shape = {out.shape}"
169
+ f" while result.shape = {result.shape}."
170
+ )
171
+ out.tensor.copy_(result)
172
+ return out
173
+ elif isinstance(result, (tuple, list)):
174
+ return type(result)(
175
+ maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result)
176
+ )
177
+ else:
178
+ raise AssertionError # We should never hit this path
179
+
180
+
181
+ def wrap_tensors(result):
182
+ from ._ndarray import ndarray
183
+
184
+ if isinstance(result, torch.Tensor):
185
+ return ndarray(result)
186
+ elif isinstance(result, (tuple, list)):
187
+ result = type(result)(wrap_tensors(x) for x in result)
188
+ return result
189
+
190
+
191
+ def array_or_scalar(values, py_type=float, return_scalar=False):
192
+ if return_scalar:
193
+ return py_type(values.item())
194
+ else:
195
+ from ._ndarray import ndarray
196
+
197
+ return ndarray(values)
198
+
199
+
200
+ # ### The main decorator to normalize arguments / postprocess the output ###
201
+
202
+
203
+ def normalizer(_func=None, *, promote_scalar_result=False):
204
+ def normalizer_inner(func):
205
+ @functools.wraps(func)
206
+ def wrapped(*args, **kwds):
207
+ sig = inspect.signature(func)
208
+ params = sig.parameters
209
+ first_param = next(iter(params.values()))
210
+
211
+ # NumPy's API does not have positional args before variadic positional args
212
+ if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
213
+ args = [maybe_normalize(arg, first_param) for arg in args]
214
+ else:
215
+ # NB: extra unknown arguments: pass through, will raise in func(*args) below
216
+ args = (
217
+ tuple(
218
+ maybe_normalize(arg, parm)
219
+ for arg, parm in zip(args, params.values())
220
+ )
221
+ + args[len(params.values()) :]
222
+ )
223
+
224
+ kwds = {
225
+ name: maybe_normalize(arg, params[name]) if name in params else arg
226
+ for name, arg in kwds.items()
227
+ }
228
+
229
+ result = func(*args, **kwds)
230
+
231
+ # keepdims
232
+ bound_args = None
233
+ if "keepdims" in params and params["keepdims"].annotation == "KeepDims":
234
+ # keepdims can be in any position so we need sig.bind
235
+ bound_args = sig.bind(*args, **kwds).arguments
236
+ if bound_args.get("keepdims", False):
237
+ # In this case the first arg is the initial tensor and
238
+ # the second arg is (optionally) the axis
239
+ tensor = args[0]
240
+ axis = bound_args.get("axis")
241
+ result = _util.apply_keepdims(result, axis, tensor.ndim)
242
+
243
+ # out
244
+ if "out" in params:
245
+ # out can be in any position so we need sig.bind
246
+ if bound_args is None:
247
+ bound_args = sig.bind(*args, **kwds).arguments
248
+ out = bound_args.get("out")
249
+ result = maybe_copy_to(out, result, promote_scalar_result)
250
+ result = wrap_tensors(result)
251
+
252
+ return result
253
+
254
+ return wrapped
255
+
256
+ if _func is None:
257
+ return normalizer_inner
258
+ else:
259
+ return normalizer_inner(_func)
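+ # Usage sketch (hypothetical function, not part of this module): the
+ # decorator converts array_likes on the way in and handles out=/wrapping on
+ # the way out, so the body only ever sees torch.Tensors:
+ #
+ # @normalizer
+ # def twice(a: ArrayLike, out: Optional[OutArray] = None):
+ #     return a * 2  # `a` is a torch.Tensor here; `out` is applied by the wrapper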
infer_4_47_1/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py ADDED
@@ -0,0 +1,459 @@
1
+ # mypy: ignore-errors
2
+
3
+ """ Implementation of reduction operations, to be wrapped into arrays, dtypes etc
4
+ in the 'public' layer.
5
+
6
+ Anything here only deals with torch objects, e.g. "dtype" is a torch.dtype instance etc
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import functools
11
+ from typing import Optional, TYPE_CHECKING
12
+
13
+ import torch
14
+
15
+ from . import _dtypes_impl, _util
16
+
17
+
18
+ if TYPE_CHECKING:
19
+ from ._normalizations import (
20
+ ArrayLike,
21
+ AxisLike,
22
+ DTypeLike,
23
+ KeepDims,
24
+ NotImplementedType,
25
+ OutArray,
26
+ )
27
+
28
+
29
+ def _deco_axis_expand(func):
30
+ """
31
+ Generically handle axis arguments in reductions.
32
+ axis is *always* the 2nd arg in the function so no need to have a look at its signature
33
+ """
34
+
35
+ @functools.wraps(func)
36
+ def wrapped(a, axis=None, *args, **kwds):
37
+ if axis is not None:
38
+ axis = _util.normalize_axis_tuple(axis, a.ndim)
39
+
40
+ if axis == ():
41
+ # So we insert a length-one axis and run the reduction along it.
42
+ # We cannot return a.clone() as this would sidestep the checks inside the function
43
+ newshape = _util.expand_shape(a.shape, axis=0)
44
+ a = a.reshape(newshape)
45
+ axis = (0,)
46
+
47
+ return func(a, axis, *args, **kwds)
48
+
49
+ return wrapped
50
+
51
+
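+ # axis=() sketch: the wrapper turns "reduce over no axes" into a real
+ # reduction over an inserted length-1 axis, so the values pass through
+ # while dtype handling and input checks still run, e.g. at the impl level
+ # sum(torch.arange(3), axis=()) evaluates to tensor([0, 1, 2]).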
52
+ def _atleast_float(dtype, other_dtype):
53
+ """Return a dtype that is real or complex floating-point.
54
+
55
+ For inputs that are boolean or integer dtypes, this returns the default
56
+ float dtype; real and complex floating-point dtypes (`float*`, `complex*`)
57
+ pass through unchanged.
58
+ """
59
+ if dtype is None:
60
+ dtype = other_dtype
61
+ if not (dtype.is_floating_point or dtype.is_complex):
62
+ return _dtypes_impl.default_dtypes().float_dtype
63
+ return dtype
64
+
65
+
66
+ @_deco_axis_expand
67
+ def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False):
68
+ return a.count_nonzero(axis)
69
+
70
+
71
+ @_deco_axis_expand
72
+ def argmax(
73
+ a: ArrayLike,
74
+ axis: AxisLike = None,
75
+ out: Optional[OutArray] = None,
76
+ *,
77
+ keepdims: KeepDims = False,
78
+ ):
79
+ if a.is_complex():
80
+ raise NotImplementedError(f"argmax with dtype={a.dtype}.")
81
+
82
+ axis = _util.allow_only_single_axis(axis)
83
+
84
+ if a.dtype == torch.bool:
85
+ # RuntimeError: "argmax_cpu" not implemented for 'Bool'
86
+ a = a.to(torch.uint8)
87
+
88
+ return torch.argmax(a, axis)
89
+
90
+
91
+ @_deco_axis_expand
92
+ def argmin(
93
+ a: ArrayLike,
94
+ axis: AxisLike = None,
95
+ out: Optional[OutArray] = None,
96
+ *,
97
+ keepdims: KeepDims = False,
98
+ ):
99
+ if a.is_complex():
100
+ raise NotImplementedError(f"argmin with dtype={a.dtype}.")
101
+
102
+ axis = _util.allow_only_single_axis(axis)
103
+
104
+ if a.dtype == torch.bool:
105
+ # RuntimeError: "argmin_cpu" not implemented for 'Bool'
106
+ a = a.to(torch.uint8)
107
+
108
+ return torch.argmin(a, axis)
109
+
110
+
111
+ @_deco_axis_expand
112
+ def any(
113
+ a: ArrayLike,
114
+ axis: AxisLike = None,
115
+ out: Optional[OutArray] = None,
116
+ keepdims: KeepDims = False,
117
+ *,
118
+ where: NotImplementedType = None,
119
+ ):
120
+ axis = _util.allow_only_single_axis(axis)
121
+ axis_kw = {} if axis is None else {"dim": axis}
122
+ return torch.any(a, **axis_kw)
123
+
124
+
125
+ @_deco_axis_expand
126
+ def all(
127
+ a: ArrayLike,
128
+ axis: AxisLike = None,
129
+ out: Optional[OutArray] = None,
130
+ keepdims: KeepDims = False,
131
+ *,
132
+ where: NotImplementedType = None,
133
+ ):
134
+ axis = _util.allow_only_single_axis(axis)
135
+ axis_kw = {} if axis is None else {"dim": axis}
136
+ return torch.all(a, **axis_kw)
137
+
138
+
139
+ @_deco_axis_expand
140
+ def amax(
141
+ a: ArrayLike,
142
+ axis: AxisLike = None,
143
+ out: Optional[OutArray] = None,
144
+ keepdims: KeepDims = False,
145
+ initial: NotImplementedType = None,
146
+ where: NotImplementedType = None,
147
+ ):
148
+ if a.is_complex():
149
+ raise NotImplementedError(f"amax with dtype={a.dtype}")
150
+
151
+ return a.amax(axis)
152
+
153
+
154
+ max = amax
155
+
156
+
157
+ @_deco_axis_expand
158
+ def amin(
159
+ a: ArrayLike,
160
+ axis: AxisLike = None,
161
+ out: Optional[OutArray] = None,
162
+ keepdims: KeepDims = False,
163
+ initial: NotImplementedType = None,
164
+ where: NotImplementedType = None,
165
+ ):
166
+ if a.is_complex():
167
+ raise NotImplementedError(f"amin with dtype={a.dtype}")
168
+
169
+ return a.amin(axis)
170
+
171
+
172
+ min = amin
173
+
174
+
175
+ @_deco_axis_expand
176
+ def ptp(
177
+ a: ArrayLike,
178
+ axis: AxisLike = None,
179
+ out: Optional[OutArray] = None,
180
+ keepdims: KeepDims = False,
181
+ ):
182
+ return a.amax(axis) - a.amin(axis)
183
+
184
+
185
+ @_deco_axis_expand
186
+ def sum(
187
+ a: ArrayLike,
188
+ axis: AxisLike = None,
189
+ dtype: Optional[DTypeLike] = None,
190
+ out: Optional[OutArray] = None,
191
+ keepdims: KeepDims = False,
192
+ initial: NotImplementedType = None,
193
+ where: NotImplementedType = None,
194
+ ):
195
+ assert dtype is None or isinstance(dtype, torch.dtype)
196
+
197
+ if dtype == torch.bool:
198
+ dtype = _dtypes_impl.default_dtypes().int_dtype
199
+
200
+ axis_kw = {} if axis is None else {"dim": axis}
201
+ return a.sum(dtype=dtype, **axis_kw)
202
+
203
+
204
+ @_deco_axis_expand
205
+ def prod(
206
+ a: ArrayLike,
207
+ axis: AxisLike = None,
208
+ dtype: Optional[DTypeLike] = None,
209
+ out: Optional[OutArray] = None,
210
+ keepdims: KeepDims = False,
211
+ initial: NotImplementedType = None,
212
+ where: NotImplementedType = None,
213
+ ):
214
+ axis = _util.allow_only_single_axis(axis)
215
+
216
+ if dtype == torch.bool:
217
+ dtype = _dtypes_impl.default_dtypes().int_dtype
218
+
219
+ axis_kw = {} if axis is None else {"dim": axis}
220
+ return a.prod(dtype=dtype, **axis_kw)
221
+
222
+
223
+ product = prod
224
+
225
+
226
+ @_deco_axis_expand
227
+ def mean(
228
+ a: ArrayLike,
229
+ axis: AxisLike = None,
230
+ dtype: Optional[DTypeLike] = None,
231
+ out: Optional[OutArray] = None,
232
+ keepdims: KeepDims = False,
233
+ *,
234
+ where: NotImplementedType = None,
235
+ ):
236
+ dtype = _atleast_float(dtype, a.dtype)
237
+
238
+ axis_kw = {} if axis is None else {"dim": axis}
239
+ result = a.mean(dtype=dtype, **axis_kw)
240
+
241
+ return result
242
+
243
+
244
+ @_deco_axis_expand
245
+ def std(
246
+ a: ArrayLike,
247
+ axis: AxisLike = None,
248
+ dtype: Optional[DTypeLike] = None,
249
+ out: Optional[OutArray] = None,
250
+ ddof=0,
251
+ keepdims: KeepDims = False,
252
+ *,
253
+ where: NotImplementedType = None,
254
+ ):
255
+ in_dtype = dtype
256
+ dtype = _atleast_float(dtype, a.dtype)
257
+ tensor = _util.cast_if_needed(a, dtype)
258
+ result = tensor.std(dim=axis, correction=ddof)
259
+ return _util.cast_if_needed(result, in_dtype)
260
+
261
+
262
+ @_deco_axis_expand
263
+ def var(
264
+ a: ArrayLike,
265
+ axis: AxisLike = None,
266
+ dtype: Optional[DTypeLike] = None,
267
+ out: Optional[OutArray] = None,
268
+ ddof=0,
269
+ keepdims: KeepDims = False,
270
+ *,
271
+ where: NotImplementedType = None,
272
+ ):
273
+ in_dtype = dtype
274
+ dtype = _atleast_float(dtype, a.dtype)
275
+ tensor = _util.cast_if_needed(a, dtype)
276
+ result = tensor.var(dim=axis, correction=ddof)
277
+ return _util.cast_if_needed(result, in_dtype)
278
+
279
+
280
+ # cumsum / cumprod are almost reductions:
281
+ # 1. no keepdims
282
+ # 2. axis=None flattens
283
+
284
+
285
+ def cumsum(
286
+ a: ArrayLike,
287
+ axis: AxisLike = None,
288
+ dtype: Optional[DTypeLike] = None,
289
+ out: Optional[OutArray] = None,
290
+ ):
291
+ if dtype == torch.bool:
292
+ dtype = _dtypes_impl.default_dtypes().int_dtype
293
+ if dtype is None:
294
+ dtype = a.dtype
295
+
296
+ (a,), axis = _util.axis_none_flatten(a, axis=axis)
297
+ axis = _util.normalize_axis_index(axis, a.ndim)
298
+
299
+ return a.cumsum(axis=axis, dtype=dtype)
300
+
301
+
302
+ def cumprod(
303
+ a: ArrayLike,
304
+ axis: AxisLike = None,
305
+ dtype: Optional[DTypeLike] = None,
306
+ out: Optional[OutArray] = None,
307
+ ):
308
+ if dtype == torch.bool:
309
+ dtype = _dtypes_impl.default_dtypes().int_dtype
310
+ if dtype is None:
311
+ dtype = a.dtype
312
+
313
+ (a,), axis = _util.axis_none_flatten(a, axis=axis)
314
+ axis = _util.normalize_axis_index(axis, a.ndim)
315
+
316
+ return a.cumprod(axis=axis, dtype=dtype)
317
+
318
+
319
+ cumproduct = cumprod
320
+
321
+
322
+ def average(
323
+ a: ArrayLike,
324
+ axis=None,
325
+ weights: Optional[ArrayLike] = None,
326
+ returned=False,
327
+ *,
328
+ keepdims=False,
329
+ ):
330
+ if weights is None:
331
+ result = mean(a, axis=axis)
332
+ wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype)
333
+ else:
334
+ if not a.dtype.is_floating_point:
335
+ a = a.double()
336
+
337
+ # axis & weights
338
+ if a.shape != weights.shape:
339
+ if axis is None:
340
+ raise TypeError(
341
+ "Axis must be specified when shapes of a and weights differ."
342
+ )
343
+ if weights.ndim != 1:
344
+ raise TypeError(
345
+ "1D weights expected when shapes of a and weights differ."
346
+ )
347
+ if weights.shape[0] != a.shape[axis]:
348
+ raise ValueError(
349
+ "Length of weights not compatible with specified axis."
350
+ )
351
+
352
+ # setup weight to broadcast along axis
353
+ weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
354
+ weights = weights.swapaxes(-1, axis)
355
+
356
+ # do the work
357
+ result_dtype = _dtypes_impl.result_type_impl(a, weights)
358
+ numerator = sum(a * weights, axis, dtype=result_dtype)
359
+ wsum = sum(weights, axis, dtype=result_dtype)
360
+ result = numerator / wsum
361
+
362
+ # We process keepdims manually because the decorator does not deal with variadic returns
363
+ if keepdims:
364
+ result = _util.apply_keepdims(result, axis, a.ndim)
365
+
366
+ if returned:
367
+ if wsum.shape != result.shape:
368
+ wsum = torch.broadcast_to(wsum, result.shape).clone()
369
+ return result, wsum
370
+ else:
371
+ return result
372
+
373
+
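+ # Weighted-average sketch: 1-D weights are broadcast onto the reduction
+ # axis by prepending singleton dims and swapping into place, so the rows
+ # are weighted here:
+ #
+ # >>> import torch._numpy as np
+ # >>> np.average(np.asarray([[1.0, 2.0], [3.0, 4.0]]), axis=0,
+ # ...            weights=np.asarray([3.0, 1.0])).tolist()
+ # [1.5, 2.5]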
374
+ # Not using deco_axis_expand as it assumes that axis is the second arg
375
+ def quantile(
376
+ a: ArrayLike,
377
+ q: ArrayLike,
378
+ axis: AxisLike = None,
379
+ out: Optional[OutArray] = None,
380
+ overwrite_input=False,
381
+ method="linear",
382
+ keepdims: KeepDims = False,
383
+ *,
384
+ interpolation: NotImplementedType = None,
385
+ ):
386
+ if overwrite_input:
387
+ # raise NotImplementedError("overwrite_input in quantile not implemented.")
388
+ # NumPy documents that `overwrite_input` MAY modify inputs:
389
+ # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile
390
+ # Here we choose to work out-of-place because why not.
391
+ pass
392
+
393
+ if not a.dtype.is_floating_point:
394
+ dtype = _dtypes_impl.default_dtypes().float_dtype
395
+ a = a.to(dtype)
396
+
397
+ # edge case: torch.quantile only supports float32 and float64
398
+ if a.dtype == torch.float16:
399
+ a = a.to(torch.float32)
400
+
401
+ if axis is None:
402
+ a = a.flatten()
403
+ q = q.flatten()
404
+ axis = (0,)
405
+ else:
406
+ axis = _util.normalize_axis_tuple(axis, a.ndim)
407
+
408
+ # FIXME(Mario) Doesn't np.quantile accept a tuple?
409
+ # torch.quantile does accept a number. If we don't want to implement the tuple behaviour
410
+ # (it's definitely low priority) change `normalize_axis_tuple` into `normalize_axis_index` above.
411
+ axis = _util.allow_only_single_axis(axis)
412
+
413
+ q = _util.cast_if_needed(q, a.dtype)
414
+
415
+ return torch.quantile(a, q, axis=axis, interpolation=method)
416
+
417
+
418
+ def percentile(
419
+ a: ArrayLike,
420
+ q: ArrayLike,
421
+ axis: AxisLike = None,
422
+ out: Optional[OutArray] = None,
423
+ overwrite_input=False,
424
+ method="linear",
425
+ keepdims: KeepDims = False,
426
+ *,
427
+ interpolation: NotImplementedType = None,
428
+ ):
429
+ # np.percentile(float_tensor, 30): q.dtype is int64 => q / 100.0 is float32
430
+ if _dtypes_impl.python_type_for_torch(q.dtype) == int:
431
+ q = q.to(_dtypes_impl.default_dtypes().float_dtype)
432
+ qq = q / 100.0
433
+
434
+ return quantile(
435
+ a,
436
+ qq,
437
+ axis=axis,
438
+ overwrite_input=overwrite_input,
439
+ method=method,
440
+ keepdims=keepdims,
441
+ interpolation=interpolation,
442
+ )
443
+
444
+
445
+ def median(
446
+ a: ArrayLike,
447
+ axis=None,
448
+ out: Optional[OutArray] = None,
449
+ overwrite_input=False,
450
+ keepdims: KeepDims = False,
451
+ ):
452
+ return quantile(
453
+ a,
454
+ torch.as_tensor(0.5),
455
+ axis=axis,
456
+ overwrite_input=overwrite_input,
457
+ out=out,
458
+ keepdims=keepdims,
459
+ )