Searched full:dtype (Results 1 – 25 of 7466) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
binary_ops_test.py
48 pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
49 pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
55 rtol = 1e-15 if a.dtype == np.float64 else 1e-3
57 atol = 1e-15 if a.dtype == np.float64 else 1e-6
72 for dtype in self.float_types:
73 if dtype == dtypes.bfloat16.as_numpy_dtype:
82 np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
83 np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
84 expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
88 np.array([3, 3, -1.5, -8, 44], dtype=dtype),
[all …]
unary_ops_test.py
68 dtypes.as_dtype(inp.dtype), inp.shape, name="a")
72 self.assertEqual(output.dtype, expected.dtype)
94 for dtype in self.numeric_types - {np.int8, np.uint8}:
96 array_ops.diag, np.array([1, 2, 3, 4], dtype=dtype),
99 dtype=dtype))
102 np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
103 np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
105 array_ops.diag, np.array([[1, 2], [3, 4]], dtype=dtype),
109 dtype=dtype))
113 np.array([[-1, 1]], dtype=dtype),
[all …]
ternary_ops_test.py
35 pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
36 pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
37 pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
50 expected = np.linspace(start, end, num, dtype=np.float32)
68 expected=np.array([1], dtype=np.int32))
74 expected=np.array([1, 3, 5], dtype=np.int32))
77 for dtype in self.numeric_types:
81 np.array(2, dtype=dtype),
82 np.array(7, dtype=dtype),
83 expected=np.array(7, dtype=dtype))
[all …]
nary_ops_test.py
35 array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
55 [np.array([[1, 2, 3]], dtype=np.float32)],
56 expected=np.array([[1, 2, 3]], dtype=np.float32))
59 [np.array([1, 2], dtype=np.float32),
60 np.array([10, 20], dtype=np.float32)],
61 expected=np.array([11, 22], dtype=np.float32))
63 [np.array([-4], dtype=np.float32),
64 np.array([10], dtype=np.float32),
65 np.array([42], dtype=np.float32)],
66 expected=np.array([48], dtype=np.float32))
[all …]
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/
_decomposed.py
27 # Helper to check the passed in quant min and max are valid for the dtype
28 def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
29 if dtype not in _DTYPE_TO_QVALUE_BOUNDS:
30 raise ValueError(f"Unsupported dtype: {dtype}")
31 quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype]
34 "quant_min out of bound for dtype, "
39 "quant_max out of bound for dtype, "
46 "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
57 dtype: torch.dtype,
68 dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor
[all …]
/aosp_15_r20/external/pytorch/test/
test_binary_ufuncs.py
85 self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs
93 # Handles exact dtype comparisons between arrays and tensors
95 # Allows array dtype to be float32 when comparing with bfloat16 tensors
96 # since NumPy doesn't support the bfloat16 dtype
99 if expected.dtype == np.float32:
100 assert actual.dtype in (
106 assert expected.dtype == torch_to_numpy_dtype_dict[actual.dtype]
110 torch.from_numpy(expected).to(actual.dtype),
120 def _test_reference_numerics(self, dtype, op, gen, equal_nan=True):
125 numpy_to_torch_dtype_dict[expected.dtype.type], dtype
[all …]
test_tensor_creation_ops.py
35 def _generate_input(shape, dtype, device, with_extremal):
37 x = torch.tensor((), dtype=dtype, device=device)
39 if dtype.is_floating_point or dtype.is_complex:
41 if dtype == torch.bfloat16:
45 x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
47 if with_extremal and dtype.is_floating_point:
52 elif with_extremal and dtype.is_complex:
56 elif dtype == torch.bool:
57 x = torch.zeros(shape, dtype=dtype, device=device)
60 x = torch.randint(15, 100, shape, dtype=dtype, device=device)
[all …]
test_unary_ufuncs.py
86 def test_float_domains(self, device, dtype, op):
92 low_tensor = torch.tensor(low, device=device, dtype=dtype)
98 # and the dtype is imprecise (like bfloat16 is)
113 high_tensor = torch.tensor(high, device=device, dtype=dtype)
134 self, actual, expected, msg, *, dtype, exact_dtype=True, **kwargs
142 # Handles exact dtype comparisons between arrays and tensors
145 actual.dtype is torch.bfloat16
146 or expected.dtype != torch_to_numpy_dtype_dict[actual.dtype]
148 # Allows array dtype to be float32 when comparing with bfloat16 tensors
149 # since NumPy doesn't support the bfloat16 dtype
[all …]
test_type_promotion.py
29 # the default dtype being torch.float and again with the default dtype
51 int_tensor = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
55 expected = torch.ones([4, 4, 4], dtype=torch.int32, device=device)
57 long_tensor = torch.ones([4, 4, 4], dtype=torch.int64, device=device)
62 self.assertEqual(int_tensor.dtype, torch.int32)
64 bool_tensor = torch.tensor([1, 1, 1], dtype=torch.bool, device=device)
65 uint8_tensor = torch.tensor([1, 1, 1], dtype=torch.uint8, device=device)
75 int16_tensor = torch.tensor([1, 1, 1], dtype=torch.int16, device=device)
80 dont_promote = torch.ones(3, dtype=torch.uint8, device=device) + 5
81 self.assertEqual(dont_promote.dtype, torch.uint8)
[all …]
test_reductions.py
31 def _generate_input(shape, dtype, device, with_extremal):
33 x = torch.tensor((), dtype=dtype, device=device)
35 if dtype.is_floating_point or dtype.is_complex:
37 if dtype == torch.bfloat16:
41 x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
43 if with_extremal and dtype.is_floating_point:
48 elif with_extremal and dtype.is_complex:
52 elif dtype == torch.bool:
53 x = torch.zeros(shape, dtype=dtype, device=device)
56 x = torch.randint(15, 100, shape, dtype=dtype, device=device)
[all …]
test_linalg.py
42 # Protects against includes accidentally setting the default dtype
88 def test_inner(self, device, dtype):
91 a = torch.randn(a_sizes, dtype=dtype, device=device)
92 b = torch.randn(b_sizes, dtype=dtype, device=device)
120 … torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
125 def test_outer(self, device, dtype):
127 if dtype == torch.bfloat16:
144 out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
148 out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
152 a = torch.randn(50).to(device=device, dtype=dtype)
[all …]
test_sort_and_select.py
90 y_inds = torch.tensor((), dtype=torch.int64, device=device)
98 res2ind = torch.tensor((), device=device, dtype=torch.long)
129 res2ind = torch.tensor((), device=device, dtype=torch.long)
198 def test_stable_sort(self, device, dtype):
201 x = torch.tensor([0, 1] * ncopies, dtype=dtype, device=device)
215 def test_sort_large(self, device, dtype):
216 t0 = torch.randperm(8192, device=device).to(dtype)
226 self.assertEqual(vm, torch.arange(255, dtype=dtype, device=device))
230 def test_sort_restride(self, device, dtype):
232 tensor = torch.randn((3, 5), dtype=dtype, device=device)[:, 0]
[all …]
test_sparse.py
121 " Please use torch.sparse_coo_tensor((0,), dtype=)"
122 x_ref = torch.sparse_coo_tensor((0,), dtype=torch.float64)
129 x_ref = torch.tensor([[1, 2], [3, 4]], dtype=torch.float64).to_sparse()
137 " Please use torch.sparse_coo_tensor(indices, values, dtype=, device=)"
138 … x_ref = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], dtype=torch.float64)
140 torch.tensor([1, 2, 3, 4], dtype=torch.float64))
145 " Please use torch.sparse_coo_tensor(indices, values, shape, dtype=, device=)"
146 … = torch.sparse_coo_tensor([[0, 0, 1, 1], [0, 1, 0, 1]], [1, 2, 3, 4], (2, 3), dtype=torch.float64)
148 torch.tensor([1, 2, 3, 4], dtype=torch.float64), (2, 3))
153 " Please use torch.sparse_coo_tensor(shape, dtype=, device=)"
[all …]
test_spectral_ops.py
103 device=x.device, dtype=torch.cdouble)
107 slc = torch.empty(n_fft, device=x.device, dtype=x.dtype)
116 def skip_helper_for_fft(device, dtype):
118 if dtype not in (torch.half, torch.complex32):
134 def test_reference_1d(self, device, dtype, op):
142 (torch.randn(67, device=device, dtype=dtype),
143 torch.randn(80, device=device, dtype=dtype),
144 torch.randn(12, 14, device=device, dtype=dtype),
145 torch.randn(9, 6, 3, device=device, dtype=dtype)),
155 (torch.randn(4, 5, 6, 7, device=device, dtype=dtype),),
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
sets_test.py
38 def _values(values, dtype):
41 dtype=(np.str_ if (dtype == dtypes.string) else dtype.as_numpy_dtype))
44 def _constant(values, dtype):
45 return constant_op.constant(_values(values, dtype), dtype=dtype)
48 def _dense_to_sparse(dense, dtype):
60 values.append(str(cell) if dtype == dtypes.string else cell)
65 constant_op.constant(values, dtype),
73 for dtype in _DTYPES:
74 self._test_set_size_2d(dtype)
76 def _test_set_size_2d(self, dtype):
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
math_grad_test.py
68 def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
70 if dtype in (dtypes.complex64, dtypes.complex128):
79 shape, bias=bias), dtype=dtype)
91 [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
93 [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
97 [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
99 [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
106 inputs = constant_op.constant([1.0], dtype=dtypes.float32)
114 inputs = constant_op.constant([1.0], dtype=dtypes.float32)
125 inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
[all …]
init_ops.py
20 def _initializer(shape, dtype=dtypes.float32, partition_info=None):
24 dtype: (Optional) Type of the output `Tensor`.
30 A `Tensor` of type `dtype` and `shape`.
54 def __call__(self, shape, dtype=None, partition_info=None):
59 dtype: Optional dtype of the tensor. If not provided use the initializer
60 dtype.
105 To migrate to TF2, please use `tf.zeros_initializer` instead. The `dtype`
107 `tf.zeros_initializer.__init__()`. However, you can specify the `dtype` in
115 initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32)
123 variable = tf.Variable(initializer(shape=[3, 3], dtype=tf.float32))
[all …]
init_ops_v2.py
42 def __call__(self, shape, dtype=None, **kwargs):
43 # returns a tensor of shape `shape` and dtype `dtype`
48 def __call__(self, shape, dtype=None, **kwargs):
53 dtype: Optional dtype of the tensor. If not provided will return tensor
95 config.pop("dtype", None)
115 the Initializer object, without knowing the shape and dtype of the variable
121 ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
122 ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))
125 <tf.Variable ... shape=(3,) ... numpy=array([0., 0., 0.], dtype=float32)>
130 [0., 0., 0.]], dtype=float32)>
[all …]
math_ops_test.py
45 x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
61 x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
78 x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
108 for y, dtype in complex_ys:
112 self.assertEqual(y_result.dtype, dtype)
139 for y, dtype in complex_ys:
143 self.assertEqual(y_result.dtype, dtype)
150 for dtype in [np.float16, np.float32, np.double]:
151 x_np = np.random.rand(5, 5).astype(dtype)
158 for dtype in [np.float16, np.float32, np.double]:
[all …]
/aosp_15_r20/external/pytorch/torch/masked/
_ops.py
15 from torch.types import _dtype as DType
19 # The JIT doesn't understand Union, nor torch.dtype here
20 DType = int
87 dtype=torch.bool)``.
116 reduction, depends on input dtype. For instance, for float32, uint8,
168 sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
169 prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
170 cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
171 cumprod=(("dim__as_int",), ("dtype=None", "mask=None")),
172 amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
[all …]
/aosp_15_r20/external/pytorch/test/xpu/
test_conv.py
55 dtype=torch.double,
64 dtype=dtype,
73 dtype=dtype,
77 bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
97 dummy_out, device=device, dtype=dtype, requires_grad=True
100 if dtype == torch.float:
107 def test_Conv2d_large_workspace(self, device, dtype):
115 conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
117 x = torch.randn(size, device=device, dtype=dtype)
125 def test_ConvTranspose2d_large_output_padding(self, device, dtype):
[all …]
test_gemm.py
25 dtype = t.dtype
26 numpy_dtype = dtype
27 if dtype in {torch.bfloat16, torch.half}:
29 if dtype.is_complex:
56 res3_t = torch.from_numpy(res3).to(dtype)
62 res3 = torch.from_numpy(res3).to(dtype)
66 def _test_addmm_impl(self, func, activation, device, dtype):
67 M = torch.randn(10, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
68 m1 = torch.randn(10, 50, device="cpu", dtype=torch.float32).to(dtype).to(device)
69 m2 = torch.randn(50, 25, device="cpu", dtype=torch.float32).to(dtype).to(device)
[all …]
/aosp_15_r20/external/pytorch/test/inductor/
test_flex_attention.py
80 def _head_offset(dtype: torch.dtype):
82 head_offset = torch.rand(H, device="cuda", dtype=dtype)
128 dtype: torch.dtype = None,
130 """Clones the query, key, and value tensors and moves them to the specified dtype."""
131 if dtype is None:
132 dtype = query.dtype
133 query_ref = query.clone().detach().to(dtype).requires_grad_(query.requires_grad)
134 key_ref = key.clone().detach().to(dtype).requires_grad_(key.requires_grad)
135 value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)
172 dtype = ref_out.dtype
[all …]
test_flex_decoding.py
87 return torch.tril(torch.ones(Mkv, Mkv, dtype=torch.bool, device="cuda"))[
140 def _head_offset(dtype: torch.dtype):
142 head_offset = torch.rand(Hq, device="cuda", dtype=dtype)
197 dtype: torch.dtype = None,
199 """Clones the query, key, and value tensors and moves them to the specified dtype."""
200 if dtype is None:
201 dtype = query.dtype
202 query_ref = query.clone().detach().to(dtype).requires_grad_(query.requires_grad)
203 key_ref = key.clone().detach().to(dtype).requires_grad_(key.requires_grad)
204 value_ref = value.clone().detach().to(dtype).requires_grad_(value.requires_grad)
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/training/
ftrl_test.py
38 for dtype in [dtypes.half, dtypes.float32]:
42 dtype=dtype)
44 dtype=dtype)
46 var0 = variables.Variable([0.0, 0.0], dtype=dtype)
47 var1 = variables.Variable([0.0, 0.0], dtype=dtype)
48 grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
49 grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
81 for dtype in [dtypes.half, dtypes.float32]:
83 var0 = variables.Variable([1.0, 2.0], dtype=dtype)
84 var1 = variables.Variable([4.0, 3.0], dtype=dtype)
[all …]
