
Searched full:grad (Results 1 – 25 of 1887) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
math_grad.py
37 def _ArgMaxGrad(op, grad): argument
38 del op, grad
43 def _ArgMinGrad(op, grad): argument
44 del op, grad
49 def _EuclideanNormGrad(op, grad): argument
58 grad = array_ops.reshape(grad, output_shape_kept_dims)
60 return math_ops.truediv(op.inputs[0], output / grad), None
63 def SmartBroadcastGradientArgs(x, y, grad): argument
73 grad: The incoming gradient tensor for a broadcasting binary op.
80 * A boolean, which if True, indicates that x's shape differs from grad's
[all …]
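The math_grad.py hits above all follow TensorFlow's gradient-registration pattern: a function that receives the forward op and the incoming grad, registered under the op's type name. A minimal hedged sketch of that pattern (the op name below is hypothetical and chosen only for illustration; ArgMax/ArgMin return None per input because they are not differentiable):

    import tensorflow as tf

    @tf.RegisterGradient("HypotheticalArgMaxLikeOp")  # hypothetical op name, illustration only
    def _no_grad(op, grad):
        # Same shape as _ArgMaxGrad/_ArgMinGrad above: the op is not
        # differentiable, so the incoming gradient is discarded and None is
        # returned for each of the op's inputs.
        del op, grad
        return [None, None]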
array_grad.py
38 def _PackGrad(op, grad): argument
40 return array_ops.unstack(grad, num=op.get_attr("N"), axis=op.get_attr("axis"))
49 def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index): argument
54 grad: `Tensor` or `IndexedSlices` representing the gradients with respect to
102 # Degenerate concatenation, just return grad.
104 return grad + [None] if end_value_index <= dim_index else [None] + grad
110 if isinstance(grad, ops.Tensor):
119 out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
128 grad_context = control_flow_util.GetOutputContext(grad.op)
150 out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
[all …]
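The _ConcatGradHelper hits show the core of concat's gradient: the incoming grad is split back into the original input sizes along the concatenation axis. A small hedged illustration of that behavior (values are arbitrary, not from the file):

    import tensorflow as tf

    x = tf.constant([[1.0, 2.0]])
    y = tf.constant([[3.0, 4.0, 5.0]])
    upstream = tf.constant([[10.0, 20.0, 30.0, 40.0, 50.0]])

    with tf.GradientTape() as tape:
        tape.watch([x, y])
        out = tf.concat([x, y], axis=1)
        loss = tf.reduce_sum(out * upstream)

    dx, dy = tape.gradient(loss, [x, y])
    # dx == [[10., 20.]] and dy == [[30., 40., 50.]]: the upstream gradient is
    # split back into the original input sizes along the concat axis, which is
    # what _ConcatGradHelper does via array_ops.split(grad, sizes, axis).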
nn_grad.py
31 def _Conv2DBackpropInputGrad(op, grad): argument
36 grad: the tensor representing the gradient w.r.t. the output
46 grad,
56 grad,
68 def _Conv2DBackpropFilterGrad(op, grad): argument
74 grad,
84 grad,
95 def _DepthwiseConv2dNativeBackpropInputGrad(op, grad): argument
100 grad: the tensor representing the gradient w.r.t. the output
108 grad,
[all …]
control_flow_grad.py
31 def _SwitchGrad(op, *grad): argument
50 if grad[1] is not None:
52 control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
56 elif grad[0] is not None:
58 # the Exit branch, which is grad[0]. grad[1] is empty at this point.
59 # Use grad[0] for both inputs to merge for now, but update the second
61 merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
70 zero_grad = grad[1 - op_ctxt.branch]
78 [grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
80 return merge(grad, name="cond_grad")[0], None
[all …]
tensor_array_grad.py
48 TensorArray*Grad is being called in, by looking at the input gradient
58 TensorArray*Grad call.
83 def _TensorArrayReadGrad(op, grad): argument
88 grad: Gradient `Tensor` to TensorArrayRead.
92 force the write of `grad` to the gradient `TensorArray`.
94 # Note: the forward flow dependency in the call to grad() is necessary for
103 grad_source = _GetGradSource(grad)
106 .grad(source=grad_source, flow=flow))
107 w_g = g.write(index, grad)
122 A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
[all …]
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
6 # and `grad == grads[0]`, in all the derivative formulas in this file.
35 # uses `grad` instead of `grads[idx]`, then all but the first output will
63 # - 'grad', the gradient of the output (often spelled grad_output
77 # 'grad' (this case is special-cased in our code generation).
83 # permitted to mix the use of "grad", "grads", and
99 # grad, and `grad_input_mask[1]` is true if `input1` requires grad.
104 # grad. If we want to support more fine-grained signalling,
140 # called 'grad' (even though it really is a grad-grad).
147 # value of the argument "foo_p", its forward grad "foo_t" and the result of the
156 # will always have their forward grad formula called. This function is responsible
[all …]
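derivatives.yaml is codegen input, but the contract its comments describe — backward receives `grad`, the gradient of the output, and produces one gradient per differentiable input — is the same one exposed to users through torch.autograd.Function. A hedged Python sketch of that contract (an illustration, not the codegen path itself):

    import torch

    class Square(torch.autograd.Function):
        # backward gets `grad` (the gradient of the output) and must return
        # one gradient per forward input, mirroring a derivatives.yaml formula.
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * x

        @staticmethod
        def backward(ctx, grad):
            (x,) = ctx.saved_tensors
            return grad * 2 * x  # d(x*x)/dx applied to the incoming grad

    x = torch.tensor([3.0], requires_grad=True)
    Square.apply(x).backward()
    print(x.grad)  # tensor([6.])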
/aosp_15_r20/external/pytorch/test/dynamo/
test_hooks.py
21 def global_hook_0(grad): argument
22 return grad * 4
25 def global_hook_1(grad): argument
26 return grad / 2
29 def global_hook_2(grad): argument
30 return grad * 3
44 x.register_hook(lambda grad: grad * 2)
52 self.assertEqual(v.grad, torch.tensor([2.0, 4.0, 6.0]))
57 x.register_hook(lambda grad: grad * 2)
65 self.assertEqual(v.grad, torch.tensor([2.0, 4.0, 6.0]))
[all …]
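The hooks in test_hooks.py use Tensor.register_hook, which receives the gradient flowing into that tensor and may return a replacement. A minimal hedged example of the same pattern (values illustrative, not the test's):

    import torch

    v = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    v.register_hook(lambda grad: grad * 2)  # same shape of hook as in the test
    v.sum().backward()
    print(v.grad)  # tensor([2., 2., 2.]) -- the hook doubled the incoming gradient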
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
decompose_resource_ops.td
101 // accum = accum * momentum + grad;
106 $var_resource, $accum_resource, $lr, $grad, $momentum,
111 (CreateTFReadVariableOp $src_op, $grad, $accum_resource),
114 $grad
123 // accum = accum * momentum + grad;
124 // var -= grad * lr + accum * momentum * lr
128 $var_resource, $accum_resource, $lr, $grad, $momentum,
133 (CreateTFReadVariableOp $src_op, $grad, $accum_resource),
136 $grad
142 (TF_MulOp $grad, $lr),
[all …]
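These TableGen patterns decompose resource momentum ops into primitive TF ops, and the commented recurrence is the whole algorithm. A hedged NumPy transcription of the variant whose comment is visible above (plain arrays standing in for the resource variables; reading it as the Nesterov-style branch is an inference from the comment, not from the full pattern):

    import numpy as np

    def momentum_update(var, accum, lr, grad, momentum):
        # Transcription of the recurrence in the .td comments above:
        #   accum = accum * momentum + grad
        #   var  -= grad * lr + accum * momentum * lr
        accum = accum * momentum + grad
        var = var - (grad * lr + accum * momentum * lr)
        return var, accum

    var, accum = np.array([1.0, -2.0]), np.zeros(2)
    var, accum = momentum_update(var, accum, lr=0.1, grad=np.array([0.5, 0.5]), momentum=0.9)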
/aosp_15_r20/external/pytorch/test/xpu/
test_conv.py
101 (g,) = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
186 i.grad.data,
187 torch.cat([i1.grad.data, i2.grad.data], 1),
192 m.bias.grad.data,
193 torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
198 m.weight.grad.data,
199 torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
249 i.grad.data,
250 torch.cat([i1.grad.data, i2.grad.data], 1),
255 m.bias.grad.data,
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.h
57 const Tensor& grad,
72 const Tensor& grad,
76 const at::Tensor& grad,
81 at::Tensor grad,
95 const Tensor& grad,
100 const Tensor& grad,
116 at::Tensor grad,
123 at::Tensor grad,
127 const at::Tensor& grad,
131 const at::Tensor& grad,
[all …]
FunctionsManual.cpp
99 const Tensor& grad, in copysign_tensor_self_backward() argument
104 return grad * ratio; in copysign_tensor_self_backward()
205 const Tensor& grad, in scale_grad_by_count() argument
208 return (grad / mask.sum(dims, true)) * mask; in scale_grad_by_count()
222 const Tensor& grad, in _euclidean_dist_backward() argument
226 if (!grad.defined()) { in _euclidean_dist_backward()
230 Tensor ratio = grad / res; in _euclidean_dist_backward()
238 const Tensor& grad, in norm_backward() argument
242 return norm_backward(grad, self, p_, norm, {}, true); in norm_backward()
246 Tensor grad, in norm_backward() argument
[all …]
input_metadata.cpp
57 at::Tensor grad, in maybe_reduce() argument
60 const auto message = incompatible_shape_error_message(i, grad); in maybe_reduce()
67 if (is_nested_ || is_cpp_nested_tensor() || grad.is_nested() || in maybe_reduce()
68 ::torch::autograd::is_cpp_nested_tensor(grad)) { in maybe_reduce()
69 if (!is_same_shape(grad)) { in maybe_reduce()
70 if (is_expandable_to_shape(grad)) { in maybe_reduce()
71 return reduce_grad(grad); in maybe_reduce()
76 return grad; in maybe_reduce()
81 auto desired = grad.sym_sizes(); in maybe_reduce()
111 return reduce_grad(grad); in maybe_reduce()
[all …]
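maybe_reduce checks whether the incoming grad merely has a shape that is expandable to the parameter's shape and, if so, reduces it (reduce_grad) before handing it back. For ordinary dense tensors that reduction amounts to summing over broadcast dimensions; the sketch below is a hedged Python rendering of that rule (an assumption about reduce_grad's semantics, and it ignores the nested-tensor branches shown above):

    import torch

    def reduce_broadcast_grad(grad: torch.Tensor, shape: torch.Size) -> torch.Tensor:
        # Sum over leading dims added by broadcasting...
        while grad.dim() > len(shape):
            grad = grad.sum(dim=0)
        # ...then over dims that were broadcast up from size 1.
        for i, size in enumerate(shape):
            if size == 1 and grad.shape[i] != 1:
                grad = grad.sum(dim=i, keepdim=True)
        return grad

    g = torch.ones(4, 3)  # gradient whose shape the input was broadcast to
    print(reduce_broadcast_grad(g, torch.Size([1, 3])))  # tensor([[4., 4., 4.]])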
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
training_ops_gpu.cu.cc
37 T* var, T* accum, const T* lr, const T* epsilon, const T* grad, in SparseApplyAdagradKernel() argument
55 T grad_i = grad[grad_index]; in SparseApplyAdagradKernel()
76 T* var, T* accum, const T* lr, const T* l1, const T* l2, const T* grad, in SparseApplyProximalAdagradKernel() argument
94 T grad_i = grad[grad_index]; in SparseApplyProximalAdagradKernel()
101 // compute v = w - lr * grad. in SparseApplyProximalAdagradKernel()
118 const T* grad, const Tindex* indices, in SparseApplyFtrlKernel() argument
138 const T grad_i = grad[grad_index]; in SparseApplyFtrlKernel()
184 const T* const beta2_, const T* const epsilon_, const T* grad, in ApplyAdamKernel() argument
203 auto g_i = grad[i]; in ApplyAdamKernel()
223 T* var, T* accum, const T* lr, const T* grad, const Tindex* indices, in SparseApplyKerasMomentumKernel() argument
[all …]
training_ops.cc
50 typename TTypes<T>::ConstFlat grad) { in operator ()()
51 var.device(d) -= grad * lr(); in operator ()()
63 typename TTypes<T>::ConstFlat grad) { in operator ()()
65 accum * rho() + grad.square() * (static_cast<T>(1) - rho()); in operator ()()
67 (accum_update + epsilon()).sqrt() * (accum + epsilon()).rsqrt() * grad; in operator ()()
80 typename TTypes<T>::ConstFlat grad) { in operator ()()
86 // compute v = w - lr * grad. in operator ()()
87 prox_var.device(d) -= grad * lr(); in operator ()()
109 typename TTypes<T>::ConstFlat grad) { in operator ()()
111 gradient_accum.device(d) += grad; in operator ()()
[all …]
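training_ops.cc implements optimizers as element-wise Eigen functors, and the expressions in the hits already spell out the math. A hedged NumPy transcription of the SGD and Adadelta updates visible above (the last two Adadelta lines are the standard remaining steps, not shown in the snippet):

    import numpy as np

    def apply_sgd(var, grad, lr):
        # training_ops.cc: var.device(d) -= grad * lr();
        return var - lr * grad

    def apply_adadelta(var, accum, accum_update, grad, lr, rho, eps):
        # First two lines follow the functor shown above; the accum_update and
        # var steps are the usual remaining Adadelta updates.
        accum = accum * rho + np.square(grad) * (1.0 - rho)
        update = np.sqrt(accum_update + eps) / np.sqrt(accum + eps) * grad
        accum_update = accum_update * rho + np.square(update) * (1.0 - rho)
        var = var - lr * update
        return var, accum, accum_update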
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/linalg/sparse/
sparse_csr_matrix_grad.py
26 def _DenseToCSRSparseMatrixGrad(op, grad): argument
30 grad, type=op.get_attr("T")))
36 def _CSRSparseMatrixToDenseGrad(op, grad): argument
39 op.inputs[0], type=grad.dtype)
42 values=array_ops.gather_nd(grad, coo_sparse_tensor.indices),
43 dense_shape=grad.shape)
47 def _SparseTensorToCSRSparseMatrixGrad(op, grad): argument
50 grad, type=op.get_attr("T")).values
111 def _SparseMatrixAddGrad(op, grad): argument
117 # d(a*A + b*B)/dA . grad = a * grad
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/risc/
risc_grad.py
21 def _RiscAbsGrad(_, grad): argument
28 def _RiscAddGrad(_, grad): argument
35 def _RiscBinaryArithmeticGrad(_, grad): argument
42 def _RiscBinaryComparisonGrad(_, grad): argument
49 def _RiscBitcastGrad(_, grad): argument
56 def _RiscBroadcastGrad(_, grad): argument
63 def _RiscCastGrad(_, grad): argument
70 def _RiscCholeskyGrad(_, grad): argument
77 def _RiscCeilGrad(_, grad): argument
84 def _RiscConcatGrad(_, grad): argument
[all …]
/aosp_15_r20/external/pytorch/test/
test_autograd.py
135 torch.autograd.grad(out.sum(), y)
139 torch.autograd.grad(out.sum(), x)
142 torch.autograd.grad(out.sum(), y_safe)
145 torch.autograd.grad(out.sum(), (x, y_safe))
162 torch.autograd.grad(out.sum(), (b, y_safe))
208 # Accessing .grad on leaf
210 foo = dummy.grad
213 # Accessing .grad on non-leaf
215 foo = dummy.grad
218 # Accessing .grad on non-leaf that retains gradients
[all …]
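The test_autograd.py hits exercise two related things: the functional torch.autograd.grad API and the rules for when .grad is populated (leaf vs. non-leaf). A minimal hedged example of both, with illustrative values rather than the test's:

    import torch

    x = torch.tensor([2.0], requires_grad=True)  # leaf
    y = x * 3                                    # non-leaf
    out = (y * y).sum()

    # Functional API: returns the gradient instead of populating .grad.
    (gx,) = torch.autograd.grad(out, x, create_graph=True)
    print(gx)  # tensor([36.], ...): d/dx (3x)^2 = 18x = 36 at x = 2

    # backward() only populates .grad on leaves; a non-leaf needs retain_grad()
    # first, which is what the ".grad on non-leaf" cases above probe.
    y.retain_grad()
    out.backward()
    print(x.grad, y.grad)  # tensor([36.]) tensor([12.])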
/aosp_15_r20/external/tensorflow/tensorflow/python/training/
training_ops_test.py
69 def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None): argument
77 apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
80 self.assertAllCloseAccordingToType(x - lr * grad * (y + grad * grad)**
82 self.assertAllCloseAccordingToType(y + grad * grad, self.evaluate(accum))
89 grad, argument
102 apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
106 accum_update = y + grad * grad
107 linear_update = z + grad - (accum_update**(-lr_power) - y**
134 grad, argument
152 grad,
[all …]
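The Adagrad test asserts the closed-form update directly; below is a hedged NumPy transcription of exactly the expressions asserted above (x and y are the initial var and accum values, as in the test's signature):

    import numpy as np

    def apply_adagrad(x, y, lr, grad):
        # The expressions the test checks against:
        #   accum_new = y + grad * grad
        #   var_new   = x - lr * grad * (y + grad * grad) ** (-0.5)
        accum_new = y + grad * grad
        var_new = x - lr * grad * accum_new ** (-0.5)
        return var_new, accum_new

    var, accum = apply_adagrad(np.array([1.0, 2.0]), np.array([0.1, 0.1]),
                               lr=0.01, grad=np.array([0.2, 0.3]))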
rmsprop.py
119 grad = tf.constant([0.1, 0.2, 0.3])
121 optimizer.apply_gradients(zip([grad], [x]))
128 grad = tf.constant([0.1, 0.2, 0.3])
130 optimizer.apply_gradients(zip([grad], [x]))
209 def _apply_dense(self, grad, var): argument
223 grad,
234 grad,
237 def _resource_apply_dense(self, grad, var): argument
247 math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
248 math_ops.cast(self._decay_tensor, grad.dtype.base_dtype),
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/algorithms/_comm_hooks/
default_hooks.py
72 def _decompress(state: LowPrecisionState, grad: torch.Tensor):
76 orig_grad_data = grad.data
77 grad.data = grad.data.to(state.parameter_type)
80 if grad.device.type == "privateuse1":
83 device_type = grad.device.type
87 f"Device {grad.device} does not have a \
95 def allreduce_hook(state: DefaultState, grad: torch.Tensor):
101 grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.
103 # Average grad by pre-division factor. Together pre- and post-division factors
107 grad.div_(state.gradient_predivide_factor)
[all …]
/aosp_15_r20/external/pytorch/test/nn/
test_convolution.py
507 return op.weight.grad
744 i.grad.data,
745 torch.cat([i1.grad.data, i2.grad.data], 1),
750 m.weight.grad.data,
751 torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
790 i.grad.data,
791 torch.cat([i1.grad.data, i2.grad.data], 1),
796 m.weight.grad.data,
797 torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
828 i.grad.data,
[all …]
/aosp_15_r20/external/pytorch/test/cpp/api/
autograd.cpp
56 ASSERT_VARIABLE_EQ(x.grad(), y + torch::ones({2, 2})); in TEST()
57 ASSERT_VARIABLE_EQ(y.grad(), x + torch::ones({2, 2}) * 2); in TEST()
68 ASSERT_VARIABLE_EQ(x.grad(), 2 * (y + torch::ones({2, 2}))); in TEST()
69 ASSERT_VARIABLE_EQ(y.grad(), 2 * (x + torch::ones({2, 2}) * 2)); in TEST()
73 // basic grad in TEST()
77 auto grad_res = grad({res}, {x, y}, {torch::ones({2, 2})}); in TEST()
91 ASSERT_VARIABLE_EQ(x.grad(), x_grad); in TEST()
92 ASSERT_VARIABLE_EQ(y.grad(), y_grad); in TEST()
94 Variable grad_sum = 2 * x.grad() + y.grad(); in TEST()
95 auto x_hv = grad({grad_sum}, {x}, {torch::ones({2, 2})}, {}, true); in TEST()
[all …]
/aosp_15_r20/external/pytorch/test/inductor/
test_distributed_patterns.py
89 grad = mod.weight.grad
90 new_grad = reduce_scatter(grad)
92 mod.weight.grad = new_grad
148 y.register_hook(lambda grad: grad + obj.val + closure_var)
167 self.assertEqual(x0.grad, x2.grad)
168 self.assertEqual(x1.grad, x3.grad)
261 self.assertEqual(w1.grad, x1.cos())
298 self.assertEqual(w1.grad, x1.cos())
314 self.assertEqual(inp1.grad, inp2.grad)
315 self.assertEqual(m1.weight.grad, m2.weight.grad)
[all …]
test_compiled_autograd.py
58 def hook1(grad): argument
59 return grad * 2
132 assert(w.grad is not None)
155 yield model[0].weight.grad
156 yield model[0].bias.grad
157 yield model[2].weight.grad
158 yield model[2].bias.grad
174 yield model[0].weight.grad
175 yield model[0].bias.grad
176 yield model[2].weight.grad
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/cc/gradients/
math_grad.cc
92 // grad(x) = grad(y) * conj(dy/dx) in SquareGrad()
123 // grad(x) = grad(y) * conj(dy/dx) in ExpGrad()
124 // = grad(y) * conj(y) in ExpGrad()
137 // grad(x) = grad(y) * conj(dy/dx) in Expm1Grad()
150 // grad(x) = grad(y) * conj(dy/dx) in LogGrad()
164 // grad(x) = grad(y) * conj(dy/dx) in Log1pGrad()
177 // grad(x) = grad(y) * conj(dy/dx) in SinhGrad()
190 // grad(x) = grad(y) * conj(dy/dx) in CoshGrad()
203 auto grad = grad_inputs[0]; in TanhGrad() local
206 Scope grad_scope = scope.WithControlDependencies(grad); in TanhGrad()
[all …]
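The C++ gradients above repeat the identity grad(x) = grad(y) * conj(dy/dx); ExpGrad uses dy/dx = y, and TanhGrad uses dy/dx = 1 - y^2 (the tanh formula is standard calculus, not shown in the hit). A hedged NumPy check of the real-valued case, where conj is the identity, against a finite difference:

    import numpy as np

    x = np.linspace(-2.0, 2.0, 5)
    upstream = np.ones_like(x)  # grad(y), all ones as for a plain sum loss

    # ExpGrad:  grad(x) = grad(y) * conj(y)        with y = exp(x)
    y = np.exp(x)
    grad_x_exp = upstream * np.conj(y)

    # TanhGrad: grad(x) = grad(y) * conj(1 - y^2)  with y = tanh(x)
    y = np.tanh(x)
    grad_x_tanh = upstream * np.conj(1.0 - y * y)

    # Compare against central finite differences of the forward functions.
    eps = 1e-6
    assert np.allclose(grad_x_exp, (np.exp(x + eps) - np.exp(x - eps)) / (2 * eps), atol=1e-4)
    assert np.allclose(grad_x_tanh, (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps), atol=1e-4)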
