Searched full:backward (Results 1 – 25 of 12065) sorted by relevance

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
symbolic_script.cpp
55 def backward(grad_output):
58 return torch.mean(self, dtype=dtype), backward
67 def backward(grad_output):
71 return torch.mean(self, dim, keepdim, dtype=dtype), backward
78 def backward(grad_output):
82 return result, backward
143 def backward(grad_output):
148 return std_out, backward
155 def backward(grad_output):
160 return std_out, backward
[all …]
/aosp_15_r20/external/pytorch/test/
test_autograd.py
131 out.sum().backward()
228 result.sum().backward(go, create_graph=True)
246 def backward(ctx, grad_output): member in TestAutograd.test_function.MyFunction
275 def backward(ctx, grad_output): member in TestAutograd.test_once_differentiable.MyFunction
301 def backward(ctx, grad): member in TestAutograd.test_function_returns_input.MyFunction
306 MyFunction.apply(v).backward()
311 MyFunction.apply(v.clone()).backward()
321 def backward(ctx, grad): member in TestAutograd.test_function_returns_undefined_tensor.MyFunction
324 # Test that undefined tensors returned from custom backward function
328 MyFunction.apply(x).backward()
[all …]
/aosp_15_r20/external/pytorch/test/inductor/
test_compiled_autograd.py
131 loss.backward()
154 result.backward()
173 result.backward()
190 def backward(ctx, grad): function
194 sin.register_autograd(backward, setup_context=setup_context)
199 y.backward()
213 result.backward()
230 result.backward()
247 result.backward()
265 result.backward()
[all …]
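The register_autograd pattern exercised above comes from the torch.library custom-op API. Below is a minimal sketch of that registration flow, assuming a recent PyTorch that provides torch.library.custom_op; the op name "mylib::sin" and the cosine gradient formula are illustrative assumptions, not taken from the test.

```python
import torch
from torch.library import custom_op

# Hypothetical op "mylib::sin"; mirrors the backward/setup_context registration
# pattern seen in the test excerpt above.
@custom_op("mylib::sin", mutates_args=())
def sin(x: torch.Tensor) -> torch.Tensor:
    return torch.sin(x)

def setup_context(ctx, inputs, output):
    (x,) = inputs
    ctx.save_for_backward(x)

def backward(ctx, grad):
    (x,) = ctx.saved_tensors
    return grad * x.cos()

sin.register_autograd(backward, setup_context=setup_context)

x = torch.randn(3, requires_grad=True)
sin(x).sum().backward()
print(x.grad)  # should match cos(x)
```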
/aosp_15_r20/external/pytorch/test/dynamo/
test_hooks.py
51 v.backward(torch.tensor([1.0, 2.0, 3.0]))
64 v.backward(torch.tensor([1.0, 2.0, 3.0]))
80 v.backward(torch.tensor([1.0, 2.0, 3.0]))
95 v.backward(torch.tensor([1.0, 2.0, 3.0]))
113 v.backward(torch.tensor([1.0, 2.0, 3.0]))
132 v.backward(torch.tensor([1.0, 2.0, 3.0]))
152 v.backward(torch.tensor([1.0, 2.0, 3.0]))
171 v.backward(torch.tensor([1.0, 2.0, 3.0]))
189 v[0].backward(torch.tensor([1.0, 2.0, 3.0]))
205 v.backward(torch.tensor([1.0, 2.0, 3.0]))
[all …]
test_autograd_function.py
26 def backward(ctx, grad_output): member in CustomFunc1
41 def backward(ctx, grad_output): member in CustomFunc3
89 # Note that forward, setup_context, and backward are @staticmethods
106 def backward(ctx, grad_output): member in LinearFunction
131 def backward(ctx, grad_out1, grad_out2): member in MaterializingGradFunction
146 def backward(ctx, grad_output): member in CustomFuncBwdPrintGraphBreak
162 def backward(ctx, grad_output): member in CustomFuncStrideBwd
180 def backward(ctx, grad_output): member in CustomFuncSaveForBwd
199 def backward(ctx, grad_output): member in ContextSaveAndMark
212 def backward(ctx, grad_output): member in ContextMarkAndSave
[all …]
/aosp_15_r20/external/pytorch/torch/autograd/
function.py
36 r"""Save given tensors for a future call to :func:`~Function.backward`.
41 All tensors intended to be used in the backward pass should be saved
47 nor outputs of :func:`forward`, are saved for backward, your custom Function
48 may not support double backward.
49 Custom Functions that do not support double backward should decorate their
50 :func:`backward` method with ``@once_differentiable`` so that performing
51 double backward raises an error. If you'd like to support double backward,
52 you can either recompute intermediaries based on the inputs during backward
54 …`double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backw…
57 In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
[all …]
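The function.py docstring above describes save_for_backward and recommends decorating backward with once_differentiable when a custom Function does not support double backward. A minimal sketch of that pattern, using an illustrative ReLU-like op:

```python
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable

class ReLUOnce(Function):
    """Illustrative Function that saves its input and opts out of double backward."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)          # stash tensors needed in backward
        return x.clamp(min=0)

    @staticmethod
    @once_differentiable                  # double backward now raises instead of silently failing
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return grad_output * (x > 0).to(grad_output.dtype)

x = torch.randn(5, requires_grad=True)
ReLUOnce.apply(x).sum().backward()
```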
graph.py
77 r"""Register a backward hook.
92 See :ref:`backward-hooks-execution` for more information on how when this hook
102 >>> b.sum().backward(retain_graph=True)
107 >>> b.sum().backward(retain_graph=True)
115 r"""Register a backward pre-hook.
129 See :ref:`backward-hooks-execution` for more information on how when this hook
138 >>> b.sum().backward(retain_graph=True)
143 >>> b.sum().backward(retain_graph=True)
215 operation saves a tensor for backward (this includes intermediary results
223 namely when executing :func:`torch.Tensor.backward()` or
[all …]
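graph.py above documents hooks registered directly on backward-graph Nodes. A short sketch of that usage, close to the docstring example; the doubling hook is illustrative:

```python
import torch

a = torch.tensor([0.0, 0.0, 0.0], requires_grad=True)
b = a.clone()

# b.grad_fn is the backward-graph Node; the hook receives (grad_inputs, grad_outputs)
# and may return replacement grad_inputs for that Node.
handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,))

b.sum().backward(retain_graph=True)
print(a.grad)      # tensor([2., 2., 2.]) -- doubled by the hook

handle.remove()
a.grad = None
b.sum().backward(retain_graph=True)
print(a.grad)      # tensor([1., 1., 1.]) -- hook no longer applied
```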
anomaly_mode.py
16 - Running the forward pass with detection enabled will allow the backward
18 backward function.
19 - If ``check_nan`` is ``True``, any backward computation that generate "nan"
35 ... def backward(ctx, gO):
36 ... # Error during the backward pass
37 ... raise RuntimeError("Some error in backward")
44 >>> out.backward()
47 File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
48 torch.autograd.backward(self, gradient, retain_graph, create_graph)
49 File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
[all …]
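anomaly_mode.py above shows how anomaly detection augments a backward error with the traceback of the forward call that created it. A minimal sketch matching that docstring example:

```python
import torch
from torch import autograd

class MyFunc(autograd.Function):
    @staticmethod
    def forward(ctx, inp):
        return inp.clone()

    @staticmethod
    def backward(ctx, gO):
        # Error during the backward pass
        raise RuntimeError("Some error in backward")

x = torch.rand(10, 10, requires_grad=True)
with autograd.detect_anomaly():
    out = MyFunc.apply(x)      # the forward traceback is recorded here
    out.sum().backward()       # the RuntimeError now also points at that forward call
```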
/aosp_15_r20/external/pytorch/test/cpp/api/
autograd.cpp
39 y[0].backward(); in TEST()
47 y[0].backward(); in TEST()
54 backward({res.sum()}, {}); in TEST()
64 backward({res}, {torch::ones({2, 2})}, {}, true); in TEST()
66 backward({res}, {torch::ones({2, 2})}); in TEST()
87 res.backward(torch::ones({2, 2}), false, true); in TEST()
123 x.backward(grad_output, false, true); in TEST()
160 static variable_list backward( in TEST() function
207 out.backward({}, /*keep_graph=*/true); in TEST()
210 out.backward({}, /*keep_graph=*/true); in TEST()
[all …]
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/rpc/
dist_autograd_test.py
173 dist_autograd.backward(context_id, [loss])
187 dist_autograd.backward(context_id, [loss])
202 def backward(ctx, input): member in SimulateBackwardError
204 raise Exception("Simulate error on backward pass") # noqa: TRY002
252 torch.autograd.backward(tensors)
258 dist_autograd.backward(context_id, tensors)
619 dist_autograd.backward(context_id, [loss], retain_graph=True)
630 loss_local.backward()
636 dist_autograd.backward(context_id, [loss])
651 local_ret.backward()
[all …]
/aosp_15_r20/external/pytorch/docs/source/rpc/
distributed_autograd.rst
41 The main motivation behind distributed autograd is to enable running a backward
51 used to execute the backward pass. For more details see
55 pass to ensure the backward pass is executed appropriately. For this purpose,
61 The input for this function during the backward pass is received from the
66 node to the appropriate ``send`` function during the backward pass.
69 function on a remote node during the backward pass.
83 Each forward and backward pass that uses distributed autograd is assigned a
90 1. Multiple nodes running distributed backward passes might accumulate
92 tensor would have gradients from a variety of distributed backward passes
94 calling :meth:`torch.autograd.backward` multiple times locally. In order to
[all …]
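The distributed autograd notes above revolve around per-pass contexts and dist_autograd.backward. A rough two-worker sketch, assuming this process is launched as "worker0" of a 2-worker RPC group with a peer running as "worker1" (it will not run standalone):

```python
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

rpc.init_rpc("worker0", rank=0, world_size=2)   # peer process must init as "worker1"

with dist_autograd.context() as context_id:
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
    dist_autograd.backward(context_id, [loss])          # runs the distributed backward pass
    grads = dist_autograd.get_gradients(context_id)     # gradients live in the context, not .grad

rpc.shutdown()
```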
/aosp_15_r20/external/pytorch/torch/distributed/fsdp/
_runtime_utils.py
54 BACKWARD = auto() variable in _PrefetchMode
190 "FSDP optimizer in backward only supported with use_orig_params=True!"
260 # Stream for overlapping gradient reduction with the backward pass gradient
355 registering post-backward hooks for these current parameters. This function
383 # Register post-backward hooks to reshard the parameters and reduce-scatter
388 # set the grad to None in the backward pass.
445 and registering pre-backward hooks on the forward outputs.
457 output (Any): Forward pass output; pre-backward hooks are registered on
472 # Register pre-backward hooks to unshard the flat parameters for the
490 # with the intention that they are immediately used for backward
[all …]
/aosp_15_r20/external/pytorch/torch/_custom_op/
autograd.py
17 # and register something that is actually a backward formula
25 # As explained in NOTE ["backward", "save_for_backward", and "autograd"],
26 # after the user gives us "backward" and "save_for_backward", we generate
29 if custom_op._has_impl('save_for_backward') or custom_op._has_impl('backward'):
31 'save_for_backward' if custom_op._has_impl('backward')
32 else 'backward'
34 found = 'save_for_backward' if missing == 'backward' else 'backward'
39 f"To use the CustomOp API to register a backward formula, "
40 f"please provide us both a backward function and a "
41 f"'save for backward' function via `impl_backward` and "
[all …]
/aosp_15_r20/external/pytorch/functorch/notebooks/
aot_autograd_optimizations.ipynb
17 …"* AOT Autograd traces the forward and backward graph ahead of time. Presence of forward and backw…
18 …"* AOT Autograd provides simple mechanisms to compile the extracted forward and backward graphs th…
67 "loss.backward()"
76 …racted forward and backward graphs. Internally, AOT uses `__torch_dispatch__` based tracing mechan…
78 …"AOT Autograd then sends these forward and backward graphs to the user supplied compilers. So, let…
119 "# The compiler_fn is called after the forward and backward graphs are extracted.\n",
132 "res.sum().backward()\n",
140 …backward graph. You can see that in addition to the original input of the forward pass, the forwar…
148 …"Now that we understand how to use AOT Autograd to print forward and backward graphs, let us use A…
160 "# Lets compile the forward and backward through ts_compile.\n",
[all …]
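The AOT Autograd notebook excerpted above extracts the forward and backward graphs ahead of time and hands them to user-supplied compilers. A sketch of that flow with a printing compiler; the function and compiler names here are illustrative, not the notebook's exact cells:

```python
import torch
from functorch.compile import aot_function

def fn(a, b):
    return (a * b).sum()

def print_compiler(fx_module, example_inputs):
    # Called once with the extracted forward graph and once with the backward graph.
    print(fx_module.code)
    return fx_module.forward        # return a callable to actually execute

aot_fn = aot_function(fn, fw_compiler=print_compiler, bw_compiler=print_compiler)

a = torch.randn(4, requires_grad=True)
b = torch.randn(4, requires_grad=True)
aot_fn(a, b).backward()             # triggers compilation/printing of the backward graph
```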
/aosp_15_r20/platform_testing/libraries/app-helpers/spectatio/spectatio-util/src/android/platform/spectatio/utils/
SpectatioUiUtil.java
394 * Scroll using forward and backward buttons on device screen and check if the given text is
401 * @param backward {@link BySelector} for the button to use for scrolling backward/up.
406 BySelector forward, BySelector backward, String text) throws MissingUiElementException { in scrollAndCheckIfUiElementExist() argument
407 return scrollAndFindUiObject(forward, backward, text) != null; in scrollAndCheckIfUiElementExist()
411 * Scroll by performing forward and backward gestures on device screen and check if the given
431 * Scroll by performing forward and backward gestures on device screen and check if the given
464 * Scroll using forward and backward buttons on device screen and check if the given target is
471 * @param backward {@link BySelector} for the button to use for scrolling backward/up.
476 BySelector forward, BySelector backward, BySelector target) in scrollAndCheckIfUiElementExist() argument
478 return scrollAndFindUiObject(forward, backward, target) != null; in scrollAndCheckIfUiElementExist()
[all …]
/aosp_15_r20/external/pytorch/torch/utils/
checkpoint.py
249 # to be filled out during the backward.
268 def backward(ctx, *args): member in CheckpointFunction
272 " with .grad() or passing an `inputs` parameter to .backward()."
274 " or call .backward() without passing the `inputs` argument."
309 # run backward() with only tensor that requires grad
321 torch.autograd.backward(outputs_with_grad, args_with_grad)
356 Instead of keeping tensors needed for backward alive until they are used in
357 gradient computation during backward, forward computation in checkpointed
358 regions omits saving tensors for backward and recomputes them during the
359 backward pass. Activation checkpointing can be applied to any part of a
[all …]
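checkpoint.py above trades memory for recomputation: tensors inside a checkpointed region are not saved for backward and the forward is re-run during the backward pass. A minimal sketch of the non-reentrant API:

```python
import torch
from torch.utils.checkpoint import checkpoint

def block(x):
    # Activations produced here are not kept alive for backward; they are
    # recomputed when the backward pass reaches this checkpointed region.
    return torch.relu(x @ x.t()).sum()

x = torch.randn(64, 64, requires_grad=True)
loss = checkpoint(block, x, use_reentrant=False)
loss.backward()
print(x.grad.shape)
```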
/aosp_15_r20/external/pytorch/test/xpu/
test_conv.py
119 out.backward(torch.ones_like(out))
139 x.backward(torch.randn_like(x))
161 output.backward(grad_output)
170 output1.backward(grad_output[:, :offset].contiguous())
177 output2.backward(grad_output[:, offset:].contiguous())
226 output.backward(grad_output)
235 output1.backward(grad_output[:, :offset].contiguous())
242 output2.backward(grad_output[:, offset:].contiguous())
277 output.backward(grad, retain_graph=True)
282 output.backward(grad.contiguous())
[all …]
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/
test_fully_shard_comm.py
60 # For recording FSDP events like unshard or post-backward
130 # for non-async and non-default streams (like in pre-backward)
194 # pre-backward all-gather
300 reduce-scatters during forward and backward.
333 loss.sum().backward()
375 loss.sum().backward()
408 ref_loss.backward()
413 loss.backward()
460 # Check the order for normal 1 forward, 1 backward, 1 optimizer step
474 loss.sum().backward()
[all …]
/aosp_15_r20/external/pytorch/torch/fft/
__init__.py
43 * ``"backward"`` - no normalization
46 Calling the backward transform (:func:`~torch.fft.ifft`) with the same
51 Default is ``"backward"`` (no normalization).
83 norm (str, optional): Normalization mode. For the backward transform
87 * ``"backward"`` - normalize by ``1/n``
95 Default is ``"backward"`` (normalize by ``1/n``).
138 * ``"backward"`` - no normalization
142 Calling the backward transform (:func:`~torch.fft.ifft2`) with the same
147 Default is ``"backward"`` (no normalization).
184 norm (str, optional): Normalization mode. For the backward transform
[all …]
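The torch.fft docstrings above describe the "backward" normalization mode: no scaling on the forward transform, 1/n scaling on the inverse. A quick round-trip check:

```python
import torch

x = torch.randn(8, dtype=torch.complex64)

X = torch.fft.fft(x, norm="backward")             # forward transform: no normalization
x_roundtrip = torch.fft.ifft(X, norm="backward")  # inverse transform: scaled by 1/n

torch.testing.assert_close(x, x_roundtrip)        # round trip recovers the input
```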
/aosp_15_r20/external/pytorch/test/nn/
test_module_hooks.py
206 out[0].sum().backward()
208 model(x)[0].sum().backward()
228 out[0].sum().backward()
230 model(x)[0].sum().backward()
250 out[0].sum().backward()
252 model(x)[0].sum().backward()
272 out[0].sum().backward()
274 model(x)[0].sum().backward()
277 # Backward pre hook can affect subsequent gradient computation
288 out.sum().backward()
[all …]
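The module-hook tests above exercise backward pre-hooks that can affect subsequent gradient computation. A small sketch using the full backward pre-hook API on nn.Module; the 0.5 scaling is an illustrative assumption:

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)

def scale_grad_output(module, grad_output):
    # Runs before the module's backward; returning a tuple replaces grad_output.
    return tuple(g * 0.5 for g in grad_output)

handle = model.register_full_backward_pre_hook(scale_grad_output)

out = model(torch.randn(3, 4))
out.sum().backward()          # gradients reaching the Linear layer are halved by the pre-hook
handle.remove()
```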
/aosp_15_r20/external/pytorch/docs/source/notes/
autograd.rst
45 in order to execute the backward pass. For example, the function
52 during the backward pass. See :doc:`/notes/extending` for more information.
104 …ill be marked as non-differentiable. This will make it error out in the backward if used on tensor…
131 forward and backward passes:
133 During the forward pass, an operation is only recorded in the backward graph if
135 During the backward pass (``.backward()``), only leaf tensors with
143 backward graph associated with them. Thus their gradients will be needed
155 the forward pass, they won't have their ``.grad`` fields updated in the backward
156 pass because they won't be part of the backward graph in the first place, as
178 - Excludes operations from being recorded in backward graph
[all …]
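The autograd notes excerpted above distinguish leaf tensors whose .grad is populated during backward from tensors that never enter the backward graph. A short illustration:

```python
import torch

w = torch.randn(3, requires_grad=True)   # leaf tensor: recorded in the backward graph
x = torch.randn(3)                       # requires_grad=False: not tracked on its own
y = (w * x).sum()                        # non-leaf result of a recorded operation

y.backward()
print(w.grad)   # populated: leaf with requires_grad=True
print(x.grad)   # None: never part of the backward graph
```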
extending.rst
31 The first part of this doc is focused on backward mode AD as it is the most widely used
41 memory usage: If you implemented your forward and backward passes using a
44 engine. If you'd like to reduce the number of buffers saved for the backward pass,
50 backward graph is (most likely) already able to be recorded by autograd. In this case, you do
51 not need to implement the backward function yourself. Consider using a plain
57 If you'd like to alter the gradients during the backward pass or perform a side
67 :meth:`~Function.backward` methods.
70 `double backward <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutori…
95 - :meth:`~Function.backward` (or :meth:`~Function.vjp`) defines the gradient formula.
112 used to save any tensors to be used in the backward pass. Non-tensors should
[all …]
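extending.rst above walks through writing a custom Function where backward defines the gradient formula and saved tensors carry state from forward. A standard sketch, verified numerically with gradcheck:

```python
import torch
from torch.autograd import Function, gradcheck

class Exp(Function):
    @staticmethod
    def forward(ctx, x):
        result = x.exp()
        ctx.save_for_backward(result)   # save the output: d/dx exp(x) = exp(x)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        (result,) = ctx.saved_tensors
        return grad_output * result

x = torch.randn(4, dtype=torch.double, requires_grad=True)
assert gradcheck(Exp.apply, (x,), eps=1e-6, atol=1e-4)
```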
amp_examples.rst
49 # Scales loss. Calls backward() on scaled loss to create scaled gradients.
50 # Backward passes under autocast are not recommended.
51 # Backward ops run in the same dtype autocast chose for corresponding forward ops.
52 scaler.scale(loss).backward()
67 All gradients produced by ``scaler.scale(loss).backward()`` are scaled. If you wish to modify or i…
68 the parameters' ``.grad`` attributes between ``backward()`` and ``scaler.step(optimizer)``, you sh…
93 scaler.scale(loss).backward()
131 the next backward pass will add scaled grads to unscaled grads (or grads scaled by a different fact…
149 scaler.scale(loss).backward()
187 loss.backward()
[all …]
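The AMP example above scales the loss before backward and unscales gradients before the optimizer step. A compact sketch of that loop; the model, loss, and `loader` are assumed placeholders, and a CUDA device is assumed:

```python
import torch
import torch.nn.functional as F

device = "cuda"                                    # GradScaler targets CUDA here
model = torch.nn.Linear(10, 10).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

for data, target in loader:                        # `loader` is assumed to exist
    optimizer.zero_grad()
    with torch.autocast(device_type=device, dtype=torch.float16):
        loss = F.mse_loss(model(data.to(device)), target.to(device))
    scaler.scale(loss).backward()                  # backward runs on the scaled loss
    scaler.step(optimizer)                         # unscales grads; skips the step on inf/nan
    scaler.update()
```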
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_conv_rewriter.cc
65 // Try to match a backward filter pattern that contains "conv".
69 VLOG(2) << "Trying to match convolution backward filter."; in MatchBackwardFilter()
75 << " is a forward convolution. All grouped backward filters are " in MatchBackwardFilter()
77 "backward filter " in MatchBackwardFilter()
79 "point. No need to fold to backward filter."; in MatchBackwardFilter()
87 // Backward filter convolution is implemented in XLA as the forward in MatchBackwardFilter()
135 // backward filter. A backward filter: in MatchBackwardFilter()
143 // backward filter conv, we have to rely on heuristics. Empirically forward in MatchBackwardFilter()
144 // convolutions have very small kernel dimensions, while in the backward pass in MatchBackwardFilter()
146 // output dimensions, return foward conv; otherwise proceed with backward in MatchBackwardFilter()
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/pipelining/
schedules.py
54 BACKWARD = 2 variable in _ComputationType
66 _ComputationType.BACKWARD: "B",
82 return _ComputationType.BACKWARD
102 BACKWARD = _ComputationType.BACKWARD variable
113 B = BACKWARD
204 1. Forward action for a microbatch must be before the Backward action for that microbatch
265 expected_computation = _ComputationType.BACKWARD
269 elif prev_computation == _ComputationType.BACKWARD:
272 f"[{mb_index=}] already finished backward computation"
277 expected_computation = _ComputationType.BACKWARD
[all …]
