
Searched full:dropout (Results 1 – 25 of 474) sorted by relevance


/aosp_15_r20/external/pytorch/torch/csrc/api/src/nn/modules/
transformer.cpp
31 .dropout(options.dropout()))); in reset()
35 dropout = this->register_module("dropout", Dropout(options.dropout())); in reset()
44 dropout1 = this->register_module("dropout1", Dropout(options.dropout())); in reset()
45 dropout2 = this->register_module("dropout2", Dropout(options.dropout())); in reset()
53 // dropout->reset_parameters(); in reset_parameters()
75 src2 = linear2(dropout(F::gelu(linear1(ret)))); in forward()
77 src2 = linear2(dropout(F::relu(linear1(ret)))); in forward()
83 src2 = linear2(dropout(callable_activation(linear1(ret)))); in forward()
110 .dropout(options.dropout()))); in reset()
117 .dropout(options.dropout()))); in reset()
[all …]
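The matches above show where the C++ transformer applies dropout inside the feedforward sublayer: between the two linear projections, after the activation. A minimal Python sketch of the same pattern (the class and parameter names here are illustrative, not the C++ API):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FeedForwardBlock(nn.Module):
    """Transformer feedforward sublayer with dropout, mirroring the
    linear2(dropout(activation(linear1(src)))) pattern in transformer.cpp."""

    def __init__(self, d_model=512, dim_feedforward=2048, p=0.1):
        super().__init__()
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.dropout = nn.Dropout(p)

    def forward(self, src):
        return self.linear2(self.dropout(F.relu(self.linear1(src))))
```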
rnn.cpp
64 0 <= options_base.dropout() && options_base.dropout() <= 1, in reset()
65 "dropout should be a number in range [0, 1] ", in reset()
69 if (options_base.dropout() > 0 && options_base.num_layers() == 1) { in reset()
71 "dropout option adds dropout after all but last ", in reset()
72 "recurrent layer, so non-zero dropout expects ", in reset()
73 "num_layers greater than 1, but got dropout=", in reset()
74 options_base.dropout(), in reset()
374 << ", dropout=" << options_base.dropout() in pretty_print()
424 .dropout(options_.dropout()) in RNNImpl()
458 options_base.dropout(), in forward_helper()
[all …]
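The check above rejects probabilities outside [0, 1] and warns when dropout is requested with a single layer, since inter-layer dropout runs after every recurrent layer except the last. A hedged Python sketch of the same validation (the function name is illustrative):

```python
import numbers
import warnings

def validate_rnn_dropout(dropout, num_layers):
    # Mirrors the rnn.cpp checks: dropout must be a real number in [0, 1].
    if (not isinstance(dropout, numbers.Number)
            or not 0 <= dropout <= 1
            or isinstance(dropout, bool)):
        raise ValueError("dropout should be a number in range [0, 1]")
    # Dropout is applied after all but the last recurrent layer, so it is a
    # no-op (and likely a mistake) when there is only one layer.
    if dropout > 0 and num_layers == 1:
        warnings.warn(
            "dropout option adds dropout after all but last recurrent layer, "
            "so non-zero dropout expects num_layers greater than 1, but got "
            f"dropout={dropout} and num_layers={num_layers}")
```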
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/legacy_rnn/
rnn_cell_wrapper_impl.py
35 """Operator adding dropout to inputs and outputs of the given cell."""
48 """Create a cell with added input, state, and/or output dropout.
51 then the same dropout mask is applied at every step, as described in:
52 [A Theoretically Grounded Application of Dropout in Recurrent
55 Otherwise a different dropout mask is applied at every time step.
65 probability; if it is constant and 1, no input dropout will be added.
67 probability; if it is constant and 1, no output dropout will be added.
69 probability; if it is constant and 1, no state dropout will be added.
70 State dropout is performed on the outgoing states of the cell. **Note**
71 the state components to which dropout is applied when `state_keep_prob`
[all …]
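When `variational_recurrent=True`, this wrapper samples one dropout mask and reuses it at every timestep, per the cited Gal & Ghahramani paper. A minimal sketch of that idea in plain PyTorch (a standalone illustration, not the TF wrapper's API):

```python
import torch

def variational_dropout(x, keep_prob=0.9):
    """Apply the same dropout mask at every timestep of a (T, B, F) sequence,
    as in variational/recurrent dropout. A sketch, not the TF wrapper."""
    if keep_prob >= 1.0:
        return x
    # Sample a single (B, F) mask and broadcast it across the time dimension.
    mask = (torch.rand(x.shape[1:]) < keep_prob).to(x.dtype) / keep_prob
    return x * mask.unsqueeze(0)
```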
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/legacy_tf_layers/
core.py
16 """Contains the core layers: Dense, Dropout.
190 @keras_export(v1=['keras.__internal__.legacy.layers.Dropout'])
191 @tf_export(v1=['layers.Dropout'])
192 class Dropout(keras_layers.Dropout, base.Layer): class
193 """Applies Dropout to the input.
195 Dropout consists of randomly setting a fraction `rate` of input units to 0
201 rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
204 binary dropout mask that will be multiplied with the input.
206 `(batch_size, timesteps, features)`, and you want the dropout mask
220 super(Dropout, self).__init__(rate=rate,
[all …]
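The `noise_shape` argument mentioned above lets the binary mask be broadcast, e.g. sharing one mask across all timesteps of a `(batch_size, timesteps, features)` input. A hedged usage sketch with the public Keras layer (shapes are illustrative):

```python
import tensorflow as tf

# Sharing the dropout mask across timesteps: noise_shape pins the mask to
# shape (batch_size, 1, features), which is then broadcast over time.
layer = tf.keras.layers.Dropout(rate=0.2, noise_shape=(None, 1, 128))
x = tf.random.normal((4, 10, 128))
y = layer(x, training=True)  # the same units are dropped at every timestep
```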
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/
transformerlayer.h
7 #include <torch/nn/modules/dropout.h>
34 /// 8).dropout(0.1));
66 /// feedforward dropout layer
67 Dropout dropout = nullptr; variable
77 /// pre feedforward, dropout layer
78 Dropout dropout1 = nullptr;
79 /// post feedforward, dropout layer
80 Dropout dropout2 = nullptr;
109 /// 8).dropout(0.2));
146 /// Dropout, post self attention
[all …]
dropout.h
4 #include <torch/nn/options/dropout.h>
31 "dropout probability has to be between 0 and 1, but got ", in reset()
41 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
43 /// Applies dropout over a 1-D input.
44 /// See https://pytorch.org/docs/main/nn.html#torch.nn.Dropout to learn
52 /// Dropout model(DropoutOptions().p(0.42).inplace(true));
60 /// Pretty prints the `Dropout` module into the given `stream`.
66 /// provides, and examples of how to use `Dropout` with
69 TORCH_MODULE(Dropout);
73 /// Applies dropout over a 2-D input.
[all …]
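The C++ module mirrors Python's `nn.Dropout`; the `DropoutOptions` example above corresponds directly to keyword arguments. A minimal Python counterpart of that snippet:

```python
import torch
import torch.nn as nn

# Python equivalent of `Dropout model(DropoutOptions().p(0.42).inplace(true));`
model = nn.Dropout(p=0.42, inplace=True)
x = torch.randn(8, 16)
out = model(x)  # with inplace=True, x itself is modified and returned
```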
/aosp_15_r20/external/pytorch/torch/nn/modules/
transformer.py
13 from .dropout import Dropout
76 dropout: the dropout value (default=0.1).
106 dropout: float = 0.1,
128 dropout,
150 dropout,
645 dropout: the dropout value (default=0.1).
701 dropout: float = 0.1,
715 dropout=dropout,
722 self.dropout = Dropout(dropout)
728 self.dropout1 = Dropout(dropout)
[all …]
rnn.py
68 "dropout",
80 dropout: float
92 dropout: float = 0.0,
106 self.dropout = float(dropout)
113 not isinstance(dropout, numbers.Number)
114 or not 0 <= dropout <= 1
115 or isinstance(dropout, bool)
118 "dropout should be a number in range [0, 1] "
122 if dropout > 0 and num_layers == 1:
124 "dropout option adds dropout after all but last "
[all …]
dropout.py
8 "Dropout",
26 f"dropout probability has to be between 0 and 1, but got {p}"
35 class Dropout(_DropoutNd): class
61 >>> m = nn.Dropout(p=0.2)
70 return F.dropout(input, self.p, self.training, self.inplace)
88 (as is normally the case in early convolution layers) then i.i.d. dropout
133 (as is normally the case in early convolution layers) then i.i.d. dropout
146 For historical reasons, this class will perform 1D channel-wise dropout
185 (as is normally the case in early convolution layers) then i.i.d. dropout
216 r"""Applies Alpha Dropout over the input.
[all …]
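Plain `Dropout` zeroes individual elements, while `Dropout1d/2d/3d` zero entire channels (useful when adjacent activations are correlated, as in early convolution layers), and `AlphaDropout` preserves the self-normalizing statistics that SELU networks rely on. A short sketch contrasting the variants:

```python
import torch
import torch.nn as nn

x = torch.randn(2, 4, 8, 8)          # (N, C, H, W); modules default to train mode

elem = nn.Dropout(p=0.5)(x)          # i.i.d. per-element mask
chan = nn.Dropout2d(p=0.5)(x)        # zeroes whole (H, W) channels at once
alpha = nn.AlphaDropout(p=0.2)(torch.randn(2, 16))  # for SELU networks
```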
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/
recurrent.py
278 when calling it. This is for use with cells that use dropout.
1098 """Object that hold dropout related fields for RNN Cell.
1103 dropout: a float number within range [0, 1). The ratio that the input
1104 tensor need to dropout.
1106 recurrent state weights need to dropout.
1107 This object will create and cache created dropout masks, and reuse them for
1117 """Create the cache for dropout and recurrent dropout mask.
1139 """Reset the cached dropout masks if any.
1150 """Reset the cached recurrent dropout masks if any.
1163 self.dropout,
[all …]
dense_attention.py
46 dropout: Float between 0 and 1. Fraction of the units to drop for the
65 training mode (adding dropout) or in inference mode (no dropout).
76 def __init__(self, causal=False, dropout=0.0, argument
80 self.dropout = dropout
115 training mode (adding dropout) or in inference mode (no dropout).
135 return nn.dropout(weights, rate=self.dropout)
214 'dropout': self.dropout,
242 dropout: Float between 0 and 1. Fraction of the units to drop for the
263 training mode (adding dropout) or in inference mode (no dropout).
381 dropout: Float between 0 and 1. Fraction of the units to drop for the
[all …]
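Here dropout is applied to the attention weights after the softmax, before they multiply the values. A minimal sketch of that placement in plain PyTorch (an illustration, not the Keras class):

```python
import torch
import torch.nn.functional as F

def attention_with_dropout(q, k, v, p=0.1, training=True):
    # Scores -> softmax -> dropout on the weights -> weighted sum of values.
    scores = q @ k.transpose(-2, -1) / (q.size(-1) ** 0.5)
    weights = F.softmax(scores, dim=-1)
    weights = F.dropout(weights, p=p, training=training)
    return weights @ v
```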
convolutional_recurrent.py
73 when calling it. This is for use with cells that use dropout.
473 dropout: Float between 0 and 1.
484 training mode or in inference mode. Only relevant when `dropout` or
508 dropout=0., argument
536 self.dropout = min(1., max(0., dropout))
591 # dropout matrices for input units
593 # dropout matrices for recurrent units
597 if 0 < self.dropout < 1.:
690 'dropout': self.dropout,
773 dropout: Float between 0 and 1.
[all …]
core.py
136 @keras_export('keras.layers.Dropout')
137 class Dropout(Layer): class
138 """Applies Dropout to the input.
140 The Dropout layer randomly sets input units to 0 with a frequency of `rate`
145 Note that the Dropout layer only applies when `training` is set to True
150 (This is in contrast to setting `trainable=False` for a Dropout layer.
151 `trainable` does not affect the layer's behavior, as Dropout does
155 >>> layer = tf.keras.layers.Dropout(.2, input_shape=(2,))
175 binary dropout mask that will be multiplied with the input.
178 you want the dropout mask to be the same for all timesteps,
[all …]
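As the docstring notes, units that are not dropped are scaled up by `1 / (1 - rate)` during training so the expected sum over all inputs is unchanged ("inverted dropout"), and the layer is an identity at inference. A bare-bones sketch of that scaling (function name is illustrative):

```python
import numpy as np

def inverted_dropout(x, rate=0.2, training=True, rng=None):
    """Zero a fraction `rate` of units and scale the rest by 1/(1-rate),
    so the expected activation is unchanged. Identity at inference."""
    if not training or rate == 0.0:
        return x
    rng = rng or np.random.default_rng()
    mask = rng.random(x.shape) >= rate
    return x * mask / (1.0 - rate)
```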
/aosp_15_r20/external/pytorch/test/nn/
test_dropout.py
74 o_ref = torch.dropout(x_ref, p, train)
82 self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))
83 self.assertRaises(ValueError, lambda: nn.Dropout(1.1))
90 self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))
91 self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))
129 … # In this test, we verify that dropout preserves the layout and data for different memory formats.
130 # We check whether we get the same values for the output of dropout when the probability
131 # of dropout is 0 or very close to 0.
174 self._test_dropout(nn.Dropout, device, input)
176 self._test_dropout_discontiguous(nn.Dropout, device)
[all …]
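These tests assert that probabilities outside [0, 1] raise `ValueError` for both the module and the functional form. A compact pytest-style sketch of equivalent checks (the original uses unittest's `assertRaises`):

```python
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F

def test_invalid_dropout_probability():
    v = torch.ones(1)
    for p in (-0.1, 1.1):
        with pytest.raises(ValueError):
            nn.Dropout(p)       # module form rejects p outside [0, 1]
        with pytest.raises(ValueError):
            F.dropout(v, p)     # functional form does the same
```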
/aosp_15_r20/external/pytorch/aten/src/ATen/cudnn/
Descriptors.h
234 // Initialize a dropout descriptor's RNG state.
236 …void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& … in initialize_rng()
237 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); in initialize_rng()
243 …AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size… in initialize_rng()
246 // Restore a dropout descriptor given a dropout probability and existing RNG state.
247 void set(cudnnHandle_t handle, float dropout, at::Tensor state_) { in set()
248 TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); in set()
253 …AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0… in set()
256 // Restore a dropout descriptor corresponding to no dropout
258 // NB: seed doesn't matter when dropout = 0, because no random number in set_no_dropout()
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/
rnn.h
40 /// If non-zero, adds dropout with the given probability to the output of each
42 TORCH_ARG(double, dropout) = 0.0;
57 /// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
82 /// If non-zero, introduces a `Dropout` layer on the outputs of each
83 /// RNN layer except the last layer, with dropout probability equal to
84 /// `dropout`. Default: 0
85 TORCH_ARG(double, dropout) = 0.0;
115 /// If non-zero, introduces a `Dropout` layer on the outputs of each
116 /// LSTM layer except the last layer, with dropout probability equal to
117 /// `dropout`. Default: 0
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/examples/speech_commands/
models.py
106 placeholder node that can be used to control the dropout amount.
119 TensorFlow node outputting logits results, and optionally a dropout
182 TensorFlow node outputting logits results, and optionally a dropout
239 During training, dropout nodes are introduced after each relu, controlled by a
248 TensorFlow node outputting logits results, and optionally a dropout
275 first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
302 second_dropout = tf.nn.dropout(second_relu, rate=dropout_rate)
363 During training, dropout nodes are introduced after the relu, controlled by a
372 TensorFlow node outputting logits results, and optionally a dropout
401 first_dropout = tf.nn.dropout(first_relu, rate=dropout_rate)
[all …]
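In these models a `tf.nn.dropout` node follows each ReLU, gated by a rate that is only non-zero during training. A minimal eager-mode sketch of the same placement (the helper name and conv parameters are illustrative):

```python
import tensorflow as tf

def conv_relu_dropout(x, filters, dropout_rate, training):
    # Conv -> ReLU -> dropout, mirroring first_relu/first_dropout in models.py.
    y = tf.keras.layers.Conv2D(filters, 3, padding="same")(x)
    y = tf.nn.relu(y)
    if training:
        y = tf.nn.dropout(y, rate=dropout_rate)
    return y
```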
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/
rnn.py
104 dropout=0.0, argument
116 self.dropout = float(dropout)
126 not isinstance(dropout, numbers.Number)
127 or not 0 <= dropout <= 1 # type: ignore[operator]
128 or isinstance(dropout, bool)
131 "dropout should be a number in range [0, 1] "
135 if dropout > 0 and num_layers == 1: # type: ignore[operator]
137 "dropout option adds dropout after all but last "
138 "recurrent layer, so non-zero dropout expects "
139 f"num_layers greater than 1, but got dropout={dropout} and "
[all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantizable/modules/
rnn.py
341 dropout: float = 0.0,
353 self.dropout = float(dropout)
359 not isinstance(dropout, numbers.Number)
360 or not 0 <= dropout <= 1
361 or isinstance(dropout, bool)
364 "dropout should be a number in range [0, 1] "
368 if dropout > 0:
370 "dropout option for quantizable LSTM is ignored. "
376 "dropout option adds dropout after all but last "
377 "recurrent layer, so non-zero dropout expects "
[all …]
/aosp_15_r20/external/pytorch/test/cpp/api/
moduledict.cpp
124 {"dropout", Dropout(0.5).ptr()}, in TEST_F()
128 std::vector<std::string> expected{"linear", "conv", "dropout"}; in TEST_F()
134 ASSERT_TRUE(dict["dropout"]->as<Dropout>()); in TEST_F()
164 {"dropout", Dropout(0.5).ptr()}, in TEST_F()
241 {"test", Dropout(0.5).ptr()}, in TEST_F()
248 ASSERT_TRUE(modules[2]->as<Dropout>()); in TEST_F()
285 {"dropout", Dropout(0.5).ptr()}, in TEST_F()
296 " (dropout): torch::nn::Dropout(p=0.5, inplace=false)\n" in TEST_F()
299 …input_size=4, hidden_size=5, num_layers=1, bias=true, batch_first=false, dropout=0, bidirectional=… in TEST_F()
transformer.cpp
33 double dropout = 0.0; in get_a_test_layer() local
39 .dropout(dropout)); in get_a_test_layer()
770 " (dropout): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
774 " (dropout1): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
775 " (dropout2): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
793 " (dropout): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
797 " (dropout1): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
798 " (dropout2): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
805 " (dropout): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
809 " (dropout1): torch::nn::Dropout(p=0.1, inplace=false)\n" in TEST_F()
[all …]
/aosp_15_r20/external/pytorch/torch/ao/quantization/pt2e/
export_utils.py
40 Switch dropout patterns in the model between train and eval modes.
42 Dropout has different behavior in train vs eval mode. For exported models,
44 the dropout behavior between the two modes, so here we need to rewrite the aten
45 dropout patterns manually to achieve the same effect.
59 return F.dropout(x, p=0.5, training=True, inplace=inplace)
62 return F.dropout(x, p=0.5, training=False, inplace=inplace)
178 This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm.
190 This is equivalent to model.train() but only for certain special ops like dropout, batchnorm.
202 ops only, which are currently dropout and batchnorm.
207 is already specialized at export time. Additionally, other ops beyond dropout and batchnorm
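Because an exported graph has `training` baked in, `model.eval()` no longer flips dropout off; this pass instead rewrites the traced aten dropout pattern from its train form to its eval form. The two reference patterns from the snippet, spelled out:

```python
import torch.nn.functional as F

# Pattern matched in the exported graph (training baked in as True)...
def dropout_train(x, inplace=False):
    return F.dropout(x, p=0.5, training=True, inplace=inplace)

# ...is rewritten to the eval form, which is an identity op.
def dropout_eval(x, inplace=False):
    return F.dropout(x, p=0.5, training=False, inplace=inplace)
```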
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
dropout.h
3 #include <torch/nn/options/dropout.h>
14 inline Tensor dropout(Tensor input, double p, bool training, bool inplace) { in dropout() function
17 "dropout probability has to be between 0 and 1, but got ", in dropout()
22 return torch::dropout(input, p, training); in dropout()
30 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.dropout
39 /// F::dropout(input, F::DropoutFuncOptions().p(0.5));
41 inline Tensor dropout(Tensor input, const DropoutFuncOptions& options = {}) {
42 return detail::dropout(
60 "dropout probability has to be between 0 and 1, but got ", in _dropoutNd_helper()
161 false, "dropout probability has to be between 0 and 1, but got ", p); in alpha_dropout()
[all …]
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
fuse_attention.py
78 return torch.nn.functional.dropout(
100 return torch.nn.functional.dropout(
123 # attn_weight = torch.dropout(attn_weight, dropout_p)
143 attn_weight = torch.dropout(attn_weight, dropout_p, True)
169 attn_weight = torch.dropout(attn_weight, dropout_p, True)
195 # no dropout version of pattern 7
229 attn_weight = torch.dropout(attn_weight, dropout_p, True)
250 # no dropout version of 9
302 return torch.nn.functional.dropout(
323 attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p)
[all …]
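These patterns let the inductor fuse the explicit softmax-plus-dropout attention graph into `scaled_dot_product_attention`, which takes the drop probability directly. A hedged sketch of the fused call (shapes are illustrative):

```python
import torch
import torch.nn.functional as F

q = torch.randn(2, 8, 128, 64)
k = torch.randn(2, 8, 128, 64)
v = torch.randn(2, 8, 128, 64)

# Fused equivalent of softmax(q @ k^T / sqrt(d)) -> dropout -> @ v.
# Callers typically pass dropout_p=0.0 at inference.
out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.1)
```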
/aosp_15_r20/external/pytorch/benchmarks/functional_autograd_benchmark/
torchaudio_models.py
393 dropout: the dropout value (default=0.1).
399 def __init__(self, d_model, dropout=0.1, max_len=5000): argument
401 self.dropout = nn.Dropout(p=dropout)
425 return self.dropout(x)
431 def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5): argument
441 self.pos_encoder = PositionalEncoding(ninp, dropout)
442 encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
568 def __init__(self, dropout=0.0): argument
572 dropout (float): probability of dropping an attention weight.
582 self.dropout = dropout
[all …]
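The positional-encoding module here adds the sinusoidal encodings to the embeddings and then applies dropout, as in the original Transformer. A condensed sketch of that forward pass, using the constants shown in the snippet (`dropout=0.1`, `max_len=5000`):

```python
import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    """Add sinusoidal position encodings, then apply dropout (default p=0.1)."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe)

    def forward(self, x):                  # x: (seq_len, batch, d_model)
        x = x + self.pe[: x.size(0)]
        return self.dropout(x)
```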
