/aosp_15_r20/external/mesa3d/src/freedreno/decode/

rnnutil.c:
    21  finddom(struct rnn *rnn, uint32_t regbase)
    23     if (rnndec_checkaddr(rnn->vc, rnn->dom[0], regbase, 0))
    24        return rnn->dom[0];
    25     return rnn->dom[1];
    29  _rnn_init(struct rnn *rnn, int nocolor)
    33     rnn->db = rnn_newdb();
    34     rnn->vc_nocolor = rnndec_newcontext(rnn->db);
    35     rnn->vc_nocolor->colors = &envy_null_colors;
    37     rnn->vc = rnn->vc_nocolor;
    39     rnn->vc = rnndec_newcontext(rnn->db);
    [all …]
script.c:
    39  /* An rnn based decoder, which can either be decoding current register
    44     struct rnn base;
    52  to_rnndec(struct rnn *rnn)
    54     return (struct rnndec *)rnn;
    58  rnn_val(struct rnn *rnn, uint32_t regbase)
    60     struct rnndec *rnndec = to_rnndec(rnn);
   142  /* Expose rnn decode to script environment as "rnn" library:
   146     struct rnn *rnn;
   152  push_rnndoff(lua_State *L, struct rnn *rnn, struct rnndelem *elem,
   156     rnndoff->rnn = rnn;
    [all …]
rnnutil.h:
    18  #include "rnn.h"
    21  struct rnn {
    33  void _rnn_init(struct rnn *rnn, int nocolor);
    34  struct rnn *rnn_new(int nocolor);
    35  void rnn_load_file(struct rnn *rnn, char *file, char *domain);
    36  void rnn_load(struct rnn *rnn, const char *gpuname);
    37  uint32_t rnn_regbase(struct rnn *rnn, const char *name);
    38  const char *rnn_regname(struct rnn *rnn, uint32_t regbase, int color);
    39  struct rnndecaddrinfo *rnn_reginfo(struct rnn *rnn, uint32_t regbase);
    41  const char *rnn_enumname(struct rnn *rnn, const char *name, uint32_t val);
    [all …]
rddecompiler.c:
   150  static struct rnn *rnn;
   159     rnn = rnn_new(true);
   160     rnn_load(rnn, gpuname);
   166     return rnn_enumname(rnn, "adreno_pm4_type3_packets", opc);
   225     struct rnndecaddrinfo *info = rnn_reginfo(rnn, regbase);
   236     char *decoded = rnndec_decodeval(rnn->vc, info->typeinfo, dword);
   249     * TODO: Make RNN optionally return compilable reg name.
   253     printlvl(level, "pkt4(cs, REG_%s_%s, (%u), %u);\n", rnn->variant,
   257        rnn->variant, reg_name, field_name, reg_idx, cnt, dword);
   279     struct rnndecaddrinfo *info = rnn_reginfo(rnn, regbase);
    [all …]
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/

bidirectional_sequence_rnn_test.cc:
    15  // Unit test for TFLite Bidirectional RNN op.
   863     BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
   870     rnn.SetFwWeights(weights);
   871     rnn.SetBwWeights(weights);
   872     rnn.SetFwBias(biases);
   873     rnn.SetBwBias(biases);
   874     rnn.SetFwRecurrentWeights(recurrent_weights);
   875     rnn.SetBwRecurrentWeights(recurrent_weights);
   877     const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
   880     rnn.SetInput(0, batch_start, batch_end);
    [all …]
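For orientation, the fused op under test corresponds roughly to Keras's Bidirectional wrapper around a simple RNN. A minimal sketch; the layer sizes and merge mode are illustrative assumptions, not the test's exact configuration:

    import tensorflow as tf

    # One forward and one backward SimpleRNN whose per-timestep outputs
    # are concatenated, mirroring a bidirectional-sequence RNN.
    bidi = tf.keras.layers.Bidirectional(
        tf.keras.layers.SimpleRNN(16, return_sequences=True),
        merge_mode="concat")
    out = bidi(tf.random.normal([2, 16, 8]))   # -> shape (2, 16, 32)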
unidirectional_sequence_rnn_test.cc:
    15  // Unit test for TFLite Sequential RNN op.
   280     UnidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
   282     rnn.SetWeights(rnn_weights);
   283     rnn.SetBias(rnn_bias);
   284     rnn.SetRecurrentWeights(rnn_recurrent_weights);
   286     const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
   289     rnn.SetInput(0, batch_start, batch_end);
   290     rnn.SetInput(input_sequence_size, batch_start, batch_end);
   292     ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
   295     float* golden_end = golden_start + rnn.num_units() * rnn.sequence_len();
    [all …]
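The unidirectional sequence op maps onto a plain Keras SimpleRNN applied across a whole sequence at once; a small sketch under assumed shapes:

    import tensorflow as tf

    # One RNN layer run over all timesteps (batch=2, timesteps=16,
    # features=8 and units=16 are illustrative, not the test's values).
    rnn = tf.keras.layers.SimpleRNN(16, return_sequences=True)
    out = rnn(tf.random.normal([2, 16, 8]))    # -> shape (2, 16, 16)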
basic_rnn_test.cc:
    15  // Unit test for TFLite RNN op.
   260     RNNOpModel rnn(2, 16, 8);
   261     rnn.SetWeights(rnn_weights);
   262     rnn.SetBias(rnn_bias);
   263     rnn.SetRecurrentWeights(rnn_recurrent_weights);
   266        (rnn.input_size() * rnn.num_batches());
   269     float* batch_start = rnn_input + i * rnn.input_size();
   270     float* batch_end = batch_start + rnn.input_size();
   271     rnn.SetInput(0, batch_start, batch_end);
   272     rnn.SetInput(rnn.input_size(), batch_start, batch_end);
    [all …]
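The basic (non-sequence) op advances the recurrence one step per invocation, with the caller owning the hidden state, much like a Keras cell object. A sketch with assumed sizes:

    import tensorflow as tf

    # A single recurrence step; the hidden state is threaded explicitly.
    cell = tf.keras.layers.SimpleRNNCell(16)
    x = tf.random.normal([2, 8])        # one timestep of input
    state = [tf.zeros([2, 16])]         # initial hidden state
    output, state = cell(x, state)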
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cudnn/

RNN.cpp:
     9  #include <ATen/native/RNN.h>
   183     oss << "unrecognized cuDNN RNN mode " << fn_mode;
   271     // descriptor to make the RNN descriptor initialization go through.
   295     // NB: cuDNN RNN API does not support 2d descriptors, so we
   367     // Suppose you want to run RNN on the following variable
   492     RNNDescriptorParams rnn;
   520     rnn_desc = fn.rnn.descriptor(handle, fn.dropout.descriptor(handle));
   586     AT_ERROR("unknown cuDNN RNN mode ", mode);
   679     Returns weight and bias tensors for each layer of the RNN. These tensors
   688     fn: The RNN function object holding the RNN state
    [all …]
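This backend is never called directly; it is reached through the torch.nn recurrent modules whenever their inputs live on a CUDA device. A minimal sketch of a call that would route through these kernels, assuming a CUDA build with cuDNN enabled:

    import torch

    # An LSTM on CUDA tensors dispatches to the cuDNN-backed RNN kernels.
    rnn = torch.nn.LSTM(input_size=8, hidden_size=16, num_layers=2).cuda()
    x = torch.randn(5, 3, 8, device="cuda")   # (seq_len, batch, feature)
    out, (h_n, c_n) = rnn(x)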
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/

RNN.cpp:
     1  #include <ATen/native/RNN.h>
   172  std::vector<int64_t> _output_size(const RNNParams& rnn) {
   173     auto output_channels = is_single_direction ? rnn.hidden_size
   174        : rnn.hidden_size * rnn.num_directions;
   175     return {rnn.seq_length, rnn.mini_batch, output_channels};
   232     RNNParams rnn(
   242     auto output_size = _output_size</*is_single_direction*/ true>(rnn);
   248     auto weight_ih = _shuffle_weight(w0, rnn.mode);
   249     auto weight_hh = _shuffle_weight(w1, rnn.mode);
   253        ? _shuffle_bias(w2, w3, rnn.mode)
    [all …]
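_output_size() pins the layer's output shape to {seq_length, mini_batch, channels}, where the channel count doubles when both directions are fused into one layer; in sketch form:

    def output_size(seq_length, mini_batch, hidden_size, num_directions,
                    single_direction=True):
        # Mirrors _output_size(): hidden_size channels per direction.
        channels = hidden_size if single_direction else hidden_size * num_directions
        return [seq_length, mini_batch, channels]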
/aosp_15_r20/external/rnnoise/src/

rnn.c:
    37  #include "rnn.h"
   160  void compute_rnn(RNNState *rnn, float *gains, float *vad, const float *input) {
   165     compute_dense(rnn->model->input_dense, dense_out, input);
   166     compute_gru(rnn->model->vad_gru, rnn->vad_gru_state, dense_out);
   167     compute_dense(rnn->model->vad_output, vad, rnn->vad_gru_state);
   168     for (i=0;i<rnn->model->input_dense_size;i++) noise_input[i] = dense_out[i];
   169     …for (i=0;i<rnn->model->vad_gru_size;i++) noise_input[i+rnn->model->input_dense_size] = rnn->vad_gr…
   170     …for (i=0;i<INPUT_SIZE;i++) noise_input[i+rnn->model->input_dense_size+rnn->model->vad_gru_size] = …
   171     compute_gru(rnn->model->noise_gru, rnn->noise_gru_state, noise_input);
   173     for (i=0;i<rnn->model->vad_gru_size;i++) denoise_input[i] = rnn->vad_gru_state[i];
    [all …]
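compute_rnn() chains a dense layer into a VAD GRU, then feeds the concatenation of the dense activations, the VAD GRU state, and the raw features into a noise GRU. A NumPy sketch of that data flow; the weight layout and GRU gate formulation here are assumptions, not rnnoise's actual layer structs:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def gru_step(w, h, x):
        # Standard GRU update (assumed gate formulation).
        z = sigmoid(w["Wz"] @ x + w["Uz"] @ h + w["bz"])   # update gate
        r = sigmoid(w["Wr"] @ x + w["Ur"] @ h + w["br"])   # reset gate
        h_bar = np.tanh(w["Wh"] @ x + w["Uh"] @ (r * h) + w["bh"])
        return z * h + (1.0 - z) * h_bar

    def compute_rnn_sketch(model, state, features):
        dense_out = np.tanh(model["dense_W"] @ features + model["dense_b"])
        state["vad"] = gru_step(model["vad_gru"], state["vad"], dense_out)
        vad = sigmoid(model["vad_W"] @ state["vad"] + model["vad_b"])
        noise_in = np.concatenate([dense_out, state["vad"], features])
        state["noise"] = gru_step(model["noise_gru"], state["noise"], noise_in)
        # (the denoise GRU and gain outputs continue the same pattern)
        return vad, state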
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/

recurrent.py:
    51     'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
    57     """Wrapper allowing a stack of RNN cells to behave as a single cell.
    62        cells: List of RNN cell instances.
    75        lstm_layer = tf.keras.layers.RNN(stacked_lstm)
    93     # existing behavior. This is only useful when use RNN(return_state=True)
    99        'natural order of states if you rely on the RNN states, '
   100        'eg RNN(return_state=True).')
   197  @keras_export('keras.layers.RNN')
   198  class RNN(Layer):
   201     See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    [all …]
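The stacked-cell pattern quoted in the snippet, spelled out as a runnable sketch (cell sizes are arbitrary):

    import tensorflow as tf

    # Two LSTM cells wrapped so they act as a single cell, then driven by
    # the generic RNN layer, as the StackedRNNCells docstring describes.
    cells = [tf.keras.layers.LSTMCell(64) for _ in range(2)]
    stacked_lstm = tf.keras.layers.StackedRNNCells(cells)
    lstm_layer = tf.keras.layers.RNN(stacked_lstm)
    out = lstm_layer(tf.random.normal([8, 10, 32]))   # -> shape (8, 64)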
recurrent_v2.py:
   109     See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
   110     for details about the usage of RNN API.
   118     >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
   119     >>> output = rnn(inputs)
   122     >>> rnn = tf.keras.layers.RNN(
   126     >>> whole_sequence_output, final_state = rnn(inputs)
   221     See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
   222     for details about the usage of RNN API.
   315     Unrolling can speed-up a RNN,
   323     RNN calculation. However, most TensorFlow data is batch-major, so by
    [all …]
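The v2 GRU and LSTM layers expose the same return_sequences / return_state switches the doctest shows; for example:

    import tensorflow as tf

    inputs = tf.random.normal([32, 10, 8])
    gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
    whole_sequence_output, final_state = gru(inputs)
    # whole_sequence_output: (32, 10, 4); final_state: (32, 4)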
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/

rnn.py:
    15  """RNN helpers for TensorFlow models."""
    94     """Infer the dtype of an RNN state.
    98        state: RNN's hidden state. Must be a Tensor or a nested iterable containing
   157     """Calculate one step of a dynamic RNN minibatch.
   240     """Run RNN step. Pass through either no or some past state."""
   342        "keras.layers.RNN(cell))`, which is equivalent to "
   369        inputs: The RNN inputs.
   378        initial_state_fw: (optional) An initial state for the forward RNN. This must
   386        Required if initial_states are not provided or RNN states have a
   401        at the beginning and end of the RNN calculation. However, most TensorFlow
    [all …]
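The deprecation text quoted in the snippet steers users from the legacy dynamic_rnn helper to the Keras layer; the migration looks roughly like this (cell choice illustrative):

    import tensorflow as tf

    cell = tf.keras.layers.SimpleRNNCell(16)
    inputs = tf.random.normal([4, 10, 8])

    # Legacy: outputs, state = tf.compat.v1.nn.dynamic_rnn(cell, inputs, ...)
    # Replacement suggested by the deprecation notice:
    layer = tf.keras.layers.RNN(cell, return_sequences=True, return_state=True)
    outputs, state = layer(inputs)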
/aosp_15_r20/external/pytorch/aten/src/ATen/native/miopen/

RNN_miopen.cpp:
     2  #include <ATen/native/RNN.h>
   111     oss << "unrecognized miopen RNN mode " << fn_mode;
   201     RNNDescriptorParams rnn;
   215     rnn_desc = fn.rnn.descriptor();
   326     AT_ERROR("Unknown miopen RNN mode : ", mode);
   330     …<std::vector<Tensor>, size_t> get_parameters(miopenHandle_t handle, const RNNDescriptorParams& rnn,
   335     int64_t num_linear_layers = _num_linear_layers(rnn.rnn_mode);
   336     int64_t num_layers = rnn.num_directions() * rnn.num_layers;
   340     auto bias_mode = rnn.bias_mode;
   433     std::vector<int64_t> _hidden_size(const RNNDescriptorParams& rnn, const TensorDescriptorListParams&…
    [all …]
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/modules/

rnn.h:
     6  #include <torch/nn/options/rnn.h>
     8  #include <torch/nn/utils/rnn.h>
    23  /// Base class for all RNN implementations (intended for code sharing).
    29  /// Initializes the parameters of the RNN module.
    41  /// Pretty prints the RNN module into the given `stream`.
    50  /// in cuDNN implementations of respective RNN `forward()` methods. It is
    56  /// The RNN's options.
    91  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    93  /// A multi-layer Elman RNN module with Tanh or ReLU activation.
    94  /// See https://pytorch.org/docs/main/generated/torch.nn.RNN.html to learn
    [all …]
/aosp_15_r20/packages/modules/NeuralNetworks/common/cpu_operations/

RNNTest.cpp:
    23  #include "RNN.h"
   201     ASSERT_EQ(execution.setInput(RNN::k##X##Tensor, X##_.data(), sizeof(float) * X##_.size()), \
   209     ASSERT_EQ(execution.setOutput(RNN::k##X##Tensor, X##_.data(), sizeof(float) * X##_.size()), \
   216     ASSERT_EQ(execution.setInput(RNN::kActivationParam, &activation_, sizeof(activation_)),
   240     BasicRNNOpModel rnn(2, 16, 8);
   241     rnn.SetWeights(
   262     rnn.SetBias({0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
   266     rnn.SetRecurrentWeights(
   282     rnn.ResetHiddenState();
   284        sizeof(rnn_input) / sizeof(float) / (rnn.input_size() * rnn.num_batches());
    [all …]
RNN.cpp:
    19  #include "RNN.h"
    30  RNN::RNN(const Operation& operation, RunTimeOperandInfo* operands) {
    31     NNTRACE_TRANS("RNN::RNN");
    45  bool RNN::Prepare(const Operation& operation, RunTimeOperandInfo* operands, Shape* hiddenStateShape,
    47     NNTRACE_TRANS("RNN::Prepare");
    81  bool RNN::Eval() {
   116  bool RNN::RNNStep(const T* inputData, const Shape& inputShape, const T* hiddenStateInputData,
   120     NNTRACE_COMP("RNN::Eval");
   136  bool RNN::RNNStep(const T* inputData, const Shape& inputShape, const T* auxInputData,
   143     NNTRACE_COMP("RNN::Eval");
    [all …]
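RNNStep() evaluates one step of the basic recurrence h_t = act(W·x_t + U·h_{t-1} + b), with the fused activation taken from kActivationParam. A NumPy sketch; ReLU is an assumed activation choice:

    import numpy as np

    def rnn_step(x, h_prev, W, U, b):
        # One step of the basic RNN recurrence; NNAPI applies the fused
        # activation passed via kActivationParam (ReLU assumed here).
        return np.maximum(0.0, W @ x + U @ h_prev + b)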
/aosp_15_r20/external/pytorch/torch/nn/modules/

rnn.py:
    14  from torch.nn.utils.rnn import PackedSequence
    21     "RNN",
    49  r"""Base class for RNN modules (RNN, LSTM, GRU).
    51     … Implements aspects of RNNs shared by the RNN, LSTM, and GRU classes, such as module initialization
   154     raise ValueError("Unrecognized RNN mode: " + mode)
   260     import torch.backends.cudnn.rnn as rnn
   273     rnn.get_cudnn_mode(self.mode),
   467  class RNN(RNNBase):
   470     Apply a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}`
   513     would mean stacking two RNNs together to form a `stacked RNN`,
    [all …]
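Typical use of the Elman RNN module defined here; sizes are illustrative and follow the module's documented (seq_len, batch, feature) layout:

    import torch

    rnn = torch.nn.RNN(input_size=10, hidden_size=20, num_layers=2,
                       nonlinearity="tanh")
    x = torch.randn(5, 3, 10)     # (seq_len, batch, input_size)
    h0 = torch.zeros(2, 3, 20)    # (num_layers, batch, hidden_size)
    output, h_n = rnn(x, h0)      # output: (5, 3, 20)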
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/

rnn_cell_test.py:
    15  """Tests for RNN cells."""
    41  from tensorflow.python.ops import rnn
    57     """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
   107     """RNN Cell generating (output, new_state) = (input + 1, state + 1).
   166     evaluate output of rnn cell and state or either of them separately. It
   206     rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
   217     outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
   250     outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
   252     dropped_outputs, _ = rnn.static_rnn(
   283     dynamic_outputs, dynamic_state = rnn.static_rnn(
    [all …]
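The tests' "plus one" cell is just an object honoring the cell contract, call(inputs, states) -> (output, new_states). A minimal Keras-compatible sketch of such a cell (hypothetical, not the test's actual class):

    import tensorflow as tf

    class Plus1Cell(tf.keras.layers.Layer):
        # Minimal cell contract: state_size, output_size, call().
        state_size = 5
        output_size = 5

        def call(self, inputs, states):
            return inputs + 1, [states[0] + 1]

    layer = tf.keras.layers.RNN(Plus1Cell())
    out = layer(tf.ones([2, 3, 5]))   # each step adds 1 to input and state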
rnn_test.py:
    15  """Tests for rnn module."""
    35  from tensorflow.python.ops import rnn
    48     """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
    63     """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
    81     """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
   100     """RNN Cell storing its state as a TensorArray."""
   134     rnn.dynamic_rnn(
   156     "RNN cell only supports floating"):
   158     rnn.dynamic_rnn(cell, inputs, dtype=dtypes.int32)
   173     outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
    [all …]
/aosp_15_r20/external/pytorch/test/cpp/api/

rnn.cpp:
    18     auto rnn = model->add(rnn_model, "rnn");
    31     x = std::get<0>(rnn->forward(x))[T - 1];
   276     ASSERT_TRUE(test_RNN_xor<RNN>([](int s) {
   277        return RNN(RNNOptions(s, s).nonlinearity(torch::kReLU).num_layers(2));
   282     ASSERT_TRUE(test_RNN_xor<RNN>([](int s) {
   283        return RNN(RNNOptions(s, s).nonlinearity(torch::kTanh).num_layers(2));
   364     ASSERT_TRUE(test_RNN_xor<RNN>(
   366        return RNN(RNNOptions(s, s).nonlinearity(torch::kReLU).num_layers(2));
   371     ASSERT_TRUE(test_RNN_xor<RNN>(
   373        return RNN(RNNOptions(s, s).nonlinearity(torch::kTanh).num_layers(2));
    [all …]
/aosp_15_r20/external/pytorch/functorch/op_analysis/

annotated_ops:
   343  lstm, rnn
   344  gru, rnn
   345  rnn_tanh, rnn
   346  rnn_relu, rnn
   347  lstm_cell, rnn
   348  gru_cell, rnn
   349  rnn_tanh_cell, rnn
   350  rnn_relu_cell, rnn
   351  quantized_lstm_cell, rnn
   352  quantized_gru_cell, rnn
    [all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/layers/legacy_rnn/

rnn_cell_impl.py:
    16  """Module implementing RNN Cells.
    18     This module provides a number of basic commonly used RNN cells, such as LSTM
    22     calling the `rnn` ops several times.
   186     """Abstract object representing an RNN cell.
   197     An RNN cell, in the most abstract setting, is anything that has
   210     # Attribute that indicates whether the cell is a TF RNN cell, due to the slight
   211     # difference between TF and Keras RNN cell. Notably the state is not wrapped
   217     """Run this RNN cell on inputs, starting from the given state.
   344     # TODO(b/134773139): Remove when contrib RNN cells implement `get_config`
   351     # function for legacy RNN cells, so do not generate an input signature.
    [all …]
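"Calling the `rnn` ops several times" means manually unrolling a cell over time with the legacy contract (output, new_state) = cell(input, state). A sketch; any tf.compat.v1.nn.rnn_cell cell is assumed to fit:

    import tensorflow as tf

    def unroll(cell, inputs, batch_size):
        # inputs: (batch, time, features); legacy cells expose zero_state().
        state = cell.zero_state(batch_size, tf.float32)
        outputs = []
        for t in range(inputs.shape[1]):
            output, state = cell(inputs[:, t, :], state)
            outputs.append(output)
        return tf.stack(outputs, axis=1), state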
/aosp_15_r20/external/tensorflow/tensorflow/lite/g3doc/examples/convert/

rnn.md:
     1  # TensorFlow RNN conversion to TensorFlow Lite
     5  TensorFlow Lite supports converting TensorFlow RNN models to TensorFlow Lite's
    10  Since there are many variants of RNN APIs in TensorFlow, our approach has been
    13  1. Provide **native support for standard TensorFlow RNN APIs** like Keras LSTM.
    16     **user-defined** **RNN implementations** to plug in and get converted to
    22     RNN interfaces.
   122  ## "Bring your own TensorFlow RNN" to TensorFlow Lite
   124  If a user's RNN interface is different from the standard supported ones, there
   127  **Option 1:** Write adapter code in TensorFlow python to adapt the RNN interface
   128  to the Keras RNN interface. This means a tf.function with
    [all …]
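The "native support" path this doc leads with amounts to building a Keras LSTM model and converting it directly; a minimal sketch (model shape illustrative):

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(28, 28)),
        tf.keras.layers.LSTM(20),
        tf.keras.layers.Dense(10),
    ])
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()   # Keras LSTM fuses to TFLite's LSTM op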
/aosp_15_r20/external/tensorflow/tensorflow/lite/toco/

model_cmdline_flags_test.cc:
    32     "--rnn_states={state_array:rnn/BasicLSTMCellZeroState/zeros,"
    33     "back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
    34     "{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
    35     "back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}",
    41     {{"state_array", "rnn/BasicLSTMCellZeroState/zeros"},
    42     {"back_edge_source_array", "rnn/basic_lstm_cell/Add_1"},
    45     {{"state_array", "rnn/BasicLSTMCellZeroState/zeros_1"},
    46     {"back_edge_source_array", "rnn/basic_lstm_cell/Mul_2"},