//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DriverTestHelpers.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <array>

using ArmnnDriver   = armnn_driver::ArmnnDriver;
using DriverOptions = armnn_driver::DriverOptions;
using RequestArgument = V1_0::RequestArgument;

#ifdef ARMNN_ANDROID_S
#include <nnapi/Types.h>
#endif

using namespace driverTestHelpers;
using namespace android::hardware;

namespace
{

template<typename T>
RequestArgument CreateRequestArgument(const std::vector<T>& value, unsigned int poolIndex)
{
    V1_0::DataLocation inputInloc = {};
    inputInloc.poolIndex = poolIndex;
    inputInloc.offset = 0;
    inputInloc.length = value.size() * sizeof(T);
    RequestArgument inputRequestArgument = {};
    inputRequestArgument.location = inputInloc;
    inputRequestArgument.dimensions = hidl_vec<uint32_t>{};
    return inputRequestArgument;
}
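// Note: the request argument created above always references the whole memory pool at
// 'poolIndex' (offset 0, length = value.size() * sizeof(T)); the matching pools are added
// to the request later, in the same order, via AddPoolAndSetData/AddPoolAndGetData.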

// Helper function to create an OperandLifeTime::NO_VALUE for testing.
// To be used on optional input operands that have no values - these are valid and should be tested.
V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
{
    // Only create a NO_VALUE for optional operands that have no elements
    if (dimensions.size() == 0 || dimensions[0] == 0)
    {
        return V1_0::OperandLifeTime::NO_VALUE;
    }
    return V1_0::OperandLifeTime::CONSTANT_COPY;
}
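// For example, dimensions {} or {0} describe an omitted optional operand and yield NO_VALUE,
// while dimensions such as {4, 3} yield CONSTANT_COPY so the operand data is copied into the model.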

template<typename HalModel>
void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
{
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}
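// If model preparation fails, preparedModel is null and execution is skipped; the helpers in
// DriverTestHelpers.hpp are expected to report the preparation error. A specialisation for the
// 1.2 HAL model (which needs PrepareModel_1_2) follows below.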

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

template<>
void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                           armnn_driver::ArmnnDriver& driver,
                                                           const V1_0::Request& request)
{
    android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
    if (preparedModel.get() != nullptr)
    {
        Execute(preparedModel, request);
    }
}

#endif

} // anonymous namespace

// Add our own tests here since we fail the unidirectional sequence lstm
// tests which Google supplies (because of non-const weights)
template <typename HalPolicy>
void UnidirectionalSequenceLstmTestImpl(const hidl_vec<uint32_t>& inputDimensions,
                                        const std::vector<float>& inputValue,
                                        const hidl_vec<uint32_t>& inputToInputWeightsDimensions,
                                        const std::vector<float>& inputToInputWeightsValue,
                                        const hidl_vec<uint32_t>& inputToForgetWeightsDimensions,
                                        const std::vector<float>& inputToForgetWeightsValue,
                                        const hidl_vec<uint32_t>& inputToCellWeightsDimensions,
                                        const std::vector<float>& inputToCellWeightsValue,
                                        const hidl_vec<uint32_t>& inputToOutputWeightsDimensions,
                                        const std::vector<float>& inputToOutputWeightsValue,
                                        const hidl_vec<uint32_t>& recurrentToInputWeightsDimensions,
                                        const std::vector<float>& recurrentToInputWeightsValue,
                                        const hidl_vec<uint32_t>& recurrentToForgetWeightsDimensions,
                                        const std::vector<float>& recurrentToForgetWeightsValue,
                                        const hidl_vec<uint32_t>& recurrentToCellWeightsDimensions,
                                        const std::vector<float>& recurrentToCellWeightsValue,
                                        const hidl_vec<uint32_t>& recurrentToOutputWeightsDimensions,
                                        const std::vector<float>& recurrentToOutputWeightsValue,
                                        const hidl_vec<uint32_t>& cellToInputWeightsDimensions,
                                        const std::vector<float>& cellToInputWeightsValue,
                                        const hidl_vec<uint32_t>& cellToForgetWeightsDimensions,
                                        const std::vector<float>& cellToForgetWeightsValue,
                                        const hidl_vec<uint32_t>& cellToOutputWeightsDimensions,
                                        const std::vector<float>& cellToOutputWeightsValue,
                                        const hidl_vec<uint32_t>& inputGateBiasDimensions,
                                        const std::vector<float>& inputGateBiasValue,
                                        const hidl_vec<uint32_t>& forgetGateBiasDimensions,
                                        const std::vector<float>& forgetGateBiasValue,
                                        const hidl_vec<uint32_t>& cellBiasDimensions,
                                        const std::vector<float>& cellBiasValue,
                                        const hidl_vec<uint32_t>& outputGateBiasDimensions,
                                        const std::vector<float>& outputGateBiasValue,
                                        const hidl_vec<uint32_t>& projectionWeightsDimensions,
                                        const std::vector<float>& projectionWeightsValue,
                                        const hidl_vec<uint32_t>& projectionBiasDimensions,
                                        const std::vector<float>& projectionBiasValue,
                                        const hidl_vec<uint32_t>& outputStateInDimensions,
                                        const std::vector<float>& outputStateInValue,
                                        const hidl_vec<uint32_t>& cellStateInDimensions,
                                        const std::vector<float>& cellStateInValue,
                                        const hidl_vec<uint32_t>& activationFunctionDimensions,
                                        const std::vector<int32_t>& activationFunctionValue,
                                        const hidl_vec<uint32_t>& cellClippingThresholdDimensions,
                                        const std::vector<float>& cellClippingThresholdValue,
                                        const hidl_vec<uint32_t>& projectionClippingThresholdDimensions,
                                        const std::vector<float>& projectionClippingThresholdValue,
                                        const bool& timeMajorValue,
                                        const hidl_vec<uint32_t>& inputLayerNormWeightsDimensions,
                                        const std::vector<float>& inputLayerNormWeightsValue,
                                        const hidl_vec<uint32_t>& forgetLayerNormWeightsDimensions,
                                        const std::vector<float>& forgetLayerNormWeightsValue,
                                        const hidl_vec<uint32_t>& cellLayerNormWeightsDimensions,
                                        const std::vector<float>& cellLayerNormWeightsValue,
                                        const hidl_vec<uint32_t>& outputLayerNormWeightsDimensions,
                                        const std::vector<float>& outputLayerNormWeightsValue,
                                        const hidl_vec<uint32_t>& outputDimensions,
                                        const std::vector<float>& outputValue,
                                        const hidl_vec<uint32_t>&, // outputStateOutDimensions,
                                        const std::vector<float>&, // outputStateOutValue,
                                        const hidl_vec<uint32_t>&, // cellStateOutDimensions,
                                        const std::vector<float>&, // cellStateOutValue,
                                        armnn::Compute compute,
                                        float epsilonValue = 0)
{
    auto driver = std::make_unique<ArmnnDriver>(DriverOptions(compute));
    using Model = typename HalPolicy::Model;
    Model model = {};

    // Inputs:
    // 00: The input: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [max_time, batch_size, input_size]
    //     if time-major, or [batch_size, max_time, input_size] if batch-major, where “max_time” is the sequence
    //     length, “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    AddInputOperand<HalPolicy>(model, inputDimensions);

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    AddTensorOperand<HalPolicy>(model,
                                inputToInputWeightsDimensions,
                                inputToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputToInputWeightsDimensions));
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToForgetWeightsDimensions, inputToForgetWeightsValue);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    // [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToCellWeightsDimensions, inputToCellWeightsValue);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    AddTensorOperand<HalPolicy>(model, inputToOutputWeightsDimensions, inputToOutputWeightsValue);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    AddTensorOperand<HalPolicy>(model,
                                recurrentToInputWeightsDimensions,
                                recurrentToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToCellWeightsDimensions, recurrentToCellWeightsValue);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    AddTensorOperand<HalPolicy>(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToInputWeightsDimensions,
                                cellToInputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToInputWeightsDimensions));
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToForgetWeightsDimensions,
                                cellToForgetWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToForgetWeightsDimensions));
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                cellToOutputWeightsDimensions,
                                cellToOutputWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellToOutputWeightsDimensions));
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model,
                                inputGateBiasDimensions,
                                inputGateBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputGateBiasDimensions));
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, forgetGateBiasDimensions, forgetGateBiasValue);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, cellBiasDimensions, cellBiasValue);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    AddTensorOperand<HalPolicy>(model, outputGateBiasDimensions, outputGateBiasValue);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    AddTensorOperand<HalPolicy>(model,
                                projectionWeightsDimensions,
                                projectionWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionWeightsDimensions));
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    AddTensorOperand<HalPolicy>(model,
                                projectionBiasDimensions,
                                projectionBiasValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(projectionBiasDimensions));

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    AddInputOperand<HalPolicy>(model, outputStateInDimensions);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    AddInputOperand<HalPolicy>(model, cellStateInDimensions);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    AddTensorOperand<HalPolicy>(model,
                                activationFunctionDimensions,
                                activationFunctionValue,
                                HalPolicy::OperandType::INT32);
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                cellClippingThresholdDimensions,
                                cellClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    AddTensorOperand<HalPolicy>(model,
                                projectionClippingThresholdDimensions,
                                projectionClippingThresholdValue,
                                HalPolicy::OperandType::FLOAT32);

    // 23: Time-major if true, batch-major if false.
    AddBoolOperand<HalPolicy>(model, timeMajorValue);

    // Normalization:
    // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    AddTensorOperand<HalPolicy>(model,
                                inputLayerNormWeightsDimensions,
                                inputLayerNormWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(inputLayerNormWeightsDimensions));
    // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    AddTensorOperand<HalPolicy>(model,
                                forgetLayerNormWeightsDimensions,
                                forgetLayerNormWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(forgetLayerNormWeightsDimensions));
    // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    AddTensorOperand<HalPolicy>(model,
                                cellLayerNormWeightsDimensions,
                                cellLayerNormWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(cellLayerNormWeightsDimensions));
    // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    AddTensorOperand<HalPolicy>(model,
                                outputLayerNormWeightsDimensions,
                                outputLayerNormWeightsValue,
                                HalPolicy::OperandType::TENSOR_FLOAT32,
                                CreateNoValueLifeTime(outputLayerNormWeightsDimensions));

    // Outputs:
    // 00: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
    //     [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
    AddOutputOperand<HalPolicy>(model, outputDimensions);
    // 01: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
    // [batch_size, output_size]. This output is optional and can be omitted. If this output
    // is present then output #2 must be present as well.
    //AddOutputOperand<HalPolicy>(model, hiddenStateOutDimensions);
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
    // [batch_size, num_units]. This output is optional and can be omitted.
    //AddOutputOperand<HalPolicy>(model, cellStateOutDimensions);

    // make the lstm operation
    model.operations.resize(1);
    model.operations[0].type = HalPolicy::OperationType::UNIDIRECTIONAL_SEQUENCE_LSTM;

    model.operations[0].inputs = hidl_vec<uint32_t> {0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
                                                     14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27};
    model.operations[0].outputs = hidl_vec<uint32_t> {28};
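    // Operand indices 0..27 refer to the operands added above, in the order they were added;
    // index 28 is the output operand added by AddOutputOperand.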

    // define the input values
    hidl_vec<RequestArgument> inputArguments;
    inputArguments.resize(3);

    inputArguments[0] = CreateRequestArgument<float>(inputValue, 0);
    inputArguments[1] = CreateRequestArgument<float>(outputStateInValue, 1);
    inputArguments[2] = CreateRequestArgument<float>(cellStateInValue, 2);

    // define the expected output values
    hidl_vec<RequestArgument> outputArguments;
    outputArguments.resize(1);

    outputArguments[0] = CreateRequestArgument<float>(outputValue, 3);

    V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    AddPoolAndSetData(inputValue.size(), request, inputValue.data());
    AddPoolAndSetData(outputStateInValue.size(), request, outputStateInValue.data());
    AddPoolAndSetData(cellStateInValue.size(), request, cellStateInValue.data());
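    // Pools 0..2 (added above, in order) back the three input arguments created earlier with
    // matching pool indices; pool 3, added below, backs the output argument.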

    // add memory for the outputs
    android::sp<IMemory> outputMemory = AddPoolAndGetData<float>(outputValue.size(), request);
    float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));

    // make the prepared model and run the execution
    ExecuteModel(model, *driver, request);

    // check the results
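    // A non-zero epsilonValue sets the relative tolerance used by doctest::Approx;
    // with the default of 0, the default Approx tolerance applies.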
    if (epsilonValue != 0)
    {
        for (size_t i = 0; i < outputValue.size(); ++i)
        {
            DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]).epsilon(epsilonValue),
                                  "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
        }
    }
    else
    {
        for (size_t i = 0; i < outputValue.size(); ++i)
        {
            DOCTEST_CHECK_MESSAGE(outputValue[i] == doctest::Approx(outputData[i]),
                                  "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
        }
    }
}

template<typename HalPolicy>
void UnidirectionalSequenceLstmLayerFloat32TestImpl(armnn::Compute compute)
{
    uint32_t batchSize  = 3;
    uint32_t timeSize   = 2;
    uint32_t inputSize  = 3;
    uint32_t outputSize = 4;
    uint32_t numUnits   = outputSize;

    // Inputs:
    // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
    //     [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
    std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
                                  3., 2., 1., 2., 3., 4.,
                                  5., 4., 3., 2., 1., 2.};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{-0.49536117f, -0.0556083915f, -0.102400711f,
                                                -0.117484632f, 0.3298470976f, -0.1179017122f,
                                                0.214305695f, 0.42135173085f, 0.003878414626f,
                                                -0.348303917f, -0.1881275477f, 0.0343011027f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
                                                 -0.3810434485f, 0.268383264f, -0.009807467424f,
                                                 -0.3522925403f, -0.24275735512f, -0.28344226125f,
                                                 0.13512269116f, -0.4932442977f, -0.10039821991f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
                                               0.386399507f, -0.259465157985f, -0.16545993089f,
                                               -0.4230232555f, 0.341664791103f, -0.18127849691f,
                                               -0.2277662414f, -0.55275535589f, 0.34184026718f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
                                                 0.53969591851f, 0.23393625035f, -0.27140527306f,
                                                 0.50009280443f, 0.07511717046f, 0.3998299249f,
                                                 -0.51717478049f, 0.1889653282f, -0.367323637f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{-0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
                                                    -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
                                                    0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
                                                    0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
                                                     -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
                                                     -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
                                                     -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
                                                   -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
                                                   0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
                                                   0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
                                                     -0.17879831790f, -0.15185534954f, -0.16918526583f,
                                                     -0.10087361183f, -0.5436913968f, 0.016758225858f,
                                                     0.30454617738f, -0.41493862867f, -0.005565764375f,
                                                     -0.12584099173f, -0.12319286912f, 0.2407919466f,
                                                     -0.08879069983f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
    std::vector<float> cellToForgetWeightsValue;
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
    std::vector<float> cellToOutputWeightsValue;
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
    std::vector<float> inputGateBiasValue(numUnits, 0.0f);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
    std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellBiasDimensions{numUnits};
    std::vector<float> cellBiasValue(numUnits, 0.0f);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
    std::vector<float> outputGateBiasValue(numUnits, 0.0f);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    hidl_vec<uint32_t> projectionWeightsDimensions{0};
    std::vector<float> projectionWeightsValue;
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    hidl_vec<uint32_t> projectionBiasDimensions{0};
    std::vector<float> projectionBiasValue;

    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
    std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
    std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);

    // Constant scalar values (the VTS test adds these as tensors of dim {})
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    hidl_vec<uint32_t>   activationFunctionDimensions{};
    std::vector<int32_t> activationFunctionValue{4};
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
    std::vector<float>   cellClippingThresholdValue{10.0f};
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
    std::vector<float>   projectionClippingThresholdValue{0.f};

    // 23: Time-major if true, batch-major if false.
    bool timeMajorValue = false;

    // Normalization:
    // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
    std::vector<float> inputLayerNormWeightsValue;
    // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
    std::vector<float> forgetLayerNormWeightsValue;
    // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
    std::vector<float> cellLayerNormWeightsValue;
    // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
    std::vector<float> outputLayerNormWeightsValue;

    // Outputs:
    // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
    //    [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
    hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
    std::vector<float> outputValue{-0.07149004f, -0.1621171f, -0.17516759f, -0.0232934225f,
                                   -0.16810727f, -0.41412935f, -0.5498753f, -0.00803578f,
                                   -0.06687349f, 0.204077631f, -0.4276504f, -0.03123213f,
                                   -0.12000261f, -0.0941918f, -0.45639035f, -0.02870186f,
                                   -0.03429216f, 0.20824050f, -0.6569892f, -0.004152651f,
                                   -0.10493034f, 0.14210969f, -0.58347696f, -0.03297536f};

    // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
    //    [batch_size, output_size]. This output is optional and can be omitted. If this output
    //    is present then output #2 must be present as well.
    hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
    std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
    // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
    //    [batch_size, num_units]. This output is optional and can be omitted.
    hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
    std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);

    UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
                                                  inputToInputWeightsDimensions, inputToInputWeightsValue,
                                                  inputToForgetWeightsDimensions, inputToForgetWeightsValue,
                                                  inputToCellWeightsDimensions, inputToCellWeightsValue,
                                                  inputToOutputWeightsDimensions, inputToOutputWeightsValue,
                                                  recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
                                                  recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
                                                  recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
                                                  recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
                                                  cellToInputWeightsDimensions, cellToInputWeightsValue,
                                                  cellToForgetWeightsDimensions, cellToForgetWeightsValue,
                                                  cellToOutputWeightsDimensions, cellToOutputWeightsValue,
                                                  inputGateBiasDimensions, inputGateBiasValue,
                                                  forgetGateBiasDimensions, forgetGateBiasValue,
                                                  cellBiasDimensions, cellBiasValue,
                                                  outputGateBiasDimensions, outputGateBiasValue,
                                                  projectionWeightsDimensions, projectionWeightsValue,
                                                  projectionBiasDimensions, projectionBiasValue,
                                                  outputStateInDimensions, outputStateInValue,
                                                  cellStateInDimensions, cellStateInValue,
                                                  activationFunctionDimensions, activationFunctionValue,
                                                  cellClippingThresholdDimensions, cellClippingThresholdValue,
                                                  projectionClippingThresholdDimensions,
                                                  projectionClippingThresholdValue,
                                                  timeMajorValue,
                                                  inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
                                                  forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
                                                  cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
                                                  outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
                                                  outputDimensions, outputValue,
                                                  hiddenStateOutDimensions, hiddenStateOutValue,
                                                  cellStateOutDimensions, cellStateOutValue,
                                                  compute);
}
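// Example usage (a minimal sketch, not part of this header): a driver test case would typically
// instantiate this helper for a specific HAL policy and backend, along the lines of
//
//     DOCTEST_TEST_CASE("UnidirectionalSequenceLstmLayerFloat32, CpuRef")
//     {
//         UnidirectionalSequenceLstmLayerFloat32TestImpl<armnn_driver::hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
//     }
//
// The concrete test cases live in the per-HAL test sources that include this header.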

template<typename HalPolicy>
void UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(armnn::Compute compute)
{
    uint32_t batchSize  = 3;
    uint32_t timeSize   = 2;
    uint32_t inputSize  = 3;
    uint32_t outputSize = 4;
    uint32_t numUnits   = outputSize;

    // Inputs:
    // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
    //     [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    hidl_vec<uint32_t> inputDimensions{timeSize, batchSize, inputSize};
    std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
                                  3., 2., 1., 2., 3., 4.,
                                  5., 4., 3., 2., 1., 2.};

    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToInputWeightsValue{0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
                                                0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
                                                0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
                                                -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f};
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToForgetWeightsValue{-0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
                                                 -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
                                                 -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
                                                 -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f};
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToCellWeightsValue{-0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
                                               0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
                                               0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
                                               -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f};
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
    std::vector<float> inputToOutputWeightsValue{-0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
                                                 -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
                                                 0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
                                                 -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f};
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToInputWeightsValue{0.23788475990f, -0.24948765337f, 0.50044941902f,
                                                    0.14431896805f, -0.115940228137f, -0.717082679f,
                                                    -0.17208620906f, 0.17850610617f, -0.16702319684f,
                                                    -0.11384502053f, -0.309785276245f, -0.3316611672f,
                                                    0.52380162477f, -0.06839632987f, -0.391478359627f,
                                                    -0.10756178963f};
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToForgetWeightsValue{0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
                                                     0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
                                                     -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
                                                     0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f};
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToCellWeightsValue{0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
                                                   -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
                                                   -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
                                                   -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f};
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
    std::vector<float> recurrentToOutputWeightsValue{-0.079031050201f, 0.041414566286f, -0.583727357285f,
                                                     0.1025384515f, -0.172372072937f, 0.09214124082f,
                                                     0.178184121827f, -0.2439443916f, 0.104485116899f,
                                                     0.2600405514f, 0.064414866268f, 0.24141204357f,
                                                     0.281875759363f, -0.14234502664f, 0.15126448862f,
                                                     -0.24421440064f};
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
    std::vector<float> cellToInputWeightsValue;
645*3e777be0SXin Li     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
646*3e777be0SXin Li     hidl_vec<uint32_t> cellToForgetWeightsDimensions{0};
647*3e777be0SXin Li     std::vector<float> cellToForgetWeightsValue;
648*3e777be0SXin Li     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
649*3e777be0SXin Li     hidl_vec<uint32_t> cellToOutputWeightsDimensions{0};
650*3e777be0SXin Li     std::vector<float> cellToOutputWeightsValue;
651*3e777be0SXin Li     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
652*3e777be0SXin Li     hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
653*3e777be0SXin Li     std::vector<float> inputGateBiasValue(numUnits, 0.0f);
654*3e777be0SXin Li     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
655*3e777be0SXin Li     hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
656*3e777be0SXin Li     std::vector<float> forgetGateBiasValue(numUnits, 1.0f);
657*3e777be0SXin Li     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
658*3e777be0SXin Li     hidl_vec<uint32_t> cellBiasDimensions{numUnits};
659*3e777be0SXin Li     std::vector<float> cellBiasValue(numUnits, 0.0f);
660*3e777be0SXin Li     // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
661*3e777be0SXin Li     hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
662*3e777be0SXin Li     std::vector<float> outputGateBiasValue(numUnits, 0.0f);
663*3e777be0SXin Li     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
664*3e777be0SXin Li     //     [output_size, num_units].
665*3e777be0SXin Li     hidl_vec<uint32_t> projectionWeightsDimensions{0};
666*3e777be0SXin Li     std::vector<float> projectionWeightsValue;
667*3e777be0SXin Li     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
668*3e777be0SXin Li     hidl_vec<uint32_t> projectionBiasDimensions{0};
669*3e777be0SXin Li     std::vector<float> projectionBiasValue;
670*3e777be0SXin Li 
671*3e777be0SXin Li     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
672*3e777be0SXin Li     hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
673*3e777be0SXin Li     std::vector<float> outputStateInValue(batchSize * outputSize, 0.0f);
674*3e777be0SXin Li     // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
675*3e777be0SXin Li     hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
676*3e777be0SXin Li     std::vector<float> cellStateInValue(batchSize * numUnits, 0.0f);
677*3e777be0SXin Li 
678*3e777be0SXin Li     // Constant scalar values (the VTS test adds these as tensors of dim {})
679*3e777be0SXin Li     // 20: The activation function: A value indicating the activation function:
680*3e777be0SXin Li     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
681*3e777be0SXin Li     hidl_vec<uint32_t>   activationFunctionDimensions{};
682*3e777be0SXin Li     std::vector<int32_t> activationFunctionValue{4};
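    // Here the value 4 selects Tanh from the list above.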
683*3e777be0SXin Li     // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
684*3e777be0SXin Li     //     If set to 0.0 then clipping is disabled.
685*3e777be0SXin Li     hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
686*3e777be0SXin Li     std::vector<float>   cellClippingThresholdValue{10.0f};
687*3e777be0SXin Li     // 22: The clipping threshold for the output from the projection layer, such that values are bound within
688*3e777be0SXin Li     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
689*3e777be0SXin Li     hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
690*3e777be0SXin Li     std::vector<float>   projectionClippingThresholdValue{0.f};
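
    // A minimal illustrative sketch (not exercised by the test): clipping bounds a
    // value to [-clip, clip]; a threshold of 0.0f means "disabled", as documented above.
    auto applyClip = [](float value, float clip)
    {
        if (clip <= 0.0f)
        {
            return value; // 0.0f is documented above as "clipping disabled"
        }
        return value > clip ? clip : (value < -clip ? -clip : value);
    };
    armnn::IgnoreUnused(applyClip); // illustrative helper only, never called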
691*3e777be0SXin Li 
692*3e777be0SXin Li     // 23: Time-major if true, batch-major if false.
693*3e777be0SXin Li     bool timeMajorValue = true;
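
    // A minimal illustrative sketch (not exercised by the test), assuming the usual
    // row-major flattening: a time-major output stores element (timestep t, batch b,
    // feature f) at flat index (t * batchSize + b) * outputSize + f; a batch-major
    // output would use (b * timeSize + t) * outputSize + f instead.
    auto timeMajorFlatIndex = [&](uint32_t t, uint32_t b, uint32_t f)
    {
        return (t * batchSize + b) * outputSize + f;
    };
    armnn::IgnoreUnused(timeMajorFlatIndex); // illustrative helper only, never called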
694*3e777be0SXin Li 
695*3e777be0SXin Li     // Normalization:
696*3e777be0SXin Li     // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
697*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at input gate.
698*3e777be0SXin Li     hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
699*3e777be0SXin Li     std::vector<float> inputLayerNormWeightsValue;
700*3e777be0SXin Li     // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
701*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at forget gate.
702*3e777be0SXin Li     hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
703*3e777be0SXin Li     std::vector<float> forgetLayerNormWeightsValue;
704*3e777be0SXin Li     // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
705*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at cell gate.
706*3e777be0SXin Li     hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
707*3e777be0SXin Li     std::vector<float> cellLayerNormWeightsValue;
708*3e777be0SXin Li     // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
709*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at output gate.
710*3e777be0SXin Li     hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
711*3e777be0SXin Li     std::vector<float> outputLayerNormWeightsValue;
712*3e777be0SXin Li 
713*3e777be0SXin Li     // Outputs:
714*3e777be0SXin Li     // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
715*3e777be0SXin Li     //    [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
716*3e777be0SXin Li     hidl_vec<uint32_t> outputDimensions{timeSize, batchSize, outputSize};
717*3e777be0SXin Li     std::vector<float> outputValue{0.135657698f, 0.124672532f, 0.0212090332f, -0.0530203655f,
718*3e777be0SXin Li                                    0.106138252f, 0.0404792242f, 0.0151643595f, -0.00675163185f,
719*3e777be0SXin Li                                    -0.0128514022f, 0.0644884035f, 0.0709072053f, -0.0454045124f,
720*3e777be0SXin Li                                    0.16288602f,  0.16649379f,  0.02770456f, -0.03698075f,
721*3e777be0SXin Li                                    0.11171641f,  0.043119f  ,  0.0762981f , -0.01228541f,
722*3e777be0SXin Li                                    0.10439701f,  0.21439962f,  0.11919238f, -0.08390583f};
723*3e777be0SXin Li 
724*3e777be0SXin Li     // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
725*3e777be0SXin Li     //    [batch_size, output_size]. This output is optional and can be omitted. If this output
726*3e777be0SXin Li     //    is present then output #2 must be present as well.
727*3e777be0SXin Li     hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
728*3e777be0SXin Li     std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
729*3e777be0SXin Li     // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
730*3e777be0SXin Li     //    [batch_size, num_units]. This output is optional and can be omitted.
731*3e777be0SXin Li     hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
732*3e777be0SXin Li     std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
733*3e777be0SXin Li 
734*3e777be0SXin Li     UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
735*3e777be0SXin Li                                                   inputToInputWeightsDimensions, inputToInputWeightsValue,
736*3e777be0SXin Li                                                   inputToForgetWeightsDimensions, inputToForgetWeightsValue,
737*3e777be0SXin Li                                                   inputToCellWeightsDimensions, inputToCellWeightsValue,
738*3e777be0SXin Li                                                   inputToOutputWeightsDimensions, inputToOutputWeightsValue,
739*3e777be0SXin Li                                                   recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
740*3e777be0SXin Li                                                   recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
741*3e777be0SXin Li                                                   recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
742*3e777be0SXin Li                                                   recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
743*3e777be0SXin Li                                                   cellToInputWeightsDimensions, cellToInputWeightsValue,
744*3e777be0SXin Li                                                   cellToForgetWeightsDimensions, cellToForgetWeightsValue,
745*3e777be0SXin Li                                                   cellToOutputWeightsDimensions, cellToOutputWeightsValue,
746*3e777be0SXin Li                                                   inputGateBiasDimensions, inputGateBiasValue,
747*3e777be0SXin Li                                                   forgetGateBiasDimensions, forgetGateBiasValue,
748*3e777be0SXin Li                                                   cellBiasDimensions, cellBiasValue,
749*3e777be0SXin Li                                                   outputGateBiasDimensions, outputGateBiasValue,
750*3e777be0SXin Li                                                   projectionWeightsDimensions, projectionWeightsValue,
751*3e777be0SXin Li                                                   projectionBiasDimensions, projectionBiasValue,
752*3e777be0SXin Li                                                   outputStateInDimensions, outputStateInValue,
753*3e777be0SXin Li                                                   cellStateInDimensions, cellStateInValue,
754*3e777be0SXin Li                                                   activationFunctionDimensions, activationFunctionValue,
755*3e777be0SXin Li                                                   cellClippingThresholdDimensions, cellClippingThresholdValue,
756*3e777be0SXin Li                                                   projectionClippingThresholdDimensions,
757*3e777be0SXin Li                                                   projectionClippingThresholdValue,
758*3e777be0SXin Li                                                   timeMajorValue,
759*3e777be0SXin Li                                                   inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
760*3e777be0SXin Li                                                   forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
761*3e777be0SXin Li                                                   cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
762*3e777be0SXin Li                                                   outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
763*3e777be0SXin Li                                                   outputDimensions, outputValue,
764*3e777be0SXin Li                                                   hiddenStateOutDimensions, hiddenStateOutValue,
765*3e777be0SXin Li                                                   cellStateOutDimensions, cellStateOutValue,
766*3e777be0SXin Li                                                   compute);
767*3e777be0SXin Li }
768*3e777be0SXin Li 
769*3e777be0SXin Li template<typename HalPolicy>
770*3e777be0SXin Li void UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::Compute compute)
771*3e777be0SXin Li {
772*3e777be0SXin Li     uint32_t batchSize  = 2;
773*3e777be0SXin Li     uint32_t timeSize   = 3;
774*3e777be0SXin Li     uint32_t inputSize  = 4;
775*3e777be0SXin Li     uint32_t outputSize = 5;
776*3e777be0SXin Li     uint32_t numUnits   = 6;
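
    // This test exercises the no-CIFG configuration (a separate input gate with its
    // own weights and bias) together with peephole connections and a projection
    // layer that maps the six cell units down to five output features.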
777*3e777be0SXin Li 
778*3e777be0SXin Li     // Inputs:
779*3e777be0SXin Li     // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
780*3e777be0SXin Li     //     [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
781*3e777be0SXin Li     //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
782*3e777be0SXin Li     hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
783*3e777be0SXin Li     std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
784*3e777be0SXin Li                                   3., 2., 1., 2., 3., 4.,
785*3e777be0SXin Li                                   5., 4., 3., 2., 1., 2.,
786*3e777be0SXin Li                                   1., 2., 3., 4., 5., 4.};
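    // batchSize * timeSize * inputSize = 2 * 3 * 4 = 24, matching the 24 literals
    // above; this test is batch-major, so the three timesteps of batch 0 come first,
    // followed by the three timesteps of batch 1.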
787*3e777be0SXin Li 
788*3e777be0SXin Li     // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
789*3e777be0SXin Li     //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
790*3e777be0SXin Li     hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
791*3e777be0SXin Li     std::vector<float> inputToInputWeightsValue{0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
792*3e777be0SXin Li                                                 -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
793*3e777be0SXin Li                                                 -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
794*3e777be0SXin Li                                                 -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
795*3e777be0SXin Li                                                 -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
796*3e777be0SXin Li                                                 -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f};
797*3e777be0SXin Li     // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
798*3e777be0SXin Li     //     [num_units, input_size].
799*3e777be0SXin Li     hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
800*3e777be0SXin Li     std::vector<float> inputToForgetWeightsValue{-0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
801*3e777be0SXin Li                                                  0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
802*3e777be0SXin Li                                                  0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
803*3e777be0SXin Li                                                  -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
804*3e777be0SXin Li                                                  -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
805*3e777be0SXin Li                                                  0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
806*3e777be0SXin Li     // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
807*3e777be0SXin Li     hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
808*3e777be0SXin Li     std::vector<float> inputToCellWeightsValue{-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
809*3e777be0SXin Li                                                -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
810*3e777be0SXin Li                                                -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
811*3e777be0SXin Li                                                -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
812*3e777be0SXin Li                                                -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
813*3e777be0SXin Li                                                0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f};
814*3e777be0SXin Li     // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
815*3e777be0SXin Li     //     [num_units, input_size].
816*3e777be0SXin Li     hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
817*3e777be0SXin Li     std::vector<float> inputToOutputWeightsValue{-0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
818*3e777be0SXin Li                                                  -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
819*3e777be0SXin Li                                                  -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
820*3e777be0SXin Li                                                  0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
821*3e777be0SXin Li                                                  0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
822*3e777be0SXin Li                                                  -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f};
823*3e777be0SXin Li     // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
824*3e777be0SXin Li     //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
825*3e777be0SXin Li     //     “num_units”), or the second dimension of the “projection_weights”, if defined.
826*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
827*3e777be0SXin Li     std::vector<float> recurrentToInputWeightsValue{-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
828*3e777be0SXin Li                                                     -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
829*3e777be0SXin Li                                                     -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
830*3e777be0SXin Li                                                     -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
831*3e777be0SXin Li                                                     0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
832*3e777be0SXin Li                                                     0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
833*3e777be0SXin Li                                                     -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
834*3e777be0SXin Li                                                     0.14283475f, -0.07390571f};
835*3e777be0SXin Li     // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
836*3e777be0SXin Li     //     [num_units, output_size].
837*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
838*3e777be0SXin Li     std::vector<float> recurrentToForgetWeightsValue{-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
839*3e777be0SXin Li                                                      0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
840*3e777be0SXin Li                                                      -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
841*3e777be0SXin Li                                                      0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
842*3e777be0SXin Li                                                      0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
843*3e777be0SXin Li                                                      -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
844*3e777be0SXin Li                                                      -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
845*3e777be0SXin Li                                                      0.061878487f, -0.04729229f};
846*3e777be0SXin Li     // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
847*3e777be0SXin Li     //     [num_units, output_size].
848*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
849*3e777be0SXin Li     std::vector<float> recurrentToCellWeightsValue{-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
850*3e777be0SXin Li                                                    0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
851*3e777be0SXin Li                                                    0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
852*3e777be0SXin Li                                                    -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
853*3e777be0SXin Li                                                    0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
854*3e777be0SXin Li                                                    0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
855*3e777be0SXin Li                                                    -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
856*3e777be0SXin Li                                                    -0.019443132f, -0.030755889f};
857*3e777be0SXin Li     // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
858*3e777be0SXin Li     //     [num_units, output_size].
859*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
860*3e777be0SXin Li     std::vector<float> recurrentToOutputWeightsValue{0.025825322f, -0.05813119f, 0.09495884f,
861*3e777be0SXin Li                                                      -0.045984812f,-0.01255415f, -0.0026479573f,
862*3e777be0SXin Li                                                      -0.08196161f, -0.054914974f, -0.0046604523f,
863*3e777be0SXin Li                                                      -0.029587349f, -0.044576716f, -0.07480124f,
864*3e777be0SXin Li                                                      -0.082868785f, 0.023254942f, 0.027502948f,
865*3e777be0SXin Li                                                      -0.0039728214f, -0.08683098f, -0.08116779f,
866*3e777be0SXin Li                                                      -0.014675607f, -0.037924774f, -0.023314456f,
867*3e777be0SXin Li                                                      -0.007401714f, -0.09255757f, 0.029460307f,
868*3e777be0SXin Li                                                      -0.08829125f, -0.005139627f, -0.08989442f,
869*3e777be0SXin Li                                                      -0.0555066f, 0.13596267f, 0.025062224f};
870*3e777be0SXin Li     // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
871*3e777be0SXin Li     hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
872*3e777be0SXin Li     std::vector<float> cellToInputWeightsValue{0.040369894f, 0.030746894f, 0.24704495f,
873*3e777be0SXin Li                                                0.018586371f, -0.037586458f, -0.15312155f};
874*3e777be0SXin Li     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
875*3e777be0SXin Li     hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
876*3e777be0SXin Li     std::vector<float> cellToForgetWeightsValue{-0.01998659f, -0.15568835f, -0.24248174f,
877*3e777be0SXin Li                                                 -0.012770197f, 0.041331276f, -0.072311886f};
878*3e777be0SXin Li     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
879*3e777be0SXin Li     hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
880*3e777be0SXin Li     std::vector<float> cellToOutputWeightsValue{0.08286371f, -0.08261836f, -0.51210177f,
881*3e777be0SXin Li                                                 0.002913762f, 0.17764764f, -0.5495371f};
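
    // A minimal illustrative sketch (simplified, not exercised by the test): peephole
    // connections let the previous cell state feed a gate directly, contributing
    // roughly cellToGateWeights[u] * cellState[u] to that gate's pre-activation.
    auto peepholeTerm = [](const std::vector<float>& cellToGateWeights,
                           const std::vector<float>& cellState,
                           uint32_t unit)
    {
        return cellToGateWeights[unit] * cellState[unit];
    };
    armnn::IgnoreUnused(peepholeTerm); // illustrative helper only, never called
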
882*3e777be0SXin Li     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
883*3e777be0SXin Li     hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
884*3e777be0SXin Li     std::vector<float> inputGateBiasValue{0.02234832f, 0.14757581f, 0.18176508f,
885*3e777be0SXin Li                                           0.10380666f, 0.053110216f, -0.06928846f};
886*3e777be0SXin Li     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
887*3e777be0SXin Li     hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
888*3e777be0SXin Li     std::vector<float> forgetGateBiasValue{0.035185695f, -0.042891346f, -0.03032477f,
889*3e777be0SXin Li                                            0.23027696f, 0.11098921f, 0.08989442f};
890*3e777be0SXin Li     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
891*3e777be0SXin Li     hidl_vec<uint32_t> cellBiasDimensions{numUnits};
892*3e777be0SXin Li     std::vector<float> cellBiasValue{-0.024379363f, 0.0055531194f, 0.23377132f,
893*3e777be0SXin Li                                      0.033463873f, -0.1483596f, 0.029460307f};
894*3e777be0SXin Li     // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
895*3e777be0SXin Li     hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
896*3e777be0SXin Li     std::vector<float> outputGateBiasValue{0.046159424f, -0.0012809046f, 0.03563469f,
897*3e777be0SXin Li                                            0.12648113f, 0.027195795f, 0.35373217f};
898*3e777be0SXin Li     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
899*3e777be0SXin Li     //     [output_size, num_units].
900*3e777be0SXin Li     hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
901*3e777be0SXin Li     std::vector<float> projectionWeightsValue{-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
902*3e777be0SXin Li                                               0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
903*3e777be0SXin Li                                               -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
904*3e777be0SXin Li                                               -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
905*3e777be0SXin Li                                               0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
906*3e777be0SXin Li                                               0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f};
907*3e777be0SXin Li     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
908*3e777be0SXin Li     hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
909*3e777be0SXin Li     std::vector<float> projectionBiasValue(outputSize, 0.f);
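
    // A minimal illustrative sketch (not exercised by the test; assumes a row-major
    // [output_size, num_units] weight layout as described in the comment for
    // operand 16): the projection maps the num_units-wide hidden vector down to
    // output_size values, after which proj_clip is applied when it is non-zero.
    auto projectOne = [](const std::vector<float>& weights,
                         const std::vector<float>& bias,
                         const std::vector<float>& hidden,
                         uint32_t numCellUnits,
                         uint32_t outputIndex)
    {
        float acc = bias.empty() ? 0.0f : bias[outputIndex];
        for (uint32_t u = 0; u < numCellUnits; ++u)
        {
            acc += weights[outputIndex * numCellUnits + u] * hidden[u];
        }
        return acc;
    };
    armnn::IgnoreUnused(projectOne); // illustrative helper only, never called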
910*3e777be0SXin Li 
911*3e777be0SXin Li     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
912*3e777be0SXin Li     hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
913*3e777be0SXin Li     std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
914*3e777be0SXin Li     // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
915*3e777be0SXin Li     hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
916*3e777be0SXin Li     std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
917*3e777be0SXin Li 
918*3e777be0SXin Li     // Constant scalar values (the VTS test adds these as tensors of dim {})
919*3e777be0SXin Li     // 20: The activation function: A value indicating the activation function:
920*3e777be0SXin Li     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
921*3e777be0SXin Li     hidl_vec<uint32_t>   activationFunctionDimensions{};
922*3e777be0SXin Li     std::vector<int32_t> activationFunctionValue{4};
923*3e777be0SXin Li     // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
924*3e777be0SXin Li     //     If set to 0.0 then clipping is disabled.
925*3e777be0SXin Li     hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
926*3e777be0SXin Li     std::vector<float>   cellClippingThresholdValue{10.0f};
927*3e777be0SXin Li     // 22: The clipping threshold for the output from the projection layer, such that values are bound within
928*3e777be0SXin Li     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
929*3e777be0SXin Li     hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
930*3e777be0SXin Li     std::vector<float>   projectionClippingThresholdValue{0.f};
931*3e777be0SXin Li 
932*3e777be0SXin Li     // 23: Time-major if true, batch-major if false.
933*3e777be0SXin Li     bool timeMajorValue = false;
934*3e777be0SXin Li 
935*3e777be0SXin Li     // Normalization:
936*3e777be0SXin Li     // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
937*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at input gate.
938*3e777be0SXin Li     hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
939*3e777be0SXin Li     std::vector<float> inputLayerNormWeightsValue;
940*3e777be0SXin Li     // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
941*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at forget gate.
942*3e777be0SXin Li     hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
943*3e777be0SXin Li     std::vector<float> forgetLayerNormWeightsValue;
944*3e777be0SXin Li     // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
945*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at cell gate.
946*3e777be0SXin Li     hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
947*3e777be0SXin Li     std::vector<float> cellLayerNormWeightsValue;
948*3e777be0SXin Li     // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
949*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at output gate.
950*3e777be0SXin Li     hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
951*3e777be0SXin Li     std::vector<float> outputLayerNormWeightsValue;
952*3e777be0SXin Li 
953*3e777be0SXin Li     // Outputs:
954*3e777be0SXin Li     // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
955*3e777be0SXin Li     //    [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
956*3e777be0SXin Li     hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
957*3e777be0SXin Li     std::vector<float> outputValue{-0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
958*3e777be0SXin Li                                    -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
959*3e777be0SXin Li                                    -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
960*3e777be0SXin Li                                    0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
961*3e777be0SXin Li                                    -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
962*3e777be0SXin Li                                    -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0127171f};
963*3e777be0SXin Li 
964*3e777be0SXin Li     // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
965*3e777be0SXin Li     //    [batch_size, output_size]. This output is optional and can be omitted. If this output
966*3e777be0SXin Li     //    is present then output #2 must be present as well.
967*3e777be0SXin Li     hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
968*3e777be0SXin Li     std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
969*3e777be0SXin Li     // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
970*3e777be0SXin Li     //    [batch_size, num_units]. This output is optional and can be omitted.
971*3e777be0SXin Li     hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
972*3e777be0SXin Li     std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
973*3e777be0SXin Li 
974*3e777be0SXin Li     UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
975*3e777be0SXin Li                                                   inputToInputWeightsDimensions, inputToInputWeightsValue,
976*3e777be0SXin Li                                                   inputToForgetWeightsDimensions, inputToForgetWeightsValue,
977*3e777be0SXin Li                                                   inputToCellWeightsDimensions, inputToCellWeightsValue,
978*3e777be0SXin Li                                                   inputToOutputWeightsDimensions, inputToOutputWeightsValue,
979*3e777be0SXin Li                                                   recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
980*3e777be0SXin Li                                                   recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
981*3e777be0SXin Li                                                   recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
982*3e777be0SXin Li                                                   recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
983*3e777be0SXin Li                                                   cellToInputWeightsDimensions, cellToInputWeightsValue,
984*3e777be0SXin Li                                                   cellToForgetWeightsDimensions, cellToForgetWeightsValue,
985*3e777be0SXin Li                                                   cellToOutputWeightsDimensions, cellToOutputWeightsValue,
986*3e777be0SXin Li                                                   inputGateBiasDimensions, inputGateBiasValue,
987*3e777be0SXin Li                                                   forgetGateBiasDimensions, forgetGateBiasValue,
988*3e777be0SXin Li                                                   cellBiasDimensions, cellBiasValue,
989*3e777be0SXin Li                                                   outputGateBiasDimensions, outputGateBiasValue,
990*3e777be0SXin Li                                                   projectionWeightsDimensions, projectionWeightsValue,
991*3e777be0SXin Li                                                   projectionBiasDimensions, projectionBiasValue,
992*3e777be0SXin Li                                                   outputStateInDimensions, outputStateInValue,
993*3e777be0SXin Li                                                   cellStateInDimensions, cellStateInValue,
994*3e777be0SXin Li                                                   activationFunctionDimensions, activationFunctionValue,
995*3e777be0SXin Li                                                   cellClippingThresholdDimensions, cellClippingThresholdValue,
996*3e777be0SXin Li                                                   projectionClippingThresholdDimensions,
997*3e777be0SXin Li                                                   projectionClippingThresholdValue,
998*3e777be0SXin Li                                                   timeMajorValue,
999*3e777be0SXin Li                                                   inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1000*3e777be0SXin Li                                                   forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1001*3e777be0SXin Li                                                   cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1002*3e777be0SXin Li                                                   outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1003*3e777be0SXin Li                                                   outputDimensions, outputValue,
1004*3e777be0SXin Li                                                   hiddenStateOutDimensions, hiddenStateOutValue,
1005*3e777be0SXin Li                                                   cellStateOutDimensions, cellStateOutValue,
1006*3e777be0SXin Li                                                   compute, 0.0031454);
1007*3e777be0SXin Li }
1008*3e777be0SXin Li 
1009*3e777be0SXin Li template<typename HalPolicy>
1010*3e777be0SXin Li void UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::Compute compute)
1011*3e777be0SXin Li {
1012*3e777be0SXin Li     uint32_t batchSize  = 3;
1013*3e777be0SXin Li     uint32_t timeSize   = 2;
1014*3e777be0SXin Li     uint32_t inputSize  = 3;
1015*3e777be0SXin Li     uint32_t outputSize = 4;
1016*3e777be0SXin Li     uint32_t numUnits   = 5;
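
    // This test adds per-gate layer normalization on top of the no-CIFG, peephole
    // and projection configuration; the normalization weights are supplied for
    // operands 24-27 below.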
1017*3e777be0SXin Li 
1018*3e777be0SXin Li     // Inputs:
1019*3e777be0SXin Li     // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
1020*3e777be0SXin Li     //     [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
1021*3e777be0SXin Li     //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1022*3e777be0SXin Li     hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
1023*3e777be0SXin Li     std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
1024*3e777be0SXin Li                                   3., 2., 1., 2., 3., 4.,
1025*3e777be0SXin Li                                   5., 4., 3., 2., 1., 2.};
1026*3e777be0SXin Li 
1027*3e777be0SXin Li     // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1028*3e777be0SXin Li     //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
1029*3e777be0SXin Li     hidl_vec<uint32_t> inputToInputWeightsDimensions{numUnits, inputSize};
1030*3e777be0SXin Li     std::vector<float> inputToInputWeightsValue{-0.49536117f, -0.0556083915f, -0.102400711f,
1031*3e777be0SXin Li                                                 -0.117484632f, 0.3298470976f, -0.1179017122f,
1032*3e777be0SXin Li                                                 0.214305695f, 0.42135173085f, 0.003878414626f,
1033*3e777be0SXin Li                                                 -0.348303917f, -0.1881275477f, 0.0343011027f,
1034*3e777be0SXin Li                                                 -0.38837709614f, -0.05636804124f, 0.4259087456f};
1035*3e777be0SXin Li     // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1036*3e777be0SXin Li     //     [num_units, input_size].
1037*3e777be0SXin Li     hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1038*3e777be0SXin Li     std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
1039*3e777be0SXin Li                                                  -0.3810434485f, 0.268383264f, -0.009807467424f,
1040*3e777be0SXin Li                                                  -0.3522925403f, -0.24275735512f, -0.28344226125f,
1041*3e777be0SXin Li                                                  0.13512269116f, -0.4932442977f, -0.10039821991f,
1042*3e777be0SXin Li                                                  0.2726137042f, 0.09216640889f, -0.06551410215f};
1043*3e777be0SXin Li     // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1044*3e777be0SXin Li     hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1045*3e777be0SXin Li     std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
1046*3e777be0SXin Li                                                0.386399507f, -0.259465157985f, -0.16545993089f,
1047*3e777be0SXin Li                                                -0.4230232555f, 0.341664791103f, -0.18127849691f,
1048*3e777be0SXin Li                                                -0.2277662414f, -0.55275535589f, 0.34184026718f,
1049*3e777be0SXin Li                                                0.3954237699f, -0.19407111404f, 0.30412107706f};
1050*3e777be0SXin Li     // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1051*3e777be0SXin Li     //     [num_units, input_size].
1052*3e777be0SXin Li     hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1053*3e777be0SXin Li     std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
1054*3e777be0SXin Li                                                  0.53969591851f, 0.23393625035f, -0.27140527306f,
1055*3e777be0SXin Li                                                  0.50009280443f, 0.07511717046f, 0.3998299249f,
1056*3e777be0SXin Li                                                  -0.51717478049f, 0.1889653282f, -0.367323637f,
1057*3e777be0SXin Li                                                  -0.12584099173f, -0.12319286912f, 0.2407919466f};
1058*3e777be0SXin Li     // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1059*3e777be0SXin Li     //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1060*3e777be0SXin Li     //     “num_units”), or the second dimension of the “projection_weights”, if defined.
1061*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToInputWeightsDimensions{numUnits, outputSize};
1062*3e777be0SXin Li     std::vector<float> recurrentToInputWeightsValue{-0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
1063*3e777be0SXin Li                                                     -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
1064*3e777be0SXin Li                                                     0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
1065*3e777be0SXin Li                                                     0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
1066*3e777be0SXin Li                                                     0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f};
1067*3e777be0SXin Li     // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1068*3e777be0SXin Li     //     [num_units, output_size].
1069*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1070*3e777be0SXin Li     std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
1071*3e777be0SXin Li                                                      -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
1072*3e777be0SXin Li                                                      -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
1073*3e777be0SXin Li                                                      -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
1074*3e777be0SXin Li                                                      0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f};
1075*3e777be0SXin Li     // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1076*3e777be0SXin Li     //     [num_units, output_size].
1077*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1078*3e777be0SXin Li     std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
1079*3e777be0SXin Li                                                    -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
1080*3e777be0SXin Li                                                    0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
1081*3e777be0SXin Li                                                    0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
1082*3e777be0SXin Li                                                    0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f};
1083*3e777be0SXin Li     // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1084*3e777be0SXin Li     //     [num_units, output_size].
1085*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1086*3e777be0SXin Li     std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
1087*3e777be0SXin Li                                                      -0.17879831790f,-0.15185534954f, -0.16918526583f,
1088*3e777be0SXin Li                                                      -0.10087361183f, -0.5436913968f, 0.016758225858f,
1089*3e777be0SXin Li                                                      0.30454617738f, -0.41493862867f, -0.005565764375f,
1090*3e777be0SXin Li                                                      -0.12584099173f, -0.12319286912f, 0.2407919466f,
1091*3e777be0SXin Li                                                      -0.08879069983f, 0.11178309f, 0.09481031f,
1092*3e777be0SXin Li                                                      -0.26424935f, 0.46261835f};
1093*3e777be0SXin Li     // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1094*3e777be0SXin Li     hidl_vec<uint32_t> cellToInputWeightsDimensions{numUnits};
1095*3e777be0SXin Li     std::vector<float> cellToInputWeightsValue{0.05f, 0.1f, 0.25f, 0.15f, -0.02f};
1096*3e777be0SXin Li     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1097*3e777be0SXin Li     hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1098*3e777be0SXin Li     std::vector<float> cellToForgetWeightsValue{-0.02f, -0.15f, -0.25f, -0.03f, 0.15f};
1099*3e777be0SXin Li     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1100*3e777be0SXin Li     hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1101*3e777be0SXin Li     std::vector<float> cellToOutputWeightsValue{0.1f, -0.1f, -0.5f, 0.05f, 0.01f};
1102*3e777be0SXin Li     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1103*3e777be0SXin Li     hidl_vec<uint32_t> inputGateBiasDimensions{numUnits};
1104*3e777be0SXin Li     std::vector<float> inputGateBiasValue{0.03f, 0.15f, 0.22f, 0.38f, 0.05f};
1105*3e777be0SXin Li     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1106*3e777be0SXin Li     hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1107*3e777be0SXin Li     std::vector<float> forgetGateBiasValue{0.1f, -0.3f, -0.2f, 0.1f, 0.4f};
1108*3e777be0SXin Li     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1109*3e777be0SXin Li     hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1110*3e777be0SXin Li     std::vector<float> cellBiasValue{-0.05f, 0.72f, 0.25f, 0.08f, 0.1f};
1111*3e777be0SXin Li     // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1112*3e777be0SXin Li     hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1113*3e777be0SXin Li     std::vector<float> outputGateBiasValue{0.05f, -0.01f, 0.2f, 0.1f, -0.2f};
1114*3e777be0SXin Li     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1115*3e777be0SXin Li     //     [output_size, num_units].
1116*3e777be0SXin Li     hidl_vec<uint32_t> projectionWeightsDimensions{numUnits, outputSize};
1117*3e777be0SXin Li     std::vector<float> projectionWeightsValue{-0.1f, 0.2f, 0.01f, -0.2f,
1118*3e777be0SXin Li                                               0.1f, 0.5f,  0.3f, 0.08f,
1119*3e777be0SXin Li                                               0.07f, 0.2f, -0.4f,  0.2f,
1120*3e777be0SXin Li                                               0.5f, -0.4f, 0.3f, -0.2f,
1121*3e777be0SXin Li                                               0.3f, 0.08f, -0.07f, 0.2f};
1122*3e777be0SXin Li     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1123*3e777be0SXin Li     hidl_vec<uint32_t> projectionBiasDimensions{outputSize};
1124*3e777be0SXin Li     std::vector<float> projectionBiasValue(outputSize, 0.f);
1125*3e777be0SXin Li 
1126*3e777be0SXin Li     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1127*3e777be0SXin Li     hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1128*3e777be0SXin Li     std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
1129*3e777be0SXin Li     // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1130*3e777be0SXin Li     hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1131*3e777be0SXin Li     std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
1132*3e777be0SXin Li 
1133*3e777be0SXin Li     // Constant scalar values (the VTS test adds these as tensors of dim {})
1134*3e777be0SXin Li     // 20: The activation function: A value indicating the activation function:
1135*3e777be0SXin Li     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1136*3e777be0SXin Li     hidl_vec<uint32_t>   activationFunctionDimensions{};
1137*3e777be0SXin Li     std::vector<int32_t> activationFunctionValue{4};
1138*3e777be0SXin Li     // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
1139*3e777be0SXin Li     //     If set to 0.0 then clipping is disabled.
1140*3e777be0SXin Li     hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
1141*3e777be0SXin Li     std::vector<float>   cellClippingThresholdValue{10.0f};
1142*3e777be0SXin Li     // 22: The clipping threshold for the output from the projection layer, such that values are bound within
1143*3e777be0SXin Li     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1144*3e777be0SXin Li     hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
1145*3e777be0SXin Li     std::vector<float>   projectionClippingThresholdValue{0.f};
1146*3e777be0SXin Li 
1147*3e777be0SXin Li     // 23: Time-major if true, batch-major if false.
1148*3e777be0SXin Li     bool timeMajorValue = false;
1149*3e777be0SXin Li 
1150*3e777be0SXin Li     // Normalization:
1151*3e777be0SXin Li     // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
1152*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at input gate.
1153*3e777be0SXin Li     hidl_vec<uint32_t> inputLayerNormWeightsDimensions{numUnits};
1154*3e777be0SXin Li     std::vector<float> inputLayerNormWeightsValue{0.1f, 0.2f, 0.3f, 0.5f, 0.8f};
1155*3e777be0SXin Li     // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1156*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at forget gate.
1157*3e777be0SXin Li     hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{numUnits};
1158*3e777be0SXin Li     std::vector<float> forgetLayerNormWeightsValue{0.1f, 0.2f, 0.3f, 0.5f, 0.2f};
1159*3e777be0SXin Li     // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1160*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at cell gate.
1161*3e777be0SXin Li     hidl_vec<uint32_t> cellLayerNormWeightsDimensions{numUnits};
1162*3e777be0SXin Li     std::vector<float> cellLayerNormWeightsValue{0.7f, 0.2f, 0.3f, 0.8f, 0.5f};
1163*3e777be0SXin Li     // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
1164*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at output gate.
1165*3e777be0SXin Li     hidl_vec<uint32_t> outputLayerNormWeightsDimensions{numUnits};
1166*3e777be0SXin Li     std::vector<float> outputLayerNormWeightsValue{0.6f, 0.2f, 0.2f, 0.5f, 0.1f};
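
    // A minimal illustrative sketch (simplified, not exercised by the test): per the
    // comments above, each gate's mean/variance-normalised pre-activation is rescaled
    // element-wise by its layer normalization weights before the gate bias is added.
    auto layerNormRescale = [](const std::vector<float>& normalised,
                               const std::vector<float>& layerNormWeights,
                               const std::vector<float>& gateBias,
                               uint32_t unit)
    {
        return normalised[unit] * layerNormWeights[unit] + gateBias[unit];
    };
    armnn::IgnoreUnused(layerNormRescale); // illustrative helper only, never called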
1167*3e777be0SXin Li 
1168*3e777be0SXin Li     // Outputs:
1169*3e777be0SXin Li     // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
1170*3e777be0SXin Li     //    [max_time, batch_size, output_size] If batch-major: [batch_size, max_time, output_size]
1171*3e777be0SXin Li     hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
1172*3e777be0SXin Li     std::vector<float> outputValue{0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
1173*3e777be0SXin Li                                    0.11458f, 0.0407109f, 0.300327f, 0.174301f,
1174*3e777be0SXin Li                                    0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
1175*3e777be0SXin Li                                    0.108008f, 0.0386623f, 0.273471f, 0.167115f,
1176*3e777be0SXin Li                                    0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
1177*3e777be0SXin Li                                    0.106649f, 0.0276847f, 0.229863f, 0.166958f};
1178*3e777be0SXin Li 
1179*3e777be0SXin Li     // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
1180*3e777be0SXin Li     //    [batch_size, output_size]. This output is optional and can be omitted. If this output
1181*3e777be0SXin Li     //    is present then output #2 must be present as well.
1182*3e777be0SXin Li     hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
1183*3e777be0SXin Li     std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
1184*3e777be0SXin Li     // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
1185*3e777be0SXin Li     //    [batch_size, num_units]. This output is optional and can be omitted.
1186*3e777be0SXin Li     hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1187*3e777be0SXin Li     std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
1188*3e777be0SXin Li 
1189*3e777be0SXin Li     UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1190*3e777be0SXin Li                                                   inputToInputWeightsDimensions, inputToInputWeightsValue,
1191*3e777be0SXin Li                                                   inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1192*3e777be0SXin Li                                                   inputToCellWeightsDimensions, inputToCellWeightsValue,
1193*3e777be0SXin Li                                                   inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1194*3e777be0SXin Li                                                   recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1195*3e777be0SXin Li                                                   recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1196*3e777be0SXin Li                                                   recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1197*3e777be0SXin Li                                                   recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1198*3e777be0SXin Li                                                   cellToInputWeightsDimensions, cellToInputWeightsValue,
1199*3e777be0SXin Li                                                   cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1200*3e777be0SXin Li                                                   cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1201*3e777be0SXin Li                                                   inputGateBiasDimensions, inputGateBiasValue,
1202*3e777be0SXin Li                                                   forgetGateBiasDimensions, forgetGateBiasValue,
1203*3e777be0SXin Li                                                   cellBiasDimensions, cellBiasValue,
1204*3e777be0SXin Li                                                   outputGateBiasDimensions, outputGateBiasValue,
1205*3e777be0SXin Li                                                   projectionWeightsDimensions, projectionWeightsValue,
1206*3e777be0SXin Li                                                   projectionBiasDimensions, projectionBiasValue,
1207*3e777be0SXin Li                                                   outputStateInDimensions, outputStateInValue,
1208*3e777be0SXin Li                                                   cellStateInDimensions, cellStateInValue,
1209*3e777be0SXin Li                                                   activationFunctionDimensions, activationFunctionValue,
1210*3e777be0SXin Li                                                   cellClippingThresholdDimensions, cellClippingThresholdValue,
1211*3e777be0SXin Li                                                   projectionClippingThresholdDimensions,
1212*3e777be0SXin Li                                                   projectionClippingThresholdValue,
1213*3e777be0SXin Li                                                   timeMajorValue,
1214*3e777be0SXin Li                                                   inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1215*3e777be0SXin Li                                                   forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1216*3e777be0SXin Li                                                   cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1217*3e777be0SXin Li                                                   outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1218*3e777be0SXin Li                                                   outputDimensions, outputValue,
1219*3e777be0SXin Li                                                   hiddenStateOutDimensions, hiddenStateOutValue,
1220*3e777be0SXin Li                                                   cellStateOutDimensions, cellStateOutValue,
1221*3e777be0SXin Li                                                   compute);
1222*3e777be0SXin Li }
1223*3e777be0SXin Li 
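// Illustrative sketch only (not part of the original tests): the time-major and batch-major layouts
// referenced in the operand comments above differ solely in how the 3-D sequence tensor is flattened.
// The helper name below is hypothetical; element (t, b, i) of a [max_time, batch_size, input_size]
// (time-major) or [batch_size, max_time, input_size] (batch-major) tensor maps to the offset shown.
inline uint32_t FlattenedSequenceIndex(bool timeMajor,
                                       uint32_t t, uint32_t b, uint32_t i,
                                       uint32_t maxTime, uint32_t batchSize, uint32_t inputSize)
{
    return timeMajor ? (t * batchSize + b) * inputSize + i   // [max_time, batch_size, input_size]
                     : (b * maxTime + t) * inputSize + i;    // [batch_size, max_time, input_size]
}
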
1224*3e777be0SXin Li template<typename HalPolicy>
1225*3e777be0SXin Li void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTestImpl(armnn::Compute compute)
1226*3e777be0SXin Li {
1227*3e777be0SXin Li     uint32_t batchSize  = 3;
1228*3e777be0SXin Li     uint32_t timeSize   = 2;
1229*3e777be0SXin Li     uint32_t inputSize  = 3;
1230*3e777be0SXin Li     uint32_t outputSize = 4;
1231*3e777be0SXin Li     uint32_t numUnits   = outputSize;
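    // With no projection layer, output_size is simply num_units (see the comment on operand 05 below).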
1232*3e777be0SXin Li 
1233*3e777be0SXin Li     // Inputs:
1234*3e777be0SXin Li     // 00: The input: A 3-D tensor of shape: If time-major: [max_time, batch_size, input_size] If batch-major:
1235*3e777be0SXin Li     //     [batch_size, max_time, input_size] where “max_time” is the number of timesteps (sequence length),
1236*3e777be0SXin Li     //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1237*3e777be0SXin Li     hidl_vec<uint32_t> inputDimensions{batchSize, timeSize, inputSize};
1238*3e777be0SXin Li     std::vector<float> inputValue{1., 2., 3., 4., 5., 4.,
1239*3e777be0SXin Li                                   3., 2., 1., 2., 3., 4.,
1240*3e777be0SXin Li                                   5., 4., 3., 2., 1., 2.};
1241*3e777be0SXin Li 
1242*3e777be0SXin Li     // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1243*3e777be0SXin Li     //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
1244*3e777be0SXin Li     hidl_vec<uint32_t> inputToInputWeightsDimensions{0};
1245*3e777be0SXin Li     std::vector<float> inputToInputWeightsValue;
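    // A first dimension of 0 makes CreateNoValueLifeTime() (defined earlier in this file) mark the
    // operand as NO_VALUE; leaving the input-gate operands (01, 05, 09, 12) empty in this way is how
    // the CIFG (no input gate) configuration of this test is expressed.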
1246*3e777be0SXin Li     // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1247*3e777be0SXin Li     //     [num_units, input_size].
1248*3e777be0SXin Li     hidl_vec<uint32_t> inputToForgetWeightsDimensions{numUnits, inputSize};
1249*3e777be0SXin Li     std::vector<float> inputToForgetWeightsValue{0.2415594226f, 0.15400093799f, 0.4566498398f,
1250*3e777be0SXin Li                                                  -0.3810434485f, 0.268383264f, -0.009807467424f,
1251*3e777be0SXin Li                                                  -0.3522925403f, -0.24275735512f, -0.28344226125f,
1252*3e777be0SXin Li                                                  0.13512269116f, -0.4932442977f, -0.10039821991f};
1253*3e777be0SXin Li     // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
1254*3e777be0SXin Li     hidl_vec<uint32_t> inputToCellWeightsDimensions{numUnits, inputSize};
1255*3e777be0SXin Li     std::vector<float> inputToCellWeightsValue{-0.2504855627f, 0.184490025045f, -0.2480507493f,
1256*3e777be0SXin Li                                                0.386399507f, -0.259465157985f, -0.16545993089f,
1257*3e777be0SXin Li                                                -0.4230232555f, 0.341664791103f, -0.18127849691f,
1258*3e777be0SXin Li                                                -0.2277662414f, -0.55275535589f, 0.34184026718f};
1259*3e777be0SXin Li     // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1260*3e777be0SXin Li     //     [num_units, input_size].
1261*3e777be0SXin Li     hidl_vec<uint32_t> inputToOutputWeightsDimensions{numUnits, inputSize};
1262*3e777be0SXin Li     std::vector<float> inputToOutputWeightsValue{0.2303854227f, 0.5218806862f, -0.4865379333f,
1263*3e777be0SXin Li                                                  0.53969591851f, 0.23393625035f, -0.27140527306f,
1264*3e777be0SXin Li                                                  0.50009280443f, 0.07511717046f, 0.3998299249f,
1265*3e777be0SXin Li                                                  -0.51717478049f, 0.1889653282f, -0.367323637f};
1266*3e777be0SXin Li     // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1267*3e777be0SXin Li     //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1268*3e777be0SXin Li     //     “num_units”), or the second dimension of the “projection_weights”, if defined.
1269*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToInputWeightsDimensions{0};
1270*3e777be0SXin Li     std::vector<float> recurrentToInputWeightsValue;
1271*3e777be0SXin Li     // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1272*3e777be0SXin Li     //     [num_units, output_size].
1273*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToForgetWeightsDimensions{numUnits, outputSize};
1274*3e777be0SXin Li     std::vector<float> recurrentToForgetWeightsValue{-0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
1275*3e777be0SXin Li                                                      -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
1276*3e777be0SXin Li                                                      -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
1277*3e777be0SXin Li                                                      -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f};
1278*3e777be0SXin Li     // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1279*3e777be0SXin Li     //     [num_units, output_size].
1280*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToCellWeightsDimensions{numUnits, outputSize};
1281*3e777be0SXin Li     std::vector<float> recurrentToCellWeightsValue{-0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
1282*3e777be0SXin Li                                                    -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
1283*3e777be0SXin Li                                                    0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
1284*3e777be0SXin Li                                                    0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f};
1285*3e777be0SXin Li     // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1286*3e777be0SXin Li     //     [num_units, output_size].
1287*3e777be0SXin Li     hidl_vec<uint32_t> recurrentToOutputWeightsDimensions{numUnits, outputSize};
1288*3e777be0SXin Li     std::vector<float> recurrentToOutputWeightsValue{-0.32921677827f, 0.32624614238f, -0.1388191282f,
1289*3e777be0SXin Li                                                      -0.17879831790f, -0.15185534954f, -0.16918526583f,
1290*3e777be0SXin Li                                                      -0.10087361183f, -0.5436913968f, 0.016758225858f,
1291*3e777be0SXin Li                                                      0.30454617738f, -0.41493862867f, -0.005565764375f,
1292*3e777be0SXin Li                                                      -0.12584099173f, -0.12319286912f, 0.2407919466f,
1293*3e777be0SXin Li                                                      -0.08879069983f};
1294*3e777be0SXin Li     // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1295*3e777be0SXin Li     hidl_vec<uint32_t> cellToInputWeightsDimensions{0};
1296*3e777be0SXin Li     std::vector<float> cellToInputWeightsValue;
1297*3e777be0SXin Li     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1298*3e777be0SXin Li     hidl_vec<uint32_t> cellToForgetWeightsDimensions{numUnits};
1299*3e777be0SXin Li     std::vector<float> cellToForgetWeightsValue{0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
1300*3e777be0SXin Li     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1301*3e777be0SXin Li     hidl_vec<uint32_t> cellToOutputWeightsDimensions{numUnits};
1302*3e777be0SXin Li     std::vector<float> cellToOutputWeightsValue{-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
1303*3e777be0SXin Li     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1304*3e777be0SXin Li     hidl_vec<uint32_t> inputGateBiasDimensions{0};
1305*3e777be0SXin Li     std::vector<float> inputGateBiasValue;
1306*3e777be0SXin Li     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1307*3e777be0SXin Li     hidl_vec<uint32_t> forgetGateBiasDimensions{numUnits};
1308*3e777be0SXin Li     std::vector<float> forgetGateBiasValue{1., 1., 1., 1.};
1309*3e777be0SXin Li     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1310*3e777be0SXin Li     hidl_vec<uint32_t> cellBiasDimensions{numUnits};
1311*3e777be0SXin Li     std::vector<float> cellBiasValue{0., 0., 0., 0.};
1312*3e777be0SXin Li     // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1313*3e777be0SXin Li     hidl_vec<uint32_t> outputGateBiasDimensions{numUnits};
1314*3e777be0SXin Li     std::vector<float> outputGateBiasValue{0., 0., 0., 0.};
1315*3e777be0SXin Li     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1316*3e777be0SXin Li     //     [output_size, num_units].
1317*3e777be0SXin Li     hidl_vec<uint32_t> projectionWeightsDimensions{0};
1318*3e777be0SXin Li     std::vector<float> projectionWeightsValue;
1319*3e777be0SXin Li     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1320*3e777be0SXin Li     hidl_vec<uint32_t> projectionBiasDimensions{0};
1321*3e777be0SXin Li     std::vector<float> projectionBiasValue;
1322*3e777be0SXin Li 
1323*3e777be0SXin Li     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1324*3e777be0SXin Li     hidl_vec<uint32_t> outputStateInDimensions{batchSize, outputSize};
1325*3e777be0SXin Li     std::vector<float> outputStateInValue(batchSize * outputSize, 0.f);
1326*3e777be0SXin Li     // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1327*3e777be0SXin Li     hidl_vec<uint32_t> cellStateInDimensions{batchSize, numUnits};
1328*3e777be0SXin Li     std::vector<float> cellStateInValue(batchSize * numUnits, 0.f);
1329*3e777be0SXin Li 
1330*3e777be0SXin Li     // Constant scalar values (the VTS test adds these as tensors of dim {})
1331*3e777be0SXin Li     // 20: The activation function: A value indicating the activation function:
1332*3e777be0SXin Li     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1333*3e777be0SXin Li     hidl_vec<uint32_t>   activationFunctionDimensions{};
1334*3e777be0SXin Li     std::vector<int32_t> activationFunctionValue{4};
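    // The value 4 selects Tanh in the enumeration listed in the comment above.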
1335*3e777be0SXin Li     // 21: The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip].
1336*3e777be0SXin Li     //     If set to 0.0 then clipping is disabled.
1337*3e777be0SXin Li     hidl_vec<uint32_t>   cellClippingThresholdDimensions{};
1338*3e777be0SXin Li     std::vector<float>   cellClippingThresholdValue{10.0f};
1339*3e777be0SXin Li     // 22: The clipping threshold for the output from the projection layer, such that values are bound within
1340*3e777be0SXin Li     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1341*3e777be0SXin Li     hidl_vec<uint32_t>   projectionClippingThresholdDimensions{};
1342*3e777be0SXin Li     std::vector<float>   projectionClippingThresholdValue{0.f};
1343*3e777be0SXin Li 
1344*3e777be0SXin Li     // 23: Time-major if true, batch-major if false.
1345*3e777be0SXin Li     bool timeMajorValue = false;
1346*3e777be0SXin Li 
1347*3e777be0SXin Li     // Normalization:
1348*3e777be0SXin Li     // 24: The input layer normalization weights. A 1-D tensor of shape [num_units].
1349*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at input gate.
1350*3e777be0SXin Li     hidl_vec<uint32_t> inputLayerNormWeightsDimensions{0};
1351*3e777be0SXin Li     std::vector<float> inputLayerNormWeightsValue;
1352*3e777be0SXin Li     // 25: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1353*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at forget gate.
1354*3e777be0SXin Li     hidl_vec<uint32_t> forgetLayerNormWeightsDimensions{0};
1355*3e777be0SXin Li     std::vector<float> forgetLayerNormWeightsValue;
1356*3e777be0SXin Li     // 26: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1357*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at cell gate.
1358*3e777be0SXin Li     hidl_vec<uint32_t> cellLayerNormWeightsDimensions{0};
1359*3e777be0SXin Li     std::vector<float> cellLayerNormWeightsValue;
1360*3e777be0SXin Li     // 27: The output layer normalization weights. A 1-D tensor of shape [num_units].
1361*3e777be0SXin Li     //    Used to rescale normalized inputs to activation at output gate.
1362*3e777be0SXin Li     hidl_vec<uint32_t> outputLayerNormWeightsDimensions{0};
1363*3e777be0SXin Li     std::vector<float> outputLayerNormWeightsValue;
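    // All four layer-normalization operands (24-27) are left empty here, so no layer normalization is
    // applied in this CIFG/peephole variant (contrast with the test above, which supplies them).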
1364*3e777be0SXin Li 
1365*3e777be0SXin Li     // Outputs:
1366*3e777be0SXin Li     // 0: The output: A 3-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16. Shape: if time-major:
1367*3e777be0SXin Li     //    [max_time, batch_size, output_size]; if batch-major: [batch_size, max_time, output_size].
1368*3e777be0SXin Li     hidl_vec<uint32_t> outputDimensions{batchSize, timeSize, outputSize};
1369*3e777be0SXin Li     std::vector<float> outputValue{-0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
1370*3e777be0SXin Li                                    -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
1371*3e777be0SXin Li                                    -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
1372*3e777be0SXin Li                                    -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
1373*3e777be0SXin Li                                    -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
1374*3e777be0SXin Li                                    -0.031675f, 0.125987f, -0.526695f, -0.110093f};
1375*3e777be0SXin Li 
1376*3e777be0SXin Li     // 1: The hidden state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
1377*3e777be0SXin Li     //    [batch_size, output_size]. This output is optional and can be omitted. If this output
1378*3e777be0SXin Li     //    is present then output #2 must be present as well.
1379*3e777be0SXin Li     hidl_vec<uint32_t> hiddenStateOutDimensions{batchSize, outputSize};
1380*3e777be0SXin Li     std::vector<float> hiddenStateOutValue(batchSize * outputSize, 0.f);
1381*3e777be0SXin Li     // 2: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32/16, of shape
1382*3e777be0SXin Li     //    [batch_size, num_units]. This output is optional and can be omitted.
1383*3e777be0SXin Li     hidl_vec<uint32_t> cellStateOutDimensions{batchSize, numUnits};
1384*3e777be0SXin Li     std::vector<float> cellStateOutValue(batchSize * numUnits, 0.f);
1385*3e777be0SXin Li 
1386*3e777be0SXin Li     UnidirectionalSequenceLstmTestImpl<HalPolicy>(inputDimensions, inputValue,
1387*3e777be0SXin Li                                                   inputToInputWeightsDimensions, inputToInputWeightsValue,
1388*3e777be0SXin Li                                                   inputToForgetWeightsDimensions, inputToForgetWeightsValue,
1389*3e777be0SXin Li                                                   inputToCellWeightsDimensions, inputToCellWeightsValue,
1390*3e777be0SXin Li                                                   inputToOutputWeightsDimensions, inputToOutputWeightsValue,
1391*3e777be0SXin Li                                                   recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
1392*3e777be0SXin Li                                                   recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue,
1393*3e777be0SXin Li                                                   recurrentToCellWeightsDimensions, recurrentToCellWeightsValue,
1394*3e777be0SXin Li                                                   recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue,
1395*3e777be0SXin Li                                                   cellToInputWeightsDimensions, cellToInputWeightsValue,
1396*3e777be0SXin Li                                                   cellToForgetWeightsDimensions, cellToForgetWeightsValue,
1397*3e777be0SXin Li                                                   cellToOutputWeightsDimensions, cellToOutputWeightsValue,
1398*3e777be0SXin Li                                                   inputGateBiasDimensions, inputGateBiasValue,
1399*3e777be0SXin Li                                                   forgetGateBiasDimensions, forgetGateBiasValue,
1400*3e777be0SXin Li                                                   cellBiasDimensions, cellBiasValue,
1401*3e777be0SXin Li                                                   outputGateBiasDimensions, outputGateBiasValue,
1402*3e777be0SXin Li                                                   projectionWeightsDimensions, projectionWeightsValue,
1403*3e777be0SXin Li                                                   projectionBiasDimensions, projectionBiasValue,
1404*3e777be0SXin Li                                                   outputStateInDimensions, outputStateInValue,
1405*3e777be0SXin Li                                                   cellStateInDimensions, cellStateInValue,
1406*3e777be0SXin Li                                                   activationFunctionDimensions, activationFunctionValue,
1407*3e777be0SXin Li                                                   cellClippingThresholdDimensions, cellClippingThresholdValue,
1408*3e777be0SXin Li                                                   projectionClippingThresholdDimensions,
1409*3e777be0SXin Li                                                   projectionClippingThresholdValue,
1410*3e777be0SXin Li                                                   timeMajorValue,
1411*3e777be0SXin Li                                                   inputLayerNormWeightsDimensions, inputLayerNormWeightsValue,
1412*3e777be0SXin Li                                                   forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue,
1413*3e777be0SXin Li                                                   cellLayerNormWeightsDimensions, cellLayerNormWeightsValue,
1414*3e777be0SXin Li                                                   outputLayerNormWeightsDimensions, outputLayerNormWeightsValue,
1415*3e777be0SXin Li                                                   outputDimensions, outputValue,
1416*3e777be0SXin Li                                                   hiddenStateOutDimensions, hiddenStateOutValue,
1417*3e777be0SXin Li                                                   cellStateOutDimensions, cellStateOutValue,
1418*3e777be0SXin Li                                                   compute);
1419*3e777be0SXin Li }
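
// Usage sketch (an assumption, not taken from this file): in the driver's doctest-based test sources
// this helper would typically be instantiated along the lines of the commented example below. The
// test-case name is hypothetical and the HalPolicy/backend combination is only one possibility.
//
//     DOCTEST_TEST_CASE("UnidirectionalSequenceLstmCifgPeepholeNoProjectionCpuRef")
//     {
//         UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTestImpl<
//             armnn_driver::hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
//     }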