1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include <ArmNNProfilingServiceInitialiser.hpp>
7 #include <ProfilingOptionsConverter.hpp>
8 #include <Runtime.hpp>
9
10 #include <armnn/Descriptors.hpp>
11 #include <armnn/IRuntime.hpp>
12 #include <armnn/INetwork.hpp>
13
14 #include <armnn/profiling/ArmNNProfiling.hpp>
15
16 #include <common/include/LabelsAndEventClasses.hpp>
17 #include <common/include/Processes.hpp>
18
19 #include <test/ProfilingTestUtils.hpp>
20
21 #ifdef WITH_VALGRIND
22 #include <valgrind/memcheck.h>
23 #endif
24
25 #include <doctest/doctest.h>
26 #include "RuntimeTests.hpp"
27 #include <TestUtils.hpp>
28
29 #ifdef ARMNN_LEAK_CHECKING_ENABLED
30 #include <HeapProfiling.hpp>
31 #include <LeakChecking.hpp>
32 #endif
33
namespace armnn
{

// Test helper: pre-reserves capacity in the runtime's loaded-network container
// so that a subsequent LoadNetwork does not allocate. Used by the Valgrind
// leak test below to keep the runtime memory-stable across a load/unload pair.
// NOTE(review): this free function touches RuntimeImpl::m_LoadedNetworks
// directly, so it is presumably declared a friend of RuntimeImpl (likely via
// RuntimeTests.hpp) — confirm against that header.
void RuntimeLoadedNetworksReserve(armnn::RuntimeImpl* runtime)
{
    runtime->m_LoadedNetworks.reserve(1);
}

} // namespace armnn
43
44 TEST_SUITE("Runtime")
45 {
46 TEST_CASE("RuntimeUnloadNetwork")
47 {
48 // build 2 mock-networks and load them into the runtime
49 armnn::IRuntime::CreationOptions options;
50 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
51
52 // Mock network 1.
53 armnn::NetworkId networkIdentifier1 = 1;
54 armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
55 mockNetwork1->AddInputLayer(0, "test layer");
56 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
57 runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));
58
59 // Mock network 2.
60 armnn::NetworkId networkIdentifier2 = 2;
61 armnn::INetworkPtr mockNetwork2(armnn::INetwork::Create());
62 mockNetwork2->AddInputLayer(0, "test layer");
63 runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));
64
65 // Unloads one by its networkID.
66 CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
67
68 CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
69 }
70
TEST_CASE("RuntimePreImportInputs")
{
    // Exercises pre-imported input tensors: ImportInputs / ClearImportedInputs
    // and the ImportedInputId argument of RuntimeImpl::Execute, including the
    // error cases (duplicate, unknown, too many, too few ids).
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
    armnn::NetworkId networkId = 1;
    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());

    // Network: (input binding 0, input binding 1) -> addition -> output binding 2.
    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    auto addLayer = testNetwork->AddAdditionLayer("add layer");
    ARMNN_NO_DEPRECATE_WARN_END
    auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");

    TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};

    inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};

    std::string er;
    // First flag enables async execution, needed for CreateWorkingMemHandle /
    // Execute below. NOTE(review): inferred from usage — confirm against the
    // INetworkProperties constructor.
    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
    runtime->LoadNetwork(networkId,
                         Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                         er,
                         networkProperties);

    std::vector<int> inputData1(4, 10);
    std::vector<int> inputData2(4, 20);
    std::vector<int> output(4);

    ConstTensor inputTensor1({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData1.data());
    ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
    Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());

    // Pre-import input binding 0; imported ids are assigned sequentially from 0.
    auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
    CHECK(importedInputVec1.size() == 1);
    CHECK(importedInputVec1[0] == 0);

    auto memHandle = runtime->CreateWorkingMemHandle(networkId);

    // Mix a pre-imported input (imported id 0) with a normally-passed one (binding id 1).
    runtime->Execute(*memHandle.get(), {{1, inputTensor2}}, {{2, outputTensor}}, {0 /* pre-imported id */});
    for (auto val: output) {
        CHECK(val == 30);   // 10 + 20
    }

    // Pre-import input binding 1; gets the next imported id (1).
    auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
    CHECK(importedInputVec2.size() == 1);
    CHECK(importedInputVec2[0] == 1);

    // Same mix the other way round.
    runtime->Execute(*memHandle.get(), {{0, inputTensor1}}, {{2, outputTensor}}, {1 /* pre-imported id */});
    for (auto val: output) {
        CHECK(val == 30);
    }

    // Both inputs pre-imported, none passed normally.
    runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1});
    for (auto val: output) {
        CHECK(val == 30);
    }
    // Duplicate ImportedInputId and LayerBindingId
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 0});,
                    armnn::InvalidArgumentException);
    // Duplicate LayerBindingId
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{1, inputTensor2}}, {{2, outputTensor}}, {1});,
                    armnn::InvalidArgumentException);
    // Incorrect ImportedInputId
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{1, inputTensor2}}, {{2, outputTensor}}, {10});,
                    armnn::InvalidArgumentException);
    // Incorrect LayerBindingId
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {1});,
                    armnn::InvalidArgumentException);
    // Incorrect layer binding id and ImportedInputId
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
                    armnn::InvalidArgumentException);
    // A third import of binding 1 gets imported id 2.
    auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
    CHECK(importedInputVec3[0] == 2);
    // Too many ImportedInputIds
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
                    armnn::InvalidArgumentException);
    // Too many InputTensors
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(),
                                     {{0, inputTensor2},
                                      {1, inputTensor2},
                                      {2, inputTensor2}},
                                     {{2, outputTensor}});, armnn::InvalidArgumentException);
    // Too few ImportedInputIds
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0});,
                    armnn::InvalidArgumentException);
    // After deleting imported id 1, id 0 plus a normal input still works.
    runtime->ClearImportedInputs(networkId, {1});
    runtime->Execute(*memHandle.get(), {{1, inputTensor2}}, {{2, outputTensor}}, {0}, {});
    for (auto val: output) {
        CHECK(val == 30);
    }
    // Using deleted pre-imported input
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1}, {});,
                    armnn::InvalidArgumentException);

    // Trying to delete deleted pre-imported tensor
    CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {1});, armnn::InvalidArgumentException);

    // Trying to delete unknown pre-imported tensor
    CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
}
181
TEST_CASE("RuntimePreImportOutputs")
{
    // Exercises pre-imported output tensors: ImportOutputs /
    // ClearImportedOutputs and the ImportedOutputId argument of
    // RuntimeImpl::Execute, including the error cases.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    armnn::NetworkId networkId = 1;

    armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
    TensorInfo tensorInfo{{4}, armnn::DataType::Float32, 0.0f, 0, true};

    // First chain: input binding 0 -> BoundedReLu clamping to [m_B, m_A] = [0, 2]
    // -> output binding 2.
    auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
    inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = 2.0f;
    activationDescriptor.m_B = 0.0f;
    auto activationLayer1 = testNetwork->AddActivationLayer(activationDescriptor, "add layer");
    auto outputLayer1 = testNetwork->AddOutputLayer(2, "output layer");

    inputLayer1->GetOutputSlot(0).Connect(activationLayer1->GetInputSlot(0));

    activationLayer1->GetOutputSlot(0).Connect(outputLayer1->GetInputSlot(0));
    activationLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Second, independent chain: input binding 1 -> BoundedReLu clamping to
    // [2, 4] -> output binding 3. (Layer names are reused from the first chain.)
    auto inputLayer2 = testNetwork->AddInputLayer(1, "input 1 layer");

    activationDescriptor.m_A = 4.0f;
    activationDescriptor.m_B = 2.0f;
    auto activationLayer2 = testNetwork->AddActivationLayer(activationDescriptor, "add layer");
    auto outputLayer2 = testNetwork->AddOutputLayer(3, "output layer");

    inputLayer2->GetOutputSlot(0).Connect(activationLayer2->GetInputSlot(0));
    inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    activationLayer2->GetOutputSlot(0).Connect(outputLayer2->GetInputSlot(0));
    activationLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    std::string er;
    // First flag enables async execution, needed for CreateWorkingMemHandle /
    // Execute below. NOTE(review): inferred from usage — confirm against the
    // INetworkProperties constructor.
    armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
    runtime->LoadNetwork(networkId,
                         Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
                         er,
                         networkProperties);

    // Inputs 1.0f and 3.0f already lie inside their respective clamping
    // ranges, so each chain passes its input through unchanged.
    std::vector<float> inputData1(4, 1.0f);
    std::vector<float> inputData2(4, 3.0f);

    std::vector<float> outputData1(4);
    std::vector<float> outputData2(4);

    ConstTensor inputTensor1(tensorInfo, inputData1.data());
    ConstTensor inputTensor2(tensorInfo, inputData2.data());

    Tensor outputTensor1{tensorInfo, outputData1.data()};
    Tensor outputTensor2{tensorInfo, outputData2.data()};

    InputTensors inputTensors = {{0, inputTensor1}, {1, inputTensor2}};

    std::pair<LayerBindingId, class Tensor> output1{2, outputTensor1};
    std::pair<LayerBindingId, class Tensor> output2{3, outputTensor2};

    // After any successful Execute, both buffers hold the pass-through values.
    auto testOutputs = [&]()
    {
        for (auto val : outputData1)
        {
            CHECK(val == 1.0f);
        }

        for (auto val : outputData2)
        {
            CHECK(val == 3.0f);
        }
    };

    auto memHandle = runtime->CreateWorkingMemHandle(networkId);

    // Baseline: no pre-imported outputs at all.
    runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
    testOutputs();

    // Pre-import both outputs; imported ids are assigned sequentially from 0.
    auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
    CHECK(importedOutputVec.size() == 2);
    CHECK(importedOutputVec[0] == 0);
    CHECK(importedOutputVec[1] == 1);

    // All outputs pre-imported.
    runtime->Execute(*memHandle.get(), inputTensors, {}, {}, importedOutputVec);
    testOutputs();

    // One pre-imported output mixed with one passed normally (both orders).
    runtime->Execute(*memHandle.get(), inputTensors, {output1}, {}, {1});
    testOutputs();

    runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
    testOutputs();

    // Pre-import the inputs too and run fully pre-imported.
    auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
    CHECK(importedInputVec.size() == 2);
    CHECK(importedInputVec[0] == 0);
    CHECK(importedInputVec[1] == 1);

    runtime->Execute(*memHandle.get(), {}, {}, importedInputVec, importedOutputVec);
    testOutputs();

    // Mixed pre-imported/normal for both inputs and outputs.
    runtime->Execute(*memHandle.get(), {{0, inputTensor1}}, {output2}, {1}, {0});
    testOutputs();

    // Too many ids
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output1, output2}, {}, {0, 1});,
                    armnn::InvalidArgumentException);

    // Duplicate ids
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {1});,
                    armnn::InvalidArgumentException);

    // Duplicate ids
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output1, output1}, {}, {});,
                    armnn::InvalidArgumentException);

    // Duplicate ids
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {}, {}, {0, 0}),
                    armnn::InvalidArgumentException);

    // Unknown id
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output1}, {}, {3});,
                    armnn::InvalidArgumentException);

    // Unknown id
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {{4, outputTensor2}}, {}, {1});,
                    armnn::InvalidArgumentException);

    // Input id for output
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {{0, outputTensor2}}, {}, {1});,
                    armnn::InvalidArgumentException);

    // Input id for output
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {{0, outputTensor2}}, {}, {1});,
                    armnn::InvalidArgumentException);

    // Output id for input
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{2, inputTensor1}}, {{0, outputTensor2}}, {1}, {1, 0});,
                    armnn::InvalidArgumentException);

    // Deleting imported output id 1 leaves id 0 usable...
    runtime->ClearImportedOutputs(networkId, {1});

    runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
    testOutputs();

    // Trying to use deleted pre-imported tensor
    CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {}, {}, importedOutputVec),
                    armnn::InvalidArgumentException);

    // Trying to delete deleted pre-imported tensor
    CHECK_THROWS_AS(runtime->ClearImportedOutputs(networkId, {1});, armnn::InvalidArgumentException);

    // Trying to delete unknown pre-imported tensor
    CHECK_THROWS_AS(runtime->ClearImportedOutputs(networkId, {10});, armnn::InvalidArgumentException);
}
340
// Note: in the current builds we don't do valgrind and gperftools based leak checking at the
// same time, so in practice WITH_VALGRIND and ARMNN_LEAK_CHECKING_ENABLED are exclusive. The
// valgrind tests can stay for x86 builds, but on hikey Valgrind is just way too slow
// to be integrated into the CI system.
345
346 #ifdef ARMNN_LEAK_CHECKING_ENABLED
347
348 struct DisableGlobalLeakChecking
349 {
DisableGlobalLeakCheckingDisableGlobalLeakChecking350 DisableGlobalLeakChecking()
351 {
352 ARMNN_LOCAL_LEAK_CHECKING_ONLY();
353 }
354 };
355
TEST_CASE_FIXTURE(DisableGlobalLeakChecking, "RuntimeHeapMemoryUsageSanityChecks")
{
    // Sanity-checks the leak-checking macros themselves: an allocation that is
    // still live when a scoped checker is queried must show up as a leak in
    // that scope, and must not be attributed to the enclosing scope once the
    // inner scope (and the allocation) has ended.
    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
    {
        ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
        {
            ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
            CHECK(ARMNN_NO_LEAKS_IN_SCOPE() == true);
            // Still alive when the checks below run, so it counts as leaked in
            // this scope; the unique_ptr releases it at scope exit.
            std::unique_ptr<char[]> dummyAllocation(new char[1000]);
            // "A leak of 1000 bytes is expected here. "
            // "Please make sure environment variable: HEAPCHECK=draconian is set!"
            CHECK((ARMNN_NO_LEAKS_IN_SCOPE() == false));
            CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
            CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
        }
        // The inner allocation has been freed, so the outer scope is clean.
        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
376
377 #endif // ARMNN_LEAK_CHECKING_ENABLED
378
379 // Note: this part of the code is due to be removed when we fully trust the gperftools based results.
380 #ifdef WITH_VALGRIND
381 // Run with the following command to get all the amazing output (in the devenv/build folder) :)
382 // valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
TEST_CASE("RuntimeMemoryLeak")
{
    // Checks via Valgrind's memcheck client macros that a LoadNetwork /
    // UnloadNetwork pair changes neither the leaked nor the reachable byte
    // counts. When not running under Valgrind the counters stay 0 and the
    // checks pass trivially.

    // From documentation:

    // This means that no pointer to the block can be found. The block is classified as "lost",
    // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
    unsigned long leakedBefore = 0;
    unsigned long leakedAfter = 0;

    // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
    // the programmer could, at least in principle, have freed it before program exit.
    // We want to test this in case memory is not freed as early as it could have been.
    unsigned long reachableBefore = 0;
    unsigned long reachableAfter = 0;

    // Needed as out params but we don't test them.
    unsigned long dubious = 0;
    unsigned long suppressed = 0;

    armnn::NetworkId networkIdentifier1 = 1;

    // ensure that runtime is large enough before checking for memory leaks
    // otherwise when loading the network it will automatically reserve memory that won't be released until destruction
    armnn::IRuntime::CreationOptions options;
    armnn::RuntimeImpl runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

        armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
        mockNetwork1->AddInputLayer(0, "test layer");

        // Warm-up load/unload pair to put the runtime in a stable state (memory-wise).
        runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
        runtime.UnloadNetwork(networkIdentifier1);

        // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
        VALGRIND_DO_QUICK_LEAK_CHECK;
        VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);

        // The actual test: the load/unload pair whose leak delta is measured.
        runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
        runtime.UnloadNetwork(networkIdentifier1);

        VALGRIND_DO_ADDED_LEAK_CHECK;
        VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
    }

    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
    CHECK(leakedBefore == leakedAfter);
    CHECK(reachableBefore == reachableAfter);

    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
    // so they are assigned to, but still considered unused, causing a warning.
    armnn::IgnoreUnused(dubious);
    armnn::IgnoreUnused(suppressed);
}
441 #endif // WITH_VALGRIND
442
443 TEST_CASE("RuntimeCpuRef")
444 {
445 using namespace armnn;
446
447 // Create runtime in which test will run
448 armnn::IRuntime::CreationOptions options;
449 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
450
451 // build up the structure of the network
452 INetworkPtr net(INetwork::Create());
453
454 IConnectableLayer* input = net->AddInputLayer(0);
455
456 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
457 NormalizationDescriptor descriptor;
458 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
459
460 IConnectableLayer* output = net->AddOutputLayer(0);
461
462 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
463 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
464
465 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
466 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
467
468 // optimize the network
469 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
470 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
471
472 // Load it into the runtime. It should success.
473 armnn::NetworkId netId;
474 CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
475 }
476
477 TEST_CASE("RuntimeFallbackToCpuRef")
478 {
479 using namespace armnn;
480
481 // Create runtime in which test will run
482 armnn::IRuntime::CreationOptions options;
483 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
484
485 // build up the structure of the network
486 INetworkPtr net(INetwork::Create());
487
488 IConnectableLayer* input = net->AddInputLayer(0);
489
490 // This layer configuration isn't supported by CpuAcc, should be fall back to CpuRef.
491 NormalizationDescriptor descriptor;
492 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
493
494 IConnectableLayer* output = net->AddOutputLayer(0);
495
496 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
497 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
498
499 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
500 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
501
502 // Allow fallback to CpuRef.
503 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
504 // optimize the network
505 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
506
507 // Load it into the runtime. It should succeed.
508 armnn::NetworkId netId;
509 CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
510 }
511
512 TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
513 {
514 // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
515 using namespace armnn;
516
517 // Create runtime in which test will run
518 armnn::IRuntime::CreationOptions options;
519 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
520
521 // build up the structure of the network
522 INetworkPtr net(INetwork::Create());
523 armnn::IConnectableLayer* input = net->AddInputLayer(0,"input");
524 armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
525 armnn::IConnectableLayer* output = net->AddOutputLayer(0, "output");
526
527 input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
528 softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
529
530 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
531 armnn::DataType::QAsymmU8,
532 1.0f / 255,
533 0));
534
535 softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
536 armnn::DataType::QAsymmU8,
537 0.0f,
538 0));
539
540 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
541 std::vector<std::string> errMessages;
542
543 try
544 {
545 armnn::IOptimizedNetworkPtr optNet = Optimize(*net,
546 backends,
547 runtime->GetDeviceSpec(),
548 OptimizerOptionsOpaque(),
549 errMessages);
550 FAIL("An exception should have been thrown");
551 }
552 catch (const armnn::InvalidArgumentException&)
553 {
554 // Different exceptions are thrown on different backends
555 }
556 CHECK(errMessages.size() > 0);
557 }
558
559 TEST_CASE("RuntimeBackendOptions")
560 {
561 using namespace armnn;
562
563 IRuntime::CreationOptions creationOptions;
564 auto& backendOptions = creationOptions.m_BackendOptions;
565
566
567 // Define Options on explicit construction
568 BackendOptions options1("FakeBackend1",
569 {
570 { "Option1", 1.3f },
571 { "Option2", true }
572 });
573
574 // Add an option after construction
575 options1.AddOption({ "Option3", "some_value" });
576
577 // Add the options to CreationOptions struct
578 backendOptions.push_back(options1);
579
580 // Add more Options via inplace explicit construction
581 backendOptions.emplace_back(BackendOptions{ "FakeBackend1",
582 {{ "Option4", 42 }}
583 });
584
585
586 // First group
587 CHECK(backendOptions[0].GetBackendId().Get() == "FakeBackend1");
588 CHECK(backendOptions[0].GetOption(0).GetName() == "Option1");
589 CHECK(backendOptions[0].GetOption(0).GetValue().IsFloat() == true);
590 CHECK(backendOptions[0].GetOption(0).GetValue().AsFloat() == 1.3f);
591
592 CHECK(backendOptions[0].GetOption(1).GetName() == "Option2");
593 CHECK(backendOptions[0].GetOption(1).GetValue().IsBool() == true);
594 CHECK(backendOptions[0].GetOption(1).GetValue().AsBool() == true);
595
596 CHECK(backendOptions[0].GetOption(2).GetName() == "Option3");
597 CHECK(backendOptions[0].GetOption(2).GetValue().IsString() == true);
598 CHECK(backendOptions[0].GetOption(2).GetValue().AsString() == "some_value");
599
600 // Second group
601 CHECK(backendOptions[1].GetBackendId().Get() == "FakeBackend1");
602 CHECK(backendOptions[1].GetOption(0).GetName() == "Option4");
603 CHECK(backendOptions[1].GetOption(0).GetValue().IsInt() == true);
604 CHECK(backendOptions[1].GetOption(0).GetValue().AsInt() == 42);
605 }
606
607 TEST_CASE("ProfilingDisable")
608 {
609 using namespace armnn;
610
611 LogLevelSwapper logLevelSwapper(arm::pipe::LogSeverity::Fatal);
612
613 // Create runtime in which the test will run
614 armnn::IRuntime::CreationOptions options;
615 armnn::RuntimeImpl runtime(options);
616
617 // build up the structure of the network
618 INetworkPtr net(INetwork::Create());
619
620 IConnectableLayer* input = net->AddInputLayer(0);
621
622 // This layer configuration isn't supported by CpuAcc, should fall back to CpuRef.
623 NormalizationDescriptor descriptor;
624 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
625
626 IConnectableLayer* output = net->AddOutputLayer(0);
627
628 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
629 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
630
631 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
632 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
633
634 // optimize the network
635 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
636 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec());
637
638 // Load it into the runtime. It should succeed.
639 armnn::NetworkId netId;
640 CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
641
642 armnn::ArmNNProfilingServiceInitialiser initialiser;
643 ProfilingServiceRuntimeHelper profilingServiceHelper(
644 arm::pipe::MAX_ARMNN_COUNTER, initialiser, GetProfilingService(&runtime));
645 BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
646 auto readableBuffer = bufferManager.GetReadableBuffer();
647
648 // Profiling is not enabled, the post-optimisation structure should not be created
649 CHECK(!readableBuffer);
650 }
651
652 TEST_CASE("ProfilingEnableCpuRef")
653 {
654 using namespace armnn;
655 using namespace arm::pipe;
656
657 // Create runtime in which the test will run
658 armnn::IRuntime::CreationOptions options;
659 options.m_ProfilingOptions.m_EnableProfiling = true;
660 options.m_ProfilingOptions.m_TimelineEnabled = true;
661
662 armnn::RuntimeImpl runtime(options);
663 GetProfilingService(&runtime).ResetExternalProfilingOptions(
664 ConvertExternalProfilingOptions(options.m_ProfilingOptions), false);
665
666 armnn::ArmNNProfilingServiceInitialiser initialiser;
667 ProfilingServiceRuntimeHelper profilingServiceHelper(
668 arm::pipe::MAX_ARMNN_COUNTER, initialiser, GetProfilingService(&runtime));
669 profilingServiceHelper.ForceTransitionToState(ProfilingState::NotConnected);
670 profilingServiceHelper.ForceTransitionToState(ProfilingState::WaitingForAck);
671 profilingServiceHelper.ForceTransitionToState(ProfilingState::Active);
672
673 // build up the structure of the network
674 INetworkPtr net(INetwork::Create());
675
676 IConnectableLayer* input = net->AddInputLayer(0, "input");
677
678 NormalizationDescriptor descriptor;
679 IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor, "normalization");
680
681 IConnectableLayer* output = net->AddOutputLayer(0, "output");
682
683 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
684 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
685
686 input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
687 normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
688
689 // optimize the network
690 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
691 IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec());
692
693 ProfilingGuid optNetGuid = optNet->GetGuid();
694
695 // Load it into the runtime. It should succeed.
696 armnn::NetworkId netId;
697 CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
698
699 BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
700 auto readableBuffer = bufferManager.GetReadableBuffer();
701
702 // Profiling is enabled, the post-optimisation structure should be created
703 CHECK(readableBuffer != nullptr);
704
705 unsigned int size = readableBuffer->GetSize();
706
707 const unsigned char* readableData = readableBuffer->GetReadableData();
708 CHECK(readableData != nullptr);
709
710 unsigned int offset = 0;
711
712 // Verify Header
713 VerifyTimelineHeaderBinary(readableData, offset, size - 8);
714
715 // Post-optimisation network
716 // Network entity
717 VerifyTimelineEntityBinaryPacketData(optNetGuid, readableData, offset);
718
719 // Entity - Type relationship
720 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
721 arm::pipe::EmptyOptional(),
722 optNetGuid,
723 LabelsAndEventClasses::NETWORK_GUID,
724 LabelsAndEventClasses::TYPE_GUID,
725 readableData,
726 offset);
727
728 // Network - START OF LIFE
729 ProfilingGuid networkSolEventGuid = VerifyTimelineEventBinaryPacket(arm::pipe::EmptyOptional(),
730 arm::pipe::EmptyOptional(),
731 arm::pipe::EmptyOptional(),
732 readableData,
733 offset);
734
735 // Network - START OF LIFE event relationship
736 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
737 arm::pipe::EmptyOptional(),
738 optNetGuid,
739 networkSolEventGuid,
740 LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
741 readableData,
742 offset);
743
744 // Process ID Label
745 int processID = arm::pipe::GetCurrentProcessId();
746 std::stringstream ss;
747 ss << processID;
748 std::string processIdLabel = ss.str();
749 VerifyTimelineLabelBinaryPacketData(arm::pipe::EmptyOptional(), processIdLabel, readableData, offset);
750
751 // Entity - Process ID relationship
752 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
753 arm::pipe::EmptyOptional(),
754 optNetGuid,
755 arm::pipe::EmptyOptional(),
756 LabelsAndEventClasses::PROCESS_ID_GUID,
757 readableData,
758 offset);
759
760 // Input layer
761 // Input layer entity
762 VerifyTimelineEntityBinaryPacketData(input->GetGuid(), readableData, offset);
763
764 // Name Entity
765 ProfilingGuid inputLabelGuid = VerifyTimelineLabelBinaryPacketData(
766 arm::pipe::EmptyOptional(), "input", readableData, offset);
767
768 // Entity - Name relationship
769 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
770 arm::pipe::EmptyOptional(),
771 input->GetGuid(),
772 inputLabelGuid,
773 LabelsAndEventClasses::NAME_GUID,
774 readableData,
775 offset);
776
777 // Entity - Type relationship
778 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
779 arm::pipe::EmptyOptional(),
780 input->GetGuid(),
781 LabelsAndEventClasses::LAYER_GUID,
782 LabelsAndEventClasses::TYPE_GUID,
783 readableData,
784 offset);
785
786 // Network - Input layer relationship
787 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
788 arm::pipe::EmptyOptional(),
789 optNetGuid,
790 input->GetGuid(),
791 LabelsAndEventClasses::CHILD_GUID,
792 readableData,
793 offset);
794
795 // Normalization layer
796 // Normalization layer entity
797 VerifyTimelineEntityBinaryPacketData(normalize->GetGuid(), readableData, offset);
798
799 // Name entity
800 ProfilingGuid normalizationLayerNameGuid = VerifyTimelineLabelBinaryPacketData(
801 arm::pipe::EmptyOptional(), "normalization", readableData, offset);
802
803 // Entity - Name relationship
804 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
805 arm::pipe::EmptyOptional(),
806 normalize->GetGuid(),
807 normalizationLayerNameGuid,
808 LabelsAndEventClasses::NAME_GUID,
809 readableData,
810 offset);
811
812 // Entity - Type relationship
813 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
814 arm::pipe::EmptyOptional(),
815 normalize->GetGuid(),
816 LabelsAndEventClasses::LAYER_GUID,
817 LabelsAndEventClasses::TYPE_GUID,
818 readableData,
819 offset);
820
821 // Network - Normalize layer relationship
822 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
823 arm::pipe::EmptyOptional(),
824 optNetGuid,
825 normalize->GetGuid(),
826 LabelsAndEventClasses::CHILD_GUID,
827 readableData,
828 offset);
829
830 // Input layer - Normalize layer relationship
831 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
832 arm::pipe::EmptyOptional(),
833 input->GetGuid(),
834 normalize->GetGuid(),
835 LabelsAndEventClasses::CONNECTION_GUID,
836 readableData,
837 offset);
838
839 // Normalization workload
840 // Normalization workload entity
841 ProfilingGuid normalizationWorkloadGuid = VerifyTimelineEntityBinaryPacketData(
842 arm::pipe::EmptyOptional(), readableData, offset);
843
844 // Entity - Type relationship
845 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
846 arm::pipe::EmptyOptional(),
847 normalizationWorkloadGuid,
848 LabelsAndEventClasses::WORKLOAD_GUID,
849 LabelsAndEventClasses::TYPE_GUID,
850 readableData,
851 offset);
852
853 // BackendId entity
854 ProfilingGuid cpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
855 arm::pipe::EmptyOptional(), "CpuRef", readableData, offset);
856
857 // Entity - BackendId relationship
858 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
859 arm::pipe::EmptyOptional(),
860 normalizationWorkloadGuid,
861 cpuRefLabelGuid,
862 LabelsAndEventClasses::BACKENDID_GUID,
863 readableData,
864 offset);
865
866 // Normalize layer - Normalize workload relationship
867 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
868 arm::pipe::EmptyOptional(),
869 normalize->GetGuid(),
870 normalizationWorkloadGuid,
871 LabelsAndEventClasses::CHILD_GUID,
872 readableData,
873 offset);
874
875 // Output layer
876 // Output layer entity
877 VerifyTimelineEntityBinaryPacketData(output->GetGuid(), readableData, offset);
878
879 // Name entity
880 ProfilingGuid outputLabelGuid = VerifyTimelineLabelBinaryPacketData(
881 arm::pipe::EmptyOptional(), "output", readableData, offset);
882
883 // Entity - Name relationship
884 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
885 arm::pipe::EmptyOptional(),
886 output->GetGuid(),
887 outputLabelGuid,
888 LabelsAndEventClasses::NAME_GUID,
889 readableData,
890 offset);
891
892 // Entity - Type relationship
893 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
894 arm::pipe::EmptyOptional(),
895 output->GetGuid(),
896 LabelsAndEventClasses::LAYER_GUID,
897 LabelsAndEventClasses::TYPE_GUID,
898 readableData,
899 offset);
900
901 // Network - Output layer relationship
902 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
903 arm::pipe::EmptyOptional(),
904 optNetGuid,
905 output->GetGuid(),
906 LabelsAndEventClasses::CHILD_GUID,
907 readableData,
908 offset);
909
910 // Normalize layer - Output layer relationship
911 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
912 arm::pipe::EmptyOptional(),
913 normalize->GetGuid(),
914 output->GetGuid(),
915 LabelsAndEventClasses::CONNECTION_GUID,
916 readableData,
917 offset);
918
919 bufferManager.MarkRead(readableBuffer);
920
921 // Creates structures for input & output.
922 std::vector<float> inputData(16);
923 std::vector<float> outputData(16);
924
925 TensorInfo inputTensorInfo = runtime.GetInputTensorInfo(netId, 0);
926 inputTensorInfo.SetConstant(true);
927 InputTensors inputTensors
928 {
929 {0, ConstTensor(inputTensorInfo, inputData.data())}
930 };
931 OutputTensors outputTensors
932 {
933 {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())}
934 };
935
936 // Does the inference.
937 runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
938
939 // Get readable buffer for input workload
940 auto inputReadableBuffer = bufferManager.GetReadableBuffer();
941 CHECK(inputReadableBuffer != nullptr);
942
943 // Get readable buffer for output workload
944 auto outputReadableBuffer = bufferManager.GetReadableBuffer();
945 CHECK(outputReadableBuffer != nullptr);
946
947 // Get readable buffer for inference timeline
948 auto inferenceReadableBuffer = bufferManager.GetReadableBuffer();
949 CHECK(inferenceReadableBuffer != nullptr);
950
951 // Validate input workload data
952 size = inputReadableBuffer->GetSize();
953 CHECK(size == 164);
954
955 readableData = inputReadableBuffer->GetReadableData();
956 CHECK(readableData != nullptr);
957
958 offset = 0;
959
960 // Verify Header
961 VerifyTimelineHeaderBinary(readableData, offset, 156);
962
963 // Input workload
964 // Input workload entity
965 ProfilingGuid inputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(
966 arm::pipe::EmptyOptional(), readableData, offset);
967
968 // Entity - Type relationship
969 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
970 arm::pipe::EmptyOptional(),
971 inputWorkloadGuid,
972 LabelsAndEventClasses::WORKLOAD_GUID,
973 LabelsAndEventClasses::TYPE_GUID,
974 readableData,
975 offset);
976
977 // BackendId entity
978 ProfilingGuid CpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
979 arm::pipe::EmptyOptional(), "CpuRef", readableData, offset);
980
981 // Entity - BackendId relationship
982 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
983 arm::pipe::EmptyOptional(),
984 inputWorkloadGuid,
985 CpuRefLabelGuid,
986 LabelsAndEventClasses::BACKENDID_GUID,
987 readableData,
988 offset);
989
990 // Input layer - Input workload relationship
991 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
992 arm::pipe::EmptyOptional(),
993 input->GetGuid(),
994 inputWorkloadGuid,
995 LabelsAndEventClasses::CHILD_GUID,
996 readableData,
997 offset);
998
999 bufferManager.MarkRead(inputReadableBuffer);
1000
1001 // Validate output workload data
1002 size = outputReadableBuffer->GetSize();
1003 CHECK(size == 164);
1004
1005 readableData = outputReadableBuffer->GetReadableData();
1006 CHECK(readableData != nullptr);
1007
1008 offset = 0;
1009
1010 // Verify Header
1011 VerifyTimelineHeaderBinary(readableData, offset, 156);
1012
1013 // Output workload
1014 // Output workload entity
1015 ProfilingGuid outputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(
1016 arm::pipe::EmptyOptional(), readableData, offset);
1017
1018 // Entity - Type relationship
1019 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1020 arm::pipe::EmptyOptional(),
1021 outputWorkloadGuid,
1022 LabelsAndEventClasses::WORKLOAD_GUID,
1023 LabelsAndEventClasses::TYPE_GUID,
1024 readableData,
1025 offset);
1026
1027 // BackendId entity
1028 VerifyTimelineLabelBinaryPacketData(arm::pipe::EmptyOptional(), "CpuRef", readableData, offset);
1029
1030 // Entity - BackendId relationship
1031 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1032 arm::pipe::EmptyOptional(),
1033 outputWorkloadGuid,
1034 CpuRefLabelGuid,
1035 LabelsAndEventClasses::BACKENDID_GUID,
1036 readableData,
1037 offset);
1038
1039 // Output layer - Output workload relationship
1040 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1041 arm::pipe::EmptyOptional(),
1042 output->GetGuid(),
1043 outputWorkloadGuid,
1044 LabelsAndEventClasses::CHILD_GUID,
1045 readableData,
1046 offset);
1047
1048 bufferManager.MarkRead(outputReadableBuffer);
1049
1050 // Validate inference data
1051 size = inferenceReadableBuffer->GetSize();
1052 CHECK(size == 976 + 8 * ThreadIdSize);
1053
1054 readableData = inferenceReadableBuffer->GetReadableData();
1055 CHECK(readableData != nullptr);
1056
1057 offset = 0;
1058
1059 // Verify Header
1060 VerifyTimelineHeaderBinary(readableData, offset, 968 + 8 * ThreadIdSize);
1061
1062 // Inference timeline trace
1063 // Inference entity
1064 ProfilingGuid inferenceGuid = VerifyTimelineEntityBinaryPacketData(
1065 arm::pipe::EmptyOptional(), readableData, offset);
1066
1067 // Entity - Type relationship
1068 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1069 arm::pipe::EmptyOptional(),
1070 inferenceGuid,
1071 LabelsAndEventClasses::INFERENCE_GUID,
1072 LabelsAndEventClasses::TYPE_GUID,
1073 readableData,
1074 offset);
1075
1076 // Network - Inference relationship
1077 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1078 arm::pipe::EmptyOptional(),
1079 optNetGuid,
1080 inferenceGuid,
1081 LabelsAndEventClasses::EXECUTION_OF_GUID,
1082 readableData,
1083 offset);
1084
1085 // Start Inference life
1086 // Event packet - timeline, threadId, eventGuid
1087 ProfilingGuid inferenceEventGuid = VerifyTimelineEventBinaryPacket(
1088 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1089
1090 // Inference - event relationship
1091 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1092 arm::pipe::EmptyOptional(),
1093 inferenceGuid,
1094 inferenceEventGuid,
1095 LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
1096 readableData,
1097 offset);
1098
1099 // Execution
1100 // Input workload execution
1101 // Input workload execution entity
1102 ProfilingGuid inputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
1103 arm::pipe::EmptyOptional(), readableData, offset);
1104
1105 // Entity - Type relationship
1106 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1107 arm::pipe::EmptyOptional(),
1108 inputWorkloadExecutionGuid,
1109 LabelsAndEventClasses::WORKLOAD_EXECUTION_GUID,
1110 LabelsAndEventClasses::TYPE_GUID,
1111 readableData,
1112 offset);
1113
1114 // Inference - Workload execution relationship
1115 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1116 arm::pipe::EmptyOptional(),
1117 inferenceGuid,
1118 inputWorkloadExecutionGuid,
1119 LabelsAndEventClasses::CHILD_GUID,
1120 readableData,
1121 offset);
1122
1123 // Workload - Workload execution relationship
1124 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1125 arm::pipe::EmptyOptional(),
1126 inputWorkloadGuid,
1127 inputWorkloadExecutionGuid,
1128 LabelsAndEventClasses::EXECUTION_OF_GUID,
1129 readableData,
1130 offset);
1131
1132 // Start Input workload execution life
1133 // Event packet - timeline, threadId, eventGuid
1134 ProfilingGuid inputWorkloadExecutionSOLEventId = VerifyTimelineEventBinaryPacket(
1135 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1136
1137 // Input workload execution - event relationship
1138 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1139 arm::pipe::EmptyOptional(),
1140 inputWorkloadExecutionGuid,
1141 inputWorkloadExecutionSOLEventId,
1142 LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
1143 readableData,
1144 offset);
1145
1146 // End of Input workload execution life
1147 // Event packet - timeline, threadId, eventGuid
1148 ProfilingGuid inputWorkloadExecutionEOLEventId = VerifyTimelineEventBinaryPacket(
1149 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1150
1151 // Input workload execution - event relationship
1152 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1153 arm::pipe::EmptyOptional(),
1154 inputWorkloadExecutionGuid,
1155 inputWorkloadExecutionEOLEventId,
1156 LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
1157 readableData,
1158 offset);
1159
1160 // Normalize workload execution
1161 // Normalize workload execution entity
1162 ProfilingGuid normalizeWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
1163 arm::pipe::EmptyOptional(), readableData, offset);
1164
1165 // Entity - Type relationship
1166 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1167 arm::pipe::EmptyOptional(),
1168 normalizeWorkloadExecutionGuid,
1169 LabelsAndEventClasses::WORKLOAD_EXECUTION_GUID,
1170 LabelsAndEventClasses::TYPE_GUID,
1171 readableData,
1172 offset);
1173
1174 // Inference - Workload execution relationship
1175 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1176 arm::pipe::EmptyOptional(),
1177 inferenceGuid,
1178 normalizeWorkloadExecutionGuid,
1179 LabelsAndEventClasses::CHILD_GUID,
1180 readableData,
1181 offset);
1182
1183 // Workload - Workload execution relationship
1184 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1185 arm::pipe::EmptyOptional(),
1186 normalizationWorkloadGuid,
1187 normalizeWorkloadExecutionGuid,
1188 LabelsAndEventClasses::EXECUTION_OF_GUID,
1189 readableData,
1190 offset);
1191
1192 // Start Normalize workload execution life
1193 // Event packet - timeline, threadId, eventGuid
1194 ProfilingGuid normalizationWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
1195 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1196
1197 // Normalize workload execution - event relationship
1198 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1199 arm::pipe::EmptyOptional(),
1200 normalizeWorkloadExecutionGuid,
1201 normalizationWorkloadExecutionSOLEventGuid,
1202 LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
1203 readableData,
1204 offset);
1205
1206 // End of Normalize workload execution life
1207 // Event packet - timeline, threadId, eventGuid
1208 ProfilingGuid normalizationWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
1209 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1210
1211 // Normalize workload execution - event relationship
1212 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1213 arm::pipe::EmptyOptional(),
1214 normalizeWorkloadExecutionGuid,
1215 normalizationWorkloadExecutionEOLEventGuid,
1216 LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
1217 readableData,
1218 offset);
1219
1220 // Output workload execution
1221 // Output workload execution entity
1222 ProfilingGuid outputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
1223 arm::pipe::EmptyOptional(), readableData, offset);
1224
1225 // Entity - Type relationship
1226 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
1227 arm::pipe::EmptyOptional(),
1228 outputWorkloadExecutionGuid,
1229 LabelsAndEventClasses::WORKLOAD_EXECUTION_GUID,
1230 LabelsAndEventClasses::TYPE_GUID,
1231 readableData,
1232 offset);
1233
1234 // Inference - Workload execution relationship
1235 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1236 arm::pipe::EmptyOptional(),
1237 inferenceGuid,
1238 outputWorkloadExecutionGuid,
1239 LabelsAndEventClasses::CHILD_GUID,
1240 readableData,
1241 offset);
1242
1243 // Workload - Workload execution relationship
1244 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
1245 arm::pipe::EmptyOptional(),
1246 outputWorkloadGuid,
1247 outputWorkloadExecutionGuid,
1248 LabelsAndEventClasses::EXECUTION_OF_GUID,
1249 readableData,
1250 offset);
1251
1252 // Start Output workload execution life
1253 // Event packet - timeline, threadId, eventGuid
1254 ProfilingGuid outputWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
1255 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1256
1257 // Output workload execution - event relationship
1258 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1259 arm::pipe::EmptyOptional(),
1260 outputWorkloadExecutionGuid,
1261 outputWorkloadExecutionSOLEventGuid,
1262 LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
1263 readableData,
1264 offset);
1265
1266 // End of Normalize workload execution life
1267 // Event packet - timeline, threadId, eventGuid
1268 ProfilingGuid outputWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
1269 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1270
1271 // Output workload execution - event relationship
1272 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1273 arm::pipe::EmptyOptional(),
1274 outputWorkloadExecutionGuid,
1275 outputWorkloadExecutionEOLEventGuid,
1276 LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
1277 readableData,
1278 offset);
1279
1280 // End of Inference life
1281 // Event packet - timeline, threadId, eventGuid
1282 ProfilingGuid inferenceEOLEventGuid = VerifyTimelineEventBinaryPacket(
1283 arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), arm::pipe::EmptyOptional(), readableData, offset);
1284
1285 // Inference - event relationship
1286 VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
1287 arm::pipe::EmptyOptional(),
1288 inferenceGuid,
1289 inferenceEOLEventGuid,
1290 LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
1291 readableData,
1292 offset);
1293
1294 bufferManager.MarkRead(inferenceReadableBuffer);
1295 }
1296
TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
{
    // Delegates to the shared test helper, which checks the timeline packets emitted by the
    // profiling service for a post-optimisation network structure on the CpuRef backend.
    VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
}
1301
1302 TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
1303 {
1304 // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
1305 // that network but specify that the import memory source is Malloc.
1306
1307 armnn::IRuntime::CreationOptions options;
1308 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
1309 armnn::NetworkId networkId = 1;
1310 armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
1311
1312 auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
1313 auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
1314 ARMNN_NO_DEPRECATE_WARN_BEGIN
1315 auto addLayer = testNetwork->AddAdditionLayer("add layer");
1316 ARMNN_NO_DEPRECATE_WARN_END
1317 auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
1318
1319 TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
1320
1321 inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
1322 inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1323
1324 inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
1325 inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1326
1327 addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
1328 addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1329
1330 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
1331
1332 OptimizerOptionsOpaque optimizedOptions;
1333 // Hard set import and export to off.
1334 optimizedOptions.SetImportEnabled(false);
1335 optimizedOptions.SetExportEnabled(false);
1336 IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
1337 CHECK(optNet);
1338
1339 std::string er;
1340 // Load the network passing an import memory source.
1341 armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
1342 // There should be an InvalidArgumentException.
1343 runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
1344 CHECK(er.find("However, it was disabled when this network was optimized") != -1);
1345 }
1346
1347 TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
1348 {
1349 // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
1350 // that network but specify that the export memory source as Malloc.
1351
1352 armnn::IRuntime::CreationOptions options;
1353 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
1354 armnn::NetworkId networkId = 1;
1355 armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
1356
1357 auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
1358 auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
1359 ARMNN_NO_DEPRECATE_WARN_BEGIN
1360 auto addLayer = testNetwork->AddAdditionLayer("add layer");
1361 ARMNN_NO_DEPRECATE_WARN_END
1362 auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
1363
1364 TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
1365
1366 inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
1367 inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1368
1369 inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
1370 inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1371
1372 addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
1373 addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1374
1375 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
1376
1377 OptimizerOptionsOpaque optimizedOptions;
1378 // Hard set import and export to off.
1379 optimizedOptions.SetImportEnabled(false);
1380 optimizedOptions.SetExportEnabled(false);
1381 IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
1382 CHECK(optNet);
1383
1384 std::string er;
1385 // Load the network passing an import memory source.
1386 armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
1387 // There should be an InvalidArgumentException.
1388 runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
1389 CHECK(er.find("However, it was disabled when this network was optimized") != -1);
1390 }
1391
1392 TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
1393 {
1394 // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
1395 // that network but specify that the import memory source is Undefined.
1396
1397 armnn::IRuntime::CreationOptions options;
1398 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
1399 armnn::NetworkId networkId = 1;
1400 armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
1401
1402 auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
1403 auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
1404 ARMNN_NO_DEPRECATE_WARN_BEGIN
1405 auto addLayer = testNetwork->AddAdditionLayer("add layer");
1406 ARMNN_NO_DEPRECATE_WARN_END
1407 auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
1408
1409 TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
1410
1411 inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
1412 inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1413
1414 inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
1415 inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1416
1417 addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
1418 addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1419
1420 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
1421
1422 OptimizerOptionsOpaque optimizedOptions;
1423 // Hard set import and export to off.
1424 optimizedOptions.SetImportEnabled(true);
1425 optimizedOptions.SetExportEnabled(false);
1426 IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
1427 CHECK(optNet);
1428
1429 std::string er;
1430 // Load the network passing an import memory source.
1431 armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
1432 // There should be an InvalidArgumentException.
1433 runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
1434 CHECK(er.find("However, it was enabled when this network was optimized") != -1);
1435 }
1436
1437 TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
1438 {
1439 // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
1440 // that network but specify that the export memory source is Undefined.
1441
1442 armnn::IRuntime::CreationOptions options;
1443 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
1444 armnn::NetworkId networkId = 1;
1445 armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
1446
1447 auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
1448 auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
1449 ARMNN_NO_DEPRECATE_WARN_BEGIN
1450 auto addLayer = testNetwork->AddAdditionLayer("add layer");
1451 ARMNN_NO_DEPRECATE_WARN_END
1452 auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
1453
1454 TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
1455
1456 inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
1457 inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1458
1459 inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
1460 inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1461
1462 addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
1463 addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1464
1465 std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
1466
1467 OptimizerOptionsOpaque optimizedOptions;
1468 // Hard set import and export to off.
1469 optimizedOptions.SetImportEnabled(false);
1470 optimizedOptions.SetExportEnabled(true);
1471 IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
1472 CHECK(optNet);
1473
1474 std::string er;
1475 // Load the network passing an import memory source.
1476 armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
1477 // There should be an InvalidArgumentException.
1478 runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
1479 CHECK(er.find("However, it was enabled when this network was optimized") != -1);
1480 }
1481
1482 TEST_CASE("SyncExecutePreImportInputsHappyPath")
1483 {
1484 // In this test case we'll mix "Pre Import" and pass by reference tensors as input.
1485 //
1486 // * Create a small network that takes two inputs.
1487 // * Optimize it specifying that the inputs and outputs will not be imported or exported.
1488 // * Create some malloc input and output tensors.
1489 // * Use ImportInputs to import only one of the two inputs.
1490 // * Call EnqueueWorkload passing one input tensor and one reference to a pre-imported tensor.
1491
1492 armnn::IRuntime::CreationOptions options;
1493 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
1494 armnn::NetworkId networkId = 1;
1495 armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
1496
1497 auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
1498 auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
1499 ARMNN_NO_DEPRECATE_WARN_BEGIN
1500 auto addLayer = testNetwork->AddAdditionLayer("add layer");
1501 ARMNN_NO_DEPRECATE_WARN_END
1502 auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
1503
1504 TensorInfo tensorInfo{ { 4 }, armnn::DataType::Signed32 };
1505
1506 inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
1507 inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1508
1509 inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
1510 inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1511
1512 addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
1513 addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1514
1515 std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
1516
1517 std::string er;
1518 armnn::INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Undefined);
1519 runtime->LoadNetwork(networkId, Optimize(*testNetwork, backends, runtime->GetDeviceSpec()), er, networkProperties);
1520
1521 std::vector<int> inputData1(4, 10);
1522 std::vector<int> inputData2(4, 20);
1523 std::vector<int> output(4);
1524
1525 ConstTensor inputTensor1({ { 4 }, armnn::DataType::Signed32, 0.0f, 0, true }, inputData1.data());
1526 ConstTensor inputTensor2({ { 4 }, armnn::DataType::Signed32, 0.0f, 0, true }, inputData2.data());
1527 Tensor outputTensor({ { 4 }, armnn::DataType::Signed32 }, output.data());
1528
1529 // An extra check here: the number of inputs provided to ImportInputs should not exceed the number of inputs
1530 // to the network.
1531 CHECK_THROWS_AS(runtime->ImportInputs(networkId, { { 0, inputTensor1 }, { 0, inputTensor1 }, { 0, inputTensor1 } },
1532 MemorySource::Malloc),
1533 armnn::MemoryImportException);
1534
1535 // Pre Import one of the two input tensors.
1536 std::vector<ImportedOutputId> importedInputVec =
1537 runtime->ImportInputs(networkId, { { 0, inputTensor1 } }, MemorySource::Malloc);
1538 CHECK(importedInputVec.size() == 1);
1539 CHECK(importedInputVec[0] == 0);
1540
1541 // We've pre-imported tensor 1 and we'll pass tensor 2 by reference.
1542 InputTensors inputTensors{ { 1, inputTensor2 } };
1543 OutputTensors outputTensors{ { 2, outputTensor } };
1544
1545 // Do the inference
1546 auto ret = runtime->EnqueueWorkload(networkId, inputTensors, outputTensors, importedInputVec,
1547 std::vector<ImportedOutputId>());
1548 REQUIRE(ret == Status::Success);
1549 }
1550 }
1551