//
// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#if defined(ARMCOMPUTECL_ENABLED)
#include <cl/ClBackend.hpp>
#endif
#if defined(ARMCOMPUTENEON_ENABLED)
#include <neon/NeonBackend.hpp>
#endif
#include <reference/RefBackend.hpp>
#include <armnn/BackendHelper.hpp>

#include <Network.hpp>

#include <doctest/doctest.h>

#include <string>
#include <vector>

using namespace armnn;

#if defined(ARMCOMPUTENEON_ENABLED) && defined(ARMCOMPUTECL_ENABLED)

TEST_SUITE("BackendsCompatibility")
{
// Partially disabled Test Suite
TEST_CASE("Neon_Cl_DirectCompatibility_Test")
{
    // Builds a five-layer graph whose layers alternate between the Neon and Cl
    // backends, then checks that SelectTensorHandleStrategy succeeds with no
    // errors or warnings, and that compatibility layers can be inserted.
    auto neonBackend = std::make_unique<NeonBackend>();
    auto clBackend = std::make_unique<ClBackend>();

    // Register the tensor-handle factories of both backends in one registry so
    // the strategy selection can consider cross-backend factories.
    TensorHandleFactoryRegistry registry;
    neonBackend->RegisterTensorHandleFactories(registry);
    clBackend->RegisterTensorHandleFactories(registry);

    const BackendId& neonBackendId = neonBackend->GetId();
    const BackendId& clBackendId = clBackend->GetId();

    BackendsMap backends;
    backends[neonBackendId] = std::move(neonBackend);
    backends[clBackendId] = std::move(clBackend);

    armnn::Graph graph;

    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
    inputLayer->SetBackendId(neonBackendId);

    // Four softmax layers assigned alternately to Cl and Neon, so every
    // connection in the chain crosses a backend boundary.
    armnn::SoftmaxDescriptor smDesc;
    armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
    softmaxLayer1->SetBackendId(clBackendId);

    armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
    softmaxLayer2->SetBackendId(neonBackendId);

    armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
    softmaxLayer3->SetBackendId(clBackendId);

    armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
    softmaxLayer4->SetBackendId(neonBackendId);

    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
    outputLayer->SetBackendId(clBackendId);

    // Chain: input -> softmax1 -> softmax2 -> softmax3 -> softmax4 -> output.
    inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
    softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
    softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
    softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
    softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    graph.TopologicalSort();

    std::vector<std::string> errors;
    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);

    CHECK(result.m_Error == false);
    CHECK(result.m_Warning == false);

    // OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
    // OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
    // OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
    // OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
    // OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);

    // // Check that the correct factory was selected
    // CHECK(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
    // CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
    // CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
    // CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
    // CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");

    // // Check that the correct strategy was selected
    // CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
    // CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
    // CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
    // CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
    // CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));

    graph.AddCompatibilityLayers(backends, registry);

    // Test for copy layers.
    // Fixed: the capture list had been garbled to "[©Count]"; copyCount must
    // be captured by reference (as importCount is below) so the increments
    // inside the lambda are visible here.
    int copyCount = 0;
    graph.ForEachLayer([&copyCount](Layer* layer)
    {
        if (layer->GetType() == LayerType::MemCopy)
        {
            copyCount++;
        }
    });
    // CHECK(copyCount == 0);

    // Test for import layers
    int importCount = 0;
    graph.ForEachLayer([&importCount](Layer* layer)
    {
        if (layer->GetType() == LayerType::MemImport)
        {
            importCount++;
        }
    });
    // CHECK(importCount == 0);
}

}
#endif
128
129 TEST_SUITE("BackendCapability")
130 {
131
132 namespace
133 {
134 #if defined(ARMNNREF_ENABLED) || defined(ARMCOMPUTENEON_ENABLED) || defined(ARMCOMPUTECL_ENABLED)
CapabilityTestHelper(BackendCapabilities & capabilities,std::vector<std::pair<std::string,bool>> capabilityVector)135 void CapabilityTestHelper(BackendCapabilities &capabilities,
136 std::vector<std::pair<std::string, bool>> capabilityVector)
137 {
138 for (auto pair : capabilityVector)
139 {
140 CHECK_MESSAGE(armnn::HasCapability(pair.first, capabilities),
141 pair.first << " capability was not been found");
142 CHECK_MESSAGE(armnn::HasCapability(BackendOptions::BackendOption{pair.first, pair.second}, capabilities),
143 pair.first << " capability set incorrectly");
144 }
145 }
146 #endif
147
148 #if defined(ARMNNREF_ENABLED)
149
150 TEST_CASE("Ref_Backends_Unknown_Capability_Test")
151 {
152 auto refBackend = std::make_unique<RefBackend>();
153 auto refCapabilities = refBackend->GetCapabilities();
154
155 armnn::BackendOptions::BackendOption AsyncExecutionFalse{"AsyncExecution", false};
156 CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
157
158 armnn::BackendOptions::BackendOption AsyncExecutionInt{"AsyncExecution", 50};
159 CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
160
161 armnn::BackendOptions::BackendOption AsyncExecutionFloat{"AsyncExecution", 0.0f};
162 CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
163
164 armnn::BackendOptions::BackendOption AsyncExecutionString{"AsyncExecution", "true"};
165 CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
166
167 CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
168 armnn::BackendOptions::BackendOption unknownCapability{"Telekinesis", true};
169 CHECK(!armnn::HasCapability(unknownCapability, refCapabilities));
170 }
171
172 TEST_CASE ("Ref_Backends_Capability_Test")
173 {
174 auto refBackend = std::make_unique<RefBackend>();
175 auto refCapabilities = refBackend->GetCapabilities();
176
177 CapabilityTestHelper(refCapabilities,
178 {{"NonConstWeights", true},
179 {"AsyncExecution", true},
180 {"ProtectedContentAllocation", false},
181 {"ConstantTensorsAsInputs", true},
182 {"PreImportIOTensors", true},
183 {"ExternallyManagedMemory", true},
184 {"MultiAxisPacking", false}});
185 }
186
187 #endif
188
189 #if defined(ARMCOMPUTENEON_ENABLED)
190
191 TEST_CASE ("Neon_Backends_Capability_Test")
192 {
193 auto neonBackend = std::make_unique<NeonBackend>();
194 auto neonCapabilities = neonBackend->GetCapabilities();
195
196 CapabilityTestHelper(neonCapabilities,
197 {{"NonConstWeights", true},
198 {"AsyncExecution", false},
199 {"ProtectedContentAllocation", false},
200 {"ConstantTensorsAsInputs", true},
201 {"PreImportIOTensors", false},
202 {"ExternallyManagedMemory", true},
203 {"MultiAxisPacking", false}});
204 }
205
206 #endif
207
208 #if defined(ARMCOMPUTECL_ENABLED)
209
210 TEST_CASE ("Cl_Backends_Capability_Test")
211 {
212 auto clBackend = std::make_unique<ClBackend>();
213 auto clCapabilities = clBackend->GetCapabilities();
214
215 CapabilityTestHelper(clCapabilities,
216 {{"NonConstWeights", false},
217 {"AsyncExecution", false},
218 {"ProtectedContentAllocation", true},
219 {"ConstantTensorsAsInputs", true},
220 {"PreImportIOTensors", false},
221 {"ExternallyManagedMemory", true},
222 {"MultiAxisPacking", false}});
223 }
224
225 #endif
226 }
227 }
228