/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization primitive tests with multi queue
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationOperationMultiQueueTests.hpp"
#include "vktCustomInstancesDevices.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "vkCmdUtil.hpp"
#include "vkSafetyCriticalUtil.hpp"
#include "deRandom.hpp"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "tcuTestLog.hpp"
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
#include "vktSynchronizationOperationResources.hpp"
#include "vktTestGroupUtil.hpp"
#include "tcuCommandLine.hpp"

#include <set>
#include <unordered_map>

namespace vkt
{

namespace synchronization
{

namespace
{
using namespace vk;
using de::MovePtr;
using de::SharedPtr;
using de::UniquePtr;

enum QueueType
{
    QUEUETYPE_WRITE,
    QUEUETYPE_READ
};

struct QueuePair
{
    QueuePair(const uint32_t familyWrite, const uint32_t familyRead, const VkQueue write, const VkQueue read)
        : familyIndexWrite(familyWrite)
        , familyIndexRead(familyRead)
        , queueWrite(write)
        , queueRead(read)
    {
    }

    uint32_t familyIndexWrite;
    uint32_t familyIndexRead;
    VkQueue queueWrite;
    VkQueue queueRead;
};

struct Queue
{
    Queue(const uint32_t familyOp, const VkQueue queueOp) : family(familyOp), queue(queueOp)
    {
    }

    uint32_t family;
    VkQueue queue;
};

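// Returns true when 'availableFlags' satisfies 'neededFlags'. Per the Vulkan spec, any queue
// that supports graphics or compute operations implicitly supports transfer operations, so
// VK_QUEUE_TRANSFER_BIT is folded into the available flags before the comparison.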
bool checkQueueFlags(VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
{
    if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
        availableFlags |= VK_QUEUE_TRANSFER_BIT;

    return (availableFlags & neededFlags) != 0;
}

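// Lazily-created, shared logical device exposing every queue family of the physical device,
// with up to two queues per family. Instances are cached per (synchronization type, timeline
// semaphore) combination so that all tests in a run reuse the same device.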
class MultiQueues
{
    struct QueueData
    {
        VkQueueFlags flags;
        std::vector<VkQueue> queue;
    };

    MultiQueues(Context &context, SynchronizationType type, bool timelineSemaphore)
#ifdef CTS_USES_VULKANSC
        : m_instance(createCustomInstanceFromContext(context))
        ,
#else
        :
#endif // CTS_USES_VULKANSC
        m_queueCount(0)
    {
#ifdef CTS_USES_VULKANSC
        const InstanceInterface &instanceDriver = m_instance.getDriver();
        const VkPhysicalDevice physicalDevice =
            chooseDevice(instanceDriver, m_instance, context.getTestContext().getCommandLine());
        const VkInstance instance = m_instance;
#else
        const InstanceInterface &instanceDriver = context.getInstanceInterface();
        const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
        const VkInstance instance = context.getInstance();
#endif // CTS_USES_VULKANSC
        const std::vector<VkQueueFamilyProperties> queueFamilyProperties =
            getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

        for (uint32_t queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
        {
            addQueueIndex(queuePropertiesNdx, std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
                          queueFamilyProperties[queuePropertiesNdx].queueFlags);
        }

        std::vector<VkDeviceQueueCreateInfo> queueInfos;
        const float queuePriorities[2] = {1.0f, 1.0f}; // get max 2 queues from one family

        for (std::map<uint32_t, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const VkDeviceQueueCreateInfo queueInfo = {
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     //VkStructureType sType;
                DE_NULL,                                        //const void* pNext;
                (VkDeviceQueueCreateFlags)0u,                   //VkDeviceQueueCreateFlags flags;
                it->first,                                      //uint32_t queueFamilyIndex;
                static_cast<uint32_t>(it->second.queue.size()), //uint32_t queueCount;
                &queuePriorities[0]                             //const float* pQueuePriorities;
            };
            queueInfos.push_back(queueInfo);
        }

        {
            VkPhysicalDeviceFeatures2 createPhysicalFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL,
                                                            context.getDeviceFeatures()};
            VkPhysicalDeviceTimelineSemaphoreFeatures timelineSemaphoreFeatures{
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, true};
            VkPhysicalDeviceSynchronization2FeaturesKHR synchronization2Features{
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, true};
            void **nextPtr = &createPhysicalFeature.pNext;

            std::vector<const char *> deviceExtensions;
            if (timelineSemaphore)
            {
                if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_timeline_semaphore"))
                    deviceExtensions.push_back("VK_KHR_timeline_semaphore");
                addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
            }
            if (type == SynchronizationType::SYNCHRONIZATION2)
            {
                deviceExtensions.push_back("VK_KHR_synchronization2");
                addToChainVulkanStructure(&nextPtr, synchronization2Features);
            }

            void *pNext = &createPhysicalFeature;
#ifdef CTS_USES_VULKANSC
            VkDeviceObjectReservationCreateInfo memReservationInfo =
                context.getTestContext().getCommandLine().isSubProcess() ?
                    context.getResourceInterface()->getStatMax() :
                    resetDeviceObjectReservationCreateInfo();
            memReservationInfo.pNext = pNext;
            pNext = &memReservationInfo;

            VkPhysicalDeviceVulkanSC10Features sc10Features = createDefaultSC10Features();
            sc10Features.pNext = pNext;
            pNext = &sc10Features;

            VkPipelineCacheCreateInfo pcCI;
            std::vector<VkPipelinePoolSize> poolSizes;
            if (context.getTestContext().getCommandLine().isSubProcess())
            {
                if (context.getResourceInterface()->getCacheDataSize() > 0)
                {
                    pcCI = {
                        VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                        DE_NULL,                                      // const void* pNext;
                        VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
                            VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT, // VkPipelineCacheCreateFlags flags;
                        context.getResourceInterface()->getCacheDataSize(),       // uintptr_t initialDataSize;
                        context.getResourceInterface()->getCacheData()            // const void* pInitialData;
                    };
                    memReservationInfo.pipelineCacheCreateInfoCount = 1;
                    memReservationInfo.pPipelineCacheCreateInfos = &pcCI;
                }

                poolSizes = context.getResourceInterface()->getPipelinePoolSizes();
                if (!poolSizes.empty())
                {
                    memReservationInfo.pipelinePoolSizeCount = uint32_t(poolSizes.size());
                    memReservationInfo.pPipelinePoolSizes = poolSizes.data();
                }
            }
#endif // CTS_USES_VULKANSC

            const VkDeviceCreateInfo deviceInfo = {
                VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                      //VkStructureType sType;
                pNext,                                                     //const void* pNext;
                0u,                                                        //VkDeviceCreateFlags flags;
                static_cast<uint32_t>(queueInfos.size()),                  //uint32_t queueCreateInfoCount;
                &queueInfos[0],                                            //const VkDeviceQueueCreateInfo* pQueueCreateInfos;
                0u,                                                        //uint32_t enabledLayerCount;
                DE_NULL,                                                   //const char* const* ppEnabledLayerNames;
                static_cast<uint32_t>(deviceExtensions.size()),            //uint32_t enabledExtensionCount;
                deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0], //const char* const* ppEnabledExtensionNames;
                DE_NULL                                                    //const VkPhysicalDeviceFeatures* pEnabledFeatures;
            };

            m_logicalDevice = createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(),
                                                 context.getPlatformInterface(), instance, instanceDriver,
                                                 physicalDevice, &deviceInfo);
#ifndef CTS_USES_VULKANSC
            m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance,
                                                                        *m_logicalDevice, context.getUsedApiVersion(),
                                                                        context.getTestContext().getCommandLine()));
#else
            m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
                new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice,
                                   context.getTestContext().getCommandLine(), context.getResourceInterface(),
                                   context.getDeviceVulkanSC10Properties(), context.getDeviceProperties(),
                                   context.getUsedApiVersion()),
                vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
            m_allocator = MovePtr<Allocator>(new SimpleAllocator(
                *m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instanceDriver, physicalDevice)));

            for (std::map<uint32_t, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
                for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
                    m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
        }
    }

    void addQueueIndex(const uint32_t queueFamilyIndex, const uint32_t count, const VkQueueFlags flags)
    {
        QueueData dataToPush;
        dataToPush.flags = flags;
        dataToPush.queue.resize(count);
        m_queues[queueFamilyIndex] = dataToPush;

        m_queueCount++;
    }

public:
    ~MultiQueues()
    {
    }

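    // Builds all write/read queue pairings that satisfy the requested queue flags. For each
    // combination of a write-capable family and a read-capable family, the first pair of
    // distinct queues found is added, so every family combination contributes at most one pair.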
    std::vector<QueuePair> getQueuesPairs(const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead) const
    {
        std::map<uint32_t, QueueData> queuesWrite;
        std::map<uint32_t, QueueData> queuesRead;
        std::vector<QueuePair> queuesPairs;

        for (std::map<uint32_t, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const bool writeQueue = checkQueueFlags(it->second.flags, flagsWrite);
            const bool readQueue = checkQueueFlags(it->second.flags, flagsRead);

            if (!(writeQueue || readQueue))
                continue;

            if (writeQueue && readQueue)
            {
                queuesWrite[it->first] = it->second;
                queuesRead[it->first] = it->second;
            }
            else if (writeQueue)
                queuesWrite[it->first] = it->second;
            else if (readQueue)
                queuesRead[it->first] = it->second;
        }

        for (std::map<uint32_t, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
            for (std::map<uint32_t, QueueData>::iterator read = queuesRead.begin(); read != queuesRead.end(); ++read)
            {
                const int writeSize = static_cast<int>(write->second.queue.size());
                const int readSize = static_cast<int>(read->second.queue.size());

                for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
                    for (int readNdx = 0; readNdx < readSize; ++readNdx)
                    {
                        if (write->second.queue[writeNdx] != read->second.queue[readNdx])
                        {
                            queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx],
                                                            read->second.queue[readNdx]));
                            writeNdx = readNdx = std::max(writeSize, readSize); // exit from the loops
                        }
                    }
            }

        if (queuesPairs.empty())
            TCU_THROW(NotSupportedError, "Queue not found");

        return queuesPairs;
    }

    Queue getDefaultQueue(const VkQueueFlags flagsOp) const
    {
        for (std::map<uint32_t, QueueData>::const_iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            if (checkQueueFlags(it->second.flags, flagsOp))
                return Queue(it->first, it->second.queue[0]);
        }

        TCU_THROW(NotSupportedError, "Queue not found");
    }

    Queue getQueue(const uint32_t familyIdx, const uint32_t queueIdx)
    {
        return Queue(familyIdx, m_queues[familyIdx].queue[queueIdx]);
    }

    VkQueueFlags getQueueFamilyFlags(const uint32_t familyIdx)
    {
        return m_queues[familyIdx].flags;
    }

    uint32_t queueFamilyCount(const uint32_t familyIdx)
    {
        return (uint32_t)m_queues[familyIdx].queue.size();
    }

    uint32_t familyCount(void) const
    {
        return (uint32_t)m_queues.size();
    }

    uint32_t totalQueueCount(void)
    {
        uint32_t count = 0;

        for (uint32_t familyIdx = 0; familyIdx < familyCount(); familyIdx++)
        {
            count += queueFamilyCount(familyIdx);
        }

        return count;
    }

    VkDevice getDevice(void) const
    {
        return *m_logicalDevice;
    }

    const DeviceInterface &getDeviceInterface(void) const
    {
        return *m_deviceDriver;
    }

    Allocator &getAllocator(void)
    {
        return *m_allocator;
    }

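    // Returns the cached MultiQueues instance for this configuration, creating it on first
    // use. The cache key packs the synchronization type into the upper bits and the
    // timeline-semaphore flag into bit 0.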
    static SharedPtr<MultiQueues> getInstance(Context &context, SynchronizationType type, bool timelineSemaphore)
    {
        uint32_t index = ((uint32_t)type << 1) | ((uint32_t)timelineSemaphore);
        if (!m_multiQueues[index])
            m_multiQueues[index] = SharedPtr<MultiQueues>(new MultiQueues(context, type, timelineSemaphore));

        return m_multiQueues[index];
    }
    static void destroy()
    {
        m_multiQueues.clear();
    }

private:
#ifdef CTS_USES_VULKANSC
    CustomInstance m_instance;
#endif // CTS_USES_VULKANSC
    Move<VkDevice> m_logicalDevice;
#ifndef CTS_USES_VULKANSC
    de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
    de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC
    MovePtr<Allocator> m_allocator;
    std::map<uint32_t, QueueData> m_queues;
    uint32_t m_queueCount;

    static std::unordered_map<uint32_t, SharedPtr<MultiQueues>> m_multiQueues;
};
std::unordered_map<uint32_t, SharedPtr<MultiQueues>> MultiQueues::m_multiQueues;

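// Records the memory barrier needed between the write and the read operation. When the two
// queues belong to different families and the resource uses VK_SHARING_MODE_EXCLUSIVE, a queue
// family ownership transfer is required: the same barrier must be recorded twice, once on the
// releasing (write) queue and once on the acquiring (read) queue with secondQueue set to true.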
void createBarrierMultiQueue(SynchronizationWrapperPtr synchronizationWrapper, const VkCommandBuffer &cmdBuffer,
                             const SyncInfo &writeSync, const SyncInfo &readSync, const Resource &resource,
                             const uint32_t writeFamily, const uint32_t readFamily, const VkSharingMode sharingMode,
                             const bool secondQueue = false)
{
    if (resource.getType() == RESOURCE_TYPE_IMAGE)
    {
        VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
            secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
            secondQueue ? 0u : writeSync.accessMask,
            !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
            !secondQueue ? 0u : readSync.accessMask, writeSync.imageLayout, readSync.imageLayout,
            resource.getImage().handle, resource.getImage().subresourceRange);

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            imageMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
            imageMemoryBarrier2.dstQueueFamilyIndex = readFamily;

            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
        }
        else if (!secondQueue)
        {
            VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
            synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
        }
    }
    else
    {
        VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
            secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
            secondQueue ? 0u : writeSync.accessMask,
            !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask,
            !secondQueue ? 0u : readSync.accessMask, resource.getBuffer().handle, resource.getBuffer().offset,
            resource.getBuffer().size);

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            bufferMemoryBarrier2.srcQueueFamilyIndex = writeFamily;
            bufferMemoryBarrier2.dstQueueFamilyIndex = readFamily;
        }

        VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
        synchronizationWrapper->cmdPipelineBarrier(cmdBuffer, &dependencyInfo);
    }
}

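// Common state for all multi-queue test instances: the shared MultiQueues device, an
// OperationContext bound to it, and the write/read operation factories under test.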
class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                     const OperationSupport &writeOp, const OperationSupport &readOp,
                     PipelineCacheData &pipelineCacheData, bool timelineSemaphore)
        : TestInstance(context)
        , m_type(type)
        , m_queues(MultiQueues::getInstance(context, type, timelineSemaphore))
        , m_opContext(new OperationContext(context, type, m_queues->getDeviceInterface(), m_queues->getDevice(),
                                           m_queues->getAllocator(), pipelineCacheData))
        , m_resourceDesc(resourceDesc)
        , m_writeOp(writeOp)
        , m_readOp(readOp)
    {
    }

protected:
    const SynchronizationType m_type;
    const SharedPtr<MultiQueues> m_queues;
    const UniquePtr<OperationContext> m_opContext;
    const ResourceDescription m_resourceDesc;
    const OperationSupport &m_writeOp;
    const OperationSupport &m_readOp;
};

class BinarySemaphoreTestInstance : public BaseTestInstance
{
public:
    BinarySemaphoreTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                                const OperationSupport &writeOp, const OperationSupport &readOp,
                                PipelineCacheData &pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
        , m_sharingMode(sharingMode)
    {
    }

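    // For every eligible queue pair, submits the write batch signaling a binary semaphore and
    // the read batch waiting on it, then verifies that the data written on one queue is
    // observed on the other.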
    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_opContext->getDeviceInterface();
        const VkDevice device = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs =
            m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (uint32_t pairNdx = 0; pairNdx < static_cast<uint32_t>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource> resource(
                new Resource(*m_opContext, m_resourceDesc,
                             m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
            const UniquePtr<Operation> writeOp(m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp(m_readOp.build(*m_opContext, *resource));

            const Move<VkCommandPool> cmdPool[] = {
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
                                  queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
                                  queuePairs[pairNdx].familyIndexRead)};
            const Move<VkCommandBuffer> ptrCmdBuffer[] = {makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                                                          makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])};
            const VkCommandBufferSubmitInfoKHR cmdBufferInfos[] = {
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ]),
            };
            const Unique<VkSemaphore> semaphore(createSemaphore(vk, device));
            VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
            VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo =
                makeCommonSemaphoreSubmitInfo(*semaphore, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
            SynchronizationWrapperPtr synchronizationWrapper[]{
                getSynchronizationWrapper(m_type, vk, false),
                getSynchronizationWrapper(m_type, vk, false),
            };

            synchronizationWrapper[QUEUETYPE_WRITE]->addSubmitInfo(0u, DE_NULL, 1u, &cmdBufferInfos[QUEUETYPE_WRITE],
                                                                   1u, &signalSemaphoreSubmitInfo);
            synchronizationWrapper[QUEUETYPE_READ]->addSubmitInfo(1u, &waitSemaphoreSubmitInfo, 1u,
                                                                  &cmdBufferInfos[QUEUETYPE_READ], 0u, DE_NULL);

            const SyncInfo writeSync = writeOp->getOutSyncInfo();
            const SyncInfo readSync = readOp->getInSyncInfo();
            VkCommandBuffer writeCmdBuffer = cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
            VkCommandBuffer readCmdBuffer = cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

            beginCommandBuffer(vk, writeCmdBuffer);
            writeOp->recordCommands(writeCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync,
                                    *resource, queuePairs[pairNdx].familyIndexWrite,
                                    queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer(vk, writeCmdBuffer);

            beginCommandBuffer(vk, readCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync,
                                    *resource, queuePairs[pairNdx].familyIndexWrite,
                                    queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands(readCmdBuffer);
            endCommandBuffer(vk, readCmdBuffer);

            VK_CHECK(synchronizationWrapper[QUEUETYPE_WRITE]->queueSubmit(queuePairs[pairNdx].queueWrite, DE_NULL));
            VK_CHECK(synchronizationWrapper[QUEUETYPE_READ]->queueSubmit(queuePairs[pairNdx].queueRead, DE_NULL));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

            {
                const Data expected = writeOp->getData();
                const Data actual = readOp->getData();

#ifdef CTS_USES_VULKANSC
                if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
                {
                    if (isIndirectBuffer(m_resourceDesc.type))
                    {
                        const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                        const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                        if (actualValue < expectedValue)
                            return tcu::TestStatus::fail("Counter value is smaller than expected");
                    }
                    else
                    {
                        if (0 != deMemCmp(expected.data, actual.data, expected.size))
                            return tcu::TestStatus::fail("Memory contents don't match");
                    }
                }
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

template <typename T>
inline SharedPtr<Move<T>> makeVkSharedPtr(Move<T> move)
{
    return SharedPtr<Move<T>>(new Move<T>(move));
}

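// Timeline semaphore variant: instead of a single write/read pair, builds a chain of copy
// operations that visits every queue of the device at least once, each link waiting on the
// previous link's timeline point and signaling its own.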
class TimelineSemaphoreTestInstance : public BaseTestInstance
{
public:
    TimelineSemaphoreTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                                  const SharedPtr<OperationSupport> &writeOp, const SharedPtr<OperationSupport> &readOp,
                                  PipelineCacheData &pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance(context, type, resourceDesc, *writeOp, *readOp, pipelineCacheData, true)
        , m_sharingMode(sharingMode)
    {
        uint32_t maxQueues = 0;
        std::vector<uint32_t> queueFamilies;

        if (m_queues->totalQueueCount() < 2)
            TCU_THROW(NotSupportedError, "Not enough queues");

        for (uint32_t familyNdx = 0; familyNdx < m_queues->familyCount(); familyNdx++)
        {
            maxQueues = std::max(m_queues->queueFamilyCount(familyNdx), maxQueues);
            queueFamilies.push_back(familyNdx);
        }

        // Create a chain of operations copying data from one resource
        // to another across every single queue of the system at least
        // once. Each operation executes with a dependency on the
        // previous one, expressed through timeline points.
        m_opSupports.push_back(writeOp);
        m_opQueues.push_back(m_queues->getDefaultQueue(writeOp->getQueueFlags(*m_opContext)));

        for (uint32_t queueIdx = 0; queueIdx < maxQueues; queueIdx++)
        {
            for (uint32_t familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
            {
                for (uint32_t copyOpIdx = 0; copyOpIdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpIdx++)
                {
                    if (isResourceSupported(s_copyOps[copyOpIdx], resourceDesc))
                    {
                        SharedPtr<OperationSupport> opSupport(
                            makeOperationSupport(s_copyOps[copyOpIdx], m_resourceDesc).release());

                        if (!checkQueueFlags(opSupport->getQueueFlags(*m_opContext),
                                             m_queues->getQueueFamilyFlags(familyIdx)))
                            continue;

                        m_opSupports.push_back(opSupport);
                        m_opQueues.push_back(
                            m_queues->getQueue(familyIdx, queueIdx % m_queues->queueFamilyCount(familyIdx)));
                        break;
                    }
                }
            }
        }

        m_opSupports.push_back(readOp);
        m_opQueues.push_back(m_queues->getDefaultQueue(readOp->getQueueFlags(*m_opContext)));

        // Now create the resources with the usage associated to the
        // operation performed on the resource.
        for (uint32_t opIdx = 0; opIdx < (m_opSupports.size() - 1); opIdx++)
        {
            uint32_t usage =
                m_opSupports[opIdx]->getOutResourceUsageFlags() | m_opSupports[opIdx + 1]->getInResourceUsageFlags();

            m_resources.push_back(
                SharedPtr<Resource>(new Resource(*m_opContext, m_resourceDesc, usage, m_sharingMode, queueFamilies)));
        }

        // Finally create the operations using the resources.
        m_ops.push_back(SharedPtr<Operation>(m_opSupports[0]->build(*m_opContext, *m_resources[0]).release()));
        for (uint32_t opIdx = 1; opIdx < (m_opSupports.size() - 1); opIdx++)
            m_ops.push_back(SharedPtr<Operation>(
                m_opSupports[opIdx]->build(*m_opContext, *m_resources[opIdx - 1], *m_resources[opIdx]).release()));
        m_ops.push_back(SharedPtr<Operation>(
            m_opSupports[m_opSupports.size() - 1]->build(*m_opContext, *m_resources.back()).release()));
    }

    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_opContext->getDeviceInterface();
        const VkDevice device = m_opContext->getDevice();
        de::Random rng(1234);
        const Unique<VkSemaphore> semaphore(createSemaphoreType(vk, device, VK_SEMAPHORE_TYPE_TIMELINE));
        std::vector<SharedPtr<Move<VkCommandPool>>> cmdPools;
        std::vector<SharedPtr<Move<VkCommandBuffer>>> ptrCmdBuffers;
        std::vector<VkCommandBufferSubmitInfoKHR> cmdBufferInfos;
        std::vector<uint64_t> timelineValues;

        cmdPools.resize(m_queues->familyCount());
        for (uint32_t familyIdx = 0; familyIdx < m_queues->familyCount(); familyIdx++)
            cmdPools[familyIdx] = makeVkSharedPtr(
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, familyIdx));

        ptrCmdBuffers.resize(m_ops.size());
        cmdBufferInfos.resize(m_ops.size());
        for (uint32_t opIdx = 0; opIdx < m_ops.size(); opIdx++)
        {
            uint64_t increment = 1 + rng.getUint8();

            ptrCmdBuffers[opIdx] = makeVkSharedPtr(makeCommandBuffer(vk, device, **cmdPools[m_opQueues[opIdx].family]));
            cmdBufferInfos[opIdx] = makeCommonCommandBufferSubmitInfo(**ptrCmdBuffers[opIdx]);

            timelineValues.push_back(timelineValues.empty() ? increment : (timelineValues.back() + increment));
        }

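        // Each operation signals a randomly incremented but strictly increasing timeline
        // value, as required for timeline semaphore signal operations.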
        for (uint32_t opIdx = 0; opIdx < m_ops.size(); opIdx++)
        {
            VkCommandBuffer cmdBuffer = cmdBufferInfos[opIdx].commandBuffer;
            VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(
                *semaphore, (opIdx == 0 ? 0u : timelineValues[opIdx - 1]), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
            VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(
                *semaphore, timelineValues[opIdx], VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
            SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, true);

            synchronizationWrapper->addSubmitInfo(opIdx == 0 ? 0u : 1u, &waitSemaphoreSubmitInfo, 1u,
                                                  &cmdBufferInfos[opIdx], 1u, &signalSemaphoreSubmitInfo,
                                                  opIdx == 0 ? false : true, true);

            beginCommandBuffer(vk, cmdBuffer);

            if (opIdx > 0)
            {
                const SyncInfo writeSync = m_ops[opIdx - 1]->getOutSyncInfo();
                const SyncInfo readSync = m_ops[opIdx]->getInSyncInfo();
                const Resource &resource = *m_resources[opIdx - 1].get();

                createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource,
                                        m_opQueues[opIdx - 1].family, m_opQueues[opIdx].family, m_sharingMode, true);
            }

            m_ops[opIdx]->recordCommands(cmdBuffer);

            if (opIdx < (m_ops.size() - 1))
            {
                const SyncInfo writeSync = m_ops[opIdx]->getOutSyncInfo();
                const SyncInfo readSync = m_ops[opIdx + 1]->getInSyncInfo();
                const Resource &resource = *m_resources[opIdx].get();

                createBarrierMultiQueue(synchronizationWrapper, cmdBuffer, writeSync, readSync, resource,
                                        m_opQueues[opIdx].family, m_opQueues[opIdx + 1].family, m_sharingMode);
            }

            endCommandBuffer(vk, cmdBuffer);

            VK_CHECK(synchronizationWrapper->queueSubmit(m_opQueues[opIdx].queue, DE_NULL));
        }

        VK_CHECK(vk.queueWaitIdle(m_opQueues.back().queue));

        {
            const Data expected = m_ops.front()->getData();
            const Data actual = m_ops.back()->getData();

            if (isIndirectBuffer(m_resourceDesc.type))
            {
                const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                if (actualValue < expectedValue)
                    return tcu::TestStatus::fail("Counter value is smaller than expected");
            }
            else
            {
                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }

        // Make the validation layers happy.
        for (uint32_t opIdx = 0; opIdx < m_opQueues.size(); opIdx++)
            VK_CHECK(vk.queueWaitIdle(m_opQueues[opIdx].queue));

        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
    std::vector<SharedPtr<OperationSupport>> m_opSupports;
    std::vector<SharedPtr<Operation>> m_ops;
    std::vector<SharedPtr<Resource>> m_resources;
    std::vector<Queue> m_opQueues;
};

class FenceTestInstance : public BaseTestInstance
{
public:
    FenceTestInstance(Context &context, SynchronizationType type, const ResourceDescription &resourceDesc,
                      const OperationSupport &writeOp, const OperationSupport &readOp,
                      PipelineCacheData &pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance(context, type, resourceDesc, writeOp, readOp, pipelineCacheData, false)
        , m_sharingMode(sharingMode)
    {
    }

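    // Fence variant: the write batch is submitted and fully waited on via
    // submitCommandsAndWait before the read batch is submitted, so no semaphore is needed
    // between the two queues.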
    tcu::TestStatus iterate(void)
    {
        const DeviceInterface &vk = m_opContext->getDeviceInterface();
        const VkDevice device = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs =
            m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (uint32_t pairNdx = 0; pairNdx < static_cast<uint32_t>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource> resource(
                new Resource(*m_opContext, m_resourceDesc,
                             m_writeOp.getOutResourceUsageFlags() | m_readOp.getInResourceUsageFlags()));
            const UniquePtr<Operation> writeOp(m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp(m_readOp.build(*m_opContext, *resource));
            const Move<VkCommandPool> cmdPool[]{
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
                                  queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
                                  queuePairs[pairNdx].familyIndexRead)};
            const Move<VkCommandBuffer> ptrCmdBuffer[]{makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                                                       makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])};
            const VkCommandBufferSubmitInfoKHR cmdBufferInfos[]{
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_WRITE]),
                makeCommonCommandBufferSubmitInfo(*ptrCmdBuffer[QUEUETYPE_READ])};
            SynchronizationWrapperPtr synchronizationWrapper[]{
                getSynchronizationWrapper(m_type, vk, false),
                getSynchronizationWrapper(m_type, vk, false),
            };
            const SyncInfo writeSync = writeOp->getOutSyncInfo();
            const SyncInfo readSync = readOp->getInSyncInfo();
            VkCommandBuffer writeCmdBuffer = cmdBufferInfos[QUEUETYPE_WRITE].commandBuffer;
            VkCommandBuffer readCmdBuffer = cmdBufferInfos[QUEUETYPE_READ].commandBuffer;

            beginCommandBuffer(vk, writeCmdBuffer);
            writeOp->recordCommands(writeCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_WRITE], writeCmdBuffer, writeSync, readSync,
                                    *resource, queuePairs[pairNdx].familyIndexWrite,
                                    queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer(vk, writeCmdBuffer);

            submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_WRITE], vk, device, queuePairs[pairNdx].queueWrite,
                                  writeCmdBuffer);

            beginCommandBuffer(vk, readCmdBuffer);
            createBarrierMultiQueue(synchronizationWrapper[QUEUETYPE_READ], readCmdBuffer, writeSync, readSync,
                                    *resource, queuePairs[pairNdx].familyIndexWrite,
                                    queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands(readCmdBuffer);
            endCommandBuffer(vk, readCmdBuffer);

            submitCommandsAndWait(synchronizationWrapper[QUEUETYPE_READ], vk, device, queuePairs[pairNdx].queueRead,
                                  readCmdBuffer);

            {
                const Data expected = writeOp->getData();
                const Data actual = readOp->getData();

#ifdef CTS_USES_VULKANSC
                if (m_context.getTestContext().getCommandLine().isSubProcess())
#endif // CTS_USES_VULKANSC
                {
                    if (isIndirectBuffer(m_resourceDesc.type))
                    {
                        const uint32_t expectedValue = reinterpret_cast<const uint32_t *>(expected.data)[0];
                        const uint32_t actualValue = reinterpret_cast<const uint32_t *>(actual.data)[0];

                        if (actualValue < expectedValue)
                            return tcu::TestStatus::fail("Counter value is smaller than expected");
                    }
                    else
                    {
                        if (0 != deMemCmp(expected.data, actual.data, expected.size))
                            return tcu::TestStatus::fail("Memory contents don't match");
                    }
                }
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

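// Test case wrapper: compiles the shader programs for the write/read operations (and, for the
// timeline variant, all supported copy operations), checks device support, and instantiates
// the test matching the requested synchronization primitive.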
class BaseTestCase : public TestCase
{
public:
    BaseTestCase(tcu::TestContext &testCtx, const std::string &name, SynchronizationType type,
                 const SyncPrimitive syncPrimitive, const ResourceDescription resourceDesc, const OperationName writeOp,
                 const OperationName readOp, const VkSharingMode sharingMode, PipelineCacheData &pipelineCacheData)
        : TestCase(testCtx, name)
        , m_type(type)
        , m_resourceDesc(resourceDesc)
        , m_writeOp(makeOperationSupport(writeOp, resourceDesc).release())
        , m_readOp(makeOperationSupport(readOp, resourceDesc).release())
        , m_syncPrimitive(syncPrimitive)
        , m_sharingMode(sharingMode)
        , m_pipelineCacheData(pipelineCacheData)
    {
    }

    void initPrograms(SourceCollections &programCollection) const
    {
        m_writeOp->initPrograms(programCollection);
        m_readOp->initPrograms(programCollection);

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
        {
            for (uint32_t copyOpNdx = 0; copyOpNdx < DE_LENGTH_OF_ARRAY(s_copyOps); copyOpNdx++)
            {
                if (isResourceSupported(s_copyOps[copyOpNdx], m_resourceDesc))
                    makeOperationSupport(s_copyOps[copyOpNdx], m_resourceDesc)->initPrograms(programCollection);
            }
        }
    }

    void checkSupport(Context &context) const
    {
        if (m_type == SynchronizationType::SYNCHRONIZATION2)
            context.requireDeviceFunctionality("VK_KHR_synchronization2");
        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE)
            context.requireDeviceFunctionality("VK_KHR_timeline_semaphore");

        const InstanceInterface &instance = context.getInstanceInterface();
        const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
        const std::vector<VkQueueFamilyProperties> queueFamilyProperties =
            getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
        if (m_sharingMode == VK_SHARING_MODE_CONCURRENT && queueFamilyProperties.size() < 2)
            TCU_THROW(NotSupportedError, "Concurrent requires more than 1 queue family");

        if (m_syncPrimitive == SYNC_PRIMITIVE_TIMELINE_SEMAPHORE &&
            !context.getTimelineSemaphoreFeatures().timelineSemaphore)
            TCU_THROW(NotSupportedError, "Timeline semaphore not supported");

        if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
        {
            VkImageFormatProperties imageFormatProperties;
            const uint32_t usage = m_writeOp->getOutResourceUsageFlags() | m_readOp->getInResourceUsageFlags();
            const VkResult formatResult = instance.getPhysicalDeviceImageFormatProperties(
                physicalDevice, m_resourceDesc.imageFormat, m_resourceDesc.imageType, VK_IMAGE_TILING_OPTIMAL, usage,
                (VkImageCreateFlags)0, &imageFormatProperties);

            if (formatResult != VK_SUCCESS)
                TCU_THROW(NotSupportedError, "Image format is not supported");

            if ((imageFormatProperties.sampleCounts & m_resourceDesc.imageSamples) != m_resourceDesc.imageSamples)
                TCU_THROW(NotSupportedError, "Requested sample count is not supported");
        }
    }

    TestInstance *createInstance(Context &context) const
    {
        switch (m_syncPrimitive)
        {
        case SYNC_PRIMITIVE_FENCE:
            return new FenceTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData,
                                         m_sharingMode);
        case SYNC_PRIMITIVE_BINARY_SEMAPHORE:
            return new BinarySemaphoreTestInstance(context, m_type, m_resourceDesc, *m_writeOp, *m_readOp,
                                                   m_pipelineCacheData, m_sharingMode);
        case SYNC_PRIMITIVE_TIMELINE_SEMAPHORE:
            return new TimelineSemaphoreTestInstance(context, m_type, m_resourceDesc, m_writeOp, m_readOp,
                                                     m_pipelineCacheData, m_sharingMode);
        default:
            DE_ASSERT(0);
            return DE_NULL;
        }
    }

private:
    const SynchronizationType m_type;
    const ResourceDescription m_resourceDesc;
    const SharedPtr<OperationSupport> m_writeOp;
    const SharedPtr<OperationSupport> m_readOp;
    const SyncPrimitive m_syncPrimitive;
    const VkSharingMode m_sharingMode;
    PipelineCacheData &m_pipelineCacheData;
};

struct TestData
{
    SynchronizationType type;
    PipelineCacheData *pipelineCacheData;
};

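// Populates the "multi_queue" group: one subgroup per synchronization primitive, one subgroup
// per write/read operation pair, and one test per supported resource for each sharing mode
// (exclusive and concurrent).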
void createTests(tcu::TestCaseGroup *group, TestData data)
{
    tcu::TestContext &testCtx = group->getTestContext();

    static const struct
    {
        const char *name;
        SyncPrimitive syncPrimitive;
        int numOptions;
    } groups[] = {{"fence", SYNC_PRIMITIVE_FENCE, 1},
                  {"binary_semaphore", SYNC_PRIMITIVE_BINARY_SEMAPHORE, 1},
                  {"timeline_semaphore", SYNC_PRIMITIVE_TIMELINE_SEMAPHORE, 1}};

    for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
    {
        MovePtr<tcu::TestCaseGroup> synchGroup(new tcu::TestCaseGroup(testCtx, groups[groupNdx].name));

        for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
            for (int readOpNdx = 0; readOpNdx < DE_LENGTH_OF_ARRAY(s_readOps); ++readOpNdx)
            {
                const OperationName writeOp = s_writeOps[writeOpNdx];
                const OperationName readOp = s_readOps[readOpNdx];
                const std::string opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
                bool empty = true;

                MovePtr<tcu::TestCaseGroup> opGroup(new tcu::TestCaseGroup(testCtx, opGroupName.c_str()));

                for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
                    for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
                    {
                        const ResourceDescription &resource = s_resources[resourceNdx];
                        if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
                        {
                            std::string name = getResourceName(resource);
                            VkSharingMode sharingMode = VK_SHARING_MODE_EXCLUSIVE;

                            // queue family sharing mode used for resource
                            if (optionNdx)
                            {
                                name += "_concurrent";
                                sharingMode = VK_SHARING_MODE_CONCURRENT;
                            }
                            else
                                name += "_exclusive";

                            opGroup->addChild(new BaseTestCase(testCtx, name, data.type, groups[groupNdx].syncPrimitive,
                                                               resource, writeOp, readOp, sharingMode,
                                                               *data.pipelineCacheData));
                            empty = false;
                        }
                    }
                if (!empty)
                    synchGroup->addChild(opGroup.release());
            }
        group->addChild(synchGroup.release());
    }
}

void cleanupGroup(tcu::TestCaseGroup *group, TestData data)
{
    DE_UNREF(group);
    DE_UNREF(data.pipelineCacheData);
    // Destroy singleton object
    MultiQueues::destroy();
}

} // namespace

tcu::TestCaseGroup *createSynchronizedOperationMultiQueueTests(tcu::TestContext &testCtx, SynchronizationType type,
                                                               PipelineCacheData &pipelineCacheData)
{
    TestData data{type, &pipelineCacheData};

    // Synchronization of a memory-modifying operation
    return createTestGroup(testCtx, "multi_queue", createTests, data, cleanupGroup);
}

} // namespace synchronization
} // namespace vkt