/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesImageSparseBinding.cpp
 * \brief Sparse fully resident images with mipmaps tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesBufferSparseBinding.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "tcuTextureUtil.hpp"

#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

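// Test case for a fully resident sparse image (VK_IMAGE_CREATE_SPARSE_BINDING_BIT): the whole
// image is backed via opaque memory binds, filled through a buffer-to-image copy and read back
// into a host-visible buffer for verification.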
class ImageSparseBindingCase : public TestCase
{
public:
    ImageSparseBindingCase(tcu::TestContext &testCtx, const std::string &name, const ImageType imageType,
                           const tcu::UVec3 &imageSize, const VkFormat format, const bool useDeviceGroups = false);

    TestInstance *createInstance(Context &context) const;
    virtual void checkSupport(Context &context) const;

private:
    const bool m_useDeviceGroups;
    const ImageType m_imageType;
    const tcu::UVec3 m_imageSize;
    const VkFormat m_format;
};

ImageSparseBindingCase::ImageSparseBindingCase(tcu::TestContext &testCtx, const std::string &name,
                                               const ImageType imageType, const tcu::UVec3 &imageSize,
                                               const VkFormat format, const bool useDeviceGroups)

    : TestCase(testCtx, name)
    , m_useDeviceGroups(useDeviceGroups)
    , m_imageType(imageType)
    , m_imageSize(imageSize)
    , m_format(format)
{
}

void ImageSparseBindingCase::checkSupport(Context &context) const
{
    context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SPARSE_BINDING);

#ifndef CTS_USES_VULKANSC
    if (m_format == VK_FORMAT_A8_UNORM_KHR)
        context.requireDeviceFunctionality("VK_KHR_maintenance5");
#endif // CTS_USES_VULKANSC

    if (!isImageSizeSupported(context.getInstanceInterface(), context.getPhysicalDevice(), m_imageType, m_imageSize))
        TCU_THROW(NotSupportedError, "Image size not supported for device");

    if (formatIsR64(m_format))
    {
        context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

        if (context.getShaderImageAtomicInt64FeaturesEXT().sparseImageInt64Atomics == VK_FALSE)
        {
            TCU_THROW(NotSupportedError, "sparseImageInt64Atomics is not supported for device");
        }
    }
}

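// Test instance that performs the opaque memory binds, uploads reference data into the sparse
// image, reads it back and compares the result against the reference data.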
class ImageSparseBindingInstance : public SparseResourcesBaseInstance
{
public:
    ImageSparseBindingInstance(Context &context, const ImageType imageType, const tcu::UVec3 &imageSize,
                               const VkFormat format, const bool useDeviceGroups);

    tcu::TestStatus iterate(void);

private:
    const bool m_useDeviceGroups;
    const ImageType m_imageType;
    const tcu::UVec3 m_imageSize;
    const VkFormat m_format;
};

ImageSparseBindingInstance::ImageSparseBindingInstance(Context &context, const ImageType imageType,
                                                       const tcu::UVec3 &imageSize, const VkFormat format,
                                                       const bool useDeviceGroups)

    : SparseResourcesBaseInstance(context, useDeviceGroups)
    , m_useDeviceGroups(useDeviceGroups)
    , m_imageType(imageType)
    , m_imageSize(imageSize)
    , m_format(format)
{
}

tcu::TestStatus ImageSparseBindingInstance::iterate(void)
{
    const InstanceInterface &instance = m_context.getInstanceInterface();

    {
        // Create logical device supporting both sparse and compute queues
        QueueRequirementsVec queueRequirements;
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

        createDeviceSupportingQueues(queueRequirements, false, m_format == VK_FORMAT_A8_UNORM_KHR);
    }

    const VkPhysicalDevice physicalDevice = getPhysicalDevice();
    VkImageCreateInfo imageSparseInfo;
    std::vector<DeviceMemorySp> deviceMemUniquePtrVec;

    const DeviceInterface &deviceInterface = getDeviceInterface();
    const Queue &sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
    const Queue &computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0);
    const PlanarFormatDescription formatDescription = getPlanarFormatDescription(m_format);

    // Go through all physical devices
    for (uint32_t physDevID = 0; physDevID < m_numPhysicalDevices; ++physDevID)
    {
        const uint32_t firstDeviceID = physDevID;
        const uint32_t secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;

        imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;                //VkStructureType sType;
        imageSparseInfo.pNext = DE_NULL;                                            //const void* pNext;
        imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;                 //VkImageCreateFlags flags;
        imageSparseInfo.imageType = mapImageType(m_imageType);                      //VkImageType imageType;
        imageSparseInfo.format = m_format;                                          //VkFormat format;
        imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); //VkExtent3D extent;
        imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize);       //uint32_t arrayLayers;
        imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT;                            //VkSampleCountFlagBits samples;
        imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL;                           //VkImageTiling tiling;
        imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;                  //VkImageLayout initialLayout;
        imageSparseInfo.usage =
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; //VkImageUsageFlags usage;
        imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;               //VkSharingMode sharingMode;
        imageSparseInfo.queueFamilyIndexCount = 0u;                            //uint32_t queueFamilyIndexCount;
        imageSparseInfo.pQueueFamilyIndices = DE_NULL;                         //const uint32_t* pQueueFamilyIndices;

        if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
        {
            imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
        }

        {
            VkImageFormatProperties imageFormatProperties;
            if (instance.getPhysicalDeviceImageFormatProperties(
                    physicalDevice, imageSparseInfo.format, imageSparseInfo.imageType, imageSparseInfo.tiling,
                    imageSparseInfo.usage, imageSparseInfo.flags,
                    &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
            {
                TCU_THROW(NotSupportedError, "Image format does not support sparse binding operations");
            }

            imageSparseInfo.mipLevels =
                getMipmapCount(m_format, formatDescription, imageFormatProperties, imageSparseInfo.extent);
        }

        // Create sparse image
        const Unique<VkImage> imageSparse(createImage(deviceInterface, getDevice(), &imageSparseInfo));

        // Create sparse image memory bind semaphore
        const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

        // Get sparse image general memory requirements
        const VkMemoryRequirements imageMemoryRequirements =
            getImageMemoryRequirements(deviceInterface, getDevice(), *imageSparse);

        // Check if required image memory size does not exceed device limits
        if (imageMemoryRequirements.size >
            getPhysicalDeviceProperties(instance, getPhysicalDevice(secondDeviceID)).limits.sparseAddressSpaceSize)
            TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

        DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

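        // Back the whole opaque image range with device memory, one allocation per
        // alignment-sized block, and submit the binds on the sparse queue.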
        {
            std::vector<VkSparseMemoryBind> sparseMemoryBinds;
            const uint32_t numSparseBinds =
                static_cast<uint32_t>(imageMemoryRequirements.size / imageMemoryRequirements.alignment);
            const uint32_t memoryType = findMatchingMemoryType(instance, getPhysicalDevice(secondDeviceID),
                                                               imageMemoryRequirements, MemoryRequirement::Any);

            if (memoryType == NO_MATCH_FOUND)
                return tcu::TestStatus::fail("No matching memory type found");

            if (firstDeviceID != secondDeviceID)
            {
                VkPeerMemoryFeatureFlags peerMemoryFeatureFlags = (VkPeerMemoryFeatureFlags)0;
                const uint32_t heapIndex =
                    getHeapIndexForMemoryType(instance, getPhysicalDevice(secondDeviceID), memoryType);
                deviceInterface.getDeviceGroupPeerMemoryFeatures(getDevice(), heapIndex, firstDeviceID, secondDeviceID,
                                                                 &peerMemoryFeatureFlags);

                if (((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT) == 0) ||
                    ((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_COPY_DST_BIT) == 0))
                {
                    TCU_THROW(NotSupportedError, "Peer memory does not support COPY_SRC and COPY_DST");
                }
            }

            for (uint32_t sparseBindNdx = 0; sparseBindNdx < numSparseBinds; ++sparseBindNdx)
            {
                const VkSparseMemoryBind sparseMemoryBind =
                    makeSparseMemoryBind(deviceInterface, getDevice(), imageMemoryRequirements.alignment, memoryType,
                                         imageMemoryRequirements.alignment * sparseBindNdx);

                deviceMemUniquePtrVec.push_back(makeVkSharedPtr(
                    Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory),
                                         Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                sparseMemoryBinds.push_back(sparseMemoryBind);
            }

            const VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo = makeSparseImageOpaqueMemoryBindInfo(
                *imageSparse, static_cast<uint32_t>(sparseMemoryBinds.size()), sparseMemoryBinds.data());

            const VkDeviceGroupBindSparseInfo devGroupBindSparseInfo = {
                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, //VkStructureType sType;
                DE_NULL,                                         //const void* pNext;
                firstDeviceID,                                   //uint32_t resourceDeviceIndex;
                secondDeviceID,                                  //uint32_t memoryDeviceIndex;
            };

            const VkBindSparseInfo bindSparseInfo = {
                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                    //VkStructureType sType;
                m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL, //const void* pNext;
                0u,                                                    //uint32_t waitSemaphoreCount;
                DE_NULL,                                               //const VkSemaphore* pWaitSemaphores;
                0u,                                                    //uint32_t bufferBindCount;
                DE_NULL,                                               //const VkSparseBufferMemoryBindInfo* pBufferBinds;
                1u,                                                    //uint32_t imageOpaqueBindCount;
                &opaqueBindInfo,                                       //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
                0u,                                                    //uint32_t imageBindCount;
                DE_NULL,                                               //const VkSparseImageMemoryBindInfo* pImageBinds;
                1u,                                                    //uint32_t signalSemaphoreCount;
                &imageMemoryBindSemaphore.get()                        //const VkSemaphore* pSignalSemaphores;
            };

            // Submit sparse bind commands for execution
            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
        }

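        // Total size of the staging buffers: sum of the sizes of all mip levels over all planes.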
        uint32_t imageSizeInBytes = 0;

        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
                imageSizeInBytes +=
                    getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription,
                                                planeNdx, mipmapNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);

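        // Build one VkBufferImageCopy region per (plane, mip level) pair, laid out back to back
        // in the staging buffer; bufferOffset advances by the size of each level.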
        std::vector<VkBufferImageCopy> bufferImageCopy(formatDescription.numPlanes * imageSparseInfo.mipLevels);
        {
            uint32_t bufferOffset = 0;
            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
                {
                    bufferImageCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx] = {
                        bufferOffset, // VkDeviceSize bufferOffset;
                        0u,           // uint32_t bufferRowLength;
                        0u,           // uint32_t bufferImageHeight;
                        makeImageSubresourceLayers(
                            aspect, mipmapNdx, 0u,
                            imageSparseInfo.arrayLayers), // VkImageSubresourceLayers imageSubresource;
                        makeOffset3D(0, 0, 0),            // VkOffset3D imageOffset;
                        vk::getPlaneExtent(formatDescription, imageSparseInfo.extent, planeNdx,
                                           mipmapNdx) // VkExtent3D imageExtent;
                    };
                    bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers,
                                                                formatDescription, planeNdx, mipmapNdx,
                                                                BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
                }
            }
        }

        // Create command buffer for compute and transfer operations
        const Unique<VkCommandPool> commandPool(
            makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
        const Unique<VkCommandBuffer> commandBuffer(
            allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

        // Start recording commands
        beginCommandBuffer(deviceInterface, *commandBuffer);

        const VkBufferCreateInfo inputBufferCreateInfo =
            makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
        const Unique<VkBuffer> inputBuffer(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
        const de::UniquePtr<Allocation> inputBufferAlloc(
            bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

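        // Fill the reference data with a repeating byte pattern (1 .. alignment) so that every
        // byte written through the image can be checked after readback.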
        std::vector<uint8_t> referenceData(imageSizeInBytes);
        for (uint32_t valueNdx = 0; valueNdx < imageSizeInBytes; ++valueNdx)
        {
            referenceData[valueNdx] = static_cast<uint8_t>((valueNdx % imageMemoryRequirements.alignment) + 1u);
        }

        {
            deMemcpy(inputBufferAlloc->getHostPtr(), referenceData.data(), imageSizeInBytes);
            flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

            const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier(
                VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, *inputBuffer, 0u, imageSizeInBytes);
            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier,
                                               0u, DE_NULL);
        }

        {
            std::vector<VkImageMemoryBarrier> imageSparseTransferDstBarriers;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                imageSparseTransferDstBarriers.push_back(makeImageMemoryBarrier(
                    0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    *imageSparse,
                    makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers),
                    sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex :
                                                                                    VK_QUEUE_FAMILY_IGNORED,
                    sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex :
                                                                                    VK_QUEUE_FAMILY_IGNORED));
            }
            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL,
                                               static_cast<uint32_t>(imageSparseTransferDstBarriers.size()),
                                               imageSparseTransferDstBarriers.data());
        }

        deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageSparse,
                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                             static_cast<uint32_t>(bufferImageCopy.size()), bufferImageCopy.data());

        {
            std::vector<VkImageMemoryBarrier> imageSparseTransferSrcBarriers;

            for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
            {
                const VkImageAspectFlags aspect =
                    (formatDescription.numPlanes > 1) ? getPlaneAspect(planeNdx) : VK_IMAGE_ASPECT_COLOR_BIT;

                imageSparseTransferSrcBarriers.push_back(makeImageMemoryBarrier(
                    VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *imageSparse,
                    makeImageSubresourceRange(aspect, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)));
            }

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                               VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL,
                                               static_cast<uint32_t>(imageSparseTransferSrcBarriers.size()),
                                               imageSparseTransferSrcBarriers.data());
        }

        const VkBufferCreateInfo outputBufferCreateInfo =
            makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
        const Unique<VkBuffer> outputBuffer(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
        const de::UniquePtr<Allocation> outputBufferAlloc(
            bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));

        deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                             *outputBuffer, static_cast<uint32_t>(bufferImageCopy.size()),
                                             bufferImageCopy.data());

        {
            const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier(
                VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *outputBuffer, 0u, imageSizeInBytes);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                               VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier,
                                               0u, DE_NULL);
        }

        // End recording commands
        endCommandBuffer(deviceInterface, *commandBuffer);

        const VkPipelineStageFlags stageBits[] = {VK_PIPELINE_STAGE_TRANSFER_BIT};

        // Submit commands for execution and wait for completion
        submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u,
                              &imageMemoryBindSemaphore.get(), stageBits, 0, DE_NULL, m_useDeviceGroups, firstDeviceID);

        // Retrieve data from buffer to host memory
        invalidateAlloc(deviceInterface, getDevice(), *outputBufferAlloc);

        // Wait for sparse queue to become idle
        deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

        const uint8_t *outputData = static_cast<const uint8_t *>(outputBufferAlloc->getHostPtr());
        bool ignoreLsb6Bits = areLsb6BitsDontCare(imageSparseInfo.format);
        bool ignoreLsb4Bits = areLsb4BitsDontCare(imageSparseInfo.format);

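        // Compare the readback against the reference, mip level by mip level for every plane.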
        for (uint32_t planeNdx = 0; planeNdx < formatDescription.numPlanes; ++planeNdx)
        {
            for (uint32_t mipmapNdx = 0; mipmapNdx < imageSparseInfo.mipLevels; ++mipmapNdx)
            {
                const uint32_t mipLevelSizeInBytes = getImageMipLevelSizeInBytes(
                    imageSparseInfo.extent, imageSparseInfo.arrayLayers, formatDescription, planeNdx, mipmapNdx);
                const uint32_t bufferOffset = static_cast<uint32_t>(
                    bufferImageCopy[planeNdx * imageSparseInfo.mipLevels + mipmapNdx].bufferOffset);

                // Validate results
                for (size_t byteNdx = 0; byteNdx < mipLevelSizeInBytes; byteNdx++)
                {
                    const uint8_t res = *(outputData + bufferOffset + byteNdx);
                    const uint8_t ref = referenceData[bufferOffset + byteNdx];

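                    // Some packed formats treat the low-order 6 or 4 bits of the affected bytes
                    // as "don't care"; mask them out before comparing.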
                    uint8_t mask = 0xFF;

                    if (!(byteNdx & 0x01) && (ignoreLsb6Bits))
                        mask = 0xC0;
                    else if (!(byteNdx & 0x01) && (ignoreLsb4Bits))
                        mask = 0xF0;

                    if ((res & mask) != (ref & mask))
                    {
                        return tcu::TestStatus::fail("Failed");
                    }
                }
            }
        }
    }

    return tcu::TestStatus::pass("Passed");
}

TestInstance *ImageSparseBindingCase::createInstance(Context &context) const
{
    return new ImageSparseBindingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}

std::vector<TestFormat> getSparseBindingTestFormats(ImageType imageType, bool addExtraFormat)
{
    auto formats = getTestFormats(imageType);
#ifndef CTS_USES_VULKANSC
    if (addExtraFormat)
        formats.push_back(TestFormat{VK_FORMAT_A8_UNORM_KHR});
#endif // CTS_USES_VULKANSC
    return formats;
}

} // namespace

tcu::TestCaseGroup *createImageSparseBindingTestsCommon(tcu::TestContext &testCtx,
                                                        de::MovePtr<tcu::TestCaseGroup> testGroup,
                                                        const bool useDeviceGroup = false)
{
    const std::vector<TestImageParameters> imageParameters{
        {IMAGE_TYPE_1D,
         {tcu::UVec3(512u, 1u, 1u), tcu::UVec3(1024u, 1u, 1u), tcu::UVec3(11u, 1u, 1u)},
         getSparseBindingTestFormats(IMAGE_TYPE_1D, !useDeviceGroup)},
        {IMAGE_TYPE_1D_ARRAY,
         {tcu::UVec3(512u, 1u, 64u), tcu::UVec3(1024u, 1u, 8u), tcu::UVec3(11u, 1u, 3u)},
         getSparseBindingTestFormats(IMAGE_TYPE_1D_ARRAY, !useDeviceGroup)},
        {IMAGE_TYPE_2D,
         {tcu::UVec3(512u, 256u, 1u), tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u)},
         getSparseBindingTestFormats(IMAGE_TYPE_2D, !useDeviceGroup)},
        {IMAGE_TYPE_2D_ARRAY,
         {tcu::UVec3(512u, 256u, 6u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u)},
         getSparseBindingTestFormats(IMAGE_TYPE_2D_ARRAY, !useDeviceGroup)},
        {IMAGE_TYPE_3D,
         {tcu::UVec3(512u, 256u, 6u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u)},
         getSparseBindingTestFormats(IMAGE_TYPE_3D, !useDeviceGroup)},
        {IMAGE_TYPE_CUBE,
         {tcu::UVec3(256u, 256u, 1u), tcu::UVec3(128u, 128u, 1u), tcu::UVec3(137u, 137u, 1u)},
         getSparseBindingTestFormats(IMAGE_TYPE_CUBE, !useDeviceGroup)},
        {IMAGE_TYPE_CUBE_ARRAY,
         {tcu::UVec3(256u, 256u, 6u), tcu::UVec3(128u, 128u, 8u), tcu::UVec3(137u, 137u, 3u)},
         getSparseBindingTestFormats(IMAGE_TYPE_CUBE_ARRAY, !useDeviceGroup)}};

    for (size_t imageTypeNdx = 0; imageTypeNdx < imageParameters.size(); ++imageTypeNdx)
    {
        const ImageType imageType = imageParameters[imageTypeNdx].imageType;
        de::MovePtr<tcu::TestCaseGroup> imageTypeGroup(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str()));

        for (size_t formatNdx = 0; formatNdx < imageParameters[imageTypeNdx].formats.size(); ++formatNdx)
        {
            VkFormat format = imageParameters[imageTypeNdx].formats[formatNdx].format;
            tcu::UVec3 imageSizeAlignment = getImageSizeAlignment(format);
            de::MovePtr<tcu::TestCaseGroup> formatGroup(
                new tcu::TestCaseGroup(testCtx, getImageFormatID(format).c_str()));

            for (size_t imageSizeNdx = 0; imageSizeNdx < imageParameters[imageTypeNdx].imageSizes.size();
                 ++imageSizeNdx)
            {
                const tcu::UVec3 imageSize = imageParameters[imageTypeNdx].imageSizes[imageSizeNdx];

                // Skip sizes that are not multiples of the format's size alignment (e.g. odd sizes for some YCbCr formats)
                if ((imageSize.x() % imageSizeAlignment.x()) != 0)
                    continue;
                if ((imageSize.y() % imageSizeAlignment.y()) != 0)
                    continue;

                std::ostringstream stream;
                stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

                formatGroup->addChild(
                    new ImageSparseBindingCase(testCtx, stream.str(), imageType, imageSize, format, useDeviceGroup));
            }
            imageTypeGroup->addChild(formatGroup.release());
        }
        testGroup->addChild(imageTypeGroup.release());
    }

    return testGroup.release();
}

tcu::TestCaseGroup *createImageSparseBindingTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_binding"));
    return createImageSparseBindingTestsCommon(testCtx, testGroup);
}

tcu::TestCaseGroup *createDeviceGroupImageSparseBindingTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_binding"));
    return createImageSparseBindingTestsCommon(testCtx, testGroup, true);
}

} // namespace sparse
} // namespace vkt