/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Image load/store Tests
 *//*--------------------------------------------------------------------*/

#include "vktImageLoadStoreTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktImageTestsUtil.hpp"
#include "vktImageLoadStoreUtil.hpp"
#include "vktImageTexture.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBufferWithMemory.hpp"

#include "deMath.h"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"

#include "tcuImageCompare.hpp"
#include "tcuTexture.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuFloat.hpp"
#include "tcuFloatFormat.hpp"
#include "tcuStringTemplate.hpp"
#include "tcuVectorUtil.hpp"

#include <string>
#include <vector>
#include <map>

using namespace vk;

namespace vkt
{
namespace image
{
namespace
{

// Check for three-component (non-packed) format, i.e. pixel size is a multiple of 3.
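// E.g. VK_FORMAT_R32G32B32_UINT (12-byte texels) qualifies, while 4-byte packed formats such as
// VK_FORMAT_B10G11R11_UFLOAT_PACK32 do not.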
bool formatHasThreeComponents(VkFormat format)
{
    const tcu::TextureFormat texFormat = mapVkFormat(format);
    return (getPixelSize(texFormat) % 3) == 0;
}

VkFormat getSingleComponentFormat(VkFormat format)
{
    tcu::TextureFormat texFormat = mapVkFormat(format);
    texFormat = tcu::TextureFormat(tcu::TextureFormat::R, texFormat.type);
    return mapTextureFormat(texFormat);
}
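// E.g. VK_FORMAT_R32G32B32_SFLOAT maps to VK_FORMAT_R32_SFLOAT.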

inline VkBufferImageCopy makeBufferImageCopy(const Texture &texture)
{
    return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
}

tcu::ConstPixelBufferAccess getLayerOrSlice(const Texture &texture, const tcu::ConstPixelBufferAccess access,
                                            const int layer)
{
    switch (texture.type())
    {
    case IMAGE_TYPE_1D:
    case IMAGE_TYPE_2D:
    case IMAGE_TYPE_BUFFER:
        // Not layered
        DE_ASSERT(layer == 0);
        return access;

    case IMAGE_TYPE_1D_ARRAY:
        return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

    case IMAGE_TYPE_2D_ARRAY:
    case IMAGE_TYPE_CUBE:
    case IMAGE_TYPE_CUBE_ARRAY:
    case IMAGE_TYPE_3D: // A 3D texture is treated as if its depth were array layers.
        return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

    default:
        DE_FATAL("Internal test error");
        return tcu::ConstPixelBufferAccess();
    }
}

//! \return the size in bytes of a given level of a mipmap image, including array layers.
vk::VkDeviceSize getMipmapLevelImageSizeBytes(const Texture &texture, const vk::VkFormat format,
                                              const uint32_t mipmapLevel)
{
    tcu::IVec3 size = texture.size(mipmapLevel);
    return tcu::getPixelSize(vk::mapVkFormat(format)) * size.x() * size.y() * size.z();
}

//! \return the size in bytes of the whole mipmap image, including all mipmap levels and array layers.
vk::VkDeviceSize getMipmapImageTotalSizeBytes(const Texture &texture, const vk::VkFormat format)
{
    vk::VkDeviceSize size = 0u;
    int32_t levelCount = 0;

    do
    {
        size += getMipmapLevelImageSizeBytes(texture, format, levelCount);
        levelCount++;
    } while (levelCount < texture.numMipmapLevels());
    return size;
}
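// For illustration: a 64x64 single-layer RGBA8 texture with three mipmap levels sums to
// (64*64 + 32*32 + 16*16) * 4 bytes, assuming each level halves both dimensions.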

//! \return true if all layers match in both pixel buffers
bool comparePixelBuffers(tcu::TestLog &log, const Texture &texture, const VkFormat format,
                         const tcu::ConstPixelBufferAccess reference, const tcu::ConstPixelBufferAccess result,
                         const uint32_t mipmapLevel = 0u)
{
    DE_ASSERT(reference.getFormat() == result.getFormat());
    DE_ASSERT(reference.getSize() == result.getSize());

    const bool is3d = (texture.type() == IMAGE_TYPE_3D);
    const int numLayersOrSlices = (is3d ? texture.size(mipmapLevel).z() : texture.numLayers());
    const int numCubeFaces = 6;

    int passedLayers = 0;
    for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
    {
        const std::string comparisonName = "Comparison" + de::toString(layerNdx);
        const std::string comparisonDesc =
            "Image Comparison, " +
            (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " +
                                   de::toString(layerNdx / numCubeFaces) :
             is3d            ? "slice " + de::toString(layerNdx) :
                               "layer " + de::toString(layerNdx) + ", level " + de::toString(mipmapLevel));

        const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
        const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);

        bool ok = false;

        switch (tcu::getTextureChannelClass(mapVkFormat(format).type))
        {
        case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
        case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
        {
            ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer,
                                          tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
            break;
        }

        case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
        {
            // Allow error of minimum representable difference
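            // (e.g. 1/255 for an 8-bit UNORM channel)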
            tcu::Vec4 threshold(
                1.0f /
                ((tcu::UVec4(1u) << tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<uint32_t>()) - 1u)
                    .cast<float>());

            // Add 1 ULP of fp32 imprecision to account for image comparison fp32 math with unorm->float conversions.
            threshold += tcu::Vec4(std::numeric_limits<float>::epsilon());

            ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer,
                                            threshold, tcu::COMPARE_LOG_RESULT);
            break;
        }

        case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
        {
            const tcu::UVec4 bitDepth =
                tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<uint32_t>() - 1u;
            // Avoid bit-shifting with a negative value, which is undefined behaviour.
            const tcu::UVec4 fixedBitDepth =
                tcu::select(bitDepth, tcu::UVec4(0u, 0u, 0u, 0u),
                            tcu::greaterThanEqual(bitDepth.cast<int32_t>(), tcu::IVec4(0, 0, 0, 0)));

            // Allow error of minimum representable difference
            const tcu::Vec4 threshold(1.0f / ((tcu::UVec4(1u) << fixedBitDepth) - 1u).cast<float>());

            ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer,
                                            threshold, tcu::COMPARE_LOG_RESULT);
            break;
        }

        case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
        {
            // Convert target format ulps to float ulps and allow 1 ulp difference
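            // (e.g. a 16-bit float channel with 10 mantissa bits allows 2^(23-10) = 8192 fp32 ULPs)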
            const tcu::UVec4 threshold(
                tcu::UVec4(1u) << (tcu::UVec4(23) -
                                   tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<uint32_t>()));

            ok = tcu::floatUlpThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer,
                                               resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
            break;
        }

        default:
            DE_FATAL("Unknown channel class");
        }

        if (ok)
            ++passedLayers;
    }

    return passedLayers == numLayersOrSlices;
}

//! Zero out invalid pixels in the image (denormalized, infinite, NaN values)
void replaceBadFloatReinterpretValues(const tcu::PixelBufferAccess access)
{
    DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);

    for (int z = 0; z < access.getDepth(); ++z)
        for (int y = 0; y < access.getHeight(); ++y)
            for (int x = 0; x < access.getWidth(); ++x)
            {
                const tcu::Vec4 color(access.getPixel(x, y, z));
                tcu::Vec4 newColor = color;

                for (int i = 0; i < 4; ++i)
                {
                    if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
                    {
                        const tcu::Float16 f(color[i]);
                        if (f.isDenorm() || f.isInf() || f.isNaN())
                            newColor[i] = 0.0f;
                    }
                    else
                    {
                        const tcu::Float32 f(color[i]);
                        if (f.isDenorm() || f.isInf() || f.isNaN())
                            newColor[i] = 0.0f;
                    }
                }

                if (newColor != color)
                    access.setPixel(newColor, x, y, z);
            }
}

//! Replace invalid SNORM pixels: -128 and -127 both encode -1.0 in 8-bit SNORM, so map -128 to -127 to keep
//! comparisons unambiguous.
void replaceSnormReinterpretValues(const tcu::PixelBufferAccess access)
{
    DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);

    for (int z = 0; z < access.getDepth(); ++z)
        for (int y = 0; y < access.getHeight(); ++y)
            for (int x = 0; x < access.getWidth(); ++x)
            {
                const tcu::IVec4 color(access.getPixelInt(x, y, z));
                tcu::IVec4 newColor = color;

                for (int i = 0; i < 4; ++i)
                {
                    const int32_t oldColor(color[i]);
                    if (oldColor == -128)
                        newColor[i] = -127;
                }

                if (newColor != color)
                    access.setPixel(newColor, x, y, z);
            }
}

tcu::Vec4 getMiddleValue(VkFormat imageFormat)
{
    tcu::TextureFormat format = mapVkFormat(imageFormat);
    tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(format);
    tcu::Vec4 val = (fmtInfo.valueMax - fmtInfo.valueMin) * tcu::Vec4(0.5f);

    if (isIntegerFormat(imageFormat))
        val = floor(val);

    return val;
}

tcu::TextureLevel generateReferenceImage(const tcu::IVec3 &imageSize, const VkFormat imageFormat,
                                         const VkFormat readFormat, bool constantValue = false)
{
    // Generate reference image data using the storage format

    tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
    const tcu::PixelBufferAccess access = reference.getAccess();

    const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
    const float storeColorBias = computeStoreColorBias(imageFormat);

    const bool srgbFormat = isSrgbFormat(imageFormat);
    const bool intFormat = isIntegerFormat(imageFormat);
    const bool storeNegativeValues = isSignedFormat(imageFormat) && (storeColorBias == 0);
    const int xMax = imageSize.x() - 1;
    const int yMax = imageSize.y() - 1;

    for (int z = 0; z < imageSize.z(); ++z)
        for (int y = 0; y < imageSize.y(); ++y)
            for (int x = 0; x < imageSize.x(); ++x)
            {
                if (constantValue)
                {
                    access.setPixel(getMiddleValue(imageFormat), x, y, z);
                }
                else
                {
                    tcu::IVec4 color =
                        tcu::IVec4(x ^ y ^ z, (xMax - x) ^ y ^ z, x ^ (yMax - y) ^ z, (xMax - x) ^ (yMax - y) ^ z);

                    if (storeNegativeValues)
                        color -= tcu::IVec4(deRoundFloatToInt32((float)de::max(xMax, yMax) / 2.0f));

                    if (intFormat)
                        access.setPixel(color, x, y, z);
                    else
                    {
                        if (srgbFormat)
                            access.setPixel(tcu::linearToSRGB(color.asFloat() * storeColorScale + storeColorBias), x,
                                            y, z);
                        else
                            access.setPixel(color.asFloat() * storeColorScale + storeColorBias, x, y, z);
                    }
                }
            }

    // If the image is to be accessed as a float texture, get rid of invalid values

    if (isFloatFormat(readFormat) && imageFormat != readFormat)
        replaceBadFloatReinterpretValues(
            tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
    if (isSnormFormat(readFormat) && imageFormat != readFormat)
        replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));

    return reference;
}

inline tcu::TextureLevel generateReferenceImage(const tcu::IVec3 &imageSize, const VkFormat imageFormat,
                                                bool constantValue = false)
{
    return generateReferenceImage(imageSize, imageFormat, imageFormat, constantValue);
}

void flipHorizontally(const tcu::PixelBufferAccess access)
{
    const int xMax = access.getWidth() - 1;
    const int halfWidth = access.getWidth() / 2;

    if (isIntegerFormat(mapTextureFormat(access.getFormat())))
        for (int z = 0; z < access.getDepth(); z++)
            for (int y = 0; y < access.getHeight(); y++)
                for (int x = 0; x < halfWidth; x++)
                {
                    const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
                    access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
                    access.setPixel(temp, x, y, z);
                }
    else
        for (int z = 0; z < access.getDepth(); z++)
            for (int y = 0; y < access.getHeight(); y++)
                for (int x = 0; x < halfWidth; x++)
                {
                    const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
                    access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
                    access.setPixel(temp, x, y, z);
                }
}

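// Two formats are considered compatible when they are identical or share the same texel size; this mirrors the
// size-compatibility rule the tests rely on when reinterpreting data. Alpha-only formats (VK_KHR_maintenance5)
// sit in their own compatibility class, so they only match themselves.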
inline bool formatsAreCompatible(const VkFormat format0, const VkFormat format1)
{
    const bool isAlphaOnly = (isAlphaOnlyFormat(format0) || isAlphaOnlyFormat(format1));
    return format0 == format1 ||
           (mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize() && !isAlphaOnly);
}

void commandImageWriteBarrierBetweenShaderInvocations(Context &context, const VkCommandBuffer cmdBuffer,
                                                      const VkImage image, const Texture &texture)
{
    const DeviceInterface &vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
    const VkImageMemoryBarrier shaderWriteBarrier =
        makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, 0u, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, image,
                               fullImageSubresourceRange);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                          (const VkBufferMemoryBarrier *)DE_NULL, 1, &shaderWriteBarrier);
}

void commandBufferWriteBarrierBeforeHostRead(Context &context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer,
                                             const VkDeviceSize bufferSizeBytes)
{
    const DeviceInterface &vk = context.getDeviceInterface();

    const VkBufferMemoryBarrier shaderWriteBarrier =
        makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &shaderWriteBarrier, 0,
                          (const VkImageMemoryBarrier *)DE_NULL);
}

//! Copy all layers of an image to a buffer.
void commandCopyImageToBuffer(Context &context, const VkCommandBuffer cmdBuffer, const VkImage image,
                              const VkBuffer buffer, const VkDeviceSize bufferSizeBytes, const Texture &texture)
{
    const DeviceInterface &vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
    const VkImageMemoryBarrier prepareForTransferBarrier =
        makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, fullImageSubresourceRange);

    const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);

    const VkBufferMemoryBarrier copyBarrier =
        makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                          (const VkBufferMemoryBarrier *)DE_NULL, 1, &prepareForTransferBarrier);
    vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, &copyRegion);
    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                          0, (const VkMemoryBarrier *)DE_NULL, 1, &copyBarrier, 0,
                          (const VkImageMemoryBarrier *)DE_NULL);
}

//! Copy all layers of a mipmap image to a buffer.
void commandCopyMipmapImageToBuffer(Context &context, const VkCommandBuffer cmdBuffer, const VkImage image,
                                    const VkFormat imageFormat, const VkBuffer buffer,
                                    const VkDeviceSize bufferSizeBytes, const Texture &texture)
{
    const DeviceInterface &vk = context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
    const VkImageMemoryBarrier prepareForTransferBarrier =
        makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image, fullImageSubresourceRange);

    std::vector<VkBufferImageCopy> copyRegions;
    VkDeviceSize bufferOffset = 0u;
    for (int32_t levelNdx = 0; levelNdx < texture.numMipmapLevels(); levelNdx++)
    {
        const VkBufferImageCopy copyParams = {
            bufferOffset, // VkDeviceSize bufferOffset;
            0u,           // uint32_t bufferRowLength;
            0u,           // uint32_t bufferImageHeight;
            makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u,
                                       texture.numLayers()),  // VkImageSubresourceLayers imageSubresource;
            makeOffset3D(0, 0, 0),                            // VkOffset3D imageOffset;
            makeExtent3D(texture.layerSize(levelNdx)),        // VkExtent3D imageExtent;
        };
        copyRegions.push_back(copyParams);
        bufferOffset += getMipmapLevelImageSizeBytes(texture, imageFormat, levelNdx);
    }

    const VkBufferMemoryBarrier copyBarrier =
        makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, buffer, 0ull, bufferSizeBytes);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
                          (const VkBufferMemoryBarrier *)DE_NULL, 1, &prepareForTransferBarrier);
    vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer,
                            (uint32_t)copyRegions.size(), copyRegions.data());
    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
                          0, (const VkMemoryBarrier *)DE_NULL, 1, &copyBarrier, 0,
                          (const VkImageMemoryBarrier *)DE_NULL);
}

class StoreTest : public TestCase
{
public:
    enum TestFlags
    {
        FLAG_SINGLE_LAYER_BIND = 0x1,              //!< Run the shader multiple times, each time binding a different layer.
        FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 0x2, //!< Declare the format of the images in the shader code
        FLAG_MINALIGN = 0x4,                       //!< Use bufferview offset that matches the advertised minimum alignment
        FLAG_STORE_CONSTANT_VALUE = 0x8,           //!< Store a constant value
    };

    StoreTest(tcu::TestContext &testCtx, const std::string &name, const Texture &texture, const VkFormat format,
              const VkImageTiling tiling, const uint32_t flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);

    virtual void checkSupport(Context &context) const;
    void initPrograms(SourceCollections &programCollection) const;
    TestInstance *createInstance(Context &context) const;

private:
    const Texture m_texture;
    const VkFormat m_format;
    const VkImageTiling m_tiling;
    const bool m_declareImageFormatInShader;
    const bool m_singleLayerBind;
    const bool m_minalign;
    const bool m_storeConstantValue;
};

StoreTest::StoreTest(tcu::TestContext &testCtx, const std::string &name, const Texture &texture, const VkFormat format,
                     const VkImageTiling tiling, const uint32_t flags)
    : TestCase(testCtx, name)
    , m_texture(texture)
    , m_format(format)
    , m_tiling(tiling)
    , m_declareImageFormatInShader((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
    , m_singleLayerBind((flags & FLAG_SINGLE_LAYER_BIND) != 0)
    , m_minalign((flags & FLAG_MINALIGN) != 0)
    , m_storeConstantValue((flags & FLAG_STORE_CONSTANT_VALUE) != 0)
{
    if (m_singleLayerBind)
        DE_ASSERT(m_texture.numLayers() > 1);
}

void StoreTest::checkSupport(Context &context) const
{
#ifndef CTS_USES_VULKANSC
    if (m_format == VK_FORMAT_A8_UNORM_KHR || m_format == VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR)
        context.requireDeviceFunctionality("VK_KHR_maintenance5");

    const VkFormatProperties3 formatProperties(context.getFormatProperties(m_format));

    const auto &tilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ? formatProperties.optimalTilingFeatures :
                                                                             formatProperties.linearTilingFeatures;

    if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !m_declareImageFormatInShader &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage buffer");

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !m_declareImageFormatInShader &&
        !(tilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage images");

    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(tilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
#else
    const VkFormatProperties formatProperties(
        getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), m_format));
    const auto tilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ? formatProperties.optimalTilingFeatures :
                                                                            formatProperties.linearTilingFeatures;

    if (!m_declareImageFormatInShader)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT);

    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(tilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
#endif // CTS_USES_VULKANSC

    const auto &vki = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();

    VkImageFormatProperties imageFormatProperties;
    const auto result = vki.getPhysicalDeviceImageFormatProperties(
        physicalDevice, m_format, mapImageType(m_texture.type()), m_tiling,
        (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT), 0, &imageFormatProperties);

    if (result != VK_SUCCESS)
    {
        if (result == VK_ERROR_FORMAT_NOT_SUPPORTED)
            TCU_THROW(NotSupportedError, "Format unsupported for tiling");
        else
            TCU_FAIL("vkGetPhysicalDeviceImageFormatProperties returned unexpected error");
    }

    if (imageFormatProperties.maxArrayLayers < (uint32_t)m_texture.numLayers())
    {
        TCU_THROW(NotSupportedError, "This format and tiling combination does not support this number of array layers");
    }

    if (imageFormatProperties.maxMipLevels < (uint32_t)m_texture.numMipmapLevels())
    {
        TCU_THROW(NotSupportedError, "This format and tiling combination does not support this number of miplevels");
    }
}

void StoreTest::initPrograms(SourceCollections &programCollection) const
{
    const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
    const float storeColorBias = computeStoreColorBias(m_format);
    DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

    const uint32_t xMax = m_texture.size().x() - 1;
    const uint32_t yMax = m_texture.size().y() - 1;
    const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
    const bool storeNegativeValues = isSignedFormat(m_format) && (storeColorBias == 0);
    bool useClamp = false;
    const std::string colorType = signednessPrefix + "vec4";
    std::string colorBaseExpr = colorType + "(";

    std::string colorExpr;

    if (m_storeConstantValue)
    {
        tcu::Vec4 val = getMiddleValue(m_format);

        if (isIntegerFormat(m_format))
        {
            colorExpr = colorBaseExpr + de::toString(static_cast<int64_t>(val.x())) + ", " +
                        de::toString(static_cast<int64_t>(val.y())) + ", " +
                        de::toString(static_cast<int64_t>(val.z())) + ", " +
                        de::toString(static_cast<int64_t>(val.w())) + ")";
        }
        else
        {
            colorExpr = colorBaseExpr + de::toString(val.x()) + ", " + de::toString(val.y()) + ", " +
                        de::toString(val.z()) + ", " + de::toString(val.w()) + ")";
        }
    }
    else
    {
        colorBaseExpr = colorBaseExpr + "gx^gy^gz, " + "(" + de::toString(xMax) + "-gx)^gy^gz, " + "gx^(" +
                        de::toString(yMax) + "-gy)^gz, " + "(" + de::toString(xMax) + "-gx)^(" + de::toString(yMax) +
                        "-gy)^gz)";

        // Large integer values may not be representable in formats with low bit depths
        if (isIntegerFormat(m_format))
        {
            const int64_t minStoreValue =
                storeNegativeValues ? 0 - deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : 0;
            const int64_t maxStoreValue =
                storeNegativeValues ? deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : de::max(xMax, yMax);

            useClamp = !isRepresentableIntegerValue(tcu::Vector<int64_t, 4>(minStoreValue), mapVkFormat(m_format)) ||
                       !isRepresentableIntegerValue(tcu::Vector<int64_t, 4>(maxStoreValue), mapVkFormat(m_format));
        }

        // Clamp if the integer value cannot be represented with the current format
        if (useClamp)
        {
            const tcu::IVec4 bitDepths = tcu::getTextureFormatBitDepth(mapVkFormat(m_format));
            tcu::IVec4 minRepresentableValue;
            tcu::IVec4 maxRepresentableValue;

            switch (tcu::getTextureChannelClass(mapVkFormat(m_format).type))
            {
            case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
            {
                minRepresentableValue = tcu::IVec4(0);
                maxRepresentableValue = (tcu::IVec4(1) << bitDepths) - tcu::IVec4(1);
                break;
            }

            case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
            {
                minRepresentableValue = -(tcu::IVec4(1) << (bitDepths - tcu::IVec4(1)));
                maxRepresentableValue = (tcu::IVec4(1) << (bitDepths - tcu::IVec4(1))) - tcu::IVec4(1);
                break;
            }

            default:
                DE_ASSERT(isIntegerFormat(m_format));
            }

            colorBaseExpr = "clamp(" + colorBaseExpr + ", " + signednessPrefix + "vec4" +
                            de::toString(minRepresentableValue) + ", " + signednessPrefix + "vec4" +
                            de::toString(maxRepresentableValue) + ")";
        }

        colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale)) +
                    (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

        if (storeNegativeValues)
            colorExpr += "-" + de::toString(deRoundFloatToInt32((float)deMax32(xMax, yMax) / 2.0f));
    }

    const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
    const std::string texelCoordStr = (dimension == 1 ? "gx" :
                                       dimension == 2 ? "ivec2(gx, gy)" :
                                       dimension == 3 ? "ivec3(gx, gy, gz)" :
                                                        "");

    const ImageType usedImageType =
        (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
    const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

    std::string maybeFmtQualStr =
        m_declareImageFormatInShader ? ", " + getShaderImageFormatQualifier(mapVkFormat(m_format)) : "";

    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
        << "\n"
        << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
        << "layout (binding = 0" << maybeFmtQualStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";

    if (m_singleLayerBind)
        src << "layout (binding = 1) readonly uniform Constants {\n"
            << "    int u_layerNdx;\n"
            << "};\n";

    src << "\n"
        << "void main (void)\n"
        << "{\n"
        << "    int gx = int(gl_GlobalInvocationID.x);\n"
        << "    int gy = int(gl_GlobalInvocationID.y);\n"
        << "    int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
        << "    " << colorType << " storedColor = " << colorExpr << ";\n"
        << "    imageStore(u_image, " << texelCoordStr << ", storedColor);\n"
        << "}\n";

    programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
}
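
// For reference, with FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER set, a constant-value store and a 2D rgba8 texture, the
// template above expands to roughly the following compute shader (an illustrative sketch, not verbatim output):
//
//     #version 440
//
//     layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
//     layout (binding = 0, rgba8) writeonly uniform image2D u_image;
//
//     void main (void)
//     {
//         int gx = int(gl_GlobalInvocationID.x);
//         int gy = int(gl_GlobalInvocationID.y);
//         int gz = int(gl_GlobalInvocationID.z);
//         vec4 storedColor = vec4(0.5, 0.5, 0.5, 0.5);
//         imageStore(u_image, ivec2(gx, gy), storedColor);
//     }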

//! Generic test iteration algorithm for image tests
class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance(Context &context, const Texture &texture, const VkFormat format,
                     const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign,
                     const bool bufferLoadUniform);

    tcu::TestStatus iterate(void);

    virtual ~BaseTestInstance(void)
    {
    }

protected:
    virtual VkDescriptorSetLayout prepareDescriptors(void) = 0;
    virtual tcu::TestStatus verifyResult(void) = 0;

    virtual void commandBeforeCompute(const VkCommandBuffer cmdBuffer) = 0;
    virtual void commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer) = 0;
    virtual void commandAfterCompute(const VkCommandBuffer cmdBuffer) = 0;

    virtual void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
                                                const int layerNdx) = 0;
    virtual uint32_t getViewOffset(Context &context, const VkFormat format, bool uniform);

    const Texture m_texture;
    const VkFormat m_format;
    const bool m_declareImageFormatInShader;
    const bool m_singleLayerBind;
    const bool m_minalign;
    const bool m_bufferLoadUniform;
    const uint32_t m_srcViewOffset;
    const uint32_t m_dstViewOffset;
};

BaseTestInstance::BaseTestInstance(Context &context, const Texture &texture, const VkFormat format,
                                   const bool declareImageFormatInShader, const bool singleLayerBind,
                                   const bool minalign, const bool bufferLoadUniform)
    : TestInstance(context)
    , m_texture(texture)
    , m_format(format)
    , m_declareImageFormatInShader(declareImageFormatInShader)
    , m_singleLayerBind(singleLayerBind)
    , m_minalign(minalign)
    , m_bufferLoadUniform(bufferLoadUniform)
    , m_srcViewOffset(getViewOffset(context, format, m_bufferLoadUniform))
    , m_dstViewOffset(
          getViewOffset(context, formatHasThreeComponents(format) ? getSingleComponentFormat(format) : format, false))
{
}

tcu::TestStatus BaseTestInstance::iterate(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkQueue queue = m_context.getUniversalQueue();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

    const Unique<VkShaderModule> shaderModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

    const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
    const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
    const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

    const Unique<VkCommandPool> cmdPool(
        createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex));
    const Unique<VkCommandBuffer> cmdBuffer(
        allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    beginCommandBuffer(vk, *cmdBuffer);

    vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
    commandBeforeCompute(*cmdBuffer);

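    // The shader declares local_size 1x1x1, so each dispatch below launches one invocation per texel
    // (or per texel of a single layer when m_singleLayerBind is set).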
    const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
    const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
    for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
    {
        commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

        if (layerNdx > 0)
            commandBetweenShaderInvocations(*cmdBuffer);

        vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
    }

    commandAfterCompute(*cmdBuffer);

    endCommandBuffer(vk, *cmdBuffer);

    submitCommandsAndWait(vk, device, queue, *cmdBuffer);

    return verifyResult();
}

//! Base store test implementation
class StoreTestInstance : public BaseTestInstance
{
public:
    StoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
                      const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign,
                      const bool storeConstantValue);

protected:
    virtual tcu::TestStatus verifyResult(void);

    // Empty implementations for functions that might not be needed
    void commandBeforeCompute(const VkCommandBuffer)
    {
    }
    void commandBetweenShaderInvocations(const VkCommandBuffer)
    {
    }
    void commandAfterCompute(const VkCommandBuffer)
    {
    }

    de::MovePtr<BufferWithMemory> m_imageBuffer;
    const VkDeviceSize m_imageSizeBytes;
    bool m_storeConstantValue;
};

uint32_t BaseTestInstance::getViewOffset(Context &context, const VkFormat format, bool uniform)
{
    if (m_minalign)
    {
        if (!context.getTexelBufferAlignmentFeaturesEXT().texelBufferAlignment)
            return (uint32_t)context.getDeviceProperties().limits.minTexelBufferOffsetAlignment;

        VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT alignmentProperties;
        deMemset(&alignmentProperties, 0, sizeof(alignmentProperties));
        alignmentProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT;

        VkPhysicalDeviceProperties2 properties2;
        deMemset(&properties2, 0, sizeof(properties2));
        properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
        properties2.pNext = &alignmentProperties;

        context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);

        VkBool32 singleTexelAlignment = uniform ? alignmentProperties.uniformTexelBufferOffsetSingleTexelAlignment :
                                                  alignmentProperties.storageTexelBufferOffsetSingleTexelAlignment;
        VkDeviceSize align = uniform ? alignmentProperties.uniformTexelBufferOffsetAlignmentBytes :
                                       alignmentProperties.storageTexelBufferOffsetAlignmentBytes;

        VkDeviceSize texelSize = formatHasThreeComponents(format) ? tcu::getChannelSize(vk::mapVkFormat(format).type) :
                                                                    tcu::getPixelSize(vk::mapVkFormat(format));

        if (singleTexelAlignment)
            align = de::min(align, texelSize);

        return (uint32_t)align;
    }

    return 0;
}
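
// Illustration of the minalign path above: if storageTexelBufferOffsetAlignmentBytes is 16 but the view's texel
// size is 4 bytes and storageTexelBufferOffsetSingleTexelAlignment is VK_TRUE, the offset used shrinks to 4.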

StoreTestInstance::StoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
                                     const bool declareImageFormatInShader, const bool singleLayerBind,
                                     const bool minalign, const bool storeConstantValue)
    : BaseTestInstance(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, false)
    , m_imageSizeBytes(getImageSizeBytes(texture.size(), format))
    , m_storeConstantValue(storeConstantValue)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator &allocator = m_context.getDefaultAllocator();

    // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.

    m_imageBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
        vk, device, allocator,
        makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset,
                             VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
        MemoryRequirement::HostVisible));
}

tcu::TestStatus StoreTestInstance::verifyResult(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const tcu::IVec3 imageSize = m_texture.size();
    const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format, m_storeConstantValue);

    const Allocation &alloc = m_imageBuffer->getAllocation();
    invalidateAlloc(vk, device, alloc);
    const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize,
                                             (const char *)alloc.getHostPtr() + m_dstViewOffset);

    if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
        return tcu::TestStatus::pass("Passed");
    else
        return tcu::TestStatus::fail("Image comparison failed");
}

//! Store test for images
class ImageStoreTestInstance : public StoreTestInstance
{
public:
    ImageStoreTestInstance(Context &context, const Texture &texture, const VkFormat format, const VkImageTiling tiling,
                           const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign,
                           const bool storeConstantValue);

protected:
    VkDescriptorSetLayout prepareDescriptors(void);
    void commandBeforeCompute(const VkCommandBuffer cmdBuffer);
    void commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer);
    void commandAfterCompute(const VkCommandBuffer cmdBuffer);

    void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
                                        const int layerNdx);

    de::MovePtr<Image> m_image;
    de::MovePtr<BufferWithMemory> m_constantsBuffer;
    const VkDeviceSize m_constantsBufferChunkSizeBytes;
    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
    std::vector<SharedVkImageView> m_allImageViews;
};

ImageStoreTestInstance::ImageStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
                                               const VkImageTiling tiling, const bool declareImageFormatInShader,
                                               const bool singleLayerBind, const bool minalign,
                                               const bool storeConstantValue)
    : StoreTestInstance(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign,
                        storeConstantValue)
    , m_constantsBufferChunkSizeBytes(getOptimalUniformBufferChunkSize(context.getInstanceInterface(),
                                                                       context.getPhysicalDevice(), sizeof(uint32_t)))
    , m_allDescriptorSets(texture.numLayers())
    , m_allImageViews(texture.numLayers())
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator &allocator = m_context.getDefaultAllocator();

    m_image = de::MovePtr<Image>(
        new Image(vk, device, allocator,
                  makeImageCreateInfo(m_texture, m_format,
                                      (VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT), 0u, tiling),
                  MemoryRequirement::Any));

    // This buffer will be used to pass constants to the shader

    const int numLayers = m_texture.numLayers();
    const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
    m_constantsBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
        vk, device, allocator, makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
        MemoryRequirement::HostVisible));

    {
        const Allocation &alloc = m_constantsBuffer->getAllocation();
        uint8_t *const basePtr = static_cast<uint8_t *>(alloc.getHostPtr());

        deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        {
            uint32_t *valuePtr = reinterpret_cast<uint32_t *>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
            *valuePtr = static_cast<uint32_t>(layerNdx);
        }

        flushAlloc(vk, device, alloc);
    }
}

VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const int numLayers = m_texture.numLayers();
    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
                                .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
                                .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
                                .build(vk, device);

    m_descriptorPool = DescriptorPoolBuilder()
                           .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
                           .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
                           .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

    if (m_singleLayerBind)
    {
        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        {
            m_allDescriptorSets[layerNdx] =
                makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
            m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
                vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
        }
    }
    else // bind all layers at once
    {
        m_allDescriptorSets[0] =
            makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
        m_allImageViews[0] =
            makeVkSharedPtr(makeImageView(vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
                                          makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
    }

    return *m_descriptorSetLayout; // not passing the ownership
}

void ImageStoreTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
                                                            const VkPipelineLayout pipelineLayout, const int layerNdx)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
    const VkImageView imageView = **m_allImageViews[layerNdx];

    const VkDescriptorImageInfo descriptorImageInfo =
        makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);

    // Set the next chunk of the constants buffer. Each chunk begins with the layer index written at buffer creation.
    const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
        m_constantsBuffer->get(), layerNdx * m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);

    DescriptorSetUpdateBuilder()
        .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                     &descriptorImageInfo)
        .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u),
                     VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
        .update(vk, device);
    vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u,
                             DE_NULL);
}

void ImageStoreTestInstance::commandBeforeCompute(const VkCommandBuffer cmdBuffer)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();

    const VkImageSubresourceRange fullImageSubresourceRange =
        makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
    const VkImageMemoryBarrier setImageLayoutBarrier =
        makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
                               m_image->get(), fullImageSubresourceRange);

    const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
    const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, m_constantsBuffer->get(), 0ull, constantsBufferSize);

    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                          (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1, &writeConstantsBarrier, 1,
                          &setImageLayoutBarrier);
}

void ImageStoreTestInstance::commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)
{
    commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
}

void ImageStoreTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
{
    commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}

//! Store test for buffers
class BufferStoreTestInstance : public StoreTestInstance
{
public:
    BufferStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
                            const bool declareImageFormatInShader, const bool minalign, const bool storeConstantValue);

protected:
    VkDescriptorSetLayout prepareDescriptors(void);
    void commandAfterCompute(const VkCommandBuffer cmdBuffer);

    void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
                                        const int layerNdx);

    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    Move<VkDescriptorSet> m_descriptorSet;
    Move<VkBufferView> m_bufferView;
};

BufferStoreTestInstance::BufferStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
                                                 const bool declareImageFormatInShader, const bool minalign,
                                                 const bool storeConstantValue)
    : StoreTestInstance(context, texture, format, declareImageFormatInShader, false, minalign, storeConstantValue)
{
}

VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
                                .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
                                .build(vk, device);

    m_descriptorPool = DescriptorPoolBuilder()
                           .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
                           .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

    m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
    m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_dstViewOffset, m_imageSizeBytes);

    return *m_descriptorSetLayout; // not passing the ownership
}

void BufferStoreTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
                                                             const VkPipelineLayout pipelineLayout, const int layerNdx)
{
    DE_ASSERT(layerNdx == 0);
    DE_UNREF(layerNdx);

    const VkDevice device = m_context.getDevice();
    const DeviceInterface &vk = m_context.getDeviceInterface();

    DescriptorSetUpdateBuilder()
        .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u),
                     VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
        .update(vk, device);
    vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(),
                             0u, DE_NULL);
}

void BufferStoreTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
{
    commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(),
                                            m_imageSizeBytes + m_dstViewOffset);
}

class LoadStoreTest : public TestCase
{
public:
    enum TestFlags
    {
        FLAG_SINGLE_LAYER_BIND = (1 << 0), //!< Run the shader multiple times, each time binding a different layer.
        FLAG_RESTRICT_IMAGES = (1 << 1),   //!< If given, images in the shader will be qualified with "restrict".
        FLAG_DECLARE_FORMAT_IN_SHADER_READS = (1 << 2),  //!< Declare the format of images being read in the shader code
        FLAG_DECLARE_FORMAT_IN_SHADER_WRITES = (1 << 3), //!< Declare the format of images being written in the shader code
        FLAG_MINALIGN = (1 << 4),           //!< Use bufferview offset that matches the advertised minimum alignment
        FLAG_UNIFORM_TEXEL_BUFFER = (1 << 5), //!< Load from a uniform texel buffer rather than a storage texel buffer
    };

    LoadStoreTest(tcu::TestContext &testCtx, const std::string &name, const Texture &texture, const VkFormat format,
                  const VkFormat imageFormat, const VkImageTiling tiling,
                  const uint32_t flags = (FLAG_DECLARE_FORMAT_IN_SHADER_READS | FLAG_DECLARE_FORMAT_IN_SHADER_WRITES),
                  const bool imageLoadStoreLodAMD = false);

    virtual void checkSupport(Context &context) const;
    void initPrograms(SourceCollections &programCollection) const;
    TestInstance *createInstance(Context &context) const;

private:
    const Texture m_texture;
    const VkFormat m_format;      //!< Format as accessed in the shader
    const VkFormat m_imageFormat; //!< Storage format
    const VkImageTiling m_tiling; //!< Image tiling
    const bool
        m_declareFormatInShaderReads; //!< Whether the shader will specify the format layout qualifier of images being read from.
    const bool
        m_declareFormatInShaderWrites; //!< Whether the shader will specify the format layout qualifier of images being written to.
    const bool m_singleLayerBind;
    const bool m_restrictImages;
    const bool m_minalign;
    bool m_bufferLoadUniform;
    const bool m_imageLoadStoreLodAMD;
};

LoadStoreTest::LoadStoreTest(tcu::TestContext &testCtx, const std::string &name, const Texture &texture,
                             const VkFormat format, const VkFormat imageFormat, const VkImageTiling tiling,
                             const uint32_t flags, const bool imageLoadStoreLodAMD)
    : TestCase(testCtx, name)
    , m_texture(texture)
    , m_format(format)
    , m_imageFormat(imageFormat)
    , m_tiling(tiling)
    , m_declareFormatInShaderReads((flags & FLAG_DECLARE_FORMAT_IN_SHADER_READS) != 0)
    , m_declareFormatInShaderWrites((flags & FLAG_DECLARE_FORMAT_IN_SHADER_WRITES) != 0)
    , m_singleLayerBind((flags & FLAG_SINGLE_LAYER_BIND) != 0)
    , m_restrictImages((flags & FLAG_RESTRICT_IMAGES) != 0)
    , m_minalign((flags & FLAG_MINALIGN) != 0)
    , m_bufferLoadUniform((flags & FLAG_UNIFORM_TEXEL_BUFFER) != 0)
    , m_imageLoadStoreLodAMD(imageLoadStoreLodAMD)
{
    if (m_singleLayerBind)
        DE_ASSERT(m_texture.numLayers() > 1);

    DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
}

void LoadStoreTest::checkSupport(Context &context) const
{
#ifndef CTS_USES_VULKANSC
    if (m_format == VK_FORMAT_A8_UNORM_KHR || m_format == VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR)
        context.requireDeviceFunctionality("VK_KHR_maintenance5");

    const VkFormatProperties3 formatProperties(context.getFormatProperties(m_format));
    const VkFormatProperties3 imageFormatProperties(context.getFormatProperties(m_imageFormat));

    const auto &tilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ? formatProperties.optimalTilingFeatures :
                                                                             formatProperties.linearTilingFeatures;
    const auto &imageTilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ?
                                          imageFormatProperties.optimalTilingFeatures :
                                          imageFormatProperties.linearTilingFeatures;

    if (m_imageLoadStoreLodAMD)
        context.requireDeviceFunctionality("VK_AMD_shader_image_load_store_lod");

    if (!m_bufferLoadUniform && !m_declareFormatInShaderReads &&
        !(tilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted loads via storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER && !m_declareFormatInShaderReads &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted loads via buffers");

    if (!m_declareFormatInShaderWrites && !(tilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER && !m_declareFormatInShaderWrites &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via buffers");

    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(tilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");

    if (m_texture.type() == IMAGE_TYPE_BUFFER &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");

    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageTilingFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");

    if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");

    if (formatHasThreeComponents(m_format))
    {
        // When the source buffer is three-component, the destination buffer is single-component.
        VkFormat dstFormat = getSingleComponentFormat(m_format);
        const VkFormatProperties3 dstFormatProperties(context.getFormatProperties(dstFormat));

        if (m_texture.type() == IMAGE_TYPE_BUFFER &&
            !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
            TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    }
    else if (m_texture.type() == IMAGE_TYPE_BUFFER &&
             !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");

    if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER &&
        !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
#else
    const vk::VkFormatProperties formatProperties(
        vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), m_format));
    const vk::VkFormatProperties imageFormatProperties(vk::getPhysicalDeviceFormatProperties(
        context.getInstanceInterface(), context.getPhysicalDevice(), m_imageFormat));

    const auto tilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ? formatProperties.optimalTilingFeatures :
                                                                            formatProperties.linearTilingFeatures;
    const auto imageTilingFeatures = (m_tiling == vk::VK_IMAGE_TILING_OPTIMAL) ?
                                         imageFormatProperties.optimalTilingFeatures :
                                         imageFormatProperties.linearTilingFeatures;

    if (m_imageLoadStoreLodAMD)
        context.requireDeviceFunctionality("VK_AMD_shader_image_load_store_lod");

    if (!m_bufferLoadUniform && !m_declareFormatInShaderReads)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT);
1317
1318 if (!m_declareFormatInShaderWrites)
1319 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT);
1320
1321 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
1322 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1323
1324 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(tilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
1325 TCU_THROW(NotSupportedError, "Format not supported for storage images");
1326
1327 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
1328 TCU_THROW(NotSupportedError, "Format not supported for storage images");
1329
1330 if (m_texture.type() == IMAGE_TYPE_BUFFER &&
1331 !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1332 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1333
1334 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageTilingFeatures))
1335 TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");
1336
1337 if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
1338 TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");
1339
1340 if (formatHasThreeComponents(m_format))
1341 {
1342 // When the source buffer is three-component, the destination buffer is single-component.
1343 VkFormat dstFormat = getSingleComponentFormat(m_format);
1344 const vk::VkFormatProperties dstFormatProperties(vk::getPhysicalDeviceFormatProperties(
1345 context.getInstanceInterface(), context.getPhysicalDevice(), dstFormat));
1346
1347 if (m_texture.type() == IMAGE_TYPE_BUFFER &&
1348 !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1349 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1350 }
1351 else if (m_texture.type() == IMAGE_TYPE_BUFFER &&
1352 !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1353 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1354
1355 if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER &&
1356 !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
1357 TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
1358 #endif // CTS_USES_VULKANSC
1359
1360 const auto &vki = context.getInstanceInterface();
1361 const auto physicalDevice = context.getPhysicalDevice();
1362
1363 VkImageFormatProperties vkImageFormatProperties;
1364 const auto result = vki.getPhysicalDeviceImageFormatProperties(
1365 physicalDevice, m_imageFormat, mapImageType(m_texture.type()), m_tiling,
1366 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &vkImageFormatProperties);
1367
1368 if (result != VK_SUCCESS)
1369 {
1370 if (result == VK_ERROR_FORMAT_NOT_SUPPORTED)
1371 TCU_THROW(NotSupportedError, "Format unsupported for tiling");
1372 else
1373 TCU_FAIL("vkGetPhysicalDeviceImageFormatProperties returned unexpected error");
1374 }
1375
1376 if (vkImageFormatProperties.maxArrayLayers < (uint32_t)m_texture.numLayers())
1377 {
1378 TCU_THROW(NotSupportedError, "This format and tiling combination does not support this number of array layers");
1379 }
1380
1381 if (vkImageFormatProperties.maxMipLevels < (uint32_t)m_texture.numMipmapLevels())
1382 {
1383 TCU_THROW(NotSupportedError, "This format and tiling combination does not support this number of mip levels");
1384 }
1385 }
1386
1387 void LoadStoreTest::initPrograms(SourceCollections &programCollection) const
1388 {
1389 const tcu::TextureFormat texFormat = mapVkFormat(m_format);
1390 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
1391 const ImageType usedImageType =
1392 (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
1393 const bool noFormats = (!m_declareFormatInShaderReads && !m_declareFormatInShaderWrites);
1394 const std::string formatQualifierStr = (noFormats ? "" : getShaderImageFormatQualifier(texFormat));
1395 const std::string uniformTypeStr = getFormatPrefix(texFormat) + "textureBuffer";
1396 const std::string imageTypeStr = getShaderImageType(texFormat, usedImageType);
1397 const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
1398 const std::string xMax = de::toString(m_texture.size().x() - 1);
1399
1400 std::ostringstream src;
1401 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1402 << "\n";
1403
1404 if (!m_declareFormatInShaderReads || !m_declareFormatInShaderWrites)
1405 {
1406 src << "#extension GL_EXT_shader_image_load_formatted : require\n";
1407 }
1408
1409 if (m_imageLoadStoreLodAMD)
1410 {
1411 src << "#extension GL_AMD_shader_image_load_store_lod : require\n";
1412 }
1413
1414 const std::string maybeFmtQualStrReads = m_declareFormatInShaderReads ? ", " + formatQualifierStr : "";
1415 const std::string maybeFmtQualStrWrites = m_declareFormatInShaderWrites ? ", " + formatQualifierStr : "";
1416
1417 src << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
1418
1419 if (m_bufferLoadUniform)
1420 src << "layout (binding = 0) uniform " << uniformTypeStr << " u_image0;\n";
1421 else
1422 src << "layout (binding = 0" << maybeFmtQualStrReads << ") " << maybeRestrictStr << "readonly uniform "
1423 << imageTypeStr << " u_image0;\n";
1424
1425 // For three-component formats, the dst buffer is single-component and the shader expands the store into 3 component-wise stores.
1426 // We always use the format qualifier for the dst buffer, except when splitting it up.
1427 if (formatHasThreeComponents(m_format))
1428 src << "layout (binding = 1) " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
1429 else
1430 src << "layout (binding = 1" << maybeFmtQualStrWrites << ") " << maybeRestrictStr << "writeonly uniform "
1431 << imageTypeStr << " u_image1;\n";
1432
1433 src << "\n"
1434 << "void main (void)\n"
1435 << "{\n";
1436
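// For illustration only (this exact text is not emitted verbatim): a plain 2D
// variant with both format qualifiers declared, assuming an rgba8 view that is
// 64 texels wide, would generate a shader along these lines:
//
//     #version 450
//     layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
//     layout (binding = 0, rgba8) readonly uniform image2D u_image0;
//     layout (binding = 1, rgba8) writeonly uniform image2D u_image1;
//     void main (void)
//     {
//         ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
//         imageStore(u_image1, pos, imageLoad(u_image0, ivec2(63-pos.x, pos.y)));
//     }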
1437 switch (dimension)
1438 {
1439 default:
1440 DE_ASSERT(0); // fallthrough
1441 case 1:
1442 if (m_bufferLoadUniform)
1443 {
1444 // Expand the store into 3 component-wise stores.
1445 std::string type = getFormatPrefix(texFormat) + "vec4";
1446 src << " int pos = int(gl_GlobalInvocationID.x);\n"
1447 " "
1448 << type << " t = texelFetch(u_image0, " + xMax + "-pos);\n";
1449 if (formatHasThreeComponents(m_format))
1450 {
1451 src << " imageStore(u_image1, 3*pos+0, " << type << "(t.x));\n";
1452 src << " imageStore(u_image1, 3*pos+1, " << type << "(t.y));\n";
1453 src << " imageStore(u_image1, 3*pos+2, " << type << "(t.z));\n";
1454 }
1455 else
1456 src << " imageStore(u_image1, pos, t);\n";
1457 }
1458 else if (m_imageLoadStoreLodAMD)
1459 {
1460 src << " int pos = int(gl_GlobalInvocationID.x);\n";
1461
1462 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1463 {
1464 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1465 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) +
1466 ", imageLoadLodAMD(u_image0, " + xMaxSize + "-pos, " + de::toString(levelNdx) + "));\n";
1467 }
1468 }
1469 else
1470 {
1471 src << " int pos = int(gl_GlobalInvocationID.x);\n"
1472 " imageStore(u_image1, pos, imageLoad(u_image0, " +
1473 xMax + "-pos));\n";
1474 }
1475 break;
1476 case 2:
1477 if (m_imageLoadStoreLodAMD)
1478 {
1479 src << " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n";
1480
1481 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1482 {
1483 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1484 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) +
1485 ", imageLoadLodAMD(u_image0, ivec2(" + xMaxSize + "-pos.x, pos.y), " +
1486 de::toString(levelNdx) + "));\n";
1487 }
1488 }
1489 else
1490 {
1491 src << " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
1492 " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" +
1493 xMax + "-pos.x, pos.y)));\n";
1494 }
1495 break;
1496 case 3:
1497 if (m_imageLoadStoreLodAMD)
1498 {
1499 src << " ivec3 pos = ivec3(gl_GlobalInvocationID);\n";
1500
1501 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1502 {
1503 std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
1504 src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) +
1505 ", imageLoadLodAMD(u_image0, ivec3(" + xMaxSize + "-pos.x, pos.y, pos.z), " +
1506 de::toString(levelNdx) + "));\n";
1507 }
1508 }
1509 else
1510 {
1511 src << " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
1512 " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" +
1513 xMax + "-pos.x, pos.y, pos.z)));\n";
1514 }
1515 break;
1516 }
1517 src << "}\n";
1518
1519 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
1520 }
1521
1522 //! Load/store test base implementation
1523 class LoadStoreTestInstance : public BaseTestInstance
1524 {
1525 public:
1526 LoadStoreTestInstance(Context &context, const Texture &texture, const VkFormat format, const VkFormat imageFormat,
1527 const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign,
1528 const bool bufferLoadUniform);
1529
1530 protected:
1531 virtual BufferWithMemory *getResultBuffer(void) const = 0; //!< Get the buffer that contains the result image
1532
1533 tcu::TestStatus verifyResult(void);
1534
1535 // Add empty implementations for functions that might not be needed
1536 void commandBeforeCompute(const VkCommandBuffer)
1537 {
1538 }
1539 void commandBetweenShaderInvocations(const VkCommandBuffer)
1540 {
1541 }
1542 void commandAfterCompute(const VkCommandBuffer)
1543 {
1544 }
1545
1546 de::MovePtr<BufferWithMemory> m_imageBuffer; //!< Source data and helper buffer
1547 const VkDeviceSize m_imageSizeBytes;
1548 const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
1549 tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
1550
1551 bool m_bufferLoadUniform;
1552 VkDescriptorType m_bufferLoadDescriptorType;
1553 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1554 };
1555
1556 LoadStoreTestInstance::LoadStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
1557 const VkFormat imageFormat, const bool declareImageFormatInShader,
1558 const bool singleLayerBind, const bool minalign,
1559 const bool bufferLoadUniform)
1560 : BaseTestInstance(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign,
1561 bufferLoadUniform)
1562 , m_imageSizeBytes(getImageSizeBytes(texture.size(), format))
1563 , m_imageFormat(imageFormat)
1564 , m_referenceImage(generateReferenceImage(texture.size(), imageFormat, format))
1565 , m_bufferLoadUniform(bufferLoadUniform)
1566 {
1567 const DeviceInterface &vk = m_context.getDeviceInterface();
1568 const VkDevice device = m_context.getDevice();
1569 Allocator &allocator = m_context.getDefaultAllocator();
1570
1571 m_bufferLoadDescriptorType =
1572 m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1573 m_bufferLoadUsageBit =
1574 m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1575
1576 // A helper buffer with enough space to hold the whole image.
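// The extra m_srcViewOffset bytes allow the source view to start at a non-zero offset (non-zero in the minalign variants).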
1577
1578 m_imageBuffer = de::MovePtr<BufferWithMemory>(
1579 new BufferWithMemory(vk, device, allocator,
1580 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset,
1581 m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
1582 VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1583 MemoryRequirement::HostVisible));
1584
1585 // Copy reference data to buffer for subsequent upload to image.
1586
1587 const Allocation &alloc = m_imageBuffer->getAllocation();
1588 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset, m_referenceImage.getAccess().getDataPtr(),
1589 static_cast<size_t>(m_imageSizeBytes));
1590 flushAlloc(vk, device, alloc);
1591 }
1592
1593 tcu::TestStatus LoadStoreTestInstance::verifyResult(void)
1594 {
1595 const DeviceInterface &vk = m_context.getDeviceInterface();
1596 const VkDevice device = m_context.getDevice();
1597
1598 // Apply the same transformation as done in the shader
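// (the shader writes imageLoad(src, xMax - x) to dst texel x, i.e. each row ends up mirrored)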
1599 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
1600 flipHorizontally(reference);
1601
1602 const Allocation &alloc = getResultBuffer()->getAllocation();
1603 invalidateAlloc(vk, device, alloc);
1604 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(),
1605 (const char *)alloc.getHostPtr() + m_dstViewOffset);
1606
1607 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
1608 return tcu::TestStatus::pass("Passed");
1609 else
1610 return tcu::TestStatus::fail("Image comparison failed");
1611 }
1612
1613 //! Load/store test for images
1614 class ImageLoadStoreTestInstance : public LoadStoreTestInstance
1615 {
1616 public:
1617 ImageLoadStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
1618 const VkFormat imageFormat, const VkImageTiling tiling,
1619 const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign,
1620 const bool bufferLoadUniform);
1621
1622 protected:
1623 VkDescriptorSetLayout prepareDescriptors(void);
1624 void commandBeforeCompute(const VkCommandBuffer cmdBuffer);
1625 void commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer);
1626 void commandAfterCompute(const VkCommandBuffer cmdBuffer);
1627
1628 void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
1629 const int layerNdx);
1630
1631 BufferWithMemory *getResultBuffer(void) const
1632 {
1633 return m_imageBuffer.get();
1634 }
1635
1636 de::MovePtr<Image> m_imageSrc;
1637 de::MovePtr<Image> m_imageDst;
1638 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1639 Move<VkDescriptorPool> m_descriptorPool;
1640 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1641 std::vector<SharedVkImageView> m_allSrcImageViews;
1642 std::vector<SharedVkImageView> m_allDstImageViews;
1643 };
1644
1645 ImageLoadStoreTestInstance::ImageLoadStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
1646 const VkFormat imageFormat, const VkImageTiling tiling,
1647 const bool declareImageFormatInShader,
1648 const bool singleLayerBind, const bool minalign,
1649 const bool bufferLoadUniform)
1650 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, singleLayerBind,
1651 minalign, bufferLoadUniform)
1652 , m_allDescriptorSets(texture.numLayers())
1653 , m_allSrcImageViews(texture.numLayers())
1654 , m_allDstImageViews(texture.numLayers())
1655 {
1656 const DeviceInterface &vk = m_context.getDeviceInterface();
1657 const VkDevice device = m_context.getDevice();
1658 Allocator &allocator = m_context.getDefaultAllocator();
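// The views created in prepareDescriptors() use m_format while the images below use m_imageFormat,
// so the images must be mutable whenever the two formats differ.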
1659 const VkImageCreateFlags imageFlags =
1660 (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1661
1662 m_imageSrc = de::MovePtr<Image>(
1663 new Image(vk, device, allocator,
1664 makeImageCreateInfo(m_texture, m_imageFormat,
1665 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags, tiling),
1666 MemoryRequirement::Any));
1667
1668 m_imageDst = de::MovePtr<Image>(
1669 new Image(vk, device, allocator,
1670 makeImageCreateInfo(m_texture, m_imageFormat,
1671 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags, tiling),
1672 MemoryRequirement::Any));
1673 }
1674
1675 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors(void)
1676 {
1677 const VkDevice device = m_context.getDevice();
1678 const DeviceInterface &vk = m_context.getDeviceInterface();
1679
1680 const int numLayers = m_texture.numLayers();
1681 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1682 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1683 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1684 .build(vk, device);
1685
1686 m_descriptorPool = DescriptorPoolBuilder()
1687 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1688 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1689 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1690
1691 if (m_singleLayerBind)
1692 {
1693 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1694 {
1695 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1696 const VkImageSubresourceRange subresourceRange =
1697 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1698
1699 m_allDescriptorSets[layerNdx] =
1700 makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1701 m_allSrcImageViews[layerNdx] =
1702 makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1703 m_allDstImageViews[layerNdx] =
1704 makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1705 }
1706 }
1707 else // bind all layers at once
1708 {
1709 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1710 const VkImageSubresourceRange subresourceRange =
1711 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1712
1713 m_allDescriptorSets[0] =
1714 makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1715 m_allSrcImageViews[0] =
1716 makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1717 m_allDstImageViews[0] =
1718 makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1719 }
1720
1721 return *m_descriptorSetLayout; // ownership is not transferred
1722 }
1723
1724 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
1725 const VkPipelineLayout pipelineLayout,
1726 const int layerNdx)
1727 {
1728 const VkDevice device = m_context.getDevice();
1729 const DeviceInterface &vk = m_context.getDeviceInterface();
1730
1731 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1732 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1733 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1734
1735 const VkDescriptorImageInfo descriptorSrcImageInfo =
1736 makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1737 const VkDescriptorImageInfo descriptorDstImageInfo =
1738 makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1739
1740 DescriptorSetUpdateBuilder()
1741 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
1742 &descriptorSrcImageInfo)
1743 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
1744 &descriptorDstImageInfo)
1745 .update(vk, device);
1746 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u,
1747 DE_NULL);
1748 }
1749
1750 void ImageLoadStoreTestInstance::commandBeforeCompute(const VkCommandBuffer cmdBuffer)
1751 {
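// Upload sequence: flush the host-written staging data for the transfer, move the
// source image to TRANSFER_DST and the destination image directly to GENERAL,
// copy buffer -> source image, then make the copied data visible to the shader.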
1752 const DeviceInterface &vk = m_context.getDeviceInterface();
1753
1754 const VkImageSubresourceRange fullImageSubresourceRange =
1755 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1756 {
1757 const VkImageMemoryBarrier preCopyImageBarriers[] = {
1758 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
1759 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_imageSrc->get(), fullImageSubresourceRange),
1760 makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1761 m_imageDst->get(), fullImageSubresourceRange)};
1762
1763 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy =
1764 makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, m_imageBuffer->get(), 0ull,
1765 m_imageSizeBytes + m_srcViewOffset);
1766
1767 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT,
1768 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1769 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1,
1770 &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers),
1771 preCopyImageBarriers);
1772 }
1773 {
1774 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1775 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1776 VK_IMAGE_LAYOUT_GENERAL, m_imageSrc->get(), fullImageSubresourceRange);
1777
1778 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1779
1780 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(),
1781 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1782 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
1783 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
1784 (const VkBufferMemoryBarrier *)DE_NULL, 1, &barrierAfterCopy);
1785 }
1786 }
1787
1788 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)
1789 {
1790 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1791 }
1792
1793 void ImageLoadStoreTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
1794 {
1795 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes,
1796 m_texture);
1797 }
1798
1799 //! Load/store Lod AMD test for images
1800 class ImageLoadStoreLodAMDTestInstance : public BaseTestInstance
1801 {
1802 public:
1803 ImageLoadStoreLodAMDTestInstance(Context &context, const Texture &texture, const VkFormat format,
1804 const VkFormat imageFormat, const bool declareImageFormatInShader,
1805 const bool singleLayerBind, const bool minalign, const bool bufferLoadUniform);
1806
1807 protected:
1808 VkDescriptorSetLayout prepareDescriptors(void);
1809 void commandBeforeCompute(const VkCommandBuffer cmdBuffer);
1810 void commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer);
1811 void commandAfterCompute(const VkCommandBuffer cmdBuffer);
1812
1813 void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
1814 const int layerNdx);
1815
1816 BufferWithMemory *getResultBuffer(void) const
1817 {
1818 return m_imageBuffer.get();
1819 }
1820 tcu::TestStatus verifyResult(void);
1821
1822 de::MovePtr<BufferWithMemory> m_imageBuffer; //!< Source data and helper buffer
1823 const VkDeviceSize m_imageSizeBytes;
1824 const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
1825 std::vector<tcu::TextureLevel> m_referenceImages; //!< Used as input data and later to verify result image
1826
1827 bool m_bufferLoadUniform;
1828 VkDescriptorType m_bufferLoadDescriptorType;
1829 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1830
1831 de::MovePtr<Image> m_imageSrc;
1832 de::MovePtr<Image> m_imageDst;
1833 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1834 Move<VkDescriptorPool> m_descriptorPool;
1835 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1836 std::vector<SharedVkImageView> m_allSrcImageViews;
1837 std::vector<SharedVkImageView> m_allDstImageViews;
1838 };
1839
1840 ImageLoadStoreLodAMDTestInstance::ImageLoadStoreLodAMDTestInstance(Context &context, const Texture &texture,
1841 const VkFormat format, const VkFormat imageFormat,
1842 const bool declareImageFormatInShader,
1843 const bool singleLayerBind, const bool minalign,
1844 const bool bufferLoadUniform)
1845 : BaseTestInstance(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign,
1846 bufferLoadUniform)
1847 , m_imageSizeBytes(getMipmapImageTotalSizeBytes(texture, format))
1848 , m_imageFormat(imageFormat)
1849 , m_bufferLoadUniform(bufferLoadUniform)
1850 , m_allDescriptorSets(texture.numLayers())
1851 , m_allSrcImageViews(texture.numLayers())
1852 , m_allDstImageViews(texture.numLayers())
1853 {
1854 const DeviceInterface &vk = m_context.getDeviceInterface();
1855 const VkDevice device = m_context.getDevice();
1856 Allocator &allocator = m_context.getDefaultAllocator();
1857 const VkImageCreateFlags imageFlags =
1858 (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1859
1860 const VkSampleCountFlagBits samples = static_cast<VkSampleCountFlagBits>(
1861 m_texture.numSamples()); // integer and bit mask are aligned, so we can cast like this
1862
1863 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1864 {
1865 tcu::TextureLevel referenceImage = generateReferenceImage(texture.size(levelNdx), imageFormat, format);
1866 m_referenceImages.push_back(referenceImage);
1867 }
1868
1869 m_bufferLoadDescriptorType =
1870 m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1871 m_bufferLoadUsageBit =
1872 m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1873
1874 // A helper buffer with enough space to hold the whole image.
1875 m_imageBuffer = de::MovePtr<BufferWithMemory>(
1876 new BufferWithMemory(vk, device, allocator,
1877 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset,
1878 m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
1879 VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1880 MemoryRequirement::HostVisible));
1881
1882 // Copy reference data to buffer for subsequent upload to image.
1883 {
1884 const Allocation &alloc = m_imageBuffer->getAllocation();
1885 VkDeviceSize bufferOffset = 0u;
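// Mip levels are tightly packed in the staging buffer, starting with level 0.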
1886 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1887 {
1888 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset + bufferOffset,
1889 m_referenceImages[levelNdx].getAccess().getDataPtr(),
1890 static_cast<size_t>(getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx)));
1891 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1892 }
1893 flushAlloc(vk, device, alloc);
1894 }
1895
1896 {
1897 const VkImageCreateInfo imageParamsSrc = {
1898 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1899 DE_NULL, // const void* pNext;
1900 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) |
1901 imageFlags, // VkImageCreateFlags flags;
1902 mapImageType(m_texture.type()), // VkImageType imageType;
1903 m_imageFormat, // VkFormat format;
1904 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1905 (uint32_t)m_texture.numMipmapLevels(), // uint32_t mipLevels;
1906 (uint32_t)m_texture.numLayers(), // uint32_t arrayLayers;
1907 samples, // VkSampleCountFlagBits samples;
1908 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1909 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1910 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1911 0u, // uint32_t queueFamilyIndexCount;
1912 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1913 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1914 };
1915
1916 m_imageSrc = de::MovePtr<Image>(new Image(vk, device, allocator, imageParamsSrc, MemoryRequirement::Any));
1917 }
1918
1919 {
1920 const VkImageCreateInfo imageParamsDst = {
1921 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1922 DE_NULL, // const void* pNext;
1923 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) |
1924 imageFlags, // VkImageCreateFlags flags;
1925 mapImageType(m_texture.type()), // VkImageType imageType;
1926 m_imageFormat, // VkFormat format;
1927 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1928 (uint32_t)m_texture.numMipmapLevels(), // uint32_t mipLevels;
1929 (uint32_t)m_texture.numLayers(), // uint32_t arrayLayers;
1930 samples, // VkSampleCountFlagBits samples;
1931 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1932 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
1933 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1934 0u, // uint32_t queueFamilyIndexCount;
1935 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1936 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1937 };
1938
1939 m_imageDst = de::MovePtr<Image>(new Image(vk, device, allocator, imageParamsDst, MemoryRequirement::Any));
1940 }
1941 }
1942
1943 tcu::TestStatus ImageLoadStoreLodAMDTestInstance::verifyResult(void)
1944 {
1945 const DeviceInterface &vk = m_context.getDeviceInterface();
1946 const VkDevice device = m_context.getDevice();
1947
1948 const Allocation &alloc = getResultBuffer()->getAllocation();
1949 invalidateAlloc(vk, device, alloc);
1950
1951 VkDeviceSize bufferOffset = 0;
1952 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1953 {
1954 // Apply the same transformation as done in the shader
1955 const tcu::PixelBufferAccess reference = m_referenceImages[levelNdx].getAccess();
1956 flipHorizontally(reference);
1957
1958 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(levelNdx),
1959 (const char *)alloc.getHostPtr() + m_dstViewOffset + bufferOffset);
1960
1961 if (!comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result,
1962 levelNdx))
1963 {
1964 std::ostringstream errorMessage;
1965 errorMessage << "Image Level " << levelNdx << " comparison failed";
1966 return tcu::TestStatus::fail(errorMessage.str());
1967 }
1968 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1969 }
1970
1971 return tcu::TestStatus::pass("Passed");
1972 }
1973
1974 VkDescriptorSetLayout ImageLoadStoreLodAMDTestInstance::prepareDescriptors(void)
1975 {
1976 const VkDevice device = m_context.getDevice();
1977 const DeviceInterface &vk = m_context.getDeviceInterface();
1978
1979 const int numLayers = m_texture.numLayers();
1980 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1981 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1982 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1983 .build(vk, device);
1984
1985 m_descriptorPool = DescriptorPoolBuilder()
1986 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1987 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1988 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1989
1990 if (m_singleLayerBind)
1991 {
1992 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1993 {
1994 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1995 const VkImageSubresourceRange subresourceRange =
1996 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), layerNdx, 1u);
1997
1998 m_allDescriptorSets[layerNdx] =
1999 makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
2000 m_allSrcImageViews[layerNdx] =
2001 makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
2002 m_allDstImageViews[layerNdx] =
2003 makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
2004 }
2005 }
2006 else // bind all layers at once
2007 {
2008 const VkImageViewType viewType = mapImageViewType(m_texture.type());
2009 const VkImageSubresourceRange subresourceRange =
2010 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, numLayers);
2011
2012 m_allDescriptorSets[0] =
2013 makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
2014 m_allSrcImageViews[0] =
2015 makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
2016 m_allDstImageViews[0] =
2017 makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
2018 }
2019
2020 return *m_descriptorSetLayout; // ownership is not transferred
2021 }
2022
2023 void ImageLoadStoreLodAMDTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
2024 const VkPipelineLayout pipelineLayout,
2025 const int layerNdx)
2026 {
2027 const VkDevice device = m_context.getDevice();
2028 const DeviceInterface &vk = m_context.getDeviceInterface();
2029
2030 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
2031 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
2032 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
2033
2034 const VkDescriptorImageInfo descriptorSrcImageInfo =
2035 makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
2036 const VkDescriptorImageInfo descriptorDstImageInfo =
2037 makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
2038
2039 DescriptorSetUpdateBuilder()
2040 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
2041 &descriptorSrcImageInfo)
2042 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
2043 &descriptorDstImageInfo)
2044 .update(vk, device);
2045 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u,
2046 DE_NULL);
2047 }
2048
2049 void ImageLoadStoreLodAMDTestInstance::commandBeforeCompute(const VkCommandBuffer cmdBuffer)
2050 {
2051 const DeviceInterface &vk = m_context.getDeviceInterface();
2052 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(
2053 VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, m_texture.numLayers());
2054 {
2055 const VkImageMemoryBarrier preCopyImageBarriers[] = {
2056 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2057 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_imageSrc->get(), fullImageSubresourceRange),
2058 makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
2059 m_imageDst->get(), fullImageSubresourceRange)};
2060
2061 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy =
2062 makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, m_imageBuffer->get(), 0ull,
2063 m_imageSizeBytes + m_srcViewOffset);
2064
2065 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT,
2066 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
2067 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1,
2068 &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers),
2069 preCopyImageBarriers);
2070 }
2071 {
2072 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
2073 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2074 VK_IMAGE_LAYOUT_GENERAL, m_imageSrc->get(), fullImageSubresourceRange);
2075
2076 std::vector<VkBufferImageCopy> copyRegions;
2077 VkDeviceSize bufferOffset = 0u;
2078 for (int32_t levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
2079 {
2080 const VkBufferImageCopy copyParams = {
2081 bufferOffset, // VkDeviceSize bufferOffset;
2082 0u, // uint32_t bufferRowLength;
2083 0u, // uint32_t bufferImageHeight;
2084 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u,
2085 m_texture.numLayers()), // VkImageSubresourceLayers imageSubresource;
2086 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
2087 makeExtent3D(m_texture.layerSize(levelNdx)), // VkExtent3D imageExtent;
2088 };
2089 copyRegions.push_back(copyParams);
2090 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
2091 }
2092
2093 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(),
2094 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (uint32_t)copyRegions.size(), copyRegions.data());
2095 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2096 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2097 (const VkBufferMemoryBarrier *)DE_NULL, 1, &barrierAfterCopy);
2098 }
2099 }
2100
2101 void ImageLoadStoreLodAMDTestInstance::commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)
2102 {
2103 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
2104 }
2105
2106 void ImageLoadStoreLodAMDTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
2107 {
2108 commandCopyMipmapImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageFormat, m_imageBuffer->get(),
2109 m_imageSizeBytes, m_texture);
2110 }
2111
2112 //! Load/store test for buffers
2113 class BufferLoadStoreTestInstance : public LoadStoreTestInstance
2114 {
2115 public:
2116 BufferLoadStoreTestInstance(Context &context, const Texture &texture, const VkFormat format,
2117 const VkFormat imageFormat, const bool declareImageFormatInShader, const bool minalign,
2118 const bool bufferLoadUniform);
2119
2120 protected:
2121 VkDescriptorSetLayout prepareDescriptors(void);
2122 void commandAfterCompute(const VkCommandBuffer cmdBuffer);
2123
2124 void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
2125 const int layerNdx);
2126
2127 BufferWithMemory *getResultBuffer(void) const
2128 {
2129 return m_imageBufferDst.get();
2130 }
2131
2132 de::MovePtr<BufferWithMemory> m_imageBufferDst;
2133 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
2134 Move<VkDescriptorPool> m_descriptorPool;
2135 Move<VkDescriptorSet> m_descriptorSet;
2136 Move<VkBufferView> m_bufferViewSrc;
2137 Move<VkBufferView> m_bufferViewDst;
2138 };
2139
2140 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance(Context &context, const Texture &texture,
2141 const VkFormat format, const VkFormat imageFormat,
2142 const bool declareImageFormatInShader, const bool minalign,
2143 const bool bufferLoadUniform)
2144 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, false, minalign,
2145 bufferLoadUniform)
2146 {
2147 const DeviceInterface &vk = m_context.getDeviceInterface();
2148 const VkDevice device = m_context.getDevice();
2149 Allocator &allocator = m_context.getDefaultAllocator();
2150
2151 // Create a destination buffer.
2152
2153 m_imageBufferDst = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2154 vk, device, allocator,
2155 makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
2156 MemoryRequirement::HostVisible));
2157 }
2158
2159 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors(void)
2160 {
2161 const DeviceInterface &vk = m_context.getDeviceInterface();
2162 const VkDevice device = m_context.getDevice();
2163
2164 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2165 .addSingleBinding(m_bufferLoadDescriptorType, VK_SHADER_STAGE_COMPUTE_BIT)
2166 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2167 .build(vk, device);
2168
2169 m_descriptorPool = DescriptorPoolBuilder()
2170 .addType(m_bufferLoadDescriptorType)
2171 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
2172 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2173
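// Three-component source formats are written through a single-component view;
// initPrograms() splits each store into three per-component stores to match.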
2174 VkFormat dstFormat = formatHasThreeComponents(m_format) ? getSingleComponentFormat(m_format) : m_format;
2175
2176 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
2177 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_srcViewOffset, m_imageSizeBytes);
2178 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), dstFormat, m_dstViewOffset, m_imageSizeBytes);
2179
2180 return *m_descriptorSetLayout; // ownership is not transferred
2181 }
2182
2183 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
2184 const VkPipelineLayout pipelineLayout,
2185 const int layerNdx)
2186 {
2187 DE_ASSERT(layerNdx == 0);
2188 DE_UNREF(layerNdx);
2189
2190 const VkDevice device = m_context.getDevice();
2191 const DeviceInterface &vk = m_context.getDeviceInterface();
2192
2193 DescriptorSetUpdateBuilder()
2194 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), m_bufferLoadDescriptorType,
2195 &m_bufferViewSrc.get())
2196 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u),
2197 VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
2198 .update(vk, device);
2199 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(),
2200 0u, DE_NULL);
2201 }
2202
2203 void BufferLoadStoreTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
2204 {
2205 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(),
2206 m_imageSizeBytes + m_dstViewOffset);
2207 }
2208
2209 TestInstance *StoreTest::createInstance(Context &context) const
2210 {
2211 if (m_texture.type() == IMAGE_TYPE_BUFFER)
2212 return new BufferStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_minalign,
2213 m_storeConstantValue);
2214 else
2215 return new ImageStoreTestInstance(context, m_texture, m_format, m_tiling, m_declareImageFormatInShader,
2216 m_singleLayerBind, m_minalign, m_storeConstantValue);
2217 }
2218
2219 TestInstance *LoadStoreTest::createInstance(Context &context) const
2220 {
2221 if (m_imageLoadStoreLodAMD)
2222 return new ImageLoadStoreLodAMDTestInstance(context, m_texture, m_format, m_imageFormat,
2223 m_declareFormatInShaderReads, m_singleLayerBind, m_minalign,
2224 m_bufferLoadUniform);
2225
2226 if (m_texture.type() == IMAGE_TYPE_BUFFER)
2227 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat,
2228 m_declareFormatInShaderReads, m_minalign, m_bufferLoadUniform);
2229 else
2230 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_tiling,
2231 m_declareFormatInShaderReads, m_singleLayerBind, m_minalign,
2232 m_bufferLoadUniform);
2233 }
2234
2235 class ImageExtendOperandTestInstance : public BaseTestInstance
2236 {
2237 public:
2238 ImageExtendOperandTestInstance(Context &context, const Texture &texture, const VkFormat readFormat,
2239 const VkFormat writeFormat, bool relaxedPrecision);
2240
2241 virtual ~ImageExtendOperandTestInstance(void)
2242 {
2243 }
2244
2245 protected:
2246 VkDescriptorSetLayout prepareDescriptors(void);
2247 void commandBeforeCompute(const VkCommandBuffer cmdBuffer);
2248 void commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer);
2249 void commandAfterCompute(const VkCommandBuffer cmdBuffer);
2250
2251 void commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout,
2252 const int layerNdx);
2253
2254 tcu::TestStatus verifyResult(void);
2255
2256 protected:
2257 bool m_isSigned;
2258 tcu::TextureLevel m_inputImageData;
2259
2260 de::MovePtr<Image> m_imageSrc; // source image
2261 SharedVkImageView m_imageSrcView;
2262 VkDeviceSize m_imageSrcSize;
2263
2264 de::MovePtr<Image> m_imageDst; // dest image
2265 SharedVkImageView m_imageDstView;
2266 VkFormat m_imageDstFormat;
2267 VkDeviceSize m_imageDstSize;
2268
2269 de::MovePtr<BufferWithMemory> m_buffer; // result buffer
2270
2271 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
2272 Move<VkDescriptorPool> m_descriptorPool;
2273 SharedVkDescriptorSet m_descriptorSet;
2274
2275 bool m_relaxedPrecision;
2276 };
2277
2278 ImageExtendOperandTestInstance::ImageExtendOperandTestInstance(Context &context, const Texture &texture,
2279 const VkFormat readFormat, const VkFormat writeFormat,
2280 bool relaxedPrecision)
2281 : BaseTestInstance(context, texture, readFormat, true, true, false, false)
2282 , m_imageDstFormat(writeFormat)
2283 , m_relaxedPrecision(relaxedPrecision)
2284 {
2285 const DeviceInterface &vk = m_context.getDeviceInterface();
2286 const VkDevice device = m_context.getDevice();
2287 Allocator &allocator = m_context.getDefaultAllocator();
2288 const int32_t width = texture.size().x();
2289 const int32_t height = texture.size().y();
2290 const tcu::TextureFormat textureFormat = mapVkFormat(m_format);
2291
2292 // Generate reference image
2293 m_isSigned = (getTextureChannelClass(textureFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
2294 m_inputImageData.setStorage(textureFormat, width, height, 1);
2295
2296 const tcu::PixelBufferAccess access = m_inputImageData.getAccess();
2297 const int valueStart = (m_isSigned ? (-width / 2) : 0);
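// Signed formats start at a negative value so that negative texels exercise the sign-extension behaviour under test.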
2298
2299 for (int x = 0; x < width; ++x)
2300 for (int y = 0; y < height; ++y)
2301 {
2302 const tcu::IVec4 color(valueStart + x, valueStart + y, valueStart, valueStart);
2303 access.setPixel(color, x, y);
2304 }
2305
2306 // Create source image
2307 m_imageSrc = de::MovePtr<Image>(new Image(
2308 vk, device, allocator,
2309 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0u),
2310 MemoryRequirement::Any));
2311
2312 // Create destination image
2313 m_imageDst = de::MovePtr<Image>(
2314 new Image(vk, device, allocator,
2315 makeImageCreateInfo(m_texture, m_imageDstFormat,
2316 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
2317 MemoryRequirement::Any));
2318
2319 // Compute image and buffer sizes
2320 m_imageSrcSize = width * height * tcu::getPixelSize(textureFormat);
2321 m_imageDstSize = width * height * tcu::getPixelSize(mapVkFormat(m_imageDstFormat));
2322 VkDeviceSize bufferSizeBytes = de::max(m_imageSrcSize, m_imageDstSize);
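// The same buffer is reused for the upload and the readback, so it must fit the larger of the two images.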
2323
2324 // Create helper buffer able to store input data and image write result
2325 m_buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2326 vk, device, allocator,
2327 makeBufferCreateInfo(bufferSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
2328 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2329 MemoryRequirement::HostVisible));
2330
2331 const Allocation &alloc = m_buffer->getAllocation();
2332 deMemcpy(alloc.getHostPtr(), m_inputImageData.getAccess().getDataPtr(), static_cast<size_t>(m_imageSrcSize));
2333 flushAlloc(vk, device, alloc);
2334 }
2335
2336 VkDescriptorSetLayout ImageExtendOperandTestInstance::prepareDescriptors(void)
2337 {
2338 const DeviceInterface &vk = m_context.getDeviceInterface();
2339 const VkDevice device = m_context.getDevice();
2340
2341 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2342 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2343 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2344 .build(vk, device);
2345
2346 m_descriptorPool = DescriptorPoolBuilder()
2347 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2348 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2349 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1);
2350
2351 const VkImageViewType viewType = mapImageViewType(m_texture.type());
2352 const VkImageSubresourceRange subresourceRange =
2353 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2354
2355 m_descriptorSet = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
2356 m_imageSrcView =
2357 makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
2358 m_imageDstView =
2359 makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_imageDstFormat, subresourceRange));
2360
2361 return *m_descriptorSetLayout; // ownership is not transferred
2362 }
2363
2364 void ImageExtendOperandTestInstance::commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,
2365 const VkPipelineLayout pipelineLayout,
2366 const int layerNdx)
2367 {
2368 DE_UNREF(layerNdx);
2369
2370 const DeviceInterface &vk = m_context.getDeviceInterface();
2371 const VkDevice device = m_context.getDevice();
2372 const VkDescriptorSet descriptorSet = **m_descriptorSet;
2373
2374 const VkDescriptorImageInfo descriptorSrcImageInfo =
2375 makeDescriptorImageInfo(DE_NULL, **m_imageSrcView, VK_IMAGE_LAYOUT_GENERAL);
2376 const VkDescriptorImageInfo descriptorDstImageInfo =
2377 makeDescriptorImageInfo(DE_NULL, **m_imageDstView, VK_IMAGE_LAYOUT_GENERAL);
2378
2379 typedef DescriptorSetUpdateBuilder::Location DSUBL;
2380 DescriptorSetUpdateBuilder()
2381 .writeSingle(descriptorSet, DSUBL::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
2382 .writeSingle(descriptorSet, DSUBL::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
2383 .update(vk, device);
2384 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u,
2385 DE_NULL);
2386 }
2387
2388 void ImageExtendOperandTestInstance::commandBeforeCompute(const VkCommandBuffer cmdBuffer)
2389 {
2390 const DeviceInterface &vk = m_context.getDeviceInterface();
2391
2392 const VkImageSubresourceRange fullImageSubresourceRange =
2393 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
2394 {
2395 const VkImageMemoryBarrier preCopyImageBarriers[] = {
2396 makeImageMemoryBarrier(0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2397 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, m_imageSrc->get(), fullImageSubresourceRange),
2398 makeImageMemoryBarrier(0u, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
2399 m_imageDst->get(), fullImageSubresourceRange)};
2400
2401 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
2402 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, m_buffer->get(), 0ull, m_imageSrcSize);
2403
2404 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT,
2405 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
2406 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 1,
2407 &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers),
2408 preCopyImageBarriers);
2409 }
2410 {
2411 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
2412 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2413 VK_IMAGE_LAYOUT_GENERAL, m_imageSrc->get(), fullImageSubresourceRange);
2414
2415 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
2416
2417 vk.cmdCopyBufferToImage(cmdBuffer, m_buffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u,
2418 ©Region);
2419 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2420 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2421 (const VkBufferMemoryBarrier *)DE_NULL, 1, &barrierAfterCopy);
2422 }
2423 }
2424
void ImageExtendOperandTestInstance::commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)
{
    commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
}

void ImageExtendOperandTestInstance::commandAfterCompute(const VkCommandBuffer cmdBuffer)
{
    commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_buffer->get(), m_imageDstSize, m_texture);
}

// Clears the high bits of every pixel in the pixel buffer, leaving only the lowest 16 bits of each component.
void clearHighBits(const tcu::PixelBufferAccess &pixels, int width, int height)
{
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
        {
            auto color = pixels.getPixelUint(x, y);
            for (int c = 0; c < decltype(color)::SIZE; ++c)
                color[c] &= 0xFFFFull;
            pixels.setPixel(color, x, y);
        }
}

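// Masking example: with relaxed precision, a result component 0x0001F234 and a reference component 0x0000F234
// both reduce to 0xF234 after clearHighBits(), so they compare equal.
// verifyResult() below builds the reference by re-encoding each input texel in the destination format, reads
// the destination image back through the staging buffer, and compares both with a zero threshold.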
tcu::TestStatus ImageExtendOperandTestInstance::verifyResult(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const tcu::IVec3 imageSize = m_texture.size();
    const tcu::PixelBufferAccess inputAccess = m_inputImageData.getAccess();
    const int32_t width = inputAccess.getWidth();
    const int32_t height = inputAccess.getHeight();
    tcu::TextureLevel refImage(mapVkFormat(m_imageDstFormat), width, height);
    tcu::PixelBufferAccess refAccess = refImage.getAccess();

    for (int x = 0; x < width; ++x)
        for (int y = 0; y < height; ++y)
        {
            tcu::IVec4 color = inputAccess.getPixelInt(x, y);
            refAccess.setPixel(color, x, y);
        }

    const Allocation &alloc = m_buffer->getAllocation();
    invalidateAlloc(vk, device, alloc);
    const tcu::PixelBufferAccess result(mapVkFormat(m_imageDstFormat), imageSize, alloc.getHostPtr());

    if (m_relaxedPrecision)
    {
        // Preserve the lowest 16 bits of the reference and result pixels only.
        clearHighBits(refAccess, width, height);
        clearHighBits(result, width, height);
    }

    if (tcu::intThresholdCompare(m_context.getTestContext().getLog(), "Comparison", "Comparison", refAccess, result,
                                 tcu::UVec4(0), tcu::COMPARE_LOG_RESULT, true /*use64Bits*/))
        return tcu::TestStatus::pass("Passed");
    else
        return tcu::TestStatus::fail("Image comparison failed");
}

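// SignExtend and ZeroExtend are optional image operands introduced in SPIR-V 1.4. They force the texel value
// to be sign- or zero-extended to the width of the result type, regardless of the signedness implied by the
// image format, which is what these tests exercise for both reads and writes.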
enum class ExtendTestType
{
    READ = 0,
    WRITE,
    WRITE_NONTEMPORAL,
};

enum class ExtendOperand
{
    SIGN_EXTEND = 0,
    ZERO_EXTEND = 1
};

class ImageExtendOperandTest : public TestCase
{
public:
    ImageExtendOperandTest(tcu::TestContext &testCtx, const std::string &name, const Texture texture,
                           const VkFormat readFormat, const VkFormat writeFormat, const bool operandForce,
                           const bool relaxedPrecision, ExtendTestType extendTestType);

    void checkSupport(Context &context) const;
    void initPrograms(SourceCollections &programCollection) const;
    TestInstance *createInstance(Context &context) const;

private:
    bool isWriteTest() const
    {
        return (m_extendTestType == ExtendTestType::WRITE) || (m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL);
    }

    const Texture m_texture;
    VkFormat m_readFormat;
    VkFormat m_writeFormat;
    bool m_operandForce; // Use an operand that doesn't match SampledType?
    bool m_relaxedPrecision;
    ExtendTestType m_extendTestType;
};

ImageExtendOperandTest::ImageExtendOperandTest(tcu::TestContext &testCtx, const std::string &name,
                                               const Texture texture, const VkFormat readFormat,
                                               const VkFormat writeFormat, const bool operandForce,
                                               const bool relaxedPrecision, ExtendTestType extendTestType)
    : TestCase(testCtx, name)
    , m_texture(texture)
    , m_readFormat(readFormat)
    , m_writeFormat(writeFormat)
    , m_operandForce(operandForce)
    , m_relaxedPrecision(relaxedPrecision)
    , m_extendTestType(extendTestType)
{
}

void checkFormatProperties(const Context &context, VkFormat format)
{
#ifndef CTS_USES_VULKANSC
    const VkFormatProperties3 formatProperties(context.getFormatProperties(format));

    if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
#else
    const VkFormatProperties formatProperties(
        getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), format));

    if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
#endif // CTS_USES_VULKANSC
}

void check64BitSupportIfNeeded(Context &context, VkFormat readFormat, VkFormat writeFormat)
{
    if (is64BitIntegerFormat(readFormat) || is64BitIntegerFormat(writeFormat))
    {
        const auto &features = context.getDeviceFeatures();
        if (!features.shaderInt64)
            TCU_THROW(NotSupportedError, "64-bit integers not supported in shaders");
    }
}

void ImageExtendOperandTest::checkSupport(Context &context) const
{
    // requireDeviceFunctionality() itself throws NotSupportedError when VK_KHR_spirv_1_4 is unavailable,
    // so no separate check is needed.
    context.requireDeviceFunctionality("VK_KHR_spirv_1_4");

#ifndef CTS_USES_VULKANSC
    DE_ASSERT(m_readFormat != VK_FORMAT_A8_UNORM_KHR && m_writeFormat != VK_FORMAT_A8_UNORM_KHR);
    DE_ASSERT(m_readFormat != VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR &&
              m_writeFormat != VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR);

    if ((m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL) && (context.getUsedApiVersion() < VK_API_VERSION_1_3))
        TCU_THROW(NotSupportedError, "Vulkan 1.3 or higher is required for this test to run");
#endif // CTS_USES_VULKANSC

    check64BitSupportIfNeeded(context, m_readFormat, m_writeFormat);

    checkFormatProperties(context, m_readFormat);
    checkFormatProperties(context, m_writeFormat);
}

void ImageExtendOperandTest::initPrograms(SourceCollections &programCollection) const
{
    tcu::StringTemplate shaderTemplate(
        "OpCapability Shader\n"
        "OpCapability StorageImageExtendedFormats\n"

        "${capability}"
        "${extension}"

        "%std450 = OpExtInstImport \"GLSL.std.450\"\n"
        "OpMemoryModel Logical GLSL450\n"
        "OpEntryPoint GLCompute %main \"main\" %id %src_image_ptr %dst_image_ptr\n"
        "OpExecutionMode %main LocalSize 1 1 1\n"

        // decorations
        "OpDecorate %id BuiltIn GlobalInvocationId\n"

        "OpDecorate %src_image_ptr DescriptorSet 0\n"
        "OpDecorate %src_image_ptr Binding 0\n"
        "OpDecorate %src_image_ptr NonWritable\n"

        "${relaxed_precision}"

        "OpDecorate %dst_image_ptr DescriptorSet 0\n"
        "OpDecorate %dst_image_ptr Binding 1\n"
        "OpDecorate %dst_image_ptr NonReadable\n"

        // types
        "%type_void = OpTypeVoid\n"
        "%type_i32 = OpTypeInt 32 1\n"
        "%type_u32 = OpTypeInt 32 0\n"
        "%type_vec2_i32 = OpTypeVector %type_i32 2\n"
        "%type_vec2_u32 = OpTypeVector %type_u32 2\n"
        "%type_vec3_i32 = OpTypeVector %type_i32 3\n"
        "%type_vec3_u32 = OpTypeVector %type_u32 3\n"
        "%type_vec4_i32 = OpTypeVector %type_i32 4\n"
        "%type_vec4_u32 = OpTypeVector %type_u32 4\n"
        "${extra_types}"

        "%type_fun_void = OpTypeFunction %type_void\n"

        "${image_types}"

        "%type_ptr_in_vec3_u32 = OpTypePointer Input %type_vec3_u32\n"
        "%type_ptr_in_u32 = OpTypePointer Input %type_u32\n"

        "${image_uniforms}"

        // variables
        "%id = OpVariable %type_ptr_in_vec3_u32 Input\n"

        "${image_variables}"

        // main function
        "%main = OpFunction %type_void None %type_fun_void\n"
        "%label = OpLabel\n"

        "${image_load}"

        "%idvec = OpLoad %type_vec3_u32 %id\n"
        "%id_xy = OpVectorShuffle %type_vec2_u32 %idvec %idvec 0 1\n"
        "%coord = OpBitcast %type_vec2_i32 %id_xy\n"
        "%value = OpImageRead ${sampled_type_vec4} %src_image %coord "
        "${read_extend_operand}\n"
        " OpImageWrite %dst_image %coord %value ${write_extend_operand}\n"
        " OpReturn\n"
        " OpFunctionEnd\n");

    const auto testedFormat = mapVkFormat(isWriteTest() ? m_writeFormat : m_readFormat);
    const bool isSigned = (getTextureChannelClass(testedFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);

    const auto isRead64 = is64BitIntegerFormat(m_readFormat);
    const auto isWrite64 = is64BitIntegerFormat(m_writeFormat);
    DE_ASSERT(isRead64 == isWrite64);

    const bool using64Bits = (isRead64 || isWrite64);

    // Additional capabilities when needed.
    std::string capability;
    std::string extension;
    std::string extraTypes;

    if (using64Bits)
    {
        extension += "OpExtension \"SPV_EXT_shader_image_int64\"\n";
        capability += "OpCapability Int64\n"
                      "OpCapability Int64ImageEXT\n";
        extraTypes += "%type_i64 = OpTypeInt 64 1\n"
                      "%type_u64 = OpTypeInt 64 0\n"
                      "%type_vec3_i64 = OpTypeVector %type_i64 3\n"
                      "%type_vec3_u64 = OpTypeVector %type_u64 3\n"
                      "%type_vec4_i64 = OpTypeVector %type_i64 4\n"
                      "%type_vec4_u64 = OpTypeVector %type_u64 4\n";
    }

    std::string relaxed = "";
    if (m_relaxedPrecision)
        relaxed += "OpDecorate %src_image_ptr RelaxedPrecision\n";

    // Sampled type depends on the format sign and mismatch force flag.
    const bool signedSampleType = ((isSigned && !m_operandForce) || (!isSigned && m_operandForce));
    const std::string bits = (using64Bits ? "64" : "32");
    const std::string sampledTypePostfix = (signedSampleType ? "i" : "u") + bits;
    const std::string extendOperandStr = (isSigned ? "SignExtend" : "ZeroExtend");

    std::map<std::string, std::string> specializations{
        {"image_type_id", "%type_image"},
        {"image_uni_ptr_type_id", "%type_ptr_uniform_const_image"},
        {"image_var_id", "%src_image_ptr"},
        {"image_id", "%src_image"},
        {"capability", capability},
        {"extension", extension},
        {"extra_types", extraTypes},
        {"relaxed_precision", relaxed},
        {"image_format", getSpirvFormat(m_readFormat)},
        {"sampled_type", (std::string("%type_") + sampledTypePostfix)},
        {"sampled_type_vec4", (std::string("%type_vec4_") + sampledTypePostfix)},
        {"read_extend_operand", (!isWriteTest() ? extendOperandStr : "")},
        {"write_extend_operand", (isWriteTest() ? extendOperandStr : "")},
    };

    SpirvVersion spirvVersion = SPIRV_VERSION_1_4;
    bool allowSpirv14 = true;
    if (m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL)
    {
        spirvVersion = SPIRV_VERSION_1_6;
        allowSpirv14 = false;
        specializations["write_extend_operand"] = "Nontemporal";
    }
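
    // Nontemporal (SPIR-V 1.6) is only a cache-usage hint and must not affect the values written, so the
    // nontemporal cases can reuse the same verification path as the plain write tests.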

    // Additional parametrization is needed when the source and destination textures have the same format
    tcu::StringTemplate imageTypeTemplate(
        "${image_type_id} = OpTypeImage ${sampled_type} 2D 0 0 0 2 ${image_format}\n");
    tcu::StringTemplate imageUniformTypeTemplate(
        "${image_uni_ptr_type_id} = OpTypePointer UniformConstant ${image_type_id}\n");
    tcu::StringTemplate imageVariablesTemplate(
        "${image_var_id} = OpVariable ${image_uni_ptr_type_id} UniformConstant\n");
    tcu::StringTemplate imageLoadTemplate(
        "${image_id} = OpLoad ${image_type_id} ${image_var_id}\n");

    std::string imageTypes;
    std::string imageUniformTypes;
    std::string imageVariables;
    std::string imageLoad;

    // If the input image format is the same as the output format, fewer SPIR-V definitions are needed
    if (m_readFormat == m_writeFormat)
    {
        imageTypes = imageTypeTemplate.specialize(specializations);
        imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
        imageVariables = imageVariablesTemplate.specialize(specializations);
        imageLoad = imageLoadTemplate.specialize(specializations);

        specializations["image_var_id"] = "%dst_image_ptr";
        specializations["image_id"] = "%dst_image";
        imageVariables += imageVariablesTemplate.specialize(specializations);
        imageLoad += imageLoadTemplate.specialize(specializations);
    }
    else
    {
        specializations["image_type_id"] = "%type_src_image";
        specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_src_image";
        imageTypes = imageTypeTemplate.specialize(specializations);
        imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
        imageVariables = imageVariablesTemplate.specialize(specializations);
        imageLoad = imageLoadTemplate.specialize(specializations);

        specializations["image_format"] = getSpirvFormat(m_writeFormat);
        specializations["image_type_id"] = "%type_dst_image";
        specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_dst_image";
        specializations["image_var_id"] = "%dst_image_ptr";
        specializations["image_id"] = "%dst_image";
        imageTypes += imageTypeTemplate.specialize(specializations);
        imageUniformTypes += imageUniformTypeTemplate.specialize(specializations);
        imageVariables += imageVariablesTemplate.specialize(specializations);
        imageLoad += imageLoadTemplate.specialize(specializations);
    }

    specializations["image_types"] = imageTypes;
    specializations["image_uniforms"] = imageUniformTypes;
    specializations["image_variables"] = imageVariables;
    specializations["image_load"] = imageLoad;

    // Specialize the whole shader and add it to the program collection
    programCollection.spirvAsmSources.add("comp")
        << shaderTemplate.specialize(specializations)
        << vk::SpirVAsmBuildOptions(programCollection.usedVulkanVersion, spirvVersion, allowSpirv14);
}

TestInstance *ImageExtendOperandTest::createInstance(Context &context) const
{
    return new ImageExtendOperandTestInstance(context, m_texture, m_readFormat, m_writeFormat, m_relaxedPrecision);
}

static const Texture s_textures[] = {
    Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1),
    Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8),
    Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1),
    Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8),
    Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1),
    Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6),
    Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2 * 6),
    Texture(IMAGE_TYPE_BUFFER, tcu::IVec3(64, 1, 1), 1),
};

const Texture &getTestTexture(const ImageType imageType)
{
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
        if (s_textures[textureNdx].type() == imageType)
            return s_textures[textureNdx];

    DE_FATAL("Internal error");
    return s_textures[0];
}

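// Formats exercised by the store and load/store test groups below; support for optional storage formats is
// checked by the individual test cases at runtime.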
static const VkFormat s_formats[] = {
    VK_FORMAT_R32G32B32A32_SFLOAT,
    VK_FORMAT_R16G16B16A16_SFLOAT,
    VK_FORMAT_R32_SFLOAT,

    VK_FORMAT_R32G32B32A32_UINT,
    VK_FORMAT_R16G16B16A16_UINT,
    VK_FORMAT_R8G8B8A8_UINT,
    VK_FORMAT_R32_UINT,

    VK_FORMAT_R32G32B32A32_SINT,
    VK_FORMAT_R16G16B16A16_SINT,
    VK_FORMAT_R8G8B8A8_SINT,
    VK_FORMAT_R32_SINT,

    VK_FORMAT_R8G8B8A8_UNORM,

    VK_FORMAT_B8G8R8A8_UNORM,
    VK_FORMAT_B8G8R8A8_UINT,

    VK_FORMAT_R8G8B8A8_SNORM,

    VK_FORMAT_B10G11R11_UFLOAT_PACK32,

    VK_FORMAT_R32G32_SFLOAT,
    VK_FORMAT_R16G16_SFLOAT,
    VK_FORMAT_R16_SFLOAT,

    VK_FORMAT_A2B10G10R10_UINT_PACK32,
    VK_FORMAT_R32G32_UINT,
    VK_FORMAT_R16G16_UINT,
    VK_FORMAT_R16_UINT,
    VK_FORMAT_R8G8_UINT,
    VK_FORMAT_R8_UINT,

    VK_FORMAT_R32G32_SINT,
    VK_FORMAT_R16G16_SINT,
    VK_FORMAT_R16_SINT,
    VK_FORMAT_R8G8_SINT,
    VK_FORMAT_R8_SINT,

    VK_FORMAT_A2B10G10R10_UNORM_PACK32,
    VK_FORMAT_R16G16B16A16_UNORM,
    VK_FORMAT_R16G16B16A16_SNORM,
    VK_FORMAT_R16G16_UNORM,
    VK_FORMAT_R16_UNORM,
    VK_FORMAT_R8G8_UNORM,
    VK_FORMAT_R8_UNORM,
#ifndef CTS_USES_VULKANSC
    VK_FORMAT_A8_UNORM_KHR,
#endif // CTS_USES_VULKANSC

    VK_FORMAT_R16G16_SNORM,
    VK_FORMAT_R16_SNORM,
    VK_FORMAT_R8G8_SNORM,
    VK_FORMAT_R8_SNORM,

    VK_FORMAT_R10X6_UNORM_PACK16,
    VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
    VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,

    VK_FORMAT_R4G4_UNORM_PACK8,
    VK_FORMAT_R4G4B4A4_UNORM_PACK16,
    VK_FORMAT_B4G4R4A4_UNORM_PACK16,
    VK_FORMAT_R5G6B5_UNORM_PACK16,
    VK_FORMAT_B5G6R5_UNORM_PACK16,
    VK_FORMAT_R5G5B5A1_UNORM_PACK16,
    VK_FORMAT_B5G5R5A1_UNORM_PACK16,
    VK_FORMAT_A1R5G5B5_UNORM_PACK16,
#ifndef CTS_USES_VULKANSC
    VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR,
#endif // CTS_USES_VULKANSC

    VK_FORMAT_B8G8R8A8_SNORM,
    VK_FORMAT_B8G8R8A8_SINT,
    VK_FORMAT_A8B8G8R8_UNORM_PACK32,
    VK_FORMAT_A8B8G8R8_SNORM_PACK32,
    VK_FORMAT_A8B8G8R8_UINT_PACK32,
    VK_FORMAT_A8B8G8R8_SINT_PACK32,
    VK_FORMAT_A2R10G10B10_UNORM_PACK32,
    VK_FORMAT_A2R10G10B10_SNORM_PACK32,
    VK_FORMAT_A2R10G10B10_UINT_PACK32,
    VK_FORMAT_A2R10G10B10_SINT_PACK32,
    VK_FORMAT_A2B10G10R10_SNORM_PACK32,
    VK_FORMAT_A2B10G10R10_SINT_PACK32,
    VK_FORMAT_R32G32B32_UINT,
    VK_FORMAT_R32G32B32_SINT,
    VK_FORMAT_R32G32B32_SFLOAT,
    VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,

    VK_FORMAT_R8G8_SRGB,
    VK_FORMAT_R8G8B8_SRGB,
    VK_FORMAT_B8G8R8_SRGB,
    VK_FORMAT_R8G8B8A8_SRGB,
    VK_FORMAT_B8G8R8A8_SRGB,
    VK_FORMAT_A8B8G8R8_SRGB_PACK32};

static const VkFormat s_formatsThreeComponent[] = {
    VK_FORMAT_R8G8B8_UINT,      VK_FORMAT_R8G8B8_SINT,    VK_FORMAT_R8G8B8_UNORM,    VK_FORMAT_R8G8B8_SNORM,
    VK_FORMAT_R16G16B16_UINT,   VK_FORMAT_R16G16B16_SINT, VK_FORMAT_R16G16B16_UNORM, VK_FORMAT_R16G16B16_SNORM,
    VK_FORMAT_R16G16B16_SFLOAT, VK_FORMAT_R32G32B32_UINT, VK_FORMAT_R32G32B32_SINT,  VK_FORMAT_R32G32B32_SFLOAT,
};

static const VkImageTiling s_tilings[] = {
    VK_IMAGE_TILING_OPTIMAL,
    VK_IMAGE_TILING_LINEAR,
};

const char *tilingSuffix(VkImageTiling tiling)
{
    switch (tiling)
    {
    case VK_IMAGE_TILING_OPTIMAL:
        return "";
    case VK_IMAGE_TILING_LINEAR:
        return "_linear";
    default:
        return "unknown";
    }
}

} // namespace

tcu::TestCaseGroup *createImageStoreTests(tcu::TestContext &testCtx)
{
    // Plain imageStore() cases
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store"));
    // Declare a format layout qualifier for write images
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format"));

    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
    {
        const Texture &texture = s_textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str()));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str()));
        const bool isLayered = (texture.numLayers() > 1);

        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
        {
            for (int tilingNdx = 0; tilingNdx < DE_LENGTH_OF_ARRAY(s_tilings); tilingNdx++)
            {
                const bool hasSpirvFmt = hasSpirvFormat(s_formats[formatNdx]);
                const char *suffix = tilingSuffix(s_tilings[tilingNdx]);

                if (hasSpirvFmt)
                {
                    groupWithFormatByImageViewType->addChild(
                        new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + suffix, texture,
                                      s_formats[formatNdx], s_tilings[tilingNdx]));
                    // Additional tests where the shader uses constant data for imageStore.
                    groupWithFormatByImageViewType->addChild(new StoreTest(
                        testCtx, getFormatShortString(s_formats[formatNdx]) + "_constant" + suffix, texture,
                        s_formats[formatNdx], s_tilings[tilingNdx],
                        StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER | StoreTest::FLAG_STORE_CONSTANT_VALUE));
                }
                groupWithoutFormatByImageViewType->addChild(
                    new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + suffix, texture,
                                  s_formats[formatNdx], s_tilings[tilingNdx], 0));

                if (isLayered && hasSpirvFmt)
                    groupWithFormatByImageViewType->addChild(new StoreTest(
                        testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer" + suffix, texture,
                        s_formats[formatNdx], VK_IMAGE_TILING_OPTIMAL,
                        StoreTest::FLAG_SINGLE_LAYER_BIND | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));

                if (texture.type() == IMAGE_TYPE_BUFFER)
                {
                    if (hasSpirvFmt)
                        groupWithFormatByImageViewType->addChild(
                            new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign" + suffix,
                                          texture, s_formats[formatNdx], s_tilings[tilingNdx],
                                          StoreTest::FLAG_MINALIGN | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
                    groupWithoutFormatByImageViewType->addChild(
                        new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign" + suffix,
                                      texture, s_formats[formatNdx], s_tilings[tilingNdx], StoreTest::FLAG_MINALIGN));
                }
            }
        }

        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
    }

    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());

    return testGroup.release();
}

tcu::TestCaseGroup *createImageLoadStoreTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutAnyFormat(new tcu::TestCaseGroup(testCtx, "without_any_format"));
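    // "with_format" declares a format layout qualifier for both images, "without_format" only for the write
    // image, and "without_any_format" for neither image.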

    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
    {
        const Texture &texture = s_textures[textureNdx];
        const auto imageTypeName = getImageTypeName(texture.type());
        const bool isLayered = (texture.numLayers() > 1);

        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, imageTypeName.c_str()));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, imageTypeName.c_str()));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutAnyFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, imageTypeName.c_str()));

        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
        {
            const auto formatShortString = getFormatShortString(s_formats[formatNdx]);
            const auto hasSpvFormat = hasSpirvFormat(s_formats[formatNdx]);

            for (int tilingNdx = 0; tilingNdx < DE_LENGTH_OF_ARRAY(s_tilings); tilingNdx++)
            {
                const char *suffix = tilingSuffix(s_tilings[tilingNdx]);

                if (hasSpvFormat)
                {
                    groupWithFormatByImageViewType->addChild(
                        new LoadStoreTest(testCtx, formatShortString + suffix, texture, s_formats[formatNdx],
                                          s_formats[formatNdx], s_tilings[tilingNdx]));
                    groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(
                        testCtx, formatShortString + suffix, texture, s_formats[formatNdx], s_formats[formatNdx],
                        s_tilings[tilingNdx], LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                }
                groupWithoutAnyFormatByImageViewType->addChild(
                    new LoadStoreTest(testCtx, formatShortString + suffix, texture, s_formats[formatNdx],
                                      s_formats[formatNdx], s_tilings[tilingNdx], 0u));

                if (isLayered && hasSpvFormat)
                    groupWithFormatByImageViewType->addChild(new LoadStoreTest(
                        testCtx, formatShortString + "_single_layer" + suffix, texture, s_formats[formatNdx],
                        s_formats[formatNdx], s_tilings[tilingNdx],
                        LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                            LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));

                if (texture.type() == IMAGE_TYPE_BUFFER)
                {
                    if (hasSpvFormat)
                    {
                        groupWithFormatByImageViewType->addChild(new LoadStoreTest(
                            testCtx, formatShortString + "_minalign" + suffix, texture, s_formats[formatNdx],
                            s_formats[formatNdx], s_tilings[tilingNdx],
                            LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                                LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                        groupWithFormatByImageViewType->addChild(new LoadStoreTest(
                            testCtx, formatShortString + "_minalign_uniform" + suffix, texture, s_formats[formatNdx],
                            s_formats[formatNdx], s_tilings[tilingNdx],
                            LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                                LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES |
                                LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
                        groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(
                            testCtx, formatShortString + "_minalign" + suffix, texture, s_formats[formatNdx],
                            s_formats[formatNdx], s_tilings[tilingNdx],
                            LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                        groupWithoutFormatByImageViewType->addChild(
                            new LoadStoreTest(testCtx, formatShortString + "_minalign_uniform" + suffix, texture,
                                              s_formats[formatNdx], s_formats[formatNdx], s_tilings[tilingNdx],
                                              LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER |
                                                  LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                    }
                    groupWithoutAnyFormatByImageViewType->addChild(new LoadStoreTest(
                        testCtx, formatShortString + "_minalign" + suffix, texture, s_formats[formatNdx],
                        s_formats[formatNdx], s_tilings[tilingNdx], LoadStoreTest::FLAG_MINALIGN));
                    groupWithoutAnyFormatByImageViewType->addChild(
                        new LoadStoreTest(testCtx, formatShortString + "_minalign_uniform" + suffix, texture,
                                          s_formats[formatNdx], s_formats[formatNdx], s_tilings[tilingNdx],
                                          LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
                }
            }
        }

        if (texture.type() == IMAGE_TYPE_BUFFER)
        {
            for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formatsThreeComponent); ++formatNdx)
            {
                const auto formatShortString = getFormatShortString(s_formatsThreeComponent[formatNdx]);

                for (int tilingNdx = 0; tilingNdx < DE_LENGTH_OF_ARRAY(s_tilings); tilingNdx++)
                {
                    const char *suffix = tilingSuffix(s_tilings[tilingNdx]);

                    groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(
                        testCtx, formatShortString + "_uniform" + suffix, texture, s_formatsThreeComponent[formatNdx],
                        s_formatsThreeComponent[formatNdx], s_tilings[tilingNdx],
                        LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER |
                            LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                    groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(
                        testCtx, formatShortString + "_minalign_uniform" + suffix, texture,
                        s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], s_tilings[tilingNdx],
                        LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER |
                            LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
                }
            }
        }

        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
        testGroupWithoutAnyFormat->addChild(groupWithoutAnyFormatByImageViewType.release());
    }

    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());
    testGroup->addChild(testGroupWithoutAnyFormat.release());

    return testGroup.release();
}

tcu::TestCaseGroup *createImageLoadStoreLodAMDTests(tcu::TestContext &testCtx)
{
    static const Texture textures[] = {
        Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8, 1, 6),
        Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1, 1, 6),
        Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1, 1, 6),
        Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8, 1, 6),
        Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1, 1, 6),
        Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6, 1, 6),
        Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2 * 6, 1, 6),
    };

    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_lod"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format"));

    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(textures); ++textureNdx)
    {
        const Texture &texture = textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str()));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str()));
        const bool isLayered = (texture.numLayers() > 1);

        if (texture.type() == IMAGE_TYPE_BUFFER)
            continue;

        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
        {
            // These tests always require a SPIR-V format for the write image, even if the read
            // image is being used without a format.
            if (!hasSpirvFormat(s_formats[formatNdx]))
                continue;

            groupWithFormatByImageViewType->addChild(
                new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), texture, s_formats[formatNdx],
                                  s_formats[formatNdx], VK_IMAGE_TILING_OPTIMAL,
                                  (LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                                   LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES),
                                  true));
            groupWithoutFormatByImageViewType->addChild(
                new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), texture, s_formats[formatNdx],
                                  s_formats[formatNdx], VK_IMAGE_TILING_OPTIMAL,
                                  LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES, true));

            if (isLayered)
                groupWithFormatByImageViewType->addChild(new LoadStoreTest(
                    testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", texture,
                    s_formats[formatNdx], s_formats[formatNdx], VK_IMAGE_TILING_OPTIMAL,
                    LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                        LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES,
                    true));
        }

        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
    }

    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());

    return testGroup.release();
}

tcu::TestCaseGroup *createImageFormatReinterpretTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret"));

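    // Pair every image format with every different view format accepted by formatsAreCompatible(); the
    // shader-declared format must additionally have a SPIR-V image format equivalent.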
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
    {
        const Texture &texture = s_textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupByImageViewType(
            new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str()));

        for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
            for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
            {
                if (!hasSpirvFormat(s_formats[formatNdx]))
                    continue;

                const std::string caseName =
                    getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
                if (imageFormatNdx != formatNdx &&
                    formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
                    groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, texture, s_formats[formatNdx],
                                                                     s_formats[imageFormatNdx],
                                                                     VK_IMAGE_TILING_OPTIMAL));
            }
        testGroup->addChild(groupByImageViewType.release());
    }

    return testGroup.release();
}

de::MovePtr<TestCase> createImageQualifierRestrictCase(tcu::TestContext &testCtx, const ImageType imageType,
                                                       const std::string &name)
{
    const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
    const Texture &texture = getTestTexture(imageType);
    return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, texture, format, format, VK_IMAGE_TILING_OPTIMAL,
                                                   LoadStoreTest::FLAG_RESTRICT_IMAGES |
                                                       LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_READS |
                                                       LoadStoreTest::FLAG_DECLARE_FORMAT_IN_SHADER_WRITES));
}

namespace
{

bool relaxedOK(VkFormat format)
{
    tcu::IVec4 bitDepth = tcu::getTextureFormatBitDepth(mapVkFormat(format));
    int maxBitDepth = deMax32(deMax32(bitDepth[0], bitDepth[1]), deMax32(bitDepth[2], bitDepth[3]));
    return maxBitDepth <= 16;
}
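// For example, VK_FORMAT_R16G16B16A16_UINT (maximum component depth 16) qualifies for relaxed-precision
// testing, while VK_FORMAT_R32_UINT (component depth 32) does not.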

// Get a format used for reading or writing in the extend operand tests. These formats can represent the shader
// sampled type exactly, so results of read or write operations can be verified.
VkFormat getShaderExtensionOperandFormat(bool isSigned, bool is64Bit)
{
    const VkFormat formats[] = {
        VK_FORMAT_R32G32B32A32_UINT,
        VK_FORMAT_R32G32B32A32_SINT,
        VK_FORMAT_R64_UINT,
        VK_FORMAT_R64_SINT,
    };
    return formats[2u * (is64Bit ? 1u : 0u) + (isSigned ? 1u : 0u)];
}
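// Indexing example: (isSigned = true, is64Bit = false) selects formats[2 * 0 + 1], i.e. VK_FORMAT_R32G32B32A32_SINT.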

// INT or UINT format?
bool isIntegralFormat(VkFormat format)
{
    return (isIntFormat(format) || isUintFormat(format));
}

// Return the list of formats used for the extend operand tests (SignExtend/ZeroExtend).
std::vector<VkFormat> getExtensionOperandFormatList(void)
{
    std::vector<VkFormat> formatList;

    for (auto format : s_formats)
    {
        if (isIntegralFormat(format))
            formatList.push_back(format);
    }

    formatList.push_back(VK_FORMAT_R64_SINT);
    formatList.push_back(VK_FORMAT_R64_UINT);

    return formatList;
}

} // namespace

tcu::TestCaseGroup *createImageExtendOperandsTests(tcu::TestContext &testCtx)
{
    using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;

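    // Resulting test hierarchy: extend_operands_spirv1p4 / <format> / read|write /
    // matched_sign|mismatched_sign / normal_precision|relaxed_precision.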
    GroupPtr testGroup(new tcu::TestCaseGroup(testCtx, "extend_operands_spirv1p4"));

    const struct
    {
        ExtendTestType testType;
        const char *name;
    } testTypes[] = {
        {ExtendTestType::READ, "read"},
        {ExtendTestType::WRITE, "write"},
    };

    const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);
    const auto formatList = getExtensionOperandFormatList();

    for (const auto format : formatList)
    {
        const auto isInt = isIntFormat(format);
        const auto isUint = isUintFormat(format);
        const auto use64Bits = is64BitIntegerFormat(format);

        DE_ASSERT(isInt || isUint);

        GroupPtr formatGroup(new tcu::TestCaseGroup(testCtx, getFormatShortString(format).c_str()));

        for (const auto &testType : testTypes)
        {
            GroupPtr testTypeGroup(new tcu::TestCaseGroup(testCtx, testType.name));

            for (int match = 0; match < 2; ++match)
            {
                const bool mismatched = (match == 1);
                const char *matchGroupName = (mismatched ? "mismatched_sign" : "matched_sign");

                // SPIR-V does not allow this kind of sampled type override.
                if (mismatched && isUint)
                    continue;

                GroupPtr matchGroup(new tcu::TestCaseGroup(testCtx, matchGroupName));

                for (int prec = 0; prec < 2; prec++)
                {
                    const bool relaxedPrecision = (prec != 0);

                    const char *precisionName = (relaxedPrecision ? "relaxed_precision" : "normal_precision");
                    const auto signedOther = ((isInt && !mismatched) || (isUint && mismatched));
                    const auto otherFormat = getShaderExtensionOperandFormat(signedOther, use64Bits);
                    const auto readFormat = (testType.testType == ExtendTestType::READ ? format : otherFormat);
                    const auto writeFormat = (testType.testType == ExtendTestType::WRITE ? format : otherFormat);

                    if (relaxedPrecision && !relaxedOK(readFormat))
                        continue;

                    if (!hasSpirvFormat(readFormat) || !hasSpirvFormat(writeFormat))
                        continue;

                    matchGroup->addChild(new ImageExtendOperandTest(testCtx, precisionName, texture, readFormat,
                                                                    writeFormat, mismatched, relaxedPrecision,
                                                                    testType.testType));
                }

                testTypeGroup->addChild(matchGroup.release());
            }

            formatGroup->addChild(testTypeGroup.release());
        }

        testGroup->addChild(formatGroup.release());
    }

    return testGroup.release();
}

tcu::TestCaseGroup *createImageNontemporalOperandTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "nontemporal_operand"));

    const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);

    // Use integer formats only so that ImageExtendOperandTest can be reused
    const auto formatList = getExtensionOperandFormatList();

    for (const auto format : formatList)
    {
        const std::string caseName = getFormatShortString(format);
        const auto readFormat = format;
        const auto writeFormat = getShaderExtensionOperandFormat(isIntFormat(format), is64BitIntegerFormat(format));

        if (!hasSpirvFormat(readFormat) || !hasSpirvFormat(writeFormat))
            continue;

        // Note: only OpImageWrite is tested here; OpImageRead is covered by addComputeImageSamplerTest
        testGroup->addChild(new ImageExtendOperandTest(testCtx, caseName, texture, readFormat, writeFormat, false,
                                                       false, ExtendTestType::WRITE_NONTEMPORAL));
    }

    return testGroup.release();
}

} // namespace image
} // namespace vkt