/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
 *//*--------------------------------------------------------------------*/

#include "vktOpaqueTypeIndexingTests.hpp"

#include "vkRefUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkCmdUtil.hpp"

#include "tcuTexture.hpp"
#include "tcuTestLog.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuTextureUtil.hpp"

#include "deStringUtil.hpp"
#include "deSharedPtr.hpp"
#include "deRandom.hpp"
#include "deSTLUtil.hpp"

#include "vktShaderExecutor.hpp"

#include <sstream>

namespace vkt
{
namespace shaderexecutor
{

namespace
{

using de::MovePtr;
using de::SharedPtr;
using de::UniquePtr;
using std::vector;

using namespace vk;

typedef SharedPtr<Unique<VkSampler>> VkSamplerSp;

// Buffer helper
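// Thin RAII wrapper owning a VkBuffer and the host-visible allocation bound to it.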

class Buffer
{
public:
    Buffer(Context &context, VkBufferUsageFlags usage, size_t size);

    VkBuffer getBuffer(void) const
    {
        return *m_buffer;
    }
    void *getHostPtr(void) const
    {
        return m_allocation->getHostPtr();
    }
    void flush(void);
    void invalidate(void);

private:
    const DeviceInterface &m_vkd;
    const VkDevice m_device;
    const Unique<VkBuffer> m_buffer;
    const UniquePtr<Allocation> m_allocation;
};

typedef de::SharedPtr<Buffer> BufferSp;

Move<VkBuffer> createBuffer(const DeviceInterface &vkd, VkDevice device, VkDeviceSize size,
                            VkBufferUsageFlags usageFlags)
{
    const VkBufferCreateInfo createInfo = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                                           DE_NULL,
                                           (VkBufferCreateFlags)0,
                                           size,
                                           usageFlags,
                                           VK_SHARING_MODE_EXCLUSIVE,
                                           0u,
                                           DE_NULL};
    return createBuffer(vkd, device, &createInfo);
}

MovePtr<Allocation> allocateAndBindMemory(const DeviceInterface &vkd, VkDevice device, Allocator &allocator,
                                          VkBuffer buffer)
{
    MovePtr<Allocation> alloc(
        allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));

    VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));

    return alloc;
}

Buffer::Buffer(Context &context, VkBufferUsageFlags usage, size_t size)
    : m_vkd(context.getDeviceInterface())
    , m_device(context.getDevice())
    , m_buffer(createBuffer(context.getDeviceInterface(), context.getDevice(), (VkDeviceSize)size, usage))
    , m_allocation(allocateAndBindMemory(context.getDeviceInterface(), context.getDevice(),
                                         context.getDefaultAllocator(), *m_buffer))
{
}

void Buffer::flush(void)
{
    flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}

void Buffer::invalidate(void)
{
    invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}

MovePtr<Buffer> createUniformIndexBuffer(Context &context, int numIndices, const int *indices)
{
    MovePtr<Buffer> buffer(new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int) * numIndices));
    int *const bufPtr = (int *)buffer->getHostPtr();

    for (int ndx = 0; ndx < numIndices; ++ndx)
        bufPtr[ndx] = indices[ndx];

    buffer->flush();

    return buffer;
}

// Tests

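// How the generated shader forms the array index: literal constant, constant expression,
// uniform value, or dynamically uniform per-invocation input.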
enum IndexExprType
{
    INDEX_EXPR_TYPE_CONST_LITERAL = 0,
    INDEX_EXPR_TYPE_CONST_EXPRESSION,
    INDEX_EXPR_TYPE_UNIFORM,
    INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,

    INDEX_EXPR_TYPE_LAST
};

enum TextureType
{
    TEXTURE_TYPE_1D = 0,
    TEXTURE_TYPE_1D_ARRAY,
    TEXTURE_TYPE_2D,
    TEXTURE_TYPE_CUBE,
    TEXTURE_TYPE_2D_ARRAY,
    TEXTURE_TYPE_3D,

    TEXTURE_TYPE_LAST
};

class OpaqueTypeIndexingCase : public TestCase
{
public:
    OpaqueTypeIndexingCase(tcu::TestContext &testCtx, const char *name, const glu::ShaderType shaderType,
                           const IndexExprType indexExprType);
    virtual ~OpaqueTypeIndexingCase(void);

    virtual void initPrograms(vk::SourceCollections &programCollection) const
    {
        generateSources(m_shaderType, m_shaderSpec, programCollection);
    }

    virtual void checkSupport(Context &context) const;

protected:
    const char *m_name;
    const glu::ShaderType m_shaderType;
    const IndexExprType m_indexExprType;
    ShaderSpec m_shaderSpec;
};

OpaqueTypeIndexingCase::OpaqueTypeIndexingCase(tcu::TestContext &testCtx, const char *name,
                                               const glu::ShaderType shaderType, const IndexExprType indexExprType)
    : TestCase(testCtx, name)
    , m_name(name)
    , m_shaderType(shaderType)
    , m_indexExprType(indexExprType)
{
}

OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase(void)
{
}

void OpaqueTypeIndexingCase::checkSupport(Context &context) const
{
    checkSupportShader(context, m_shaderType);
}

class OpaqueTypeIndexingTestInstance : public TestInstance
{
public:
    OpaqueTypeIndexingTestInstance(Context &context, const glu::ShaderType shaderType, const ShaderSpec &shaderSpec,
                                   const char *name, const IndexExprType indexExprType);
    virtual ~OpaqueTypeIndexingTestInstance(void);

    virtual tcu::TestStatus iterate(void) = 0;

protected:
    void checkSupported(const VkDescriptorType descriptorType);

protected:
    tcu::TestContext &m_testCtx;
    const glu::ShaderType m_shaderType;
    const ShaderSpec &m_shaderSpec;
    const char *m_name;
    const IndexExprType m_indexExprType;
};

OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance(Context &context, const glu::ShaderType shaderType,
                                                               const ShaderSpec &shaderSpec, const char *name,
                                                               const IndexExprType indexExprType)
    : TestInstance(context)
    , m_testCtx(context.getTestContext())
    , m_shaderType(shaderType)
    , m_shaderSpec(shaderSpec)
    , m_name(name)
    , m_indexExprType(indexExprType)
{
}

OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance(void)
{
}

void OpaqueTypeIndexingTestInstance::checkSupported(const VkDescriptorType descriptorType)
{
    const VkPhysicalDeviceFeatures &deviceFeatures = m_context.getDeviceFeatures();

    if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
    {
        switch (descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
                TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
            break;

        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
                TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
                TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
            break;

        default:
            break;
        }
    }
}

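// Emits a std140 uniform block "Indices" holding one highp int per lookup index.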
static void declareUniformIndexVars(std::ostream &str, uint32_t bindingLocation, const char *varPrefix, int numVars)
{
    str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation
        << ", std140) uniform Indices\n{\n";

    for (int varNdx = 0; varNdx < numVars; varNdx++)
        str << "\thighp int " << varPrefix << varNdx << ";\n";

    str << "};\n";
}

static TextureType getTextureType(glu::DataType samplerType)
{
    switch (samplerType)
    {
    case glu::TYPE_SAMPLER_1D:
    case glu::TYPE_INT_SAMPLER_1D:
    case glu::TYPE_UINT_SAMPLER_1D:
    case glu::TYPE_SAMPLER_1D_SHADOW:
        return TEXTURE_TYPE_1D;

    case glu::TYPE_SAMPLER_1D_ARRAY:
    case glu::TYPE_INT_SAMPLER_1D_ARRAY:
    case glu::TYPE_UINT_SAMPLER_1D_ARRAY:
    case glu::TYPE_SAMPLER_1D_ARRAY_SHADOW:
        return TEXTURE_TYPE_1D_ARRAY;

    case glu::TYPE_SAMPLER_2D:
    case glu::TYPE_INT_SAMPLER_2D:
    case glu::TYPE_UINT_SAMPLER_2D:
    case glu::TYPE_SAMPLER_2D_SHADOW:
        return TEXTURE_TYPE_2D;

    case glu::TYPE_SAMPLER_CUBE:
    case glu::TYPE_INT_SAMPLER_CUBE:
    case glu::TYPE_UINT_SAMPLER_CUBE:
    case glu::TYPE_SAMPLER_CUBE_SHADOW:
        return TEXTURE_TYPE_CUBE;

    case glu::TYPE_SAMPLER_2D_ARRAY:
    case glu::TYPE_INT_SAMPLER_2D_ARRAY:
    case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
    case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
        return TEXTURE_TYPE_2D_ARRAY;

    case glu::TYPE_SAMPLER_3D:
    case glu::TYPE_INT_SAMPLER_3D:
    case glu::TYPE_UINT_SAMPLER_3D:
        return TEXTURE_TYPE_3D;

    default:
        throw tcu::InternalError("Invalid sampler type");
    }
}

static bool isShadowSampler(glu::DataType samplerType)
{
    return samplerType == glu::TYPE_SAMPLER_1D_SHADOW || samplerType == glu::TYPE_SAMPLER_1D_ARRAY_SHADOW ||
           samplerType == glu::TYPE_SAMPLER_2D_SHADOW || samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW ||
           samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
}

static glu::DataType getSamplerOutputType(glu::DataType samplerType)
{
    switch (samplerType)
    {
    case glu::TYPE_SAMPLER_1D:
    case glu::TYPE_SAMPLER_1D_ARRAY:
    case glu::TYPE_SAMPLER_2D:
    case glu::TYPE_SAMPLER_CUBE:
    case glu::TYPE_SAMPLER_2D_ARRAY:
    case glu::TYPE_SAMPLER_3D:
        return glu::TYPE_FLOAT_VEC4;

    case glu::TYPE_SAMPLER_1D_SHADOW:
    case glu::TYPE_SAMPLER_1D_ARRAY_SHADOW:
    case glu::TYPE_SAMPLER_2D_SHADOW:
    case glu::TYPE_SAMPLER_CUBE_SHADOW:
    case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
        return glu::TYPE_FLOAT;

    case glu::TYPE_INT_SAMPLER_1D:
    case glu::TYPE_INT_SAMPLER_1D_ARRAY:
    case glu::TYPE_INT_SAMPLER_2D:
    case glu::TYPE_INT_SAMPLER_CUBE:
    case glu::TYPE_INT_SAMPLER_2D_ARRAY:
    case glu::TYPE_INT_SAMPLER_3D:
        return glu::TYPE_INT_VEC4;

    case glu::TYPE_UINT_SAMPLER_1D:
    case glu::TYPE_UINT_SAMPLER_1D_ARRAY:
    case glu::TYPE_UINT_SAMPLER_2D:
    case glu::TYPE_UINT_SAMPLER_CUBE:
    case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
    case glu::TYPE_UINT_SAMPLER_3D:
        return glu::TYPE_UINT_VEC4;

    default:
        throw tcu::InternalError("Invalid sampler type");
    }
}

static tcu::TextureFormat getSamplerTextureFormat(glu::DataType samplerType)
{
    const glu::DataType outType = getSamplerOutputType(samplerType);
    const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);

    switch (outScalarType)
    {
    case glu::TYPE_FLOAT:
        if (isShadowSampler(samplerType))
            return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
        else
            return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);

    case glu::TYPE_INT:
        return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
    case glu::TYPE_UINT:
        return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);

    default:
        throw tcu::InternalError("Invalid sampler type");
    }
}

static glu::DataType getSamplerCoordType(glu::DataType samplerType)
{
    const TextureType texType = getTextureType(samplerType);
    int numCoords = 0;

    switch (texType)
    {
    case TEXTURE_TYPE_1D:
        numCoords = 1;
        break;
    case TEXTURE_TYPE_1D_ARRAY:
        numCoords = 2;
        break;
    case TEXTURE_TYPE_2D:
        numCoords = 2;
        break;
    case TEXTURE_TYPE_2D_ARRAY:
        numCoords = 3;
        break;
    case TEXTURE_TYPE_CUBE:
        numCoords = 3;
        break;
    case TEXTURE_TYPE_3D:
        numCoords = 3;
        break;
    default:
        DE_ASSERT(false);
    }

    if (samplerType == glu::TYPE_SAMPLER_1D_SHADOW)
        numCoords = 3;
    else if (isShadowSampler(samplerType))
        numCoords += 1;

    DE_ASSERT(de::inRange(numCoords, 1, 4));

    return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
}

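// Fills a one-row texture with random data. Depth formats draw from a fixed set of values
// (0.1..0.9) so that shadow compares against the even comparison values remain unambiguous.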
static void fillTextureData(const tcu::PixelBufferAccess &access, de::Random &rnd)
{
    DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);

    if (access.getFormat().order == tcu::TextureFormat::D)
    {
        // \note Texture uses odd values; lookups use even compare values to avoid precision issues.
        const float values[] = {0.1f, 0.3f, 0.5f, 0.7f, 0.9f};

        for (int ndx = 0; ndx < access.getWidth(); ndx++)
            access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
    }
    else
    {
        TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA &&
                           access.getFormat().getPixelSize() == 4);

        for (int ndx = 0; ndx < access.getWidth(); ndx++)
            *((uint32_t *)access.getDataPtr() + ndx) = rnd.getUint32();
    }
}

static vk::VkImageType getVkImageType(TextureType texType)
{
    switch (texType)
    {
    case TEXTURE_TYPE_1D:
    case TEXTURE_TYPE_1D_ARRAY:
        return vk::VK_IMAGE_TYPE_1D;
    case TEXTURE_TYPE_2D:
    case TEXTURE_TYPE_2D_ARRAY:
        return vk::VK_IMAGE_TYPE_2D;
    case TEXTURE_TYPE_CUBE:
        return vk::VK_IMAGE_TYPE_2D;
    case TEXTURE_TYPE_3D:
        return vk::VK_IMAGE_TYPE_3D;
    default:
        DE_FATAL("Impossible");
        return (vk::VkImageType)0;
    }
}

static vk::VkImageViewType getVkImageViewType(TextureType texType)
{
    switch (texType)
    {
    case TEXTURE_TYPE_1D:
        return vk::VK_IMAGE_VIEW_TYPE_1D;
    case TEXTURE_TYPE_1D_ARRAY:
        return vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY;
    case TEXTURE_TYPE_2D:
        return vk::VK_IMAGE_VIEW_TYPE_2D;
    case TEXTURE_TYPE_2D_ARRAY:
        return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
    case TEXTURE_TYPE_CUBE:
        return vk::VK_IMAGE_VIEW_TYPE_CUBE;
    case TEXTURE_TYPE_3D:
        return vk::VK_IMAGE_VIEW_TYPE_3D;
    default:
        DE_FATAL("Impossible");
        return (vk::VkImageViewType)0;
    }
}

//! Test image with 1-pixel dimensions and no mipmaps
class TestImage
{
public:
    TestImage(Context &context, TextureType texType, tcu::TextureFormat format, const void *colorValue);

    VkImageView getImageView(void) const
    {
        return *m_imageView;
    }

private:
    const Unique<VkImage> m_image;
    const UniquePtr<Allocation> m_allocation;
    const Unique<VkImageView> m_imageView;
};

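// Creates a 1x1 single-mip image; cube maps get six array layers and the cube-compatible flag.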
Move<VkImage> createTestImage(const DeviceInterface &vkd, VkDevice device, TextureType texType,
                              tcu::TextureFormat format)
{
    const VkImageCreateInfo createInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                          DE_NULL,
                                          (texType == TEXTURE_TYPE_CUBE ?
                                               (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT :
                                               (VkImageCreateFlags)0),
                                          getVkImageType(texType),
                                          mapTextureFormat(format),
                                          makeExtent3D(1, 1, 1),
                                          1u,
                                          (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
                                          VK_SAMPLE_COUNT_1_BIT,
                                          VK_IMAGE_TILING_OPTIMAL,
                                          VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                                          VK_SHARING_MODE_EXCLUSIVE,
                                          0u,
                                          DE_NULL,
                                          VK_IMAGE_LAYOUT_UNDEFINED};

    return createImage(vkd, device, &createInfo);
}

de::MovePtr<Allocation> allocateAndBindMemory(const DeviceInterface &vkd, VkDevice device, Allocator &allocator,
                                              VkImage image)
{
    de::MovePtr<Allocation> alloc =
        allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);

    VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));

    return alloc;
}

Move<VkImageView> createTestImageView(const DeviceInterface &vkd, VkDevice device, VkImage image, TextureType texType,
                                      tcu::TextureFormat format)
{
    const bool isDepthImage = format.order == tcu::TextureFormat::D;
    const VkImageViewCreateInfo createInfo = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        DE_NULL,
        (VkImageViewCreateFlags)0,
        image,
        getVkImageViewType(texType),
        mapTextureFormat(format),
        {
            VK_COMPONENT_SWIZZLE_IDENTITY,
            VK_COMPONENT_SWIZZLE_IDENTITY,
            VK_COMPONENT_SWIZZLE_IDENTITY,
            VK_COMPONENT_SWIZZLE_IDENTITY,
        },
        {(VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT), 0u, 1u, 0u,
         (texType == TEXTURE_TYPE_CUBE ? 6u : 1u)}};

    return createImageView(vkd, device, &createInfo);
}

TestImage::TestImage(Context &context, TextureType texType, tcu::TextureFormat format, const void *colorValue)
    : m_image(createTestImage(context.getDeviceInterface(), context.getDevice(), texType, format))
    , m_allocation(allocateAndBindMemory(context.getDeviceInterface(), context.getDevice(),
                                         context.getDefaultAllocator(), *m_image))
    , m_imageView(createTestImageView(context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
{
    const DeviceInterface &vkd = context.getDeviceInterface();
    const VkDevice device = context.getDevice();

    const size_t pixelSize = (size_t)format.getPixelSize();
    const uint32_t numLayers = (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
    const size_t numReplicas = (size_t)numLayers;
    const size_t stagingBufferSize = pixelSize * numReplicas;

    const VkBufferCreateInfo stagingBufferInfo = {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        DE_NULL,
        (VkBufferCreateFlags)0u,
        (VkDeviceSize)stagingBufferSize,
        (VkBufferUsageFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
        VK_SHARING_MODE_EXCLUSIVE,
        0u,
        DE_NULL,
    };
    const Unique<VkBuffer> stagingBuffer(createBuffer(vkd, device, &stagingBufferInfo));
    const UniquePtr<Allocation> alloc(context.getDefaultAllocator().allocate(
        getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));

    VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));

    // Replicate the single texel value once per layer into the staging buffer.
    for (size_t ndx = 0; ndx < numReplicas; ++ndx)
        deMemcpy((uint8_t *)alloc->getHostPtr() + ndx * pixelSize, colorValue, pixelSize);

    flushMappedMemoryRange(vkd, device, alloc->getMemory(), alloc->getOffset(), VK_WHOLE_SIZE);

    {
        const VkImageAspectFlags imageAspect =
            (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT :
                                                                         VK_IMAGE_ASPECT_COLOR_BIT);
        const VkBufferImageCopy copyInfo = {0u, 1u, 1u, {imageAspect, 0u, 0u, numLayers}, {0u, 0u, 0u}, {1u, 1u, 1u}};

        copyBufferToImage(vkd, device, context.getUniversalQueue(), context.getUniversalQueueFamilyIndex(),
                          *stagingBuffer, stagingBufferSize, vector<VkBufferImageCopy>(1, copyInfo), DE_NULL,
                          imageAspect, 1u, numLayers, *m_image);
    }
}

typedef SharedPtr<TestImage> TestImageSp;

// SamplerIndexingCaseInstance

class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
    enum
    {
        NUM_INVOCATIONS = 64,
        NUM_SAMPLERS = 8,
        NUM_LOOKUPS = 4
    };

    SamplerIndexingCaseInstance(Context &context, const glu::ShaderType shaderType, const ShaderSpec &shaderSpec,
                                const char *name, glu::DataType samplerType, const IndexExprType indexExprType,
                                const std::vector<int> &lookupIndices);
    virtual ~SamplerIndexingCaseInstance(void);

    virtual tcu::TestStatus iterate(void);

protected:
    const glu::DataType m_samplerType;
    const std::vector<int> m_lookupIndices;
};

SamplerIndexingCaseInstance::SamplerIndexingCaseInstance(Context &context, const glu::ShaderType shaderType,
                                                         const ShaderSpec &shaderSpec, const char *name,
                                                         glu::DataType samplerType, const IndexExprType indexExprType,
                                                         const std::vector<int> &lookupIndices)
    : OpaqueTypeIndexingTestInstance(context, shaderType, shaderSpec, name, indexExprType)
    , m_samplerType(samplerType)
    , m_lookupIndices(lookupIndices)
{
}

SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance(void)
{
}

bool isIntegerFormat(const tcu::TextureFormat &format)
{
    const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(format.type);

    return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER || chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
}

tcu::TestStatus SamplerIndexingCaseInstance::iterate(void)
{
    const int numInvocations = SamplerIndexingCaseInstance::NUM_INVOCATIONS;
    const int numSamplers = SamplerIndexingCaseInstance::NUM_SAMPLERS;
    const int numLookups = SamplerIndexingCaseInstance::NUM_LOOKUPS;
    const glu::DataType coordType = getSamplerCoordType(m_samplerType);
    const glu::DataType outputType = getSamplerOutputType(m_samplerType);
    const tcu::TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
    const int outLookupStride = numInvocations * getDataTypeScalarSize(outputType);
    vector<float> coords;
    vector<uint32_t> outData;
    vector<uint8_t> texData(numSamplers * texFormat.getPixelSize());
    const tcu::PixelBufferAccess refTexAccess(texFormat, numSamplers, 1, 1, &texData[0]);
    de::Random rnd(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
    const TextureType texType = getTextureType(m_samplerType);
    const tcu::Sampler::FilterMode filterMode =
        (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;

    // The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
    const tcu::Sampler refSampler =
        isShadowSampler(m_samplerType) ?
            tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
                         filterMode, filterMode, 0.0f, false /* non-normalized */, tcu::Sampler::COMPAREMODE_LESS, 0,
                         tcu::Vec4(0.0f), true) :
            tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
                         filterMode, filterMode, 0.0f, true, tcu::Sampler::COMPAREMODE_NONE, 0, tcu::Vec4(0.0f), true);

    const DeviceInterface &vkd = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    vector<TestImageSp> images;
    vector<VkSamplerSp> samplers;
    MovePtr<Buffer> indexBuffer;
    Move<VkDescriptorSetLayout> extraResourcesLayout;
    Move<VkDescriptorPool> extraResourcesSetPool;
    Move<VkDescriptorSet> extraResourcesSet;

    checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);

    coords.resize(numInvocations * getDataTypeScalarSize(coordType));

    if (texType == TEXTURE_TYPE_CUBE)
    {
        if (isShadowSampler(m_samplerType))
        {
            for (size_t i = 0; i < coords.size() / 4; i++)
            {
                coords[4 * i] = 1.0f;
                coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
            }
        }
        else
        {
            for (size_t i = 0; i < coords.size() / 3; i++)
            {
                coords[3 * i] = 1.0f;
                coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
            }
        }
    }

    if (isShadowSampler(m_samplerType))
    {
        // Use different comparison value per invocation.
        // \note Texture uses odd values, comparison even values.
        const int numCoordComps = getDataTypeScalarSize(coordType);
        const float cmpValues[] = {0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f};

        for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
            coords[invocationNdx * numCoordComps + (numCoordComps - 1)] =
                rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
    }

    fillTextureData(refTexAccess, rnd);

    outData.resize(numLookups * outLookupStride);

    for (int ndx = 0; ndx < numSamplers; ++ndx)
    {
        images.push_back(
            TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));

        {
            tcu::Sampler samplerCopy(refSampler);
            samplerCopy.normalizedCoords = true;

            {
                const VkSamplerCreateInfo samplerParams = mapSampler(samplerCopy, texFormat);
                samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
            }
        }
    }

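    // For the uniform-indexing variant, lookup indices are provided to the shader via a UBO.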
    if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
        indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);

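    // Extra resources layout: binding 0 holds the arrayed sampler, binding numSamplers the index UBO.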
    {
        const VkDescriptorSetLayoutBinding bindings[] = {
            {0u, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (uint32_t)numSamplers, VK_SHADER_STAGE_ALL, DE_NULL},
            {(uint32_t)numSamplers, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL}};
        const VkDescriptorSetLayoutCreateInfo layoutInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
            DE_NULL,
            (VkDescriptorSetLayoutCreateFlags)0u,
            DE_LENGTH_OF_ARRAY(bindings),
            bindings,
        };

        extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
    }

    {
        const VkDescriptorPoolSize poolSizes[] = {{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (uint32_t)numSamplers},
                                                  {
                                                      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                                                      1u,
                                                  }};
        const VkDescriptorPoolCreateInfo poolInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
            1u, // maxSets
            DE_LENGTH_OF_ARRAY(poolSizes),
            poolSizes,
        };

        extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
    }

    {
        const VkDescriptorSetAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            DE_NULL,
            *extraResourcesSetPool,
            1u,
            &extraResourcesLayout.get(),
        };

        extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
    }

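    // Point binding 0 at all test image/sampler pairs with a single arrayed descriptor write.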
    {
        vector<VkDescriptorImageInfo> imageInfos(numSamplers);
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            0u, // dstBinding
            0u, // dstArrayElement
            (uint32_t)numSamplers,
            VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            &imageInfos[0],
            (const VkDescriptorBufferInfo *)DE_NULL,
            (const VkBufferView *)DE_NULL,
        };

        for (int ndx = 0; ndx < numSamplers; ++ndx)
        {
            imageInfos[ndx].sampler = **samplers[ndx];
            imageInfos[ndx].imageView = images[ndx]->getImageView();
            imageInfos[ndx].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        }

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

    if (indexBuffer)
    {
        const VkDescriptorBufferInfo bufferInfo = {indexBuffer->getBuffer(), 0u, VK_WHOLE_SIZE};
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            (uint32_t)numSamplers, // dstBinding
            0u,                    // dstArrayElement
            1u,
            VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            (const VkDescriptorImageInfo *)DE_NULL,
            &bufferInfo,
            (const VkBufferView *)DE_NULL,
        };

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

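    // Set up executor inputs/outputs and run the shader once over all invocations.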
    {
        std::vector<void *> inputs;
        std::vector<void *> outputs;
        std::vector<int> expandedIndices;
        UniquePtr<ShaderExecutor> executor(
            createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

        inputs.push_back(&coords[0]);

        if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
        {
            expandedIndices.resize(numInvocations * m_lookupIndices.size());
            for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
            {
                for (int invNdx = 0; invNdx < numInvocations; invNdx++)
                    expandedIndices[lookupNdx * numInvocations + invNdx] = m_lookupIndices[lookupNdx];
            }

            for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
                inputs.push_back(&expandedIndices[lookupNdx * numInvocations]);
        }

        for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
            outputs.push_back(&outData[outLookupStride * lookupNdx]);

        executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
    }

    {
        tcu::TestLog &log = m_context.getTestContext().getLog();
        tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");

        if (isShadowSampler(m_samplerType))
        {
            const int numCoordComps = getDataTypeScalarSize(coordType);

            TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);

            // Each invocation may have different results.
            for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
            {
                const float coord = coords[invocationNdx * numCoordComps + (numCoordComps - 1)];

                for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
                {
                    const int texNdx = m_lookupIndices[lookupNdx];
                    const float result =
                        *((const float *)(const uint8_t *)&outData[lookupNdx * outLookupStride + invocationNdx]);
                    const float reference = refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord,
                                                                         (float)texNdx, 0.0f, tcu::IVec3(0));

                    if (de::abs(result - reference) > 0.005f)
                    {
                        log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup "
                            << lookupNdx << ": expected " << reference << ", got " << result
                            << tcu::TestLog::EndMessage;

                        if (testResult.getCode() == QP_TEST_RESULT_PASS)
                            testResult = tcu::TestStatus::fail("Got invalid lookup result");
                    }
                }
            }
        }
        else
        {
            TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);

            // Validate results from first invocation
            for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
            {
                const int texNdx = m_lookupIndices[lookupNdx];
                const uint8_t *resPtr = (const uint8_t *)&outData[lookupNdx * outLookupStride];
                bool isOk;

                if (outputType == glu::TYPE_FLOAT_VEC4)
                {
                    const float threshold = 1.0f / 256.0f;
                    const tcu::Vec4 reference = refTexAccess.getPixel(texNdx, 0);
                    const float *floatPtr = (const float *)resPtr;
                    const tcu::Vec4 result(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);

                    isOk = boolAll(lessThanEqual(abs(reference - result), tcu::Vec4(threshold)));

                    if (!isOk)
                    {
                        log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected " << reference
                            << ", got " << result << tcu::TestLog::EndMessage;
                    }
                }
                else
                {
                    const tcu::UVec4 reference = refTexAccess.getPixelUint(texNdx, 0);
                    const uint32_t *uintPtr = (const uint32_t *)resPtr;
                    const tcu::UVec4 result(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);

                    isOk = boolAll(equal(reference, result));

                    if (!isOk)
                    {
                        log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected " << reference
                            << ", got " << result << tcu::TestLog::EndMessage;
                    }
                }

                if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
                    testResult = tcu::TestStatus::fail("Got invalid lookup result");
            }

            // Check results of other invocations against first one
            for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
            {
                for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
                {
                    const uint32_t *refPtr = &outData[lookupNdx * outLookupStride];
                    const uint32_t *resPtr = refPtr + invocationNdx * 4;
                    bool isOk = true;

                    for (int ndx = 0; ndx < 4; ndx++)
                        isOk = isOk && (refPtr[ndx] == resPtr[ndx]);

                    if (!isOk)
                    {
                        log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
                            << tcu::formatArray(tcu::Format::HexIterator<uint32_t>(resPtr),
                                                tcu::Format::HexIterator<uint32_t>(resPtr + 4))
                            << " for lookup " << lookupNdx << " doesn't match result from first invocation "
                            << tcu::formatArray(tcu::Format::HexIterator<uint32_t>(refPtr),
                                                tcu::Format::HexIterator<uint32_t>(refPtr + 4))
                            << tcu::TestLog::EndMessage;

                        if (testResult.getCode() == QP_TEST_RESULT_PASS)
                            testResult = tcu::TestStatus::fail("Inconsistent lookup results");
                    }
                }
            }
        }

        return testResult;
    }
}

class SamplerIndexingCase : public OpaqueTypeIndexingCase
{
public:
    SamplerIndexingCase(tcu::TestContext &testCtx, const char *name, const glu::ShaderType shaderType,
                        glu::DataType samplerType, IndexExprType indexExprType);
    virtual ~SamplerIndexingCase(void);

    virtual TestInstance *createInstance(Context &ctx) const;

private:
    SamplerIndexingCase(const SamplerIndexingCase &);
    SamplerIndexingCase &operator=(const SamplerIndexingCase &);

    void createShaderSpec(void);

    const glu::DataType m_samplerType;
    const int m_numSamplers;
    const int m_numLookups;
    std::vector<int> m_lookupIndices;
};

SamplerIndexingCase::SamplerIndexingCase(tcu::TestContext &testCtx, const char *name, const glu::ShaderType shaderType,
                                         glu::DataType samplerType, IndexExprType indexExprType)
    : OpaqueTypeIndexingCase(testCtx, name, shaderType, indexExprType)
    , m_samplerType(samplerType)
    , m_numSamplers(SamplerIndexingCaseInstance::NUM_SAMPLERS)
    , m_numLookups(SamplerIndexingCaseInstance::NUM_LOOKUPS)
    , m_lookupIndices(m_numLookups)
{
    createShaderSpec();
    init();
}

SamplerIndexingCase::~SamplerIndexingCase(void)
{
}

TestInstance *SamplerIndexingCase::createInstance(Context &ctx) const
{
    return new SamplerIndexingCaseInstance(ctx, m_shaderType, m_shaderSpec, m_name, m_samplerType, m_indexExprType,
                                           m_lookupIndices);
}

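// Builds the GLSL fragment: declares texSampler[NUM_SAMPLERS] and one texture() lookup per
// result output. With INDEX_EXPR_TYPE_CONST_EXPRESSION and lookup index 2, for example, the
// emitted line reads: result0 = texture(texSampler[indexBase + 1], coords);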
void SamplerIndexingCase::createShaderSpec(void)
{
    de::Random rnd(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
    const char *samplersName = "texSampler";
    const char *coordsName = "coords";
    const char *indicesPrefix = "index";
    const char *resultPrefix = "result";
    const glu::DataType coordType = getSamplerCoordType(m_samplerType);
    const glu::DataType outType = getSamplerOutputType(m_samplerType);
    std::ostringstream global, code;

    for (int ndx = 0; ndx < m_numLookups; ndx++)
        m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers - 1);

    m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));

    if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
        global << "#extension GL_EXT_gpu_shader5 : require\n";

    if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
        global << "const highp int indexBase = 1;\n";

    global << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp "
           << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";

    if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
    {
        for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
        {
            const std::string varName = indicesPrefix + de::toString(lookupNdx);
            m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
        }
    }
    else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
        declareUniformIndexVars(global, (uint32_t)m_numSamplers, indicesPrefix, m_numLookups);

    for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
    {
        const std::string varName = resultPrefix + de::toString(lookupNdx);
        m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
    }

    for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
    {
        code << resultPrefix << lookupNdx << " = texture(" << samplersName << "[";

        if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
            code << m_lookupIndices[lookupNdx];
        else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
            code << "indexBase + " << (m_lookupIndices[lookupNdx] - 1);
        else
            code << indicesPrefix << lookupNdx;

        code << "], " << coordsName << ");\n";
    }

    m_shaderSpec.globalDeclarations = global.str();
    m_shaderSpec.source = code.str();
}

enum BlockType
{
    BLOCKTYPE_UNIFORM = 0,
    BLOCKTYPE_BUFFER,

    BLOCKTYPE_LAST
};

class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
    enum
    {
        NUM_INVOCATIONS = 32,
        NUM_INSTANCES = 4,
        NUM_READS = 4
    };

    enum Flags
    {
        FLAG_USE_STORAGE_BUFFER = (1 << 0) // Use VK_KHR_storage_buffer_storage_class
    };

    BlockArrayIndexingCaseInstance(Context &context, const glu::ShaderType shaderType, const ShaderSpec &shaderSpec,
                                   const char *name, BlockType blockType, const uint32_t flags,
                                   const IndexExprType indexExprType, const std::vector<int> &readIndices,
                                   const std::vector<uint32_t> &inValues);
    virtual ~BlockArrayIndexingCaseInstance(void);

    virtual tcu::TestStatus iterate(void);

private:
    const BlockType m_blockType;
    const uint32_t m_flags;
    const std::vector<int> &m_readIndices;
    const std::vector<uint32_t> &m_inValues;
};

BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance(Context &context, const glu::ShaderType shaderType,
                                                               const ShaderSpec &shaderSpec, const char *name,
                                                               BlockType blockType, const uint32_t flags,
                                                               const IndexExprType indexExprType,
                                                               const std::vector<int> &readIndices,
                                                               const std::vector<uint32_t> &inValues)
    : OpaqueTypeIndexingTestInstance(context, shaderType, shaderSpec, name, indexExprType)
    , m_blockType(blockType)
    , m_flags(flags)
    , m_readIndices(readIndices)
    , m_inValues(inValues)
{
}

BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance(void)
{
}

tcu::TestStatus BlockArrayIndexingCaseInstance::iterate(void)
{
    const int numInvocations = NUM_INVOCATIONS;
    const int numReads = NUM_READS;
    std::vector<uint32_t> outValues(numInvocations * numReads);

    tcu::TestLog &log = m_context.getTestContext().getLog();
    tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");

    std::vector<int> expandedIndices;
    std::vector<void *> inputs;
    std::vector<void *> outputs;
    const VkBufferUsageFlags bufferUsage =
        m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    const VkDescriptorType descriptorType =
        m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

    const DeviceInterface &vkd = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();

    // \note Using separate buffer per element - might want to test
    //       offsets & single buffer in the future.
    vector<BufferSp> buffers(m_inValues.size());
    MovePtr<Buffer> indexBuffer;

    Move<VkDescriptorSetLayout> extraResourcesLayout;
    Move<VkDescriptorPool> extraResourcesSetPool;
    Move<VkDescriptorSet> extraResourcesSet;

    checkSupported(descriptorType);

    if ((m_flags & FLAG_USE_STORAGE_BUFFER) != 0)
    {
        if (!m_context.isDeviceFunctionalitySupported("VK_KHR_storage_buffer_storage_class"))
            TCU_THROW(NotSupportedError, "VK_KHR_storage_buffer_storage_class is not supported");
    }

    for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
    {
        buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(uint32_t)));
        *(uint32_t *)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
        buffers[bufferNdx]->flush();
    }

    if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
        indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);

    {
        const VkDescriptorSetLayoutBinding bindings[] = {
            {0u, descriptorType, (uint32_t)m_inValues.size(), VK_SHADER_STAGE_ALL, DE_NULL},
            {(uint32_t)m_inValues.size(), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL}};
        const VkDescriptorSetLayoutCreateInfo layoutInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
            DE_NULL,
            (VkDescriptorSetLayoutCreateFlags)0u,
            DE_LENGTH_OF_ARRAY(bindings),
            bindings,
        };

        extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
    }

    {
        const VkDescriptorPoolSize poolSizes[] = {{descriptorType, (uint32_t)m_inValues.size()},
                                                  {
                                                      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                                                      1u,
                                                  }};
        const VkDescriptorPoolCreateInfo poolInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
            1u, // maxSets
            DE_LENGTH_OF_ARRAY(poolSizes),
            poolSizes,
        };

        extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
    }

    {
        const VkDescriptorSetAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            DE_NULL,
            *extraResourcesSetPool,
            1u,
            &extraResourcesLayout.get(),
        };

        extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
    }

    {
        vector<VkDescriptorBufferInfo> bufferInfos(m_inValues.size());
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            0u, // dstBinding
            0u, // dstArrayElement
            (uint32_t)m_inValues.size(),
            descriptorType,
            (const VkDescriptorImageInfo *)DE_NULL,
            &bufferInfos[0],
            (const VkBufferView *)DE_NULL,
        };

        for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
        {
            bufferInfos[ndx].buffer = buffers[ndx]->getBuffer();
            bufferInfos[ndx].offset = 0u;
            bufferInfos[ndx].range = VK_WHOLE_SIZE;
        }

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

    if (indexBuffer)
    {
        const VkDescriptorBufferInfo bufferInfo = {indexBuffer->getBuffer(), 0u, VK_WHOLE_SIZE};
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            (uint32_t)m_inValues.size(), // dstBinding
            0u,                          // dstArrayElement
            1u,
            VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            (const VkDescriptorImageInfo *)DE_NULL,
            &bufferInfo,
            (const VkBufferView *)DE_NULL,
        };

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

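    // Dynamically uniform indices are per-invocation shader inputs; replicate each read index
    // across all invocations.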
    if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
    {
        expandedIndices.resize(numInvocations * m_readIndices.size());

        for (int readNdx = 0; readNdx < numReads; readNdx++)
        {
            int *dst = &expandedIndices[numInvocations * readNdx];
            std::fill(dst, dst + numInvocations, m_readIndices[readNdx]);
        }

        for (int readNdx = 0; readNdx < numReads; readNdx++)
            inputs.push_back(&expandedIndices[readNdx * numInvocations]);
    }

    for (int readNdx = 0; readNdx < numReads; readNdx++)
        outputs.push_back(&outValues[readNdx * numInvocations]);

    {
        UniquePtr<ShaderExecutor> executor(
            createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

        executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
    }

    for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
    {
        for (int readNdx = 0; readNdx < numReads; readNdx++)
        {
            const uint32_t refValue = m_inValues[m_readIndices[readNdx]];
            const uint32_t resValue = outValues[readNdx * numInvocations + invocationNdx];

            if (refValue != resValue)
            {
                log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", read " << readNdx
                    << ": expected " << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
                    << tcu::TestLog::EndMessage;

                if (testResult.getCode() == QP_TEST_RESULT_PASS)
                    testResult = tcu::TestStatus::fail("Invalid result value");
            }
        }
    }

    return testResult;
}

class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
{
public:
    BlockArrayIndexingCase(tcu::TestContext &testCtx, const char *name, BlockType blockType,
                           IndexExprType indexExprType, const glu::ShaderType shaderType, uint32_t flags = 0u);
    virtual ~BlockArrayIndexingCase(void);

    virtual TestInstance *createInstance(Context &ctx) const;
    virtual void checkSupport(Context &context) const;

private:
    BlockArrayIndexingCase(const BlockArrayIndexingCase &);
    BlockArrayIndexingCase &operator=(const BlockArrayIndexingCase &);

    void createShaderSpec(void);

    const BlockType m_blockType;
    const uint32_t m_flags;
    std::vector<int> m_readIndices;
    std::vector<uint32_t> m_inValues;
};

BlockArrayIndexingCase::BlockArrayIndexingCase(tcu::TestContext &testCtx, const char *name, BlockType blockType,
                                               IndexExprType indexExprType, const glu::ShaderType shaderType,
                                               uint32_t flags)
    : OpaqueTypeIndexingCase(testCtx, name, shaderType, indexExprType)
    , m_blockType(blockType)
    , m_flags(flags)
    , m_readIndices(BlockArrayIndexingCaseInstance::NUM_READS)
    , m_inValues(BlockArrayIndexingCaseInstance::NUM_INSTANCES)
{
    createShaderSpec();
    init();
}

BlockArrayIndexingCase::~BlockArrayIndexingCase(void)
{
}

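// Account for extra storage buffers bound by the shader executor itself in the compute case
// (the += 2u below); the exact count comes from the executor implementation.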
void BlockArrayIndexingCase::checkSupport(Context &context) const
{
    OpaqueTypeIndexingCase::checkSupport(context);

    uint32_t maxDescriptorStorageBuffers = (uint32_t)(m_inValues.size());

    switch (m_shaderType)
    {
    case glu::SHADERTYPE_VERTEX:
    case glu::SHADERTYPE_TESSELLATION_CONTROL:
    case glu::SHADERTYPE_TESSELLATION_EVALUATION:
    case glu::SHADERTYPE_GEOMETRY:
    case glu::SHADERTYPE_FRAGMENT:
        // No extra storage buffers
        break;
    case glu::SHADERTYPE_COMPUTE:
        // From ComputeShaderExecutor class
        maxDescriptorStorageBuffers += 2u;
        break;
    default:
        TCU_THROW(InternalError, "Unsupported shader type");
    }

    if (maxDescriptorStorageBuffers >
        context.getDeviceProperties2().properties.limits.maxPerStageDescriptorStorageBuffers)
        TCU_THROW(NotSupportedError, "Test requires more storage buffers than maxPerStageDescriptorStorageBuffers");
}

TestInstance *BlockArrayIndexingCase::createInstance(Context &ctx) const
{
    return new BlockArrayIndexingCaseInstance(ctx, m_shaderType, m_shaderSpec, m_name, m_blockType, m_flags,
                                              m_indexExprType, m_readIndices, m_inValues);
}

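// Builds the GLSL fragment: an array of NUM_INSTANCES "Block" instances (uniform or readonly
// buffer), each holding a single highp uint, read once per result output.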
createShaderSpec(void)1414 void BlockArrayIndexingCase::createShaderSpec(void)
1415 {
1416 const int numInstances = BlockArrayIndexingCaseInstance::NUM_INSTANCES;
1417 const int numReads = BlockArrayIndexingCaseInstance::NUM_READS;
1418 de::Random rnd(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
1419 const char *blockName = "Block";
1420 const char *instanceName = "block";
1421 const char *indicesPrefix = "index";
1422 const char *resultPrefix = "result";
1423 const char *interfaceName = m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "readonly buffer";
1424 std::ostringstream global, code;
1425
1426 for (int readNdx = 0; readNdx < numReads; readNdx++)
1427 m_readIndices[readNdx] = rnd.getInt(0, numInstances - 1);
1428
1429 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1430 m_inValues[instanceNdx] = rnd.getUint32();
1431
1432 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1433 global << "#extension GL_EXT_gpu_shader5 : require\n";
1434
1435 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1436 global << "const highp int indexBase = 1;\n";
1437
1438 global << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " "
1439 << blockName
1440 << "\n"
1441 "{\n"
1442 " highp uint value;\n"
1443 "} "
1444 << instanceName << "[" << numInstances << "];\n";
1445
1446 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1447 {
1448 for (int readNdx = 0; readNdx < numReads; readNdx++)
1449 {
1450 const std::string varName = indicesPrefix + de::toString(readNdx);
1451 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1452 }
1453 }
1454 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1455 declareUniformIndexVars(global, (uint32_t)m_inValues.size(), indicesPrefix, numReads);
1456
1457 for (int readNdx = 0; readNdx < numReads; readNdx++)
1458 {
1459 const std::string varName = resultPrefix + de::toString(readNdx);
1460 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1461 }
1462
1463 for (int readNdx = 0; readNdx < numReads; readNdx++)
1464 {
1465 code << resultPrefix << readNdx << " = " << instanceName << "[";
1466
1467 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1468 code << m_readIndices[readNdx];
1469 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1470 code << "indexBase + " << (m_readIndices[readNdx] - 1);
1471 else
1472 code << indicesPrefix << readNdx;
1473
1474 code << "].value;\n";
1475 }
1476
1477 m_shaderSpec.globalDeclarations = global.str();
1478 m_shaderSpec.source = code.str();
1479
1480 if ((m_flags & BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER) != 0)
1481 m_shaderSpec.buildOptions.flags |= vk::ShaderBuildOptions::FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS;
1482 }
1483
1484 class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
1485 {
1486 public:
1487 enum
1488 {
1489 NUM_INVOCATIONS = 32,
1490 NUM_COUNTERS = 4,
1491 NUM_OPS = 4
1492 };
1493
1494 AtomicCounterIndexingCaseInstance(Context &context, const glu::ShaderType shaderType, const ShaderSpec &shaderSpec,
1495 const char *name, const std::vector<int> &opIndices,
1496 const IndexExprType indexExprType);
1497 virtual ~AtomicCounterIndexingCaseInstance(void);
1498
1499 virtual tcu::TestStatus iterate(void);
1500
1501 private:
1502 const std::vector<int> &m_opIndices;
1503 };
1504
AtomicCounterIndexingCaseInstance(Context & context,const glu::ShaderType shaderType,const ShaderSpec & shaderSpec,const char * name,const std::vector<int> & opIndices,const IndexExprType indexExprType)1505 AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance(Context &context, const glu::ShaderType shaderType,
1506 const ShaderSpec &shaderSpec, const char *name,
1507 const std::vector<int> &opIndices,
1508 const IndexExprType indexExprType)
1509 : OpaqueTypeIndexingTestInstance(context, shaderType, shaderSpec, name, indexExprType)
1510 , m_opIndices(opIndices)
1511 {
1512 }
1513
~AtomicCounterIndexingCaseInstance(void)1514 AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance(void)
1515 {
1516 }
1517
tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate(void)
{
    const int numInvocations = NUM_INVOCATIONS;
    const int numCounters    = NUM_COUNTERS;
    const int numOps         = NUM_OPS;
    std::vector<int> expandedIndices;
    std::vector<void *> inputs;
    std::vector<void *> outputs;
    std::vector<uint32_t> outValues(numInvocations * numOps);

    const DeviceInterface &vkd                     = m_context.getDeviceInterface();
    const VkDevice device                          = m_context.getDevice();
    const VkPhysicalDeviceFeatures &deviceFeatures = m_context.getDeviceFeatures();

    // Check stores and atomic operation support.
    switch (m_shaderType)
    {
    case glu::SHADERTYPE_VERTEX:
    case glu::SHADERTYPE_TESSELLATION_CONTROL:
    case glu::SHADERTYPE_TESSELLATION_EVALUATION:
    case glu::SHADERTYPE_GEOMETRY:
        if (!deviceFeatures.vertexPipelineStoresAndAtomics)
            TCU_THROW(NotSupportedError,
                      "Stores and atomic operations are not supported in vertex, tessellation, and geometry shader stages.");
        break;
    case glu::SHADERTYPE_FRAGMENT:
        if (!deviceFeatures.fragmentStoresAndAtomics)
            TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in the fragment shader stage.");
        break;
    case glu::SHADERTYPE_COMPUTE:
        break;
    default:
        throw tcu::InternalError("Unsupported shader type");
    }

    // \note All counters are packed into a single buffer - might want to test
    //       separate buffers & non-zero offsets in the future.
    Buffer atomicOpBuffer(m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(uint32_t) * numCounters);
    MovePtr<Buffer> indexBuffer;

    Move<VkDescriptorSetLayout> extraResourcesLayout;
    Move<VkDescriptorPool> extraResourcesSetPool;
    Move<VkDescriptorSet> extraResourcesSet;

    checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);

    deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(uint32_t) * numCounters);
    atomicOpBuffer.flush();

    if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
        indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);

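    // Descriptor set layout for the extra resources: binding 0 holds the atomic
    // counter storage buffer; binding 1 holds the (optional) index uniform buffer.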
    {
        const VkDescriptorSetLayoutBinding bindings[] = {
            {0u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL},
            {1u, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL}};
        const VkDescriptorSetLayoutCreateInfo layoutInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
            DE_NULL,
            (VkDescriptorSetLayoutCreateFlags)0u,
            DE_LENGTH_OF_ARRAY(bindings),
            bindings,
        };

        extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
    }

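    // Pool with space for exactly one set containing one storage buffer and one
    // uniform buffer descriptor.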
    {
        const VkDescriptorPoolSize poolSizes[] = {{
                                                      VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                                      1u,
                                                  },
                                                  {
                                                      VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                                                      1u,
                                                  }};
        const VkDescriptorPoolCreateInfo poolInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
            1u, // maxSets
            DE_LENGTH_OF_ARRAY(poolSizes),
            poolSizes,
        };

        extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
    }

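    // Allocate the single extra-resources descriptor set from the pool.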
    {
        const VkDescriptorSetAllocateInfo allocInfo = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
            DE_NULL,
            *extraResourcesSetPool,
            1u,
            &extraResourcesLayout.get(),
        };

        extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
    }

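    // Point binding 0 at the atomic counter buffer.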
    {
        const VkDescriptorBufferInfo bufferInfo     = {atomicOpBuffer.getBuffer(), 0u, VK_WHOLE_SIZE};
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            0u, // dstBinding
            0u, // dstArrayElement
            1u,
            VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
            (const VkDescriptorImageInfo *)DE_NULL,
            &bufferInfo,
            (const VkBufferView *)DE_NULL,
        };

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

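    // For uniform indexing, point binding 1 at the buffer holding the op indices.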
    if (indexBuffer)
    {
        const VkDescriptorBufferInfo bufferInfo     = {indexBuffer->getBuffer(), 0u, VK_WHOLE_SIZE};
        const VkWriteDescriptorSet descriptorWrite = {
            VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            DE_NULL,
            *extraResourcesSet,
            1u, // dstBinding
            0u, // dstArrayElement
            1u,
            VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            (const VkDescriptorImageInfo *)DE_NULL,
            &bufferInfo,
            (const VkBufferView *)DE_NULL,
        };

        vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    }

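    // For dynamically uniform indexing the indices are passed as shader inputs, so
    // replicate each op index once per invocation.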
    if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
    {
        expandedIndices.resize(numInvocations * m_opIndices.size());

        for (int opNdx = 0; opNdx < numOps; opNdx++)
        {
            int *dst = &expandedIndices[numInvocations * opNdx];
            std::fill(dst, dst + numInvocations, m_opIndices[opNdx]);
        }

        for (int opNdx = 0; opNdx < numOps; opNdx++)
            inputs.push_back(&expandedIndices[opNdx * numInvocations]);
    }

    for (int opNdx = 0; opNdx < numOps; opNdx++)
        outputs.push_back(&outValues[opNdx * numInvocations]);

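    // Build and run the shader; each invocation performs NUM_OPS atomicAdd() calls.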
    {
        UniquePtr<ShaderExecutor> executor(
            createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

        executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
    }

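    // Verification: each counter must reach (hits per counter) * numInvocations,
    // and every atomicAdd() must have returned a value below that total.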
    {
        tcu::TestLog &log          = m_context.getTestContext().getLog();
        tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
        std::vector<int> numHits(numCounters, 0); // Number of hits per counter.
        std::vector<uint32_t> counterValues(numCounters);
        std::vector<std::map<uint32_t, int>> resultValueHitCountMaps(numCounters);

        for (int opNdx = 0; opNdx < numOps; opNdx++)
            numHits[m_opIndices[opNdx]] += 1;

        // Read counter values
        {
            const void *mapPtr = atomicOpBuffer.getHostPtr();
            DE_ASSERT(mapPtr != DE_NULL);
            atomicOpBuffer.invalidate();
            std::copy((const uint32_t *)mapPtr, (const uint32_t *)mapPtr + numCounters, &counterValues[0]);
        }

        // Verify counter values
        for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
        {
            const uint32_t refCount = (uint32_t)(numHits[counterNdx] * numInvocations);
            const uint32_t resCount = counterValues[counterNdx];

            bool foundInvalidCtrValue = false;

            if (resCount < refCount)
            {
                log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
                    << ", expected value greater than or equal to " << refCount << tcu::TestLog::EndMessage;

                foundInvalidCtrValue = true;
            }
            else if (refCount == 0 && resCount != 0)
            {
                log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
                    << ", expected " << refCount << tcu::TestLog::EndMessage;

                foundInvalidCtrValue = true;
            }

            if (foundInvalidCtrValue)
            {
                if (testResult.getCode() == QP_TEST_RESULT_PASS)
                    testResult = tcu::TestStatus::fail("Invalid atomic counter value");
            }
        }

        // Verify result values from shaders
        for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
        {
            for (int opNdx = 0; opNdx < numOps; opNdx++)
            {
                const int counterNdx    = m_opIndices[opNdx];
                const uint32_t resValue = outValues[opNdx * numInvocations + invocationNdx];
                const bool rangeOk      = de::inBounds(resValue, 0u, counterValues[counterNdx]);

                if (resultValueHitCountMaps[counterNdx].count(resValue) == 0)
                    resultValueHitCountMaps[counterNdx][resValue] = 1;
                else
                    resultValueHitCountMaps[counterNdx][resValue] += 1;

                if (!rangeOk)
                {
                    log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", op " << opNdx
                        << ": got invalid result value " << resValue << tcu::TestLog::EndMessage;

                    if (testResult.getCode() == QP_TEST_RESULT_PASS)
                        testResult = tcu::TestStatus::fail("Invalid result value");
                }
            }
        }

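        // atomicAdd() returns the counter's pre-increment value, so within one
        // counter every returned value must be unique; duplicates indicate lost
        // atomic updates.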
        for (int ctrIdx = 0; ctrIdx < numCounters; ctrIdx++)
        {
            std::map<uint32_t, int>::iterator hitCountItr;
            for (hitCountItr = resultValueHitCountMaps[ctrIdx].begin();
                 hitCountItr != resultValueHitCountMaps[ctrIdx].end(); hitCountItr++)
            {
                if (hitCountItr->second > 1)
                {
                    log << tcu::TestLog::Message << "ERROR: Duplicate result value from counter " << ctrIdx << "."
                        << " Value " << hitCountItr->first << " found " << hitCountItr->second << " times."
                        << tcu::TestLog::EndMessage;

                    if (testResult.getCode() == QP_TEST_RESULT_PASS)
                        testResult = tcu::TestStatus::fail("Invalid result value");
                }
            }
        }

        return testResult;
    }
}

class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
{
public:
    AtomicCounterIndexingCase(tcu::TestContext &testCtx, const char *name, IndexExprType indexExprType,
                              const glu::ShaderType shaderType);
    virtual ~AtomicCounterIndexingCase(void);

    virtual TestInstance *createInstance(Context &ctx) const;

private:
    AtomicCounterIndexingCase(const AtomicCounterIndexingCase &);
    AtomicCounterIndexingCase &operator=(const AtomicCounterIndexingCase &);

    void createShaderSpec(void);

    std::vector<int> m_opIndices;
};


AtomicCounterIndexingCase::AtomicCounterIndexingCase(tcu::TestContext &testCtx, const char *name,
                                                     IndexExprType indexExprType, const glu::ShaderType shaderType)
    : OpaqueTypeIndexingCase(testCtx, name, shaderType, indexExprType)
    , m_opIndices(AtomicCounterIndexingCaseInstance::NUM_OPS)
{
    createShaderSpec();
    init();
}

AtomicCounterIndexingCase::~AtomicCounterIndexingCase(void)
{
}

TestInstance *AtomicCounterIndexingCase::createInstance(Context &ctx) const
{
    return new AtomicCounterIndexingCaseInstance(ctx, m_shaderType, m_shaderSpec, m_name, m_opIndices, m_indexExprType);
}

void AtomicCounterIndexingCase::createShaderSpec(void)
{
    const int numCounters = AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
    const int numOps      = AtomicCounterIndexingCaseInstance::NUM_OPS;
    de::Random rnd(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));

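    // Pick a random counter index for each op. NUM_OPS == NUM_COUNTERS, so the
    // range [0, numOps - 1] spans the whole counter array.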
    for (int opNdx = 0; opNdx < numOps; opNdx++)
        m_opIndices[opNdx] = rnd.getInt(0, numOps - 1);

    {
        const char *indicesPrefix = "index";
        const char *resultPrefix  = "result";
        std::ostringstream global, code;

        if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
            global << "#extension GL_EXT_gpu_shader5 : require\n";

        if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
            global << "const highp int indexBase = 1;\n";

        global << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX
               << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";

        if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
        {
            for (int opNdx = 0; opNdx < numOps; opNdx++)
            {
                const std::string varName = indicesPrefix + de::toString(opNdx);
                m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
            }
        }
        else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
            declareUniformIndexVars(global, 1, indicesPrefix, numOps);

        for (int opNdx = 0; opNdx < numOps; opNdx++)
        {
            const std::string varName = resultPrefix + de::toString(opNdx);
            m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
        }

        for (int opNdx = 0; opNdx < numOps; opNdx++)
        {
            code << resultPrefix << opNdx << " = atomicAdd(counter[";

            if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
                code << m_opIndices[opNdx];
            else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
                code << "indexBase + " << (m_opIndices[opNdx] - 1);
            else
                code << indicesPrefix << opNdx;

            code << "], uint(1));\n";
        }
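
        // Illustrative example of the generated source: with const_expression
        // indexing and m_opIndices = {2, 0, 3, 1} (indices made up for the
        // example), the body would read:
        //
        //     result0 = atomicAdd(counter[indexBase + 1], uint(1));
        //     result1 = atomicAdd(counter[indexBase + -1], uint(1));
        //     result2 = atomicAdd(counter[indexBase + 2], uint(1));
        //     result3 = atomicAdd(counter[indexBase + 0], uint(1));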

        m_shaderSpec.globalDeclarations = global.str();
        m_shaderSpec.source             = code.str();
    }
}

class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
{
public:
    OpaqueTypeIndexingTests(tcu::TestContext &testCtx);
    virtual ~OpaqueTypeIndexingTests(void);

    virtual void init(void);

private:
    OpaqueTypeIndexingTests(const OpaqueTypeIndexingTests &);
    OpaqueTypeIndexingTests &operator=(const OpaqueTypeIndexingTests &);
};

OpaqueTypeIndexingTests::OpaqueTypeIndexingTests(tcu::TestContext &testCtx)
    : tcu::TestCaseGroup(testCtx, "opaque_type_indexing")
{
}

OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests(void)
{
}

void OpaqueTypeIndexingTests::init(void)
{
    static const struct
    {
        IndexExprType type;
        const char *name;
    } indexingTypes[] = {// Indexing by constant literal
                         {INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal"},
                         // Indexing by constant expression
                         {INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression"},
                         // Indexing by uniform value
                         {INDEX_EXPR_TYPE_UNIFORM, "uniform"},
                         // Indexing by dynamically uniform expression
                         {INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform"}};

    static const struct
    {
        glu::ShaderType type;
        const char *name;
    } shaderTypes[] = {{glu::SHADERTYPE_VERTEX, "vertex"},
                       {glu::SHADERTYPE_FRAGMENT, "fragment"},
                       {glu::SHADERTYPE_GEOMETRY, "geometry"},
                       {glu::SHADERTYPE_TESSELLATION_CONTROL, "tess_ctrl"},
                       {glu::SHADERTYPE_TESSELLATION_EVALUATION, "tess_eval"},
                       {glu::SHADERTYPE_COMPUTE, "compute"}};

    // .sampler
    {
        static const glu::DataType samplerTypes[] = {
            glu::TYPE_SAMPLER_1D,
            glu::TYPE_SAMPLER_1D_ARRAY,
            glu::TYPE_SAMPLER_1D_ARRAY_SHADOW,
            glu::TYPE_SAMPLER_2D,
            glu::TYPE_SAMPLER_CUBE,
            glu::TYPE_SAMPLER_2D_ARRAY,
            glu::TYPE_SAMPLER_3D,
            glu::TYPE_SAMPLER_1D_SHADOW,
            glu::TYPE_SAMPLER_2D_SHADOW,
            glu::TYPE_SAMPLER_CUBE_SHADOW,
            glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
            glu::TYPE_INT_SAMPLER_1D,
            glu::TYPE_INT_SAMPLER_1D_ARRAY,
            glu::TYPE_INT_SAMPLER_2D,
            glu::TYPE_INT_SAMPLER_CUBE,
            glu::TYPE_INT_SAMPLER_2D_ARRAY,
            glu::TYPE_INT_SAMPLER_3D,
            glu::TYPE_UINT_SAMPLER_1D,
            glu::TYPE_UINT_SAMPLER_1D_ARRAY,
            glu::TYPE_UINT_SAMPLER_2D,
            glu::TYPE_UINT_SAMPLER_CUBE,
            glu::TYPE_UINT_SAMPLER_2D_ARRAY,
            glu::TYPE_UINT_SAMPLER_3D,
        };

        // Sampler Array Indexing Tests
        tcu::TestCaseGroup *const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler");
        addChild(samplerGroup);

        for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
        {
            const IndexExprType indexExprType    = indexingTypes[indexTypeNdx].type;
            tcu::TestCaseGroup *const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name);
            samplerGroup->addChild(indexGroup);

            for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
            {
                const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
                tcu::TestCaseGroup *const shaderGroup =
                    new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name);
                indexGroup->addChild(shaderGroup);

                // \note [pyry] In Vulkan CTS 1.0.2 sampler groups should not cover tess/geom stages
                if ((shaderType != glu::SHADERTYPE_VERTEX) && (shaderType != glu::SHADERTYPE_FRAGMENT) &&
                    (shaderType != glu::SHADERTYPE_COMPUTE))
                    continue;

                for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
                {
                    const glu::DataType samplerType = samplerTypes[samplerTypeNdx];
                    const char *samplerName         = getDataTypeName(samplerType);
                    const std::string caseName      = de::toLower(samplerName);

                    shaderGroup->addChild(
                        new SamplerIndexingCase(m_testCtx, caseName.c_str(), shaderType, samplerType, indexExprType));
                }
            }
        }
    }

    // .ubo / .ssbo / .atomic_counter
    {
        // Uniform Block Instance Array Indexing Tests
        tcu::TestCaseGroup *const uboGroup  = new tcu::TestCaseGroup(m_testCtx, "ubo");
        tcu::TestCaseGroup *const ssboGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo");
        tcu::TestCaseGroup *const ssboStorageBufGroup =
            new tcu::TestCaseGroup(m_testCtx, "ssbo_storage_buffer_decoration");
        tcu::TestCaseGroup *const acGroup = new tcu::TestCaseGroup(m_testCtx, "atomic_counter");
        addChild(uboGroup);
        addChild(ssboGroup);
        addChild(ssboStorageBufGroup);
        addChild(acGroup);

        for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
        {
            const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
            const char *indexExprName         = indexingTypes[indexTypeNdx].name;

            for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
            {
                const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
                const std::string name           = std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;

                uboGroup->addChild(
                    new BlockArrayIndexingCase(m_testCtx, name.c_str(), BLOCKTYPE_UNIFORM, indexExprType, shaderType));
                acGroup->addChild(new AtomicCounterIndexingCase(m_testCtx, name.c_str(), indexExprType, shaderType));
                ssboGroup->addChild(
                    new BlockArrayIndexingCase(m_testCtx, name.c_str(), BLOCKTYPE_BUFFER, indexExprType, shaderType));
                ssboStorageBufGroup->addChild(
                    new BlockArrayIndexingCase(m_testCtx, name.c_str(), BLOCKTYPE_BUFFER, indexExprType, shaderType,
                                               (uint32_t)BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER));
            }
        }
    }
}

} // namespace

tcu::TestCaseGroup *createOpaqueTypeIndexingTests(tcu::TestContext &testCtx)
{
    return new OpaqueTypeIndexingTests(testCtx);
}

} // namespace shaderexecutor
} // namespace vkt