1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file
23 * \brief Vulkan ShaderRenderCase
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktShaderRender.hpp"
27
28 #include "tcuImageCompare.hpp"
29 #include "tcuImageIO.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuTextureUtil.hpp"
32 #include "tcuSurface.hpp"
33 #include "tcuVector.hpp"
34
35 #include "deFilePath.hpp"
36 #include "deMath.h"
37 #include "deUniquePtr.hpp"
38
39 #include "vkDeviceUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkPlatform.hpp"
42 #include "vkQueryUtil.hpp"
43 #include "vkRef.hpp"
44 #include "vkRefUtil.hpp"
45 #include "vkStrUtil.hpp"
46 #include "vkTypeUtil.hpp"
47 #include "vkCmdUtil.hpp"
48 #include "vkObjUtil.hpp"
49
50 #include <vector>
51 #include <string>
52
53 namespace vkt
54 {
55 namespace sr
56 {
57
58 using namespace vk;
59
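// Maps a TextureBinding texture type to the corresponding Vulkan image view type.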
60 VkImageViewType textureTypeToImageViewType(TextureBinding::Type type)
61 {
62 switch (type)
63 {
64 case TextureBinding::TYPE_1D:
65 return VK_IMAGE_VIEW_TYPE_1D;
66 case TextureBinding::TYPE_2D:
67 return VK_IMAGE_VIEW_TYPE_2D;
68 case TextureBinding::TYPE_3D:
69 return VK_IMAGE_VIEW_TYPE_3D;
70 case TextureBinding::TYPE_CUBE_MAP:
71 return VK_IMAGE_VIEW_TYPE_CUBE;
72 case TextureBinding::TYPE_1D_ARRAY:
73 return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
74 case TextureBinding::TYPE_2D_ARRAY:
75 return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
76 case TextureBinding::TYPE_CUBE_ARRAY:
77 return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
78
79 default:
80 DE_FATAL("Impossible");
81 return (VkImageViewType)0;
82 }
83 }
84
85 VkImageType viewTypeToImageType(VkImageViewType type)
86 {
87 switch (type)
88 {
89 case VK_IMAGE_VIEW_TYPE_1D:
90 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
91 return VK_IMAGE_TYPE_1D;
92 case VK_IMAGE_VIEW_TYPE_2D:
93 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
94 return VK_IMAGE_TYPE_2D;
95 case VK_IMAGE_VIEW_TYPE_3D:
96 return VK_IMAGE_TYPE_3D;
97 case VK_IMAGE_VIEW_TYPE_CUBE:
98 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
99 return VK_IMAGE_TYPE_2D;
100
101 default:
102 DE_FATAL("Impossible");
103 return (VkImageType)0;
104 }
105 }
106
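// Test textures are sampled in shaders and filled via transfer, hence SAMPLED | TRANSFER_DST usage.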
107 vk::VkImageUsageFlags textureUsageFlags(void)
108 {
109 return (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
110 }
111
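// Cube and cube-array views require a cube-compatible image; sparse backing additionally needs the sparse binding/residency flags.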
112 vk::VkImageCreateFlags textureCreateFlags(vk::VkImageViewType viewType,
113 ShaderRenderCaseInstance::ImageBackingMode backingMode)
114 {
115 const bool isCube = (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY);
116 VkImageCreateFlags imageCreateFlags =
117 (isCube ? static_cast<VkImageCreateFlags>(VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) : 0u);
118
119 if (backingMode == ShaderRenderCaseInstance::IMAGE_BACKING_MODE_SPARSE)
120 imageCreateFlags |= (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT);
121
122 return imageCreateFlags;
123 }
124
125 namespace
126 {
127
128 static const uint32_t MAX_RENDER_WIDTH = 128;
129 static const uint32_t MAX_RENDER_HEIGHT = 128;
130 static const tcu::Vec4 DEFAULT_CLEAR_COLOR = tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f);
131
132 /*! Gets the smallest multiple of divisor that is greater than or equal to value */
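//  e.g. getNextMultiple(4u, 13u) returns 16u.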
133 static uint32_t getNextMultiple(uint32_t divisor, uint32_t value)
134 {
135 if (value % divisor == 0)
136 {
137 return value;
138 }
139 return value + divisor - (value % divisor);
140 }
141
142 /*! Gets the smallest value that is a multiple of all given divisors and greater than or equal to value */
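//  e.g. getNextMultiple({4u, 12u}, 13u) returns 24u.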
143 static uint32_t getNextMultiple(const std::vector<uint32_t> &divisors, uint32_t value)
144 {
145 uint32_t nextMultiple = value;
146 bool nextMultipleFound = false;
147
148 while (true)
149 {
150 nextMultipleFound = true;
151
152 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
153 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
154
155 if (nextMultipleFound)
156 break;
157
158 DE_ASSERT(nextMultiple < ~((uint32_t)0u));
159 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
160 }
161
162 return nextMultiple;
163 }
164
165 } // namespace
166
167 // QuadGrid.
168
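// Tessellated full-viewport quad: per-vertex positions, coordinate sets and user attributes that are
// shared between the Vulkan draw and the CPU reference evaluation.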
169 class QuadGrid
170 {
171 public:
172 QuadGrid(int gridSize, int screenWidth, int screenHeight, const tcu::Vec4 &constCoords,
173 const std::vector<tcu::Mat4> &userAttribTransforms, const std::vector<TextureBindingSp> &textures);
174 ~QuadGrid(void);
175
176 int getGridSize(void) const
177 {
178 return m_gridSize;
179 }
180 int getNumVertices(void) const
181 {
182 return m_numVertices;
183 }
184 int getNumTriangles(void) const
185 {
186 return m_numTriangles;
187 }
188 const tcu::Vec4 &getConstCoords(void) const
189 {
190 return m_constCoords;
191 }
192 const std::vector<tcu::Mat4> getUserAttribTransforms(void) const
193 {
194 return m_userAttribTransforms;
195 }
196 const std::vector<TextureBindingSp> &getTextures(void) const
197 {
198 return m_textures;
199 }
200
201 const tcu::Vec4 *getPositions(void) const
202 {
203 return &m_positions[0];
204 }
205 const float *getAttribOne(void) const
206 {
207 return &m_attribOne[0];
208 }
209 const tcu::Vec4 *getCoords(void) const
210 {
211 return &m_coords[0];
212 }
213 const tcu::Vec4 *getUnitCoords(void) const
214 {
215 return &m_unitCoords[0];
216 }
217
218 const tcu::Vec4 *getUserAttrib(int attribNdx) const
219 {
220 return &m_userAttribs[attribNdx][0];
221 }
222 const uint16_t *getIndices(void) const
223 {
224 return &m_indices[0];
225 }
226
227 tcu::Vec4 getCoords(float sx, float sy) const;
228 tcu::Vec4 getUnitCoords(float sx, float sy) const;
229
230 int getNumUserAttribs(void) const
231 {
232 return (int)m_userAttribTransforms.size();
233 }
234 tcu::Vec4 getUserAttrib(int attribNdx, float sx, float sy) const;
235
236 private:
237 const int m_gridSize;
238 const int m_numVertices;
239 const int m_numTriangles;
240 const tcu::Vec4 m_constCoords;
241 const std::vector<tcu::Mat4> m_userAttribTransforms;
242
243 const std::vector<TextureBindingSp> &m_textures;
244
245 std::vector<tcu::Vec4> m_screenPos;
246 std::vector<tcu::Vec4> m_positions;
247 std::vector<tcu::Vec4> m_coords; //!< Near-unit coordinates, roughly [-2.0 .. 2.0].
248 std::vector<tcu::Vec4> m_unitCoords; //!< Positive-only coordinates [0.0 .. 1.5].
249 std::vector<float> m_attribOne;
250 std::vector<tcu::Vec4> m_userAttribs[ShaderEvalContext::MAX_TEXTURES];
251 std::vector<uint16_t> m_indices;
252 };
253
254 QuadGrid::QuadGrid(int gridSize, int width, int height, const tcu::Vec4 &constCoords,
255 const std::vector<tcu::Mat4> &userAttribTransforms, const std::vector<TextureBindingSp> &textures)
256 : m_gridSize(gridSize)
257 , m_numVertices((gridSize + 1) * (gridSize + 1))
258 , m_numTriangles(gridSize * gridSize * 2)
259 , m_constCoords(constCoords)
260 , m_userAttribTransforms(userAttribTransforms)
261 , m_textures(textures)
262 {
263 const tcu::Vec4 viewportScale((float)width, (float)height, 0.0f, 0.0f);
264
265 // Compute vertices.
266 m_screenPos.resize(m_numVertices);
267 m_positions.resize(m_numVertices);
268 m_coords.resize(m_numVertices);
269 m_unitCoords.resize(m_numVertices);
270 m_attribOne.resize(m_numVertices);
271
272 // User attributes.
273 for (int attrNdx = 0; attrNdx < DE_LENGTH_OF_ARRAY(m_userAttribs); attrNdx++)
274 m_userAttribs[attrNdx].resize(m_numVertices);
275
276 for (int y = 0; y < gridSize + 1; y++)
277 for (int x = 0; x < gridSize + 1; x++)
278 {
279 float sx = (float)x / (float)gridSize;
280 float sy = (float)y / (float)gridSize;
281 float fx = 2.0f * sx - 1.0f;
282 float fy = 2.0f * sy - 1.0f;
283 int vtxNdx = ((y * (gridSize + 1)) + x);
284
285 m_positions[vtxNdx] = tcu::Vec4(fx, fy, 0.0f, 1.0f);
286 m_coords[vtxNdx] = getCoords(sx, sy);
287 m_unitCoords[vtxNdx] = getUnitCoords(sx, sy);
288 m_attribOne[vtxNdx] = 1.0f;
289
290 m_screenPos[vtxNdx] = tcu::Vec4(sx, sy, 0.0f, 1.0f) * viewportScale;
291
292 for (int attribNdx = 0; attribNdx < getNumUserAttribs(); attribNdx++)
293 m_userAttribs[attribNdx][vtxNdx] = getUserAttrib(attribNdx, sx, sy);
294 }
295
296 // Compute indices.
297 m_indices.resize(3 * m_numTriangles);
298 for (int y = 0; y < gridSize; y++)
299 for (int x = 0; x < gridSize; x++)
300 {
301 int stride = gridSize + 1;
302 int v00 = (y * stride) + x;
303 int v01 = (y * stride) + x + 1;
304 int v10 = ((y + 1) * stride) + x;
305 int v11 = ((y + 1) * stride) + x + 1;
306
307 int baseNdx = ((y * gridSize) + x) * 6;
308 m_indices[baseNdx + 0] = (uint16_t)v10;
309 m_indices[baseNdx + 1] = (uint16_t)v00;
310 m_indices[baseNdx + 2] = (uint16_t)v01;
311
312 m_indices[baseNdx + 3] = (uint16_t)v10;
313 m_indices[baseNdx + 4] = (uint16_t)v01;
314 m_indices[baseNdx + 5] = (uint16_t)v11;
315 }
316 }
317
318 QuadGrid::~QuadGrid(void)
319 {
320 }
321
322 inline tcu::Vec4 QuadGrid::getCoords(float sx, float sy) const
323 {
324 const float fx = 2.0f * sx - 1.0f;
325 const float fy = 2.0f * sy - 1.0f;
326 return tcu::Vec4(fx, fy, -fx + 0.33f * fy, -0.275f * fx - fy);
327 }
328
329 inline tcu::Vec4 QuadGrid::getUnitCoords(float sx, float sy) const
330 {
331 return tcu::Vec4(sx, sy, 0.33f * sx + 0.5f * sy, 0.5f * sx + 0.25f * sy);
332 }
333
334 inline tcu::Vec4 QuadGrid::getUserAttrib(int attribNdx, float sx, float sy) const
335 {
336 // homogeneous normalized screen-space coordinates
337 return m_userAttribTransforms[attribNdx] * tcu::Vec4(sx, sy, 0.0f, 1.0f);
338 }
339
340 // TextureBinding
341
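// Constructs a binding by loading the texture from the test archive; only 2D textures are supported here.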
342 TextureBinding::TextureBinding(const tcu::Archive &archive, const char *filename, const Type type,
343 const tcu::Sampler &sampler)
344 : m_type(type)
345 , m_sampler(sampler)
346 {
347 switch (m_type)
348 {
349 case TYPE_2D:
350 m_binding.tex2D = loadTexture2D(archive, filename).release();
351 break;
352 default:
353 DE_FATAL("Unsupported texture type");
354 }
355 }
356
357 TextureBinding::TextureBinding(const tcu::Texture1D *tex1D, const tcu::Sampler &sampler)
358 : m_type(TYPE_1D)
359 , m_sampler(sampler)
360 {
361 m_binding.tex1D = tex1D;
362 }
363
364 TextureBinding::TextureBinding(const tcu::Texture2D *tex2D, const tcu::Sampler &sampler)
365 : m_type(TYPE_2D)
366 , m_sampler(sampler)
367 {
368 m_binding.tex2D = tex2D;
369 }
370
371 TextureBinding::TextureBinding(const tcu::Texture3D *tex3D, const tcu::Sampler &sampler)
372 : m_type(TYPE_3D)
373 , m_sampler(sampler)
374 {
375 m_binding.tex3D = tex3D;
376 }
377
378 TextureBinding::TextureBinding(const tcu::TextureCube *texCube, const tcu::Sampler &sampler)
379 : m_type(TYPE_CUBE_MAP)
380 , m_sampler(sampler)
381 {
382 m_binding.texCube = texCube;
383 }
384
385 TextureBinding::TextureBinding(const tcu::Texture1DArray *tex1DArray, const tcu::Sampler &sampler)
386 : m_type(TYPE_1D_ARRAY)
387 , m_sampler(sampler)
388 {
389 m_binding.tex1DArray = tex1DArray;
390 }
391
392 TextureBinding::TextureBinding(const tcu::Texture2DArray *tex2DArray, const tcu::Sampler &sampler)
393 : m_type(TYPE_2D_ARRAY)
394 , m_sampler(sampler)
395 {
396 m_binding.tex2DArray = tex2DArray;
397 }
398
399 TextureBinding::TextureBinding(const tcu::TextureCubeArray *texCubeArray, const tcu::Sampler &sampler)
400 : m_type(TYPE_CUBE_ARRAY)
401 , m_sampler(sampler)
402 {
403 m_binding.texCubeArray = texCubeArray;
404 }
405
406 TextureBinding::~TextureBinding(void)
407 {
408 switch (m_type)
409 {
410 case TYPE_1D:
411 delete m_binding.tex1D;
412 break;
413 case TYPE_2D:
414 delete m_binding.tex2D;
415 break;
416 case TYPE_3D:
417 delete m_binding.tex3D;
418 break;
419 case TYPE_CUBE_MAP:
420 delete m_binding.texCube;
421 break;
422 case TYPE_1D_ARRAY:
423 delete m_binding.tex1DArray;
424 break;
425 case TYPE_2D_ARRAY:
426 delete m_binding.tex2DArray;
427 break;
428 case TYPE_CUBE_ARRAY:
429 delete m_binding.texCubeArray;
430 break;
431 default:
432 break;
433 }
434 }
435
436 de::MovePtr<tcu::Texture2D> TextureBinding::loadTexture2D(const tcu::Archive &archive, const char *filename)
437 {
438 tcu::TextureLevel level;
439 tcu::ImageIO::loadImage(level, archive, filename);
440
441 TCU_CHECK_INTERNAL(
442 level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8) ||
443 level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGB, tcu::TextureFormat::UNORM_INT8));
444
445 // \todo [2015-10-08 elecro] for some reason we get better results when using an RGBA texture even in the RGB case, this needs to be investigated
446 de::MovePtr<tcu::Texture2D> texture(
447 new tcu::Texture2D(tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8),
448 level.getWidth(), level.getHeight()));
449
450 // Fill level 0.
451 texture->allocLevel(0);
452 tcu::copy(texture->getLevel(0), level.getAccess());
453
454 return texture;
455 }
456
457 // ShaderEvalContext.
458
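// Gathers everything the CPU reference evaluator needs from the quad grid: constant coords, user attributes and the bound textures.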
459 ShaderEvalContext::ShaderEvalContext(const QuadGrid &quadGrid)
460 : constCoords(quadGrid.getConstCoords())
461 , isDiscarded(false)
462 , m_quadGrid(quadGrid)
463 {
464 const std::vector<TextureBindingSp> &bindings = m_quadGrid.getTextures();
465 DE_ASSERT((int)bindings.size() <= MAX_TEXTURES);
466
467 // Fill in texture array.
468 for (int ndx = 0; ndx < (int)bindings.size(); ndx++)
469 {
470 const TextureBinding &binding = *bindings[ndx];
471
472 if (binding.getType() == TextureBinding::TYPE_NONE)
473 continue;
474
475 textures[ndx].sampler = binding.getSampler();
476
477 switch (binding.getType())
478 {
479 case TextureBinding::TYPE_1D:
480 textures[ndx].tex1D = &binding.get1D();
481 break;
482 case TextureBinding::TYPE_2D:
483 textures[ndx].tex2D = &binding.get2D();
484 break;
485 case TextureBinding::TYPE_3D:
486 textures[ndx].tex3D = &binding.get3D();
487 break;
488 case TextureBinding::TYPE_CUBE_MAP:
489 textures[ndx].texCube = &binding.getCube();
490 break;
491 case TextureBinding::TYPE_1D_ARRAY:
492 textures[ndx].tex1DArray = &binding.get1DArray();
493 break;
494 case TextureBinding::TYPE_2D_ARRAY:
495 textures[ndx].tex2DArray = &binding.get2DArray();
496 break;
497 case TextureBinding::TYPE_CUBE_ARRAY:
498 textures[ndx].texCubeArray = &binding.getCubeArray();
499 break;
500 default:
501 TCU_THROW(InternalError, "Handling of texture binding type not implemented");
502 }
503 }
504 }
505
506 ShaderEvalContext::~ShaderEvalContext(void)
507 {
508 }
509
510 void ShaderEvalContext::reset(float sx, float sy)
511 {
512 // Clear old values
513 color = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
514 isDiscarded = false;
515
516 // Compute coords
517 coords = m_quadGrid.getCoords(sx, sy);
518 unitCoords = m_quadGrid.getUnitCoords(sx, sy);
519
520 // Compute user attributes.
521 const int numAttribs = m_quadGrid.getNumUserAttribs();
522 DE_ASSERT(numAttribs <= MAX_USER_ATTRIBS);
523 for (int attribNdx = 0; attribNdx < numAttribs; attribNdx++)
524 in[attribNdx] = m_quadGrid.getUserAttrib(attribNdx, sx, sy);
525 }
526
527 tcu::Vec4 ShaderEvalContext::texture2D(int unitNdx, const tcu::Vec2 &texCoords)
528 {
529 if (textures[unitNdx].tex2D)
530 return textures[unitNdx].tex2D->sample(textures[unitNdx].sampler, texCoords.x(), texCoords.y(), 0.0f);
531 else
532 return tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
533 }
534
535 // ShaderEvaluator.
536
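// Wraps the per-point evaluation callback used to compute reference colors.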
537 ShaderEvaluator::ShaderEvaluator(void) : m_evalFunc(DE_NULL)
538 {
539 }
540
541 ShaderEvaluator::ShaderEvaluator(ShaderEvalFunc evalFunc) : m_evalFunc(evalFunc)
542 {
543 }
544
545 ShaderEvaluator::~ShaderEvaluator(void)
546 {
547 }
548
549 void ShaderEvaluator::evaluate(ShaderEvalContext &ctx) const
550 {
551 DE_ASSERT(m_evalFunc);
552 m_evalFunc(ctx);
553 }
554
555 // UniformSetup.
556
557 UniformSetup::UniformSetup(void) : m_setupFunc(DE_NULL)
558 {
559 }
560
561 UniformSetup::UniformSetup(UniformSetupFunc setupFunc) : m_setupFunc(setupFunc)
562 {
563 }
564
565 UniformSetup::~UniformSetup(void)
566 {
567 }
568
569 void UniformSetup::setup(ShaderRenderCaseInstance &instance, const tcu::Vec4 &constCoords) const
570 {
571 if (m_setupFunc)
572 m_setupFunc(instance, constCoords);
573 }
574
575 // ShaderRenderCase.
576
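// The test case holds the evaluator and uniform setup objects and creates a ShaderRenderCaseInstance for execution.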
577 ShaderRenderCase::ShaderRenderCase(tcu::TestContext &testCtx, const std::string &name, const bool isVertexCase,
578 const ShaderEvalFunc evalFunc, const UniformSetup *uniformSetup,
579 const AttributeSetupFunc attribFunc)
580 : vkt::TestCase(testCtx, name)
581 , m_isVertexCase(isVertexCase)
582 , m_evaluator(new ShaderEvaluator(evalFunc))
583 , m_uniformSetup(uniformSetup ? uniformSetup : new UniformSetup())
584 , m_attribFunc(attribFunc)
585 {
586 }
587
588 ShaderRenderCase::ShaderRenderCase(tcu::TestContext &testCtx, const std::string &name, const bool isVertexCase,
589 const ShaderEvaluator *evaluator, const UniformSetup *uniformSetup,
590 const AttributeSetupFunc attribFunc)
591 : vkt::TestCase(testCtx, name)
592 , m_isVertexCase(isVertexCase)
593 , m_evaluator(evaluator)
594 , m_uniformSetup(uniformSetup ? uniformSetup : new UniformSetup())
595 , m_attribFunc(attribFunc)
596 {
597 }
598
599 ShaderRenderCase::~ShaderRenderCase(void)
600 {
601 }
602
603 void ShaderRenderCase::initPrograms(vk::SourceCollections &programCollection) const
604 {
605 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
606 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
607 }
608
609 TestInstance *ShaderRenderCase::createInstance(Context &context) const
610 {
611 DE_ASSERT(m_evaluator != DE_NULL);
612 DE_ASSERT(m_uniformSetup != DE_NULL);
613 return new ShaderRenderCaseInstance(context, m_isVertexCase, *m_evaluator, *m_uniformSetup, m_attribFunc);
614 }
615
616 // ShaderRenderCaseInstance.
617
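// Default-configured instance: regular image backing, fragment-case grid size, single-sample rendering and fuzzy comparison enabled.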
618 ShaderRenderCaseInstance::ShaderRenderCaseInstance(Context &context)
619 : vkt::TestInstance(context)
620 , m_imageBackingMode(IMAGE_BACKING_MODE_REGULAR)
621 , m_quadGridSize(static_cast<uint32_t>(GRID_SIZE_DEFAULT_FRAGMENT))
622 , m_memAlloc(getAllocator())
623 , m_clearColor(DEFAULT_CLEAR_COLOR)
624 , m_isVertexCase(false)
625 , m_vertexShaderName("vert")
626 , m_fragmentShaderName("frag")
627 , m_renderSize(MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
628 , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
629 , m_evaluator(DE_NULL)
630 , m_uniformSetup(DE_NULL)
631 , m_attribFunc(DE_NULL)
632 , m_sampleCount(VK_SAMPLE_COUNT_1_BIT)
633 , m_fuzzyCompare(true)
634 {
635 }
636
637 ShaderRenderCaseInstance::ShaderRenderCaseInstance(Context &context, const bool isVertexCase,
638 const ShaderEvaluator &evaluator, const UniformSetup &uniformSetup,
639 const AttributeSetupFunc attribFunc,
640 const ImageBackingMode imageBackingMode, const uint32_t gridSize,
641 const bool fuzzyCompare)
642 : vkt::TestInstance(context)
643 , m_imageBackingMode(imageBackingMode)
644 , m_quadGridSize(gridSize == static_cast<uint32_t>(GRID_SIZE_DEFAULTS) ?
645 (isVertexCase ? static_cast<uint32_t>(GRID_SIZE_DEFAULT_VERTEX) :
646 static_cast<uint32_t>(GRID_SIZE_DEFAULT_FRAGMENT)) :
647 gridSize)
648 , m_memAlloc(getAllocator())
649 , m_clearColor(DEFAULT_CLEAR_COLOR)
650 , m_isVertexCase(isVertexCase)
651 , m_vertexShaderName("vert")
652 , m_fragmentShaderName("frag")
653 , m_renderSize(MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
654 , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
655 , m_evaluator(&evaluator)
656 , m_uniformSetup(&uniformSetup)
657 , m_attribFunc(attribFunc)
658 , m_sampleCount(VK_SAMPLE_COUNT_1_BIT)
659 , m_fuzzyCompare(fuzzyCompare)
660 {
661 }
662
663 ShaderRenderCaseInstance::ShaderRenderCaseInstance(Context &context, const bool isVertexCase,
664 const ShaderEvaluator *evaluator, const UniformSetup *uniformSetup,
665 const AttributeSetupFunc attribFunc,
666 const ImageBackingMode imageBackingMode, const uint32_t gridSize)
667 : vkt::TestInstance(context)
668 , m_imageBackingMode(imageBackingMode)
669 , m_quadGridSize(gridSize == static_cast<uint32_t>(GRID_SIZE_DEFAULTS) ?
670 (isVertexCase ? static_cast<uint32_t>(GRID_SIZE_DEFAULT_VERTEX) :
671 static_cast<uint32_t>(GRID_SIZE_DEFAULT_FRAGMENT)) :
672 gridSize)
673 , m_memAlloc(getAllocator())
674 , m_clearColor(DEFAULT_CLEAR_COLOR)
675 , m_isVertexCase(isVertexCase)
676 , m_vertexShaderName("vert")
677 , m_fragmentShaderName("frag")
678 , m_renderSize(MAX_RENDER_WIDTH, MAX_RENDER_HEIGHT)
679 , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
680 , m_evaluator(evaluator)
681 , m_uniformSetup(uniformSetup)
682 , m_attribFunc(attribFunc)
683 , m_sampleCount(VK_SAMPLE_COUNT_1_BIT)
684 , m_fuzzyCompare(false)
685 {
686 }
687
688 vk::Allocator &ShaderRenderCaseInstance::getAllocator(void) const
689 {
690 return m_context.getDefaultAllocator();
691 }
692
693 ShaderRenderCaseInstance::~ShaderRenderCaseInstance(void)
694 {
695 }
696
697 VkDevice ShaderRenderCaseInstance::getDevice(void) const
698 {
699 return m_context.getDevice();
700 }
701
702 uint32_t ShaderRenderCaseInstance::getUniversalQueueFamilyIndex(void) const
703 {
704 return m_context.getUniversalQueueFamilyIndex();
705 }
706
707 uint32_t ShaderRenderCaseInstance::getSparseQueueFamilyIndex(void) const
708 {
709 return m_context.getSparseQueueFamilyIndex();
710 }
711
712 const DeviceInterface &ShaderRenderCaseInstance::getDeviceInterface(void) const
713 {
714 return m_context.getDeviceInterface();
715 }
716
717 VkQueue ShaderRenderCaseInstance::getUniversalQueue(void) const
718 {
719 return m_context.getUniversalQueue();
720 }
721
722 VkQueue ShaderRenderCaseInstance::getSparseQueue(void) const
723 {
724 return m_context.getSparseQueue();
725 }
726
727 VkPhysicalDevice ShaderRenderCaseInstance::getPhysicalDevice(void) const
728 {
729 return m_context.getPhysicalDevice();
730 }
731
732 const InstanceInterface &ShaderRenderCaseInstance::getInstanceInterface(void) const
733 {
734 return m_context.getInstanceInterface();
735 }
736
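// Draws the quad grid with the test shaders, evaluates the same grid on the CPU and compares the two images.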
737 tcu::TestStatus ShaderRenderCaseInstance::iterate(void)
738 {
739 setup();
740
741 // Create quad grid.
742 const tcu::UVec2 viewportSize = getViewportSize();
743 const int width = viewportSize.x();
744 const int height = viewportSize.y();
745
746 m_quadGrid = de::MovePtr<QuadGrid>(
747 new QuadGrid(m_quadGridSize, width, height, getDefaultConstCoords(), m_userAttribTransforms, m_textures));
748
749 // Render result.
750 tcu::Surface resImage(width, height);
751
752 render(m_quadGrid->getNumVertices(), m_quadGrid->getNumTriangles(), m_quadGrid->getIndices(),
753 m_quadGrid->getConstCoords());
754 tcu::copy(resImage.getAccess(), m_resultImage.getAccess());
755
756 // Compute reference.
757 tcu::Surface refImage(width, height);
758 if (m_isVertexCase)
759 computeVertexReference(refImage, *m_quadGrid);
760 else
761 computeFragmentReference(refImage, *m_quadGrid);
762
763 // Compare.
764 const bool compareOk = compareImages(resImage, refImage, 0.2f);
765
766 if (compareOk)
767 return tcu::TestStatus::pass("Result image matches reference");
768 else
769 return tcu::TestStatus::fail("Image mismatch");
770 }
771
772 void ShaderRenderCaseInstance::setup(void)
773 {
774 m_resultImage = tcu::TextureLevel();
775 m_descriptorSetLayoutBuilder = de::MovePtr<DescriptorSetLayoutBuilder>(new DescriptorSetLayoutBuilder());
776 m_descriptorPoolBuilder = de::MovePtr<DescriptorPoolBuilder>(new DescriptorPoolBuilder());
777 m_descriptorSetUpdateBuilder = de::MovePtr<DescriptorSetUpdateBuilder>(new DescriptorSetUpdateBuilder());
778
779 m_uniformInfos.clear();
780 m_vertexBindingDescription.clear();
781 m_vertexAttributeDescription.clear();
782 m_vertexBuffers.clear();
783 m_vertexBufferAllocs.clear();
784 m_pushConstantRanges.clear();
785 }
786
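// Creates a host-visible uniform buffer, copies the data into it and records the descriptor info for later set updates.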
787 void ShaderRenderCaseInstance::setupUniformData(uint32_t bindingLocation, size_t size, const void *dataPtr)
788 {
789 const VkDevice vkDevice = getDevice();
790 const DeviceInterface &vk = getDeviceInterface();
791 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
792
793 const VkBufferCreateInfo uniformBufferParams = {
794 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
795 DE_NULL, // const void* pNext;
796 0u, // VkBufferCreateFlags flags;
797 size, // VkDeviceSize size;
798 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, // VkBufferUsageFlags usage;
799 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
800 1u, // uint32_t queueFamilyIndexCount;
801 &queueFamilyIndex // const uint32_t* pQueueFamilyIndices;
802 };
803
804 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &uniformBufferParams);
805 de::MovePtr<Allocation> alloc =
806 m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
807 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
808
809 deMemcpy(alloc->getHostPtr(), dataPtr, size);
810 flushAlloc(vk, vkDevice, *alloc);
811
812 de::MovePtr<BufferUniform> uniformInfo(new BufferUniform());
813 uniformInfo->type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
814 uniformInfo->descriptor = makeDescriptorBufferInfo(*buffer, 0u, size);
815 uniformInfo->location = bindingLocation;
816 uniformInfo->buffer = VkBufferSp(new vk::Unique<VkBuffer>(buffer));
817 uniformInfo->alloc = AllocationSp(alloc.release());
818
819 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniformInfo)));
820 }
821
822 void ShaderRenderCaseInstance::addUniform(uint32_t bindingLocation, vk::VkDescriptorType descriptorType,
823 size_t dataSize, const void *data)
824 {
825 m_descriptorSetLayoutBuilder->addSingleBinding(descriptorType, vk::VK_SHADER_STAGE_ALL);
826 m_descriptorPoolBuilder->addType(descriptorType);
827
828 setupUniformData(bindingLocation, dataSize, data);
829 }
830
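// Adds a per-vertex attribute: records the binding/attribute descriptions and uploads the data into a new vertex buffer.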
831 void ShaderRenderCaseInstance::addAttribute(uint32_t bindingLocation, vk::VkFormat format, uint32_t sizePerElement,
832 uint32_t count, const void *dataPtr)
833 {
834 // Portability requires the stride to be a multiple of minVertexInputBindingStrideAlignment;
835 // this value is usually 4 and the current tests meet this requirement, but
836 // if this changes in the future then this limit should be verified in checkSupport
837 #ifndef CTS_USES_VULKANSC
838 if (m_context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
839 ((sizePerElement % m_context.getPortabilitySubsetProperties().minVertexInputBindingStrideAlignment) != 0))
840 {
841 DE_FATAL("stride is not a multiple of minVertexInputBindingStrideAlignment");
842 }
843 #endif // CTS_USES_VULKANSC
844
845 // Add binding specification
846 const uint32_t binding = (uint32_t)m_vertexBindingDescription.size();
847 const VkVertexInputBindingDescription bindingDescription = {
848 binding, // uint32_t binding;
849 sizePerElement, // uint32_t stride;
850 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
851 };
852
853 m_vertexBindingDescription.push_back(bindingDescription);
854
855 // Add location and format specification
856 const VkVertexInputAttributeDescription attributeDescription = {
857 bindingLocation, // uint32_t location;
858 binding, // uint32_t binding;
859 format, // VkFormat format;
860 0u, // uint32_t offset;
861 };
862
863 m_vertexAttributeDescription.push_back(attributeDescription);
864
865 // Upload data to buffer
866 const VkDevice vkDevice = getDevice();
867 const DeviceInterface &vk = getDeviceInterface();
868 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
869
870 const VkDeviceSize inputSize = sizePerElement * count;
871 const VkBufferCreateInfo vertexBufferParams = {
872 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
873 DE_NULL, // const void* pNext;
874 0u, // VkBufferCreateFlags flags;
875 inputSize, // VkDeviceSize size;
876 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
877 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
878 1u, // uint32_t queueFamilyIndexCount;
879 &queueFamilyIndex // const uint32_t* pQueueFamilyIndices;
880 };
881
882 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
883 de::MovePtr<vk::Allocation> alloc =
884 m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
885 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
886
887 deMemcpy(alloc->getHostPtr(), dataPtr, (size_t)inputSize);
888 flushAlloc(vk, vkDevice, *alloc);
889
890 m_vertexBuffers.push_back(VkBufferSp(new vk::Unique<VkBuffer>(buffer)));
891 m_vertexBufferAllocs.push_back(AllocationSp(alloc.release()));
892 }
893
894 void ShaderRenderCaseInstance::useAttribute(uint32_t bindingLocation, BaseAttributeType type)
895 {
896 const EnabledBaseAttribute attribute = {
897 bindingLocation, // uint32_t location;
898 type // BaseAttributeType type;
899 };
900 m_enabledBaseAttributes.push_back(attribute);
901 }
902
903 void ShaderRenderCaseInstance::setupUniforms(const tcu::Vec4 &constCoords)
904 {
905 if (m_uniformSetup)
906 m_uniformSetup->setup(*this, constCoords);
907 }
908
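// Adds a uniform buffer filled with one of the predefined constants selected by BaseUniformType.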
909 void ShaderRenderCaseInstance::useUniform(uint32_t bindingLocation, BaseUniformType type)
910 {
911 #define UNIFORM_CASE(type, value) \
912 case type: \
913 addUniform(bindingLocation, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, value); \
914 break
915
916 switch (type)
917 {
918 // Bool
919 UNIFORM_CASE(UB_FALSE, 0);
920 UNIFORM_CASE(UB_TRUE, 1);
921
922 // BVec4
923 UNIFORM_CASE(UB4_FALSE, tcu::Vec4(0));
924 UNIFORM_CASE(UB4_TRUE, tcu::Vec4(1));
925
926 // Integer
927 UNIFORM_CASE(UI_ZERO, 0);
928 UNIFORM_CASE(UI_ONE, 1);
929 UNIFORM_CASE(UI_TWO, 2);
930 UNIFORM_CASE(UI_THREE, 3);
931 UNIFORM_CASE(UI_FOUR, 4);
932 UNIFORM_CASE(UI_FIVE, 5);
933 UNIFORM_CASE(UI_SIX, 6);
934 UNIFORM_CASE(UI_SEVEN, 7);
935 UNIFORM_CASE(UI_EIGHT, 8);
936 UNIFORM_CASE(UI_ONEHUNDREDONE, 101);
937
938 // IVec2
939 UNIFORM_CASE(UI2_MINUS_ONE, tcu::IVec2(-1));
940 UNIFORM_CASE(UI2_ZERO, tcu::IVec2(0));
941 UNIFORM_CASE(UI2_ONE, tcu::IVec2(1));
942 UNIFORM_CASE(UI2_TWO, tcu::IVec2(2));
943 UNIFORM_CASE(UI2_THREE, tcu::IVec2(3));
944 UNIFORM_CASE(UI2_FOUR, tcu::IVec2(4));
945 UNIFORM_CASE(UI2_FIVE, tcu::IVec2(5));
946
947 // IVec3
948 UNIFORM_CASE(UI3_MINUS_ONE, tcu::IVec3(-1));
949 UNIFORM_CASE(UI3_ZERO, tcu::IVec3(0));
950 UNIFORM_CASE(UI3_ONE, tcu::IVec3(1));
951 UNIFORM_CASE(UI3_TWO, tcu::IVec3(2));
952 UNIFORM_CASE(UI3_THREE, tcu::IVec3(3));
953 UNIFORM_CASE(UI3_FOUR, tcu::IVec3(4));
954 UNIFORM_CASE(UI3_FIVE, tcu::IVec3(5));
955
956 // IVec4
957 UNIFORM_CASE(UI4_MINUS_ONE, tcu::IVec4(-1));
958 UNIFORM_CASE(UI4_ZERO, tcu::IVec4(0));
959 UNIFORM_CASE(UI4_ONE, tcu::IVec4(1));
960 UNIFORM_CASE(UI4_TWO, tcu::IVec4(2));
961 UNIFORM_CASE(UI4_THREE, tcu::IVec4(3));
962 UNIFORM_CASE(UI4_FOUR, tcu::IVec4(4));
963 UNIFORM_CASE(UI4_FIVE, tcu::IVec4(5));
964
965 // Float
966 UNIFORM_CASE(UF_ZERO, 0.0f);
967 UNIFORM_CASE(UF_ONE, 1.0f);
968 UNIFORM_CASE(UF_TWO, 2.0f);
969 UNIFORM_CASE(UF_THREE, 3.0f);
970 UNIFORM_CASE(UF_FOUR, 4.0f);
971 UNIFORM_CASE(UF_FIVE, 5.0f);
972 UNIFORM_CASE(UF_SIX, 6.0f);
973 UNIFORM_CASE(UF_SEVEN, 7.0f);
974 UNIFORM_CASE(UF_EIGHT, 8.0f);
975
976 UNIFORM_CASE(UF_HALF, 1.0f / 2.0f);
977 UNIFORM_CASE(UF_THIRD, 1.0f / 3.0f);
978 UNIFORM_CASE(UF_FOURTH, 1.0f / 4.0f);
979 UNIFORM_CASE(UF_FIFTH, 1.0f / 5.0f);
980 UNIFORM_CASE(UF_SIXTH, 1.0f / 6.0f);
981 UNIFORM_CASE(UF_SEVENTH, 1.0f / 7.0f);
982 UNIFORM_CASE(UF_EIGHTH, 1.0f / 8.0f);
983
984 // Vec2
985 UNIFORM_CASE(UV2_MINUS_ONE, tcu::Vec2(-1.0f));
986 UNIFORM_CASE(UV2_ZERO, tcu::Vec2(0.0f));
987 UNIFORM_CASE(UV2_ONE, tcu::Vec2(1.0f));
988 UNIFORM_CASE(UV2_TWO, tcu::Vec2(2.0f));
989 UNIFORM_CASE(UV2_THREE, tcu::Vec2(3.0f));
990
991 UNIFORM_CASE(UV2_HALF, tcu::Vec2(1.0f / 2.0f));
992
993 // Vec3
994 UNIFORM_CASE(UV3_MINUS_ONE, tcu::Vec3(-1.0f));
995 UNIFORM_CASE(UV3_ZERO, tcu::Vec3(0.0f));
996 UNIFORM_CASE(UV3_ONE, tcu::Vec3(1.0f));
997 UNIFORM_CASE(UV3_TWO, tcu::Vec3(2.0f));
998 UNIFORM_CASE(UV3_THREE, tcu::Vec3(3.0f));
999
1000 UNIFORM_CASE(UV3_HALF, tcu::Vec3(1.0f / 2.0f));
1001
1002 // Vec4
1003 UNIFORM_CASE(UV4_MINUS_ONE, tcu::Vec4(-1.0f));
1004 UNIFORM_CASE(UV4_ZERO, tcu::Vec4(0.0f));
1005 UNIFORM_CASE(UV4_ONE, tcu::Vec4(1.0f));
1006 UNIFORM_CASE(UV4_TWO, tcu::Vec4(2.0f));
1007 UNIFORM_CASE(UV4_THREE, tcu::Vec4(3.0f));
1008
1009 UNIFORM_CASE(UV4_HALF, tcu::Vec4(1.0f / 2.0f));
1010
1011 UNIFORM_CASE(UV4_BLACK, tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
1012 UNIFORM_CASE(UV4_GRAY, tcu::Vec4(0.5f, 0.5f, 0.5f, 1.0f));
1013 UNIFORM_CASE(UV4_WHITE, tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
1014
1015 default:
1016 m_context.getTestContext().getLog()
1017 << tcu::TestLog::Message << "Unknown Uniform type: " << type << tcu::TestLog::EndMessage;
1018 break;
1019 }
1020
1021 #undef UNIFORM_CASE
1022 }
1023
1024 const tcu::UVec2 ShaderRenderCaseInstance::getViewportSize(void) const
1025 {
1026 return tcu::UVec2(de::min(m_renderSize.x(), MAX_RENDER_WIDTH), de::min(m_renderSize.y(), MAX_RENDER_HEIGHT));
1027 }
1028
1029 void ShaderRenderCaseInstance::setSampleCount(VkSampleCountFlagBits sampleCount)
1030 {
1031 m_sampleCount = sampleCount;
1032 }
1033
1034 bool ShaderRenderCaseInstance::isMultiSampling(void) const
1035 {
1036 return m_sampleCount != VK_SAMPLE_COUNT_1_BIT;
1037 }
1038
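// Uploads all mip levels and array layers to destImage by staging them in a host-visible buffer and copying buffer-to-image.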
1039 void ShaderRenderCaseInstance::uploadImage(const tcu::TextureFormat &texFormat, const TextureData &textureData,
1040 const tcu::Sampler &refSampler, uint32_t mipLevels, uint32_t arrayLayers,
1041 VkImage destImage)
1042 {
1043 const VkDevice vkDevice = getDevice();
1044 const DeviceInterface &vk = getDeviceInterface();
1045 const VkQueue queue = getUniversalQueue();
1046 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
1047
1048 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1049 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1050 uint32_t bufferSize = 0u;
1051 Move<VkBuffer> buffer;
1052 de::MovePtr<Allocation> bufferAlloc;
1053 std::vector<VkBufferImageCopy> copyRegions;
1054 std::vector<uint32_t> offsetMultiples;
1055
1056 offsetMultiples.push_back(4u);
1057 offsetMultiples.push_back(texFormat.getPixelSize());
1058
1059 // Calculate buffer size
1060 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1061 {
1062 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1063 {
1064 const tcu::ConstPixelBufferAccess &access = *lit;
1065
1066 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1067 bufferSize +=
1068 access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1069 }
1070 }
1071
1072 // Create source buffer
1073 {
1074 const VkBufferCreateInfo bufferParams = {
1075 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1076 DE_NULL, // const void* pNext;
1077 0u, // VkBufferCreateFlags flags;
1078 bufferSize, // VkDeviceSize size;
1079 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1080 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1081 0u, // uint32_t queueFamilyIndexCount;
1082 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1083 };
1084
1085 buffer = createBuffer(vk, vkDevice, &bufferParams);
1086 bufferAlloc =
1087 m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1088 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1089 }
1090
1091 // Get copy regions and write buffer data
1092 {
1093 uint32_t layerDataOffset = 0;
1094 uint8_t *destPtr = (uint8_t *)bufferAlloc->getHostPtr();
1095
1096 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1097 {
1098 const TextureLayerData &layerData = textureData[levelNdx];
1099
1100 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1101 {
1102 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1103
1104 const tcu::ConstPixelBufferAccess &access = layerData[layerNdx];
1105 const tcu::PixelBufferAccess destAccess(access.getFormat(), access.getSize(),
1106 destPtr + layerDataOffset);
1107
1108 const VkBufferImageCopy layerRegion = {
1109 layerDataOffset, // VkDeviceSize bufferOffset;
1110 (uint32_t)access.getWidth(), // uint32_t bufferRowLength;
1111 (uint32_t)access.getHeight(), // uint32_t bufferImageHeight;
1112 {
1113 // VkImageSubresourceLayers imageSubresource;
1114 aspectMask, // VkImageAspectFlags aspectMask;
1115 (uint32_t)levelNdx, // uint32_t mipLevel;
1116 (uint32_t)layerNdx, // uint32_t baseArrayLayer;
1117 1u // uint32_t layerCount;
1118 },
1119 {0u, 0u, 0u}, // VkOffset3D imageOffset;
1120 { // VkExtent3D imageExtent;
1121 (uint32_t)access.getWidth(), (uint32_t)access.getHeight(), (uint32_t)access.getDepth()}};
1122
1123 copyRegions.push_back(layerRegion);
1124 tcu::copy(destAccess, access);
1125
1126 layerDataOffset +=
1127 access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1128 }
1129 }
1130 }
1131
1132 flushAlloc(vk, vkDevice, *bufferAlloc);
1133
1134 if (m_externalCommandPool.get() != DE_NULL)
1135 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions, DE_NULL, aspectMask,
1136 mipLevels, arrayLayers, destImage, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1137 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT,
1138 &(m_externalCommandPool.get()->get()));
1139 else
1140 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions, DE_NULL, aspectMask,
1141 mipLevels, arrayLayers, destImage);
1142 }
1143
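// Clears every mip level and layer of destImage and leaves it in SHADER_READ_ONLY_OPTIMAL layout.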
1144 void ShaderRenderCaseInstance::clearImage(const tcu::Sampler &refSampler, uint32_t mipLevels, uint32_t arrayLayers,
1145 VkImage destImage)
1146 {
1147 const VkDevice vkDevice = m_context.getDevice();
1148 const DeviceInterface &vk = m_context.getDeviceInterface();
1149 const VkQueue queue = m_context.getUniversalQueue();
1150 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1151
1152 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1153 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1154 Move<VkCommandPool> cmdPool;
1155 Move<VkCommandBuffer> cmdBuffer;
1156
1157 VkClearValue clearValue;
1158 deMemset(&clearValue, 0, sizeof(clearValue));
1159
1160 // Create command pool
1161 VkCommandPool activeCmdPool;
1162 if (m_externalCommandPool.get() == DE_NULL)
1163 {
1164 // Create local command pool
1165 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1166 activeCmdPool = *cmdPool;
1167 }
1168 else
1169 {
1170 // Use external command pool if available
1171 activeCmdPool = m_externalCommandPool.get()->get();
1172 }
1173 // Create command buffer
1174 cmdBuffer = allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1175
1176 const VkImageMemoryBarrier preImageBarrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1177 DE_NULL, // const void* pNext;
1178 0u, // VkAccessFlags srcAccessMask;
1179 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1180 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1181 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1182 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1183 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1184 destImage, // VkImage image;
1185 {
1186 // VkImageSubresourceRange subresourceRange;
1187 aspectMask, // VkImageAspectFlags aspectMask;
1188 0u, // uint32_t baseMipLevel;
1189 mipLevels, // uint32_t levelCount;
1190 0u, // uint32_t baseArrayLayer;
1191 arrayLayers // uint32_t layerCount;
1192 }};
1193
1194 const VkImageMemoryBarrier postImageBarrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1195 DE_NULL, // const void* pNext;
1196 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1197 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1198 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1199 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1200 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
1201 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
1202 destImage, // VkImage image;
1203 {
1204 // VkImageSubresourceRange subresourceRange;
1205 aspectMask, // VkImageAspectFlags aspectMask;
1206 0u, // uint32_t baseMipLevel;
1207 mipLevels, // uint32_t levelCount;
1208 0u, // uint32_t baseArrayLayer;
1209 arrayLayers // uint32_t layerCount;
1210 }};
1211
1212 const VkImageSubresourceRange clearRange = {
1213 aspectMask, // VkImageAspectFlags aspectMask;
1214 0u, // uint32_t baseMipLevel;
1215 mipLevels, // uint32_t levelCount;
1216 0u, // uint32_t baseArrayLayer;
1217 arrayLayers // uint32_t layerCount;
1218 };
1219
1220 // Copy buffer to image
1221 beginCommandBuffer(vk, *cmdBuffer);
1222 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
1223 0, (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL, 1,
1224 &preImageBarrier);
1225 if (aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
1226 {
1227 vk.cmdClearColorImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1,
1228 &clearRange);
1229 }
1230 else
1231 {
1232 vk.cmdClearDepthStencilImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1233 &clearValue.depthStencil, 1, &clearRange);
1234 }
1235 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
1236 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
1237 (const VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);
1238 endCommandBuffer(vk, *cmdBuffer);
1239
1240 submitCommandsAndWait(vk, vkDevice, queue, cmdBuffer.get());
1241 }
1242
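// Returns the extent of the given mip level, clamping each dimension to at least 1.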
1243 VkExtent3D mipLevelExtents(const VkExtent3D &baseExtents, const uint32_t mipLevel)
1244 {
1245 VkExtent3D result;
1246
1247 result.width = std::max(baseExtents.width >> mipLevel, 1u);
1248 result.height = std::max(baseExtents.height >> mipLevel, 1u);
1249 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
1250
1251 return result;
1252 }
1253
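// Divides extent by divisor component-wise, rounding up.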
1254 tcu::UVec3 alignedDivide(const VkExtent3D &extent, const VkExtent3D &divisor)
1255 {
1256 tcu::UVec3 result;
1257
1258 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
1259 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
1260 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
1261
1262 return result;
1263 }
1264
1265 bool isImageSizeSupported(const VkImageType imageType, const tcu::UVec3 &imageSize,
1266 const vk::VkPhysicalDeviceLimits &limits)
1267 {
1268 switch (imageType)
1269 {
1270 case VK_IMAGE_TYPE_1D:
1271 return (imageSize.x() <= limits.maxImageDimension1D && imageSize.y() == 1 && imageSize.z() == 1);
1272 case VK_IMAGE_TYPE_2D:
1273 return (imageSize.x() <= limits.maxImageDimension2D && imageSize.y() <= limits.maxImageDimension2D &&
1274 imageSize.z() == 1);
1275 case VK_IMAGE_TYPE_3D:
1276 return (imageSize.x() <= limits.maxImageDimension3D && imageSize.y() <= limits.maxImageDimension3D &&
1277 imageSize.z() <= limits.maxImageDimension3D);
1278 default:
1279 DE_FATAL("Unknown image type");
1280 return false;
1281 }
1282 }
1283
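// Throws NotSupportedError if the device lacks the sparse binding/residency features or sparse format support needed for the image.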
1284 void ShaderRenderCaseInstance::checkSparseSupport(const VkImageCreateInfo &imageInfo) const
1285 {
1286 #ifdef CTS_USES_VULKANSC
1287 TCU_THROW(NotSupportedError, "Vulkan SC does not support sparse operations");
1288 #endif // CTS_USES_VULKANSC
1289 const InstanceInterface &instance = getInstanceInterface();
1290 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1291 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
1292 #ifndef CTS_USES_VULKANSC
1293 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
1294 getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageInfo.format, imageInfo.imageType,
1295 imageInfo.samples, imageInfo.usage, imageInfo.tiling);
1296 #endif // CTS_USES_VULKANSC
1297
1298 if (!deviceFeatures.shaderResourceResidency)
1299 TCU_THROW(NotSupportedError, "Required feature: shaderResourceResidency.");
1300
1301 if (!deviceFeatures.sparseBinding)
1302 TCU_THROW(NotSupportedError, "Required feature: sparseBinding.");
1303
1304 if (imageInfo.imageType == VK_IMAGE_TYPE_2D && !deviceFeatures.sparseResidencyImage2D)
1305 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage2D.");
1306
1307 if (imageInfo.imageType == VK_IMAGE_TYPE_3D && !deviceFeatures.sparseResidencyImage3D)
1308 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage3D.");
1309 #ifndef CTS_USES_VULKANSC
1310 if (sparseImageFormatPropVec.size() == 0)
1311 TCU_THROW(NotSupportedError, "The image format does not support sparse operations");
1312 #endif // CTS_USES_VULKANSC
1313 }
1314
1315 #ifndef CTS_USES_VULKANSC
1316 void ShaderRenderCaseInstance::uploadSparseImage(const tcu::TextureFormat &texFormat, const TextureData &textureData,
1317 const tcu::Sampler &refSampler, const uint32_t mipLevels,
1318 const uint32_t arrayLayers, const VkImage sparseImage,
1319 const VkImageCreateInfo &imageCreateInfo, const tcu::UVec3 texSize)
1320 {
1321 const VkDevice vkDevice = getDevice();
1322 const DeviceInterface &vk = getDeviceInterface();
1323 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1324 const VkQueue queue = getUniversalQueue();
1325 const VkQueue sparseQueue = getSparseQueue();
1326 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
1327 const InstanceInterface &instance = getInstanceInterface();
1328 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
1329 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1330 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1331 const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(vk, vkDevice));
1332 Move<VkBuffer> buffer;
1333 uint32_t bufferSize = 0u;
1334 de::MovePtr<Allocation> bufferAlloc;
1335 std::vector<VkBufferImageCopy> copyRegions;
1336 std::vector<uint32_t> offsetMultiples;
1337
1338 offsetMultiples.push_back(4u);
1339 offsetMultiples.push_back(texFormat.getPixelSize());
1340
1341 if (isImageSizeSupported(imageCreateInfo.imageType, texSize, deviceProperties.limits) == false)
1342 TCU_THROW(NotSupportedError, "Image size not supported for device.");
1343
1344 allocateAndBindSparseImage(vk, vkDevice, physicalDevice, instance, imageCreateInfo, *imageMemoryBindSemaphore,
1345 sparseQueue, m_memAlloc, m_allocations, texFormat, sparseImage);
1346
1347 // Calculate buffer size
1348 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1349 {
1350 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1351 {
1352 const tcu::ConstPixelBufferAccess &access = *lit;
1353
1354 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1355 bufferSize +=
1356 access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1357 }
1358 }
1359
1360 {
1361 // Create source buffer
1362 const VkBufferCreateInfo bufferParams = {
1363 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1364 DE_NULL, // const void* pNext;
1365 0u, // VkBufferCreateFlags flags;
1366 bufferSize, // VkDeviceSize size;
1367 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1368 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1369 0u, // uint32_t queueFamilyIndexCount;
1370 DE_NULL, // const uint32_t* pQueueFamilyIndices;
1371 };
1372
1373 buffer = createBuffer(vk, vkDevice, &bufferParams);
1374 bufferAlloc =
1375 m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1376
1377 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1378 }
1379
1380 // Get copy regions and write buffer data
1381 {
1382 uint32_t layerDataOffset = 0;
1383 uint8_t *destPtr = (uint8_t *)bufferAlloc->getHostPtr();
1384
1385 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1386 {
1387 const TextureLayerData &layerData = textureData[levelNdx];
1388
1389 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
1390 {
1391 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1392
1393 const tcu::ConstPixelBufferAccess &access = layerData[layerNdx];
1394 const tcu::PixelBufferAccess destAccess(access.getFormat(), access.getSize(),
1395 destPtr + layerDataOffset);
1396
1397 const VkBufferImageCopy layerRegion = {
1398 layerDataOffset, // VkDeviceSize bufferOffset;
1399 (uint32_t)access.getWidth(), // uint32_t bufferRowLength;
1400 (uint32_t)access.getHeight(), // uint32_t bufferImageHeight;
1401 {
1402 // VkImageSubresourceLayers imageSubresource;
1403 aspectMask, // VkImageAspectFlags aspectMask;
1404 (uint32_t)levelNdx, // uint32_t mipLevel;
1405 (uint32_t)layerNdx, // uint32_t baseArrayLayer;
1406 1u // uint32_t layerCount;
1407 },
1408 {0u, 0u, 0u}, // VkOffset3D imageOffset;
1409 { // VkExtent3D imageExtent;
1410 (uint32_t)access.getWidth(), (uint32_t)access.getHeight(), (uint32_t)access.getDepth()}};
1411
1412 copyRegions.push_back(layerRegion);
1413 tcu::copy(destAccess, access);
1414
1415 layerDataOffset +=
1416 access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1417 }
1418 }
1419 }
1420 copyBufferToImage(vk, vkDevice, queue, queueFamilyIndex, *buffer, bufferSize, copyRegions,
1421 &(*imageMemoryBindSemaphore), aspectMask, mipLevels, arrayLayers, sparseImage);
1422 }
1423 #endif // CTS_USES_VULKANSC
1424
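// Collects the texture's level/layer data for the given binding so it can be uploaded to a Vulkan image of the matching type.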
1425 void ShaderRenderCaseInstance::useSampler(uint32_t bindingLocation, uint32_t textureId)
1426 {
1427 DE_ASSERT(textureId < m_textures.size());
1428
1429 const TextureBinding &textureBinding = *m_textures[textureId];
1430 const TextureBinding::Type textureType = textureBinding.getType();
1431 const tcu::Sampler &refSampler = textureBinding.getSampler();
1432 const TextureBinding::Parameters &textureParams = textureBinding.getParameters();
1433 const bool isMSTexture = textureParams.samples != vk::VK_SAMPLE_COUNT_1_BIT;
1434 uint32_t mipLevels = 1u;
1435 uint32_t arrayLayers = 1u;
1436 tcu::TextureFormat texFormat;
1437 tcu::UVec3 texSize;
1438 TextureData textureData;
1439
1440 if (textureType == TextureBinding::TYPE_2D)
1441 {
1442 const tcu::Texture2D &texture = textureBinding.get2D();
1443
1444 texFormat = texture.getFormat();
1445 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1446 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1447 arrayLayers = 1u;
1448
1449 textureData.resize(mipLevels);
1450
1451 for (uint32_t level = 0; level < mipLevels; ++level)
1452 {
1453 if (texture.isLevelEmpty(level))
1454 continue;
1455
1456 textureData[level].push_back(texture.getLevel(level));
1457 }
1458 }
1459 else if (textureType == TextureBinding::TYPE_CUBE_MAP)
1460 {
1461 const tcu::TextureCube &texture = textureBinding.getCube();
1462
1463 texFormat = texture.getFormat();
1464 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1u);
1465 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1466 arrayLayers = 6u;
1467
1468 static const tcu::CubeFace cubeFaceMapping[tcu::CUBEFACE_LAST] = {
1469 tcu::CUBEFACE_POSITIVE_X, tcu::CUBEFACE_NEGATIVE_X, tcu::CUBEFACE_POSITIVE_Y,
1470 tcu::CUBEFACE_NEGATIVE_Y, tcu::CUBEFACE_POSITIVE_Z, tcu::CUBEFACE_NEGATIVE_Z};
1471
1472 textureData.resize(mipLevels);
1473
1474 for (uint32_t level = 0; level < mipLevels; ++level)
1475 {
1476 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
1477 {
1478 tcu::CubeFace face = cubeFaceMapping[faceNdx];
1479
1480 if (texture.isLevelEmpty(face, level))
1481 continue;
1482
1483 textureData[level].push_back(texture.getLevelFace(level, face));
1484 }
1485 }
1486 }
1487 else if (textureType == TextureBinding::TYPE_2D_ARRAY)
1488 {
1489 const tcu::Texture2DArray &texture = textureBinding.get2DArray();
1490
1491 texFormat = texture.getFormat();
1492 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1493 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1494 arrayLayers = (uint32_t)texture.getNumLayers();
1495
1496 textureData.resize(mipLevels);
1497
1498 for (uint32_t level = 0; level < mipLevels; ++level)
1499 {
1500 if (texture.isLevelEmpty(level))
1501 continue;
1502
1503 const tcu::ConstPixelBufferAccess &levelLayers = texture.getLevel(level);
1504 const uint32_t layerSize =
1505 levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1506
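// tcu stores all layers of a mip level contiguously, so each layer is exposed as a
// separate access by offsetting into the level's data.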
1507 for (uint32_t layer = 0; layer < arrayLayers; ++layer)
1508 {
1509 const uint32_t layerOffset = layerSize * layer;
1510 tcu::ConstPixelBufferAccess layerData(levelLayers.getFormat(), levelLayers.getWidth(),
1511 levelLayers.getHeight(), 1,
1512 (uint8_t *)levelLayers.getDataPtr() + layerOffset);
1513 textureData[level].push_back(layerData);
1514 }
1515 }
1516 }
1517 else if (textureType == TextureBinding::TYPE_3D)
1518 {
1519 const tcu::Texture3D &texture = textureBinding.get3D();
1520
1521 texFormat = texture.getFormat();
1522 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), texture.getDepth());
1523 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1524 arrayLayers = 1u;
1525
1526 textureData.resize(mipLevels);
1527
1528 for (uint32_t level = 0; level < mipLevels; ++level)
1529 {
1530 if (texture.isLevelEmpty(level))
1531 continue;
1532
1533 textureData[level].push_back(texture.getLevel(level));
1534 }
1535 }
1536 else if (textureType == TextureBinding::TYPE_1D)
1537 {
1538 const tcu::Texture1D &texture = textureBinding.get1D();
1539
1540 texFormat = texture.getFormat();
1541 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1542 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1543 arrayLayers = 1u;
1544
1545 textureData.resize(mipLevels);
1546
1547 for (uint32_t level = 0; level < mipLevels; ++level)
1548 {
1549 if (texture.isLevelEmpty(level))
1550 continue;
1551
1552 textureData[level].push_back(texture.getLevel(level));
1553 }
1554 }
1555 else if (textureType == TextureBinding::TYPE_1D_ARRAY)
1556 {
1557 const tcu::Texture1DArray &texture = textureBinding.get1DArray();
1558
1559 texFormat = texture.getFormat();
1560 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1561 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1562 arrayLayers = (uint32_t)texture.getNumLayers();
1563
1564 textureData.resize(mipLevels);
1565
1566 for (uint32_t level = 0; level < mipLevels; ++level)
1567 {
1568 if (texture.isLevelEmpty(level))
1569 continue;
1570
1571 const tcu::ConstPixelBufferAccess &levelLayers = texture.getLevel(level);
1572 const uint32_t layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
1573
1574 for (uint32_t layer = 0; layer < arrayLayers; ++layer)
1575 {
1576 const uint32_t layerOffset = layerSize * layer;
1577 tcu::ConstPixelBufferAccess layerData(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1,
1578 (uint8_t *)levelLayers.getDataPtr() + layerOffset);
1579 textureData[level].push_back(layerData);
1580 }
1581 }
1582 }
1583 else if (textureType == TextureBinding::TYPE_CUBE_ARRAY)
1584 {
1585 const tcu::TextureCubeArray &texture = textureBinding.getCubeArray();
1586 texFormat = texture.getFormat();
1587 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1);
1588 mipLevels = isMSTexture ? 1u : (uint32_t)texture.getNumLevels();
1589 arrayLayers = texture.getDepth();
1590
1591 textureData.resize(mipLevels);
1592
1593 for (uint32_t level = 0; level < mipLevels; ++level)
1594 {
1595 if (texture.isLevelEmpty(level))
1596 continue;
1597
1598 const tcu::ConstPixelBufferAccess &levelLayers = texture.getLevel(level);
1599 const uint32_t layerSize =
1600 levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1601
1602 for (uint32_t layer = 0; layer < arrayLayers; ++layer)
1603 {
1604 const uint32_t layerOffset = layerSize * layer;
1605 tcu::ConstPixelBufferAccess layerData(levelLayers.getFormat(), levelLayers.getWidth(),
1606 levelLayers.getHeight(), 1,
1607 (uint8_t *)levelLayers.getDataPtr() + layerOffset);
1608 textureData[level].push_back(layerData);
1609 }
1610 }
1611 }
1612 else
1613 {
1614 TCU_THROW(InternalError, "Invalid texture type");
1615 }
1616
1617 createSamplerUniform(bindingLocation, textureType, textureBinding.getParameters().initialization, texFormat,
1618 texSize, textureData, refSampler, mipLevels, arrayLayers, textureParams);
1619 }
1620
1621 void ShaderRenderCaseInstance::setPushConstantRanges(const uint32_t rangeCount,
1622 const vk::VkPushConstantRange *const pcRanges)
1623 {
1624 m_pushConstantRanges.clear();
1625 for (uint32_t i = 0; i < rangeCount; ++i)
1626 {
1627 m_pushConstantRanges.push_back(pcRanges[i]);
1628 }
1629 }
1630
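// Default implementation records no push constant updates; test cases that register
// ranges via setPushConstantRanges are expected to override this hook.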
1631 void ShaderRenderCaseInstance::updatePushConstants(vk::VkCommandBuffer, vk::VkPipelineLayout)
1632 {
1633 }
1634
1635 void ShaderRenderCaseInstance::createSamplerUniform(uint32_t bindingLocation, TextureBinding::Type textureType,
1636 TextureBinding::Init textureInit,
1637 const tcu::TextureFormat &texFormat, const tcu::UVec3 texSize,
1638 const TextureData &textureData, const tcu::Sampler &refSampler,
1639 uint32_t mipLevels, uint32_t arrayLayers,
1640 TextureBinding::Parameters textureParams)
1641 {
1642 const VkDevice vkDevice = getDevice();
1643 const DeviceInterface &vk = getDeviceInterface();
1644 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
1645 const uint32_t sparseFamilyIndex =
1646 (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE) ? getSparseQueueFamilyIndex() : queueFamilyIndex;
1647
1648 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1649
1650 // When isShadowSampler is true, the mapSampler util will set compareEnabled in
1651 // VkSamplerCreateInfo to true; in portability this functionality is behind a
1652 // feature flag. Note that this is only a safety check: the requirement is known at the
1653 // TestCase level and NotSupportedError should be thrown from checkSupport.
1654 #ifndef CTS_USES_VULKANSC
1655 if (isShadowSampler && m_context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
1656 !m_context.getPortabilitySubsetFeatures().mutableComparisonSamplers)
1657 {
1658 DE_FATAL("mutableComparisonSamplers support should be checked in checkSupport");
1659 }
1660 #endif // CTS_USES_VULKANSC
1661
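// Shadow samplers sample the depth aspect; everything else uses the color aspect.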
1662 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1663 const VkImageViewType imageViewType = textureTypeToImageViewType(textureType);
1664 const VkImageType imageType = viewTypeToImageType(imageViewType);
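// Concurrent sharing (and the two queue family indices below) is only needed when the
// sparse binding queue family differs from the universal queue family.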
1665 const VkSharingMode sharingMode =
1666 (queueFamilyIndex != sparseFamilyIndex) ? VK_SHARING_MODE_CONCURRENT : VK_SHARING_MODE_EXCLUSIVE;
1667 const VkFormat format = mapTextureFormat(texFormat);
1668 const VkImageUsageFlags imageUsageFlags = textureUsageFlags();
1669 const VkImageCreateFlags imageCreateFlags = textureCreateFlags(imageViewType, m_imageBackingMode);
1670
1671 const uint32_t queueIndexCount = (queueFamilyIndex != sparseFamilyIndex) ? 2 : 1;
1672 const uint32_t queueIndices[] = {queueFamilyIndex, sparseFamilyIndex};
1673
1674 Move<VkImage> vkTexture;
1675 de::MovePtr<Allocation> allocation;
1676
1677 // Create image
1678 const VkImageCreateInfo imageParams = {
1679 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1680 DE_NULL, // const void* pNext;
1681 imageCreateFlags, // VkImageCreateFlags flags;
1682 imageType, // VkImageType imageType;
1683 format, // VkFormat format;
1684 { // VkExtent3D extent;
1685 texSize.x(), texSize.y(), texSize.z()},
1686 mipLevels, // uint32_t mipLevels;
1687 arrayLayers, // uint32_t arrayLayers;
1688 textureParams.samples, // VkSampleCountFlagBits samples;
1689 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1690 imageUsageFlags, // VkImageUsageFlags usage;
1691 sharingMode, // VkSharingMode sharingMode;
1692 queueIndexCount, // uint32_t queueFamilyIndexCount;
1693 queueIndices, // const uint32_t* pQueueFamilyIndices;
1694 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1695 };
1696
1697 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
1698 {
1699 checkSparseSupport(imageParams);
1700 }
1701
1702 vkTexture = createImage(vk, vkDevice, &imageParams);
1703 allocation = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *vkTexture), MemoryRequirement::Any);
1704
1705 if (m_imageBackingMode != IMAGE_BACKING_MODE_SPARSE)
1706 {
1707 VK_CHECK(vk.bindImageMemory(vkDevice, *vkTexture, allocation->getMemory(), allocation->getOffset()));
1708 }
1709
1710 switch (textureInit)
1711 {
1712 case TextureBinding::INIT_UPLOAD_DATA:
1713 {
1714 // upload*Image functions use cmdCopyBufferToImage, which is invalid for multisample images
1715 DE_ASSERT(textureParams.samples == VK_SAMPLE_COUNT_1_BIT);
1716
1717 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
1718 {
1719 #ifndef CTS_USES_VULKANSC
1720 uploadSparseImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture, imageParams,
1721 texSize);
1722 #endif // CTS_USES_VULKANSC
1723 }
1724 else
1725 {
1726 // Upload texture data
1727 uploadImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture);
1728 }
1729 break;
1730 }
1731 case TextureBinding::INIT_CLEAR:
1732 clearImage(refSampler, mipLevels, arrayLayers, *vkTexture);
1733 break;
1734 default:
1735 DE_FATAL("Impossible");
1736 }
1737
1738 // Create sampler
1739 const auto &minMaxLod = textureParams.minMaxLod;
1740 const VkSamplerCreateInfo samplerParams =
1741 (minMaxLod ? mapSampler(refSampler, texFormat, minMaxLod.get().minLod, minMaxLod.get().maxLod) :
1742 mapSampler(refSampler, texFormat));
1743 Move<VkSampler> sampler = createSampler(vk, vkDevice, &samplerParams);
1744 const uint32_t baseMipLevel = textureParams.baseMipLevel;
1745 const vk::VkComponentMapping components = textureParams.componentMapping;
1746 const VkImageViewCreateInfo viewParams = {
1747 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1748 DE_NULL, // const void* pNext;
1749 0u, // VkImageViewCreateFlags flags;
1750 *vkTexture, // VkImage image;
1751 imageViewType, // VkImageViewType viewType;
1752 format, // VkFormat format;
1753 components, // VkComponentMapping components;
1754 {
1755 aspectMask, // VkImageAspectFlags aspectMask;
1756 baseMipLevel, // uint32_t baseMipLevel;
1757 mipLevels - baseMipLevel, // uint32_t levelCount;
1758 0, // uint32_t baseArrayLayer;
1759 arrayLayers // uint32_t layerCount;
1760 }, // VkImageSubresourceRange subresourceRange;
1761 };
1762
1763 Move<VkImageView> imageView = createImageView(vk, vkDevice, &viewParams);
1764
1765 const vk::VkDescriptorImageInfo descriptor = {
1766 sampler.get(), // VkSampler sampler;
1767 imageView.get(), // VkImageView imageView;
1768 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout imageLayout;
1769 };
1770
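// Register the combined image sampler with the descriptor set layout and pool
// builders; the descriptor itself is written when render() builds the descriptor set.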
1771 de::MovePtr<SamplerUniform> uniform(new SamplerUniform());
1772 uniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1773 uniform->descriptor = descriptor;
1774 uniform->location = bindingLocation;
1775 uniform->image = VkImageSp(new vk::Unique<VkImage>(vkTexture));
1776 uniform->imageView = VkImageViewSp(new vk::Unique<VkImageView>(imageView));
1777 uniform->sampler = VkSamplerSp(new vk::Unique<VkSampler>(sampler));
1778 uniform->alloc = AllocationSp(allocation.release());
1779
1780 m_descriptorSetLayoutBuilder->addSingleSamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
1781 vk::VK_SHADER_STAGE_ALL, DE_NULL);
1782 m_descriptorPoolBuilder->addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
1783
1784 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniform)));
1785 }
1786
1787 void ShaderRenderCaseInstance::setupDefaultInputs(void)
1788 {
1789 /* Configuration of the vertex input attributes:
1790 a_position is at location 0
1791 a_coords is at location 1
1792 a_unitCoords is at location 2
1793 a_one is at location 3
1794
1795 User attributes start at location 4.
1796 */
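/* For reference, a vertex shader consuming the default inputs would typically declare
   an interface roughly like the following (illustrative only; the actual shaders are
   supplied by each test case):

       layout(location = 0) in highp vec4  a_position;
       layout(location = 1) in highp vec4  a_coords;
       layout(location = 2) in highp vec4  a_unitCoords;
       layout(location = 3) in highp float a_one;
*/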
1797
1798 DE_ASSERT(m_quadGrid);
1799 const QuadGrid &quadGrid = *m_quadGrid;
1800
1801 addAttribute(0u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(),
1802 quadGrid.getPositions());
1803 addAttribute(1u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getCoords());
1804 addAttribute(2u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(),
1805 quadGrid.getUnitCoords());
1806 addAttribute(3u, VK_FORMAT_R32_SFLOAT, sizeof(float), quadGrid.getNumVertices(), quadGrid.getAttribOne());
1807
1808 static const struct
1809 {
1810 BaseAttributeType type;
1811 int userNdx;
1812 } userAttributes[] = {{A_IN0, 0}, {A_IN1, 1}, {A_IN2, 2}, {A_IN3, 3}};
1813
1814 static const struct
1815 {
1816 BaseAttributeType matrixType;
1817 int numCols;
1818 int numRows;
1819 } matrices[] = {{MAT2, 2, 2}, {MAT2x3, 2, 3}, {MAT2x4, 2, 4}, {MAT3x2, 3, 2}, {MAT3, 3, 3},
1820 {MAT3x4, 3, 4}, {MAT4x2, 4, 2}, {MAT4x3, 4, 3}, {MAT4, 4, 4}};
1821
1822 for (size_t attrNdx = 0; attrNdx < m_enabledBaseAttributes.size(); attrNdx++)
1823 {
1824 for (int userNdx = 0; userNdx < DE_LENGTH_OF_ARRAY(userAttributes); userNdx++)
1825 {
1826 if (userAttributes[userNdx].type != m_enabledBaseAttributes[attrNdx].type)
1827 continue;
1828
1829 addAttribute(m_enabledBaseAttributes[attrNdx].location, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4),
1830 quadGrid.getNumVertices(), quadGrid.getUserAttrib(userNdx));
1831 }
1832
1833 for (int matNdx = 0; matNdx < DE_LENGTH_OF_ARRAY(matrices); matNdx++)
1834 {
1835
1836 if (matrices[matNdx].matrixType != m_enabledBaseAttributes[attrNdx].type)
1837 continue;
1838
1839 const int numCols = matrices[matNdx].numCols;
1840
1841 for (int colNdx = 0; colNdx < numCols; colNdx++)
1842 {
1843 addAttribute(m_enabledBaseAttributes[attrNdx].location + colNdx, VK_FORMAT_R32G32B32A32_SFLOAT,
1844 (uint32_t)(4 * sizeof(float)), quadGrid.getNumVertices(), quadGrid.getUserAttrib(colNdx));
1845 }
1846 }
1847 }
1848 }
1849
1850 void ShaderRenderCaseInstance::render(uint32_t numVertices, uint32_t numTriangles, const uint16_t *indices,
1851 const tcu::Vec4 &constCoords)
1852 {
1853 render(numVertices, numTriangles * 3, indices, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, constCoords);
1854 }
1855
1856 void ShaderRenderCaseInstance::render(uint32_t numVertices, uint32_t numIndices, const uint16_t *indices,
1857 VkPrimitiveTopology topology, const tcu::Vec4 &constCoords)
1858 {
1859 const VkDevice vkDevice = getDevice();
1860 const DeviceInterface &vk = getDeviceInterface();
1861 const VkQueue queue = getUniversalQueue();
1862 const uint32_t queueFamilyIndex = getUniversalQueueFamilyIndex();
1863
1864 vk::Move<vk::VkImage> colorImage;
1865 de::MovePtr<vk::Allocation> colorImageAlloc;
1866 vk::Move<vk::VkImageView> colorImageView;
1867 vk::Move<vk::VkImage> resolvedImage;
1868 de::MovePtr<vk::Allocation> resolvedImageAlloc;
1869 vk::Move<vk::VkImageView> resolvedImageView;
1870 vk::Move<vk::VkRenderPass> renderPass;
1871 vk::Move<vk::VkFramebuffer> framebuffer;
1872 vk::Move<vk::VkPipelineLayout> pipelineLayout;
1873 vk::Move<vk::VkPipeline> graphicsPipeline;
1874 vk::Move<vk::VkShaderModule> vertexShaderModule;
1875 vk::Move<vk::VkShaderModule> fragmentShaderModule;
1876 vk::Move<vk::VkBuffer> indexBuffer;
1877 de::MovePtr<vk::Allocation> indexBufferAlloc;
1878 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
1879 vk::Move<vk::VkDescriptorPool> descriptorPool;
1880 vk::Move<vk::VkDescriptorSet> descriptorSet;
1881 vk::Move<vk::VkCommandPool> cmdPool;
1882 vk::Move<vk::VkCommandBuffer> cmdBuffer;
1883
1884 // Create color image
1885 {
1886 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1887 VkImageFormatProperties properties;
1888
1889 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(
1890 getPhysicalDevice(), m_colorFormat, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, imageUsage, 0u,
1891 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
1892 {
1893 TCU_THROW(NotSupportedError, "Format not supported");
1894 }
1895
1896 if ((properties.sampleCounts & m_sampleCount) != m_sampleCount)
1897 {
1898 TCU_THROW(NotSupportedError, "Format not supported");
1899 }
1900
1901 const VkImageCreateInfo colorImageParams = {
1902 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1903 DE_NULL, // const void* pNext;
1904 0u, // VkImageCreateFlags flags;
1905 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1906 m_colorFormat, // VkFormat format;
1907 {m_renderSize.x(), m_renderSize.y(), 1u}, // VkExtent3D extent;
1908 1u, // uint32_t mipLevels;
1909 1u, // uint32_t arrayLayers;
1910 m_sampleCount, // VkSampleCountFlagBits samples;
1911 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1912 imageUsage, // VkImageUsageFlags usage;
1913 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1914 1u, // uint32_t queueFamilyIndexCount;
1915 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
1916 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
1917 };
1918
1919 colorImage = createImage(vk, vkDevice, &colorImageParams);
1920
1921 // Allocate and bind color image memory
1922 colorImageAlloc =
1923 m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *colorImage), MemoryRequirement::Any);
1924 VK_CHECK(vk.bindImageMemory(vkDevice, *colorImage, colorImageAlloc->getMemory(), colorImageAlloc->getOffset()));
1925 }
1926
1927 // Create color attachment view
1928 {
1929 const VkImageViewCreateInfo colorImageViewParams = {
1930 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1931 DE_NULL, // const void* pNext;
1932 0u, // VkImageViewCreateFlags flags;
1933 *colorImage, // VkImage image;
1934 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1935 m_colorFormat, // VkFormat format;
1936 {
1937 VK_COMPONENT_SWIZZLE_R, // VkComponentSwizzle r;
1938 VK_COMPONENT_SWIZZLE_G, // VkComponentSwizzle g;
1939 VK_COMPONENT_SWIZZLE_B, // VkComponentSwizzle b;
1940 VK_COMPONENT_SWIZZLE_A // VkComponentSwizzle a;
1941 }, // VkComponentMapping components;
1942 {
1943 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1944 0, // uint32_t baseMipLevel;
1945 1, // uint32_t levelCount;
1946 0, // uint32_t baseArrayLayer;
1947 1 // uint32_t layerCount;
1948 }, // VkImageSubresourceRange subresourceRange;
1949 };
1950
1951 colorImageView = createImageView(vk, vkDevice, &colorImageViewParams);
1952 }
1953
1954 if (isMultiSampling())
1955 {
1956 // Resolved Image
1957 {
1958 const VkImageUsageFlags imageUsage =
1959 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1960 VkImageFormatProperties properties;
1961
1962 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(
1963 getPhysicalDevice(), m_colorFormat, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, imageUsage, 0,
1964 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
1965 {
1966 TCU_THROW(NotSupportedError, "Format not supported");
1967 }
1968
1969 const VkImageCreateInfo imageCreateInfo = {
1970 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1971 DE_NULL, // const void* pNext;
1972 0u, // VkImageCreateFlags flags;
1973 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1974 m_colorFormat, // VkFormat format;
1975 {m_renderSize.x(), m_renderSize.y(), 1u}, // VkExtent3D extent;
1976 1u, // uint32_t mipLevels;
1977 1u, // uint32_t arrayLayers;
1978 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1979 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1980 imageUsage, // VkImageUsageFlags usage;
1981 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1982 1u, // uint32_t queueFamilyIndexCount;
1983 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
1984 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1985 };
1986
1987 resolvedImage = vk::createImage(vk, vkDevice, &imageCreateInfo, DE_NULL);
1988 resolvedImageAlloc =
1989 m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *resolvedImage), MemoryRequirement::Any);
1990 VK_CHECK(vk.bindImageMemory(vkDevice, *resolvedImage, resolvedImageAlloc->getMemory(),
1991 resolvedImageAlloc->getOffset()));
1992 }
1993
1994 // Resolved Image View
1995 {
1996 const VkImageViewCreateInfo imageViewCreateInfo = {
1997 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1998 DE_NULL, // const void* pNext;
1999 0u, // VkImageViewCreateFlags flags;
2000 *resolvedImage, // VkImage image;
2001 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2002 m_colorFormat, // VkFormat format;
2003 {
2004 VK_COMPONENT_SWIZZLE_R, // VkComponentSwizzle r;
2005 VK_COMPONENT_SWIZZLE_G, // VkComponentSwizzle g;
2006 VK_COMPONENT_SWIZZLE_B, // VkComponentSwizzle b;
2007 VK_COMPONENT_SWIZZLE_A // VkComponentSwizzle a;
2008 },
2009 {
2010 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2011 0u, // uint32_t baseMipLevel;
2012 1u, // uint32_t levelCount;
2013 0u, // uint32_t baseArrayLayer;
2014 1u, // uint32_t layerCount;
2015 }, // VkImageSubresourceRange subresourceRange;
2016 };
2017
2018 resolvedImageView = vk::createImageView(vk, vkDevice, &imageViewCreateInfo, DE_NULL);
2019 }
2020 }
2021
2022 // Create render pass
2023 {
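// Attachment 0 is the (possibly multisampled) color target; attachment 1 is the
// single-sample resolve target and is only included when multisampling is enabled.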
2024 const VkAttachmentDescription attachmentDescription[] = {
2025 {
2026 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2027 m_colorFormat, // VkFormat format;
2028 m_sampleCount, // VkSampleCountFlagBits samples;
2029 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
2030 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2031 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2032 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2033 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2034 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2035 },
2036 {
2037 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2038 m_colorFormat, // VkFormat format;
2039 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2040 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp loadOp;
2041 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2042 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2043 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2044 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2045 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2046 }};
2047
2048 const VkAttachmentReference attachmentReference = {
2049 0u, // uint32_t attachment;
2050 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2051 };
2052
2053 const VkAttachmentReference resolveAttachmentRef = {
2054 1u, // uint32_t attachment;
2055 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2056 };
2057
2058 const VkSubpassDescription subpassDescription = {
2059 0u, // VkSubpassDescriptionFlags flags;
2060 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
2061 0u, // uint32_t inputAttachmentCount;
2062 DE_NULL, // const VkAttachmentReference* pInputAttachments;
2063 1u, // uint32_t colorAttachmentCount;
2064 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
2065 isMultiSampling() ? &resolveAttachmentRef : DE_NULL, // const VkAttachmentReference* pResolveAttachments;
2066 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
2067 0u, // uint32_t preserveAttachmentCount;
2068 DE_NULL // const uint32_t* pPreserveAttachments;
2069 };
2070
2071 const VkRenderPassCreateInfo renderPassParams = {
2072 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
2073 DE_NULL, // const void* pNext;
2074 0u, // VkRenderPassCreateFlags flags;
2075 isMultiSampling() ? 2u : 1u, // uint32_t attachmentCount;
2076 attachmentDescription, // const VkAttachmentDescription* pAttachments;
2077 1u, // uint32_t subpassCount;
2078 &subpassDescription, // const VkSubpassDescription* pSubpasses;
2079 0u, // uint32_t dependencyCount;
2080 DE_NULL // const VkSubpassDependency* pDependencies;
2081 };
2082
2083 renderPass = createRenderPass(vk, vkDevice, &renderPassParams);
2084 }
2085
2086 // Create framebuffer
2087 {
2088 const VkImageView attachments[] = {*colorImageView, *resolvedImageView};
2089
2090 const VkFramebufferCreateInfo framebufferParams = {
2091 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
2092 DE_NULL, // const void* pNext;
2093 (VkFramebufferCreateFlags)0,
2094 *renderPass, // VkRenderPass renderPass;
2095 isMultiSampling() ? 2u : 1u, // uint32_t attachmentCount;
2096 attachments, // const VkImageView* pAttachments;
2097 (uint32_t)m_renderSize.x(), // uint32_t width;
2098 (uint32_t)m_renderSize.y(), // uint32_t height;
2099 1u // uint32_t layers;
2100 };
2101
2102 framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
2103 }
2104
2105 // Create descriptors
2106 {
2107 setupUniforms(constCoords);
2108
2109 descriptorSetLayout = m_descriptorSetLayoutBuilder->build(vk, vkDevice);
2110 if (!m_uniformInfos.empty())
2111 {
2112 descriptorPool =
2113 m_descriptorPoolBuilder->build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2114 const VkDescriptorSetAllocateInfo allocInfo = {
2115 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
2116 DE_NULL,
2117 *descriptorPool,
2118 1u,
2119 &descriptorSetLayout.get(),
2120 };
2121
2122 descriptorSet = allocateDescriptorSet(vk, vkDevice, &allocInfo);
2123 }
2124
2125 for (uint32_t i = 0; i < m_uniformInfos.size(); i++)
2126 {
2127 const UniformInfo *uniformInfo = m_uniformInfos[i].get()->get();
2128 uint32_t location = uniformInfo->location;
2129
2130 if (uniformInfo->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
2131 {
2132 const BufferUniform *bufferInfo = dynamic_cast<const BufferUniform *>(uniformInfo);
2133
2134 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet,
2135 DescriptorSetUpdateBuilder::Location::binding(location),
2136 uniformInfo->type, &bufferInfo->descriptor);
2137 }
2138 else if (uniformInfo->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2139 {
2140 const SamplerUniform *samplerInfo = dynamic_cast<const SamplerUniform *>(uniformInfo);
2141
2142 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet,
2143 DescriptorSetUpdateBuilder::Location::binding(location),
2144 uniformInfo->type, &samplerInfo->descriptor);
2145 }
2146 else
2147 DE_FATAL("Impossible");
2148 }
2149
2150 m_descriptorSetUpdateBuilder->update(vk, vkDevice);
2151 }
2152
2153 // Create pipeline layout
2154 {
2155 const VkPushConstantRange *const pcRanges = m_pushConstantRanges.empty() ? DE_NULL : &m_pushConstantRanges[0];
2156 const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
2157 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
2158 DE_NULL, // const void* pNext;
2159 (VkPipelineLayoutCreateFlags)0,
2160 1u, // uint32_t descriptorSetCount;
2161 &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
2162 uint32_t(m_pushConstantRanges.size()), // uint32_t pushConstantRangeCount;
2163 pcRanges // const VkPushConstantRange* pPushConstantRanges;
2164 };
2165
2166 pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
2167 }
2168
2169 // Create shaders
2170 {
2171 vertexShaderModule =
2172 createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_vertexShaderName), 0);
2173 fragmentShaderModule =
2174 createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_fragmentShaderName), 0);
2175 }
2176
2177 // Create pipeline
2178 {
2179 // Add test case specific attributes
2180 if (m_attribFunc)
2181 m_attribFunc(*this, numVertices);
2182
2183 // Add base attributes
2184 setupDefaultInputs();
2185
2186 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams = {
2187 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2188 DE_NULL, // const void* pNext;
2189 (VkPipelineVertexInputStateCreateFlags)0,
2190 (uint32_t)m_vertexBindingDescription.size(), // uint32_t bindingCount;
2191 &m_vertexBindingDescription[0], // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2192 (uint32_t)m_vertexAttributeDescription.size(), // uint32_t attributeCount;
2193 &m_vertexAttributeDescription[0], // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2194 };
2195
2196 const std::vector<VkViewport> viewports(1, makeViewport(m_renderSize));
2197 const std::vector<VkRect2D> scissors(1, makeRect2D(m_renderSize));
2198
2199 const VkPipelineMultisampleStateCreateInfo multisampleStateParams = {
2200 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
2201 DE_NULL, // const void* pNext;
2202 0u, // VkPipelineMultisampleStateCreateFlags flags;
2203 m_sampleCount, // VkSampleCountFlagBits rasterizationSamples;
2204 VK_FALSE, // VkBool32 sampleShadingEnable;
2205 0.0f, // float minSampleShading;
2206 DE_NULL, // const VkSampleMask* pSampleMask;
2207 VK_FALSE, // VkBool32 alphaToCoverageEnable;
2208 VK_FALSE // VkBool32 alphaToOneEnable;
2209 };
2210
2211 graphicsPipeline = makeGraphicsPipeline(
2212 vk, // const DeviceInterface& vk
2213 vkDevice, // const VkDevice device
2214 *pipelineLayout, // const VkPipelineLayout pipelineLayout
2215 *vertexShaderModule, // const VkShaderModule vertexShaderModule
2216 DE_NULL, // const VkShaderModule tessellationControlShaderModule
2217 DE_NULL, // const VkShaderModule tessellationEvalShaderModule
2218 DE_NULL, // const VkShaderModule geometryShaderModule
2219 *fragmentShaderModule, // const VkShaderModule fragmentShaderModule
2220 *renderPass, // const VkRenderPass renderPass
2221 viewports, // const std::vector<VkViewport>& viewports
2222 scissors, // const std::vector<VkRect2D>& scissors
2223 topology, // const VkPrimitiveTopology topology
2224 0u, // const uint32_t subpass
2225 0u, // const uint32_t patchControlPoints
2226 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
2227 DE_NULL, // const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo
2228 &multisampleStateParams); // const VkPipelineMultisampleStateCreateInfo* multisampleStateCreateInfo
2229 }
2230
2231 // Create vertex indices buffer
2232 if (numIndices != 0)
2233 {
2234 const VkDeviceSize indexBufferSize = numIndices * sizeof(uint16_t);
2235 const VkBufferCreateInfo indexBufferParams = {
2236 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2237 DE_NULL, // const void* pNext;
2238 0u, // VkBufferCreateFlags flags;
2239 indexBufferSize, // VkDeviceSize size;
2240 VK_BUFFER_USAGE_INDEX_BUFFER_BIT, // VkBufferUsageFlags usage;
2241 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2242 1u, // uint32_t queueFamilyIndexCount;
2243 &queueFamilyIndex // const uint32_t* pQueueFamilyIndices;
2244 };
2245
2246 indexBuffer = createBuffer(vk, vkDevice, &indexBufferParams);
2247 indexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *indexBuffer),
2248 MemoryRequirement::HostVisible);
2249
2250 VK_CHECK(
2251 vk.bindBufferMemory(vkDevice, *indexBuffer, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset()));
2252
2253 // Load vertex indices into the buffer
2254 deMemcpy(indexBufferAlloc->getHostPtr(), indices, (size_t)indexBufferSize);
2255 flushAlloc(vk, vkDevice, *indexBufferAlloc);
2256 }
2257
2258 VkCommandPool activeCmdPool;
2259 if (m_externalCommandPool.get() == DE_NULL)
2260 {
2261 // Create local command pool
2262 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
2263 activeCmdPool = *cmdPool;
2264 }
2265 else
2266 {
2267 // Use external command pool if available
2268 activeCmdPool = m_externalCommandPool.get()->get();
2269 }
2270
2271 // Create command buffer
2272 {
2273 cmdBuffer = allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2274
2275 beginCommandBuffer(vk, *cmdBuffer);
2276
2277 {
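// Transition the color image from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL so it matches
// the initialLayout declared in the render pass attachment description.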
2278 const VkImageMemoryBarrier imageBarrier = {
2279 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2280 DE_NULL, // const void* pNext;
2281 0u, // VkAccessFlags srcAccessMask;
2282 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2283 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2284 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2285 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
2286 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
2287 *colorImage, // VkImage image;
2288 {
2289 // VkImageSubresourceRange subresourceRange;
2290 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2291 0u, // uint32_t baseMipLevel;
2292 1u, // uint32_t levelCount;
2293 0u, // uint32_t baseArrayLayer;
2294 1u, // uint32_t layerCount;
2295 }};
2296
2297 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
2298 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0, DE_NULL, 1,
2299 &imageBarrier);
2300
2301 if (isMultiSampling())
2302 {
2303 // add multisample barrier
2304 const VkImageMemoryBarrier multiSampleImageBarrier = {
2305 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2306 DE_NULL, // const void* pNext;
2307 0u, // VkAccessFlags srcAccessMask;
2308 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2309 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2310 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2311 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
2312 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
2313 *resolvedImage, // VkImage image;
2314 {
2315 // VkImageSubresourceRange subresourceRange;
2316 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2317 0u, // uint32_t baseMipLevel;
2318 1u, // uint32_t levelCount;
2319 0u, // uint32_t baseArrayLayer;
2320 1u, // uint32_t layerCount;
2321 }};
2322
2323 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
2324 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0, DE_NULL, 1,
2325 &multiSampleImageBarrier);
2326 }
2327 }
2328
2329 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()),
2330 m_clearColor);
2331
2332 updatePushConstants(*cmdBuffer, *pipelineLayout);
2333 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
2334 if (!m_uniformInfos.empty())
2335 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1,
2336 &*descriptorSet, 0u, DE_NULL);
2337
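// Each attribute lives in its own vertex buffer, so there is one binding per buffer
// and all binding offsets are zero.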
2338 const uint32_t numberOfVertexAttributes = (uint32_t)m_vertexBuffers.size();
2339 const std::vector<VkDeviceSize> offsets(numberOfVertexAttributes, 0);
2340
2341 std::vector<VkBuffer> buffers(numberOfVertexAttributes);
2342 for (size_t i = 0; i < numberOfVertexAttributes; i++)
2343 {
2344 buffers[i] = m_vertexBuffers[i].get()->get();
2345 }
2346
2347 vk.cmdBindVertexBuffers(*cmdBuffer, 0, numberOfVertexAttributes, &buffers[0], &offsets[0]);
2348 if (numIndices != 0)
2349 {
2350 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT16);
2351 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1, 0, 0, 0);
2352 }
2353 else
2354 vk.cmdDraw(*cmdBuffer, numVertices, 1, 0, 0);
2355
2356 endRenderPass(vk, *cmdBuffer);
2357 endCommandBuffer(vk, *cmdBuffer);
2358 }
2359
2360 // Execute Draw
2361 submitCommandsAndWait(vk, vkDevice, queue, cmdBuffer.get());
2362
2363 // Read back the result
2364 {
2365 const tcu::TextureFormat resultFormat = mapVkFormat(m_colorFormat);
2366 const VkDeviceSize imageSizeBytes =
2367 (VkDeviceSize)(resultFormat.getPixelSize() * m_renderSize.x() * m_renderSize.y());
2368 const VkBufferCreateInfo readImageBufferParams = {
2369 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2370 DE_NULL, // const void* pNext;
2371 0u, // VkBufferCreateFlags flags;
2372 imageSizeBytes, // VkDeviceSize size;
2373 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
2374 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2375 1u, // uint32_t queueFamilyIndexCount;
2376 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
2377 };
2378 const Unique<VkBuffer> readImageBuffer(createBuffer(vk, vkDevice, &readImageBufferParams));
2379 const de::UniquePtr<Allocation> readImageBufferMemory(m_memAlloc.allocate(
2380 getBufferMemoryRequirements(vk, vkDevice, *readImageBuffer), MemoryRequirement::HostVisible));
2381
2382 VK_CHECK(vk.bindBufferMemory(vkDevice, *readImageBuffer, readImageBufferMemory->getMemory(),
2383 readImageBufferMemory->getOffset()));
2384
2385 // Copy image to buffer
2386 const Move<VkCommandBuffer> resultCmdBuffer =
2387 allocateCommandBuffer(vk, vkDevice, activeCmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2388
2389 beginCommandBuffer(vk, *resultCmdBuffer);
2390
2391 copyImageToBuffer(vk, *resultCmdBuffer, isMultiSampling() ? *resolvedImage : *colorImage, *readImageBuffer,
2392 tcu::IVec2(m_renderSize.x(), m_renderSize.y()));
2393
2394 endCommandBuffer(vk, *resultCmdBuffer);
2395
2396 submitCommandsAndWait(vk, vkDevice, queue, resultCmdBuffer.get());
2397
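// Make sure the device writes are visible to the host before reading the buffer
// (needed when the allocation is not host-coherent).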
2398 invalidateAlloc(vk, vkDevice, *readImageBufferMemory);
2399
2400 const tcu::ConstPixelBufferAccess resultAccess(resultFormat, m_renderSize.x(), m_renderSize.y(), 1,
2401 readImageBufferMemory->getHostPtr());
2402
2403 m_resultImage.setStorage(resultFormat, m_renderSize.x(), m_renderSize.y());
2404 tcu::copy(m_resultImage.getAccess(), resultAccess);
2405 }
2406 }
2407
2408 void ShaderRenderCaseInstance::computeVertexReference(tcu::Surface &result, const QuadGrid &quadGrid)
2409 {
2410 DE_ASSERT(m_evaluator);
2411
2412 // Buffer info.
2413 const int width = result.getWidth();
2414 const int height = result.getHeight();
2415 const int gridSize = quadGrid.getGridSize();
2416 const int stride = gridSize + 1;
2417 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
2418 ShaderEvalContext evalCtx(quadGrid);
2419
2420 // Evaluate color for each vertex.
2421 std::vector<tcu::Vec4> colors((gridSize + 1) * (gridSize + 1));
2422 for (int y = 0; y < gridSize + 1; y++)
2423 for (int x = 0; x < gridSize + 1; x++)
2424 {
2425 const float sx = (float)x / (float)gridSize;
2426 const float sy = (float)y / (float)gridSize;
2427 const int vtxNdx = ((y * (gridSize + 1)) + x);
2428
2429 evalCtx.reset(sx, sy);
2430 m_evaluator->evaluate(evalCtx);
2431 DE_ASSERT(!evalCtx.isDiscarded); // Discard is not available in vertex shader.
2432 tcu::Vec4 color = evalCtx.color;
2433
2434 if (!hasAlpha)
2435 color.w() = 1.0f;
2436
2437 colors[vtxNdx] = color;
2438 }
2439
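// Rasterize each grid cell as two triangles and interpolate the vertex colors at
// pixel centers, mirroring what the GPU produces for the two-triangle quads.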
2440 // Render quads.
2441 for (int y = 0; y < gridSize; y++)
2442 for (int x = 0; x < gridSize; x++)
2443 {
2444 const float x0 = (float)x / (float)gridSize;
2445 const float x1 = (float)(x + 1) / (float)gridSize;
2446 const float y0 = (float)y / (float)gridSize;
2447 const float y1 = (float)(y + 1) / (float)gridSize;
2448
2449 const float sx0 = x0 * (float)width;
2450 const float sx1 = x1 * (float)width;
2451 const float sy0 = y0 * (float)height;
2452 const float sy1 = y1 * (float)height;
2453 const float oosx = 1.0f / (sx1 - sx0);
2454 const float oosy = 1.0f / (sy1 - sy0);
2455
2456 const int ix0 = deCeilFloatToInt32(sx0 - 0.5f);
2457 const int ix1 = deCeilFloatToInt32(sx1 - 0.5f);
2458 const int iy0 = deCeilFloatToInt32(sy0 - 0.5f);
2459 const int iy1 = deCeilFloatToInt32(sy1 - 0.5f);
2460
2461 const int v00 = (y * stride) + x;
2462 const int v01 = (y * stride) + x + 1;
2463 const int v10 = ((y + 1) * stride) + x;
2464 const int v11 = ((y + 1) * stride) + x + 1;
2465 const tcu::Vec4 c00 = colors[v00];
2466 const tcu::Vec4 c01 = colors[v01];
2467 const tcu::Vec4 c10 = colors[v10];
2468 const tcu::Vec4 c11 = colors[v11];
2469
2470 //printf("(%d,%d) -> (%f..%f, %f..%f) (%d..%d, %d..%d)\n", x, y, sx0, sx1, sy0, sy1, ix0, ix1, iy0, iy1);
2471
2472 for (int iy = iy0; iy < iy1; iy++)
2473 for (int ix = ix0; ix < ix1; ix++)
2474 {
2475 DE_ASSERT(deInBounds32(ix, 0, width));
2476 DE_ASSERT(deInBounds32(iy, 0, height));
2477
2478 const float sfx = (float)ix + 0.5f;
2479 const float sfy = (float)iy + 0.5f;
2480 const float fx1 = deFloatClamp((sfx - sx0) * oosx, 0.0f, 1.0f);
2481 const float fy1 = deFloatClamp((sfy - sy0) * oosy, 0.0f, 1.0f);
2482
2483 // Triangle quad interpolation.
2484 const bool tri = fx1 + fy1 <= 1.0f;
2485 const float tx = tri ? fx1 : (1.0f - fx1);
2486 const float ty = tri ? fy1 : (1.0f - fy1);
2487 const tcu::Vec4 &t0 = tri ? c00 : c11;
2488 const tcu::Vec4 &t1 = tri ? c01 : c10;
2489 const tcu::Vec4 &t2 = tri ? c10 : c01;
2490 const tcu::Vec4 color = t0 + (t1 - t0) * tx + (t2 - t0) * ty;
2491
2492 result.setPixel(ix, iy, tcu::RGBA(color));
2493 }
2494 }
2495 }
2496
2497 void ShaderRenderCaseInstance::computeFragmentReference(tcu::Surface &result, const QuadGrid &quadGrid)
2498 {
2499 DE_ASSERT(m_evaluator);
2500
2501 // Buffer info.
2502 const int width = result.getWidth();
2503 const int height = result.getHeight();
2504 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
2505 ShaderEvalContext evalCtx(quadGrid);
2506
2507 // Render.
2508 for (int y = 0; y < height; y++)
2509 for (int x = 0; x < width; x++)
2510 {
2511 const float sx = ((float)x + 0.5f) / (float)width;
2512 const float sy = ((float)y + 0.5f) / (float)height;
2513
2514 evalCtx.reset(sx, sy);
2515 m_evaluator->evaluate(evalCtx);
2516 // Select either clear color or computed color based on discarded bit.
2517 tcu::Vec4 color = evalCtx.isDiscarded ? m_clearColor : evalCtx.color;
2518
2519 if (!hasAlpha)
2520 color.w() = 1.0f;
2521
2522 result.setPixel(x, y, tcu::RGBA(color));
2523 }
2524 }
2525
2526 bool ShaderRenderCaseInstance::compareImages(const tcu::Surface &resImage, const tcu::Surface &refImage,
2527 float errorThreshold)
2528 {
2529 if (m_fuzzyCompare)
2530 return tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ComparisonResult", "Image comparison result",
2531 refImage, resImage, errorThreshold, tcu::COMPARE_LOG_EVERYTHING);
2532 else
2533 return tcu::pixelThresholdCompare(m_context.getTestContext().getLog(), "ComparisonResult",
2534 "Image comparison result", refImage, resImage, tcu::RGBA(1, 1, 1, 1),
2535 tcu::COMPARE_LOG_EVERYTHING);
2536 }
2537
2538 } // namespace sr
2539 } // namespace vkt
2540