/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_pipeline.h"

#include "venus-protocol/vn_protocol_driver_pipeline.h"
#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
#include "venus-protocol/vn_protocol_driver_shader_module.h"

#include "vn_descriptor_set.h"
#include "vn_device.h"
#include "vn_physical_device.h"
#include "vn_render_pass.h"

/**
 * Fields in VkGraphicsPipelineCreateInfo itself (and in its sub-structs) that
 * we must track to determine which fields are valid and which must be erased.
 */
struct vn_graphics_pipeline_info_self {
   union {
      /* Bitmask exists for testing if any field is set. */
      uint32_t mask;

      /* Group the fixes by Vulkan struct. Within each group, sort by struct
       * order.
       */
      struct {
         /** VkGraphicsPipelineCreateInfo::pStages */
         bool shader_stages : 1;
         /** VkGraphicsPipelineCreateInfo::pVertexInputState */
         bool vertex_input_state : 1;
         /** VkGraphicsPipelineCreateInfo::pInputAssemblyState */
         bool input_assembly_state : 1;
         /** VkGraphicsPipelineCreateInfo::pTessellationState */
         bool tessellation_state : 1;
         /** VkGraphicsPipelineCreateInfo::pViewportState */
         bool viewport_state : 1;
         /** VkGraphicsPipelineCreateInfo::pRasterizationState */
         bool rasterization_state : 1;
         /** VkGraphicsPipelineCreateInfo::pMultisampleState */
         bool multisample_state : 1;
         /** VkGraphicsPipelineCreateInfo::pDepthStencilState */
         bool depth_stencil_state : 1;
         /** VkGraphicsPipelineCreateInfo::pColorBlendState */
         bool color_blend_state : 1;
         /** VkGraphicsPipelineCreateInfo::layout */
         bool pipeline_layout : 1;
         /** VkGraphicsPipelineCreateInfo::renderPass */
         bool render_pass : 1;
         /** VkGraphicsPipelineCreateInfo::basePipelineHandle */
         bool base_pipeline_handle : 1;

         /** VkPipelineViewportStateCreateInfo::pViewports */
         bool viewport_state_viewports : 1;
         /** VkPipelineViewportStateCreateInfo::pScissors */
         bool viewport_state_scissors : 1;

         /** VkPipelineMultisampleStateCreateInfo::pSampleMask */
         bool multisample_state_sample_mask : 1;
      };
   };
};

static_assert(sizeof(struct vn_graphics_pipeline_info_self) ==
                 sizeof(((struct vn_graphics_pipeline_info_self){}).mask),
              "vn_graphics_pipeline_info_self::mask is too small");

/**
 * Fields in the VkGraphicsPipelineCreateInfo pNext chain that we must track
 * to determine which fields are valid and which must be erased.
 */
struct vn_graphics_pipeline_info_pnext {
   union {
      /* Bitmask exists for testing if any field is set. */
      uint32_t mask;

      /* Group the fixes by Vulkan struct. Within each group, sort by struct
       * order.
       */
      struct {
         /** VkPipelineRenderingCreateInfo, all format fields */
         bool rendering_info_formats : 1;
      };
   };
};

static_assert(sizeof(struct vn_graphics_pipeline_info_pnext) ==
                 sizeof(((struct vn_graphics_pipeline_info_pnext){}).mask),
              "vn_graphics_pipeline_info_pnext::mask is too small");

/**
 * Description of fixes needed for a single VkGraphicsPipelineCreateInfo
 * pNext chain.
 */
struct vn_graphics_pipeline_fix_desc {
   struct vn_graphics_pipeline_info_self self;
   struct vn_graphics_pipeline_info_pnext pnext;
};

/**
 * Typesafe bitmask for VkGraphicsPipelineLibraryFlagsEXT. Named members
 * reduce long lines.
 *
 * From the Vulkan 1.3.215 spec:
 *
 *    The state required for a graphics pipeline is divided into vertex input
 *    state, pre-rasterization shader state, fragment shader state, and
 *    fragment output state.
 */
struct vn_graphics_pipeline_library_state {
   union {
      VkGraphicsPipelineLibraryFlagsEXT mask;

      struct {
         /** VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT */
         bool vertex_input : 1;
         /** VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT */
         bool pre_raster_shaders : 1;
         /** VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT */
         bool fragment_shader : 1;
         /** VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT */
         bool fragment_output : 1;
      };
   };
};

/**
 * Compact bitmask for the subset of graphics VkDynamicState that
 * venus needs to track. Named members reduce long lines.
 *
 * We want a *compact* bitmask because enum VkDynamicState has large gaps due
 * to extensions.
 */
struct vn_graphics_dynamic_state {
   union {
      uint32_t mask;

      struct {
         /** VK_DYNAMIC_STATE_VERTEX_INPUT_EXT */
         bool vertex_input : 1;
         /** VK_DYNAMIC_STATE_VIEWPORT */
         bool viewport : 1;
         /** VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT */
         bool viewport_with_count : 1;
         /** VK_DYNAMIC_STATE_SAMPLE_MASK_EXT */
         bool sample_mask : 1;
         /** VK_DYNAMIC_STATE_SCISSOR */
         bool scissor : 1;
         /** VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT */
         bool scissor_with_count : 1;
         /** VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE */
         bool rasterizer_discard_enable : 1;
      };
   };
};
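
/* An illustration (values assumed, not quoted from the headers) of why a
 * direct bitmask over enum VkDynamicState is infeasible: core values start
 * at 0, but extension-provided enum values are derived from the extension
 * number and land near 10^9 (e.g. VK_DYNAMIC_STATE_VERTEX_INPUT_EXT is in
 * the 1000352xxx range). A compact, hand-assigned bitfield avoids a sparse
 * mask of that size.
 */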

/**
 * Graphics pipeline state that Venus tracks to determine which fixes are
 * required in the VkGraphicsPipelineCreateInfo pNext chain.
 *
 * This is the pipeline's fully linked state. That is, it includes the state
 * provided directly in VkGraphicsPipelineCreateInfo and the state provided
 * indirectly in VkPipelineLibraryCreateInfoKHR.
 */
struct vn_graphics_pipeline_state {
   /** The GPL state subsets that the pipeline provides. */
   struct vn_graphics_pipeline_library_state gpl;

   struct vn_graphics_dynamic_state dynamic;
   VkShaderStageFlags shader_stages;

   struct vn_render_pass_state {
      /**
       * The attachment aspects accessed by the pipeline.
       *
       * Valid if and only if VK_IMAGE_ASPECT_METADATA_BIT is unset.
       *
       * In a complete pipeline, this must be valid (and may be empty). In
       * a pipeline library, this may be invalid. We initialize this to be
       * invalid, and it remains invalid until we read the attachment info in
       * the VkGraphicsPipelineCreateInfo chain.
       *
       * The app provides the attachment info in
       * VkGraphicsPipelineCreateInfo::renderPass or
       * VkPipelineRenderingCreateInfo, but the validity of that info depends
       * on VkGraphicsPipelineLibraryFlagsEXT.
       */
      VkImageAspectFlags attachment_aspects;
   } render_pass;

   /** VkPipelineRasterizationStateCreateInfo::rasterizerDiscardEnable
    *
    * Valid if and only if gpl.pre_raster_shaders is set.
    */
   bool rasterizer_discard_enable;
};

struct vn_graphics_pipeline {
   struct vn_pipeline base;
   struct vn_graphics_pipeline_state state;
};

/**
 * Temporary storage for fixes in vkCreateGraphicsPipelines.
 *
 * Length of each array is vkCreateGraphicsPipelines::createInfoCount.
 */
struct vn_graphics_pipeline_fix_tmp {
   VkGraphicsPipelineCreateInfo *infos;
   VkPipelineMultisampleStateCreateInfo *multisample_state_infos;
   VkPipelineViewportStateCreateInfo *viewport_state_infos;

   /* Fixing the pNext chain
    *
    * TODO: extend this when the extensions below, or others, are supported:
    * - VK_KHR_maintenance5
    * - VK_EXT_pipeline_robustness
    */
   VkGraphicsPipelineLibraryCreateInfoEXT *gpl_infos;
   VkPipelineCreationFeedbackCreateInfo *feedback_infos;
   VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_infos;
   VkPipelineLibraryCreateInfoKHR *library_infos;
   VkPipelineRenderingCreateInfo *rendering_infos;
};
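
/* The arrays above hold deep copies: vn_fix_graphics_pipeline_create_infos()
 * copies each create info (and, when needed, select pNext structs) into this
 * storage before erasing fields, so a fix never mutates memory owned by the
 * application.
 */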

/* shader module commands */

VkResult
vn_CreateShaderModule(VkDevice device,
                      const VkShaderModuleCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkShaderModule *pShaderModule)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_shader_module *mod =
      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!mod)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);

   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
   vn_async_vkCreateShaderModule(dev->primary_ring, device, pCreateInfo, NULL,
                                 &mod_handle);

   *pShaderModule = mod_handle;

   return VK_SUCCESS;
}

void
vn_DestroyShaderModule(VkDevice device,
                       VkShaderModule shaderModule,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!mod)
      return;

   vn_async_vkDestroyShaderModule(dev->primary_ring, device, shaderModule,
                                  NULL);

   vn_object_base_fini(&mod->base);
   vk_free(alloc, mod);
}

/* pipeline layout commands */

static void
vn_pipeline_layout_destroy(struct vn_device *dev,
                           struct vn_pipeline_layout *pipeline_layout)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   if (pipeline_layout->push_descriptor_set_layout) {
      vn_descriptor_set_layout_unref(
         dev, pipeline_layout->push_descriptor_set_layout);
   }
   vn_async_vkDestroyPipelineLayout(
      dev->primary_ring, vn_device_to_handle(dev),
      vn_pipeline_layout_to_handle(pipeline_layout), NULL);

   vn_object_base_fini(&pipeline_layout->base);
   vk_free(alloc, pipeline_layout);
}

static inline struct vn_pipeline_layout *
vn_pipeline_layout_ref(struct vn_device *dev,
                       struct vn_pipeline_layout *pipeline_layout)
{
   vn_refcount_inc(&pipeline_layout->refcount);
   return pipeline_layout;
}

static inline void
vn_pipeline_layout_unref(struct vn_device *dev,
                         struct vn_pipeline_layout *pipeline_layout)
{
   if (vn_refcount_dec(&pipeline_layout->refcount))
      vn_pipeline_layout_destroy(dev, pipeline_layout);
}

VkResult
vn_CreatePipelineLayout(VkDevice device,
                        const VkPipelineLayoutCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkPipelineLayout *pPipelineLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   /* ignore pAllocator as the pipeline layout is reference-counted */
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   struct vn_pipeline_layout *layout =
      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!layout)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
                       &dev->base);
   layout->refcount = VN_REFCOUNT_INIT(1);

   for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++) {
      struct vn_descriptor_set_layout *descriptor_set_layout =
         vn_descriptor_set_layout_from_handle(pCreateInfo->pSetLayouts[i]);

      /* Avoid null derefs. pSetLayouts may contain VK_NULL_HANDLE.
       *
       * From the Vulkan 1.3.254 spec:
       *    VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter
       *
       *    If setLayoutCount is not 0, pSetLayouts must be a valid pointer to
       *    an array of setLayoutCount valid or VK_NULL_HANDLE
       *    VkDescriptorSetLayout handles
       */
      if (descriptor_set_layout &&
          descriptor_set_layout->is_push_descriptor) {
         layout->push_descriptor_set_layout =
            vn_descriptor_set_layout_ref(dev, descriptor_set_layout);
         break;
      }
   }

   layout->has_push_constant_ranges = pCreateInfo->pushConstantRangeCount > 0;

   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
   vn_async_vkCreatePipelineLayout(dev->primary_ring, device, pCreateInfo,
                                   NULL, &layout_handle);

   *pPipelineLayout = layout_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineLayout(VkDevice device,
                         VkPipelineLayout pipelineLayout,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_layout *layout =
      vn_pipeline_layout_from_handle(pipelineLayout);

   if (!layout)
      return;

   vn_pipeline_layout_unref(dev, layout);
}

/* pipeline cache commands */

VkResult
vn_CreatePipelineCache(VkDevice device,
                       const VkPipelineCacheCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkPipelineCache *pPipelineCache)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_pipeline_cache *cache =
      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!cache)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
                       &dev->base);

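   /* The initial data, if any, was produced by vn_GetPipelineCacheData
    * below, which prepends a vk_pipeline_cache_header to the renderer's
    * blob. Strip that driver-level header here so the renderer only sees
    * its own data.
    */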
   VkPipelineCacheCreateInfo local_create_info;
   if (pCreateInfo->initialDataSize) {
      const struct vk_pipeline_cache_header *header =
         pCreateInfo->pInitialData;

      local_create_info = *pCreateInfo;
      local_create_info.initialDataSize -= header->header_size;
      local_create_info.pInitialData += header->header_size;
      pCreateInfo = &local_create_info;
   }

   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
   vn_async_vkCreatePipelineCache(dev->primary_ring, device, pCreateInfo,
                                  NULL, &cache_handle);

   *pPipelineCache = cache_handle;

   return VK_SUCCESS;
}

void
vn_DestroyPipelineCache(VkDevice device,
                        VkPipelineCache pipelineCache,
                        const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline_cache *cache =
      vn_pipeline_cache_from_handle(pipelineCache);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!cache)
      return;

   vn_async_vkDestroyPipelineCache(dev->primary_ring, device, pipelineCache,
                                   NULL);

   vn_object_base_fini(&cache->base);
   vk_free(alloc, cache);
}

static struct vn_ring *
vn_get_target_ring(struct vn_device *dev)
{
   if (vn_tls_get_async_pipeline_create())
      return dev->primary_ring;

   struct vn_ring *ring = vn_tls_get_ring(dev->instance);
   if (!ring)
      return NULL;

   if (ring != dev->primary_ring) {
      /* Ensure pipeline create and pipeline cache retrieval dependencies are
       * ready on the renderer side.
       *
       * TODO:
       * - For pipeline create, track ring seqnos of the layout and render
       *   pass objects it depends on, and only wait for those seqnos once.
       * - For pipeline cache retrieval, track the ring seqno of the pipeline
       *   cache object it depends on. Treat different sync modes separately.
       */
      vn_ring_wait_all(dev->primary_ring);
   }
   return ring;
}

VkResult
vn_GetPipelineCacheData(VkDevice device,
                        VkPipelineCache pipelineCache,
                        size_t *pDataSize,
                        void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_physical_device *physical_dev = dev->physical_device;
   struct vn_ring *target_ring = vn_get_target_ring(dev);

   struct vk_pipeline_cache_header *header = pData;
   VkResult result;
   if (!pData) {
      result = vn_call_vkGetPipelineCacheData(target_ring, device,
                                              pipelineCache, pDataSize, NULL);
      if (result != VK_SUCCESS)
         return vn_error(dev->instance, result);

      *pDataSize += sizeof(*header);
      return VK_SUCCESS;
   }

   if (*pDataSize <= sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

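   /* Write the client-visible cache header; the renderer's raw cache data
    * follows immediately after it. The extra sizeof(*header) bytes were
    * already accounted for in the size query above.
    */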
   const struct vk_properties *props = &physical_dev->base.base.properties;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = props->vendorID;
   header->device_id = props->deviceID;
   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);

   *pDataSize -= header->header_size;
   result =
      vn_call_vkGetPipelineCacheData(target_ring, device, pipelineCache,
                                     pDataSize, pData + header->header_size);
   if (result < VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pDataSize += header->header_size;

   return result;
}

VkResult
vn_MergePipelineCaches(VkDevice device,
                       VkPipelineCache dstCache,
                       uint32_t srcCacheCount,
                       const VkPipelineCache *pSrcCaches)
{
   struct vn_device *dev = vn_device_from_handle(device);

   vn_async_vkMergePipelineCaches(dev->primary_ring, device, dstCache,
                                  srcCacheCount, pSrcCaches);

   return VK_SUCCESS;
}

/* pipeline commands */

static struct vn_graphics_pipeline *
vn_graphics_pipeline_from_handle(VkPipeline pipeline_h)
{
   struct vn_pipeline *p = vn_pipeline_from_handle(pipeline_h);
   assert(p->type == VN_PIPELINE_TYPE_GRAPHICS);
   return (struct vn_graphics_pipeline *)p;
}

static bool
vn_create_pipeline_handles(struct vn_device *dev,
                           enum vn_pipeline_type type,
                           uint32_t pipeline_count,
                           VkPipeline *pipeline_handles,
                           const VkAllocationCallbacks *alloc)
{
   size_t pipeline_size;

   switch (type) {
   case VN_PIPELINE_TYPE_GRAPHICS:
      pipeline_size = sizeof(struct vn_graphics_pipeline);
      break;
   case VN_PIPELINE_TYPE_COMPUTE:
      pipeline_size = sizeof(struct vn_pipeline);
      break;
   }

   for (uint32_t i = 0; i < pipeline_count; i++) {
      struct vn_pipeline *pipeline =
         vk_zalloc(alloc, pipeline_size, VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

      if (!pipeline) {
         for (uint32_t j = 0; j < i; j++) {
            pipeline = vn_pipeline_from_handle(pipeline_handles[j]);
            vn_object_base_fini(&pipeline->base);
            vk_free(alloc, pipeline);
         }

         memset(pipeline_handles, 0,
                pipeline_count * sizeof(pipeline_handles[0]));
         return false;
      }

      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
                          &dev->base);
      pipeline->type = type;
      pipeline_handles[i] = vn_pipeline_to_handle(pipeline);
   }

   return true;
}

static void
vn_destroy_pipeline_handles_internal(struct vn_device *dev,
                                     uint32_t pipeline_count,
                                     VkPipeline *pipeline_handles,
                                     const VkAllocationCallbacks *alloc,
                                     bool failed_only)
{
   for (uint32_t i = 0; i < pipeline_count; i++) {
      struct vn_pipeline *pipeline =
         vn_pipeline_from_handle(pipeline_handles[i]);

      if (!failed_only || pipeline->base.id == 0) {
         if (pipeline->layout) {
            vn_pipeline_layout_unref(dev, pipeline->layout);
         }
         vn_object_base_fini(&pipeline->base);
         vk_free(alloc, pipeline);
         pipeline_handles[i] = VK_NULL_HANDLE;
      }
   }
}

static inline void
vn_destroy_pipeline_handles(struct vn_device *dev,
                            uint32_t pipeline_count,
                            VkPipeline *pipeline_handles,
                            const VkAllocationCallbacks *alloc)
{
   vn_destroy_pipeline_handles_internal(dev, pipeline_count, pipeline_handles,
                                        alloc, false);
}

static inline void
vn_destroy_failed_pipeline_handles(struct vn_device *dev,
                                   uint32_t pipeline_count,
                                   VkPipeline *pipeline_handles,
                                   const VkAllocationCallbacks *alloc)
{
   vn_destroy_pipeline_handles_internal(dev, pipeline_count, pipeline_handles,
                                        alloc, true);
}

#define VN_PIPELINE_CREATE_SYNC_MASK                                         \
   (VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT |               \
    VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
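
/* With either of the flags above, the app must learn the true VkResult of
 * pipeline creation (e.g. VK_PIPELINE_COMPILE_REQUIRED), so creation cannot
 * be fire-and-forget: the driver must wait for the renderer's result instead
 * of using its usual async path (see want_sync in
 * vn_CreateGraphicsPipelines).
 */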

static struct vn_graphics_pipeline_fix_tmp *
vn_graphics_pipeline_fix_tmp_alloc(const VkAllocationCallbacks *alloc,
                                   uint32_t info_count,
                                   bool alloc_pnext)
{
   struct vn_graphics_pipeline_fix_tmp *tmp;
   VkGraphicsPipelineCreateInfo *infos;
   VkPipelineMultisampleStateCreateInfo *multisample_state_infos;
   VkPipelineViewportStateCreateInfo *viewport_state_infos;

   /* for pNext */
   VkGraphicsPipelineLibraryCreateInfoEXT *gpl_infos;
   VkPipelineCreationFeedbackCreateInfo *feedback_infos;
   VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr_infos;
   VkPipelineLibraryCreateInfoKHR *library_infos;
   VkPipelineRenderingCreateInfo *rendering_infos;

   VK_MULTIALLOC(ma);
   vk_multialloc_add(&ma, &tmp, __typeof__(*tmp), 1);
   vk_multialloc_add(&ma, &infos, __typeof__(*infos), info_count);
   vk_multialloc_add(&ma, &multisample_state_infos,
                     __typeof__(*multisample_state_infos), info_count);
   vk_multialloc_add(&ma, &viewport_state_infos,
                     __typeof__(*viewport_state_infos), info_count);

   if (alloc_pnext) {
      vk_multialloc_add(&ma, &gpl_infos, __typeof__(*gpl_infos), info_count);
      vk_multialloc_add(&ma, &feedback_infos, __typeof__(*feedback_infos),
                        info_count);
      vk_multialloc_add(&ma, &fsr_infos, __typeof__(*fsr_infos), info_count);
      vk_multialloc_add(&ma, &library_infos, __typeof__(*library_infos),
                        info_count);
      vk_multialloc_add(&ma, &rendering_infos, __typeof__(*rendering_infos),
                        info_count);
   }

   if (!vk_multialloc_zalloc(&ma, alloc, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
      return NULL;

   tmp->infos = infos;
   tmp->multisample_state_infos = multisample_state_infos;
   tmp->viewport_state_infos = viewport_state_infos;

   if (alloc_pnext) {
      tmp->gpl_infos = gpl_infos;
      tmp->feedback_infos = feedback_infos;
      tmp->fsr_infos = fsr_infos;
      tmp->library_infos = library_infos;
      tmp->rendering_infos = rendering_infos;
   }

   return tmp;
}

/**
 * Update \a gpl with the VkGraphicsPipelineLibraryFlagsEXT that the pipeline
 * provides directly (without linking). The spec says the pipeline always
 * provides these flags, though possibly implicitly.
 *
 * From the Vulkan 1.3.251 spec:
 *
 *    If this structure [VkGraphicsPipelineLibraryCreateInfoEXT] is
 *    omitted, and either VkGraphicsPipelineCreateInfo::flags includes
 *    VK_PIPELINE_CREATE_LIBRARY_BIT_KHR or the
 *    VkGraphicsPipelineCreateInfo::pNext chain includes
 *    a VkPipelineLibraryCreateInfoKHR structure with a libraryCount
 *    greater than 0, it is as if flags is 0. Otherwise if this
 *    structure is omitted, it is as if flags includes all possible subsets
 *    of the graphics pipeline (i.e. a complete graphics pipeline).
 */
static void
vn_graphics_pipeline_library_state_update(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_library_state *restrict gpl)
{
   const VkGraphicsPipelineLibraryCreateInfoEXT *gpl_info =
      vk_find_struct_const(info->pNext,
                           GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT);
   const VkPipelineLibraryCreateInfoKHR *lib_info =
      vk_find_struct_const(info->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
   const uint32_t lib_count = lib_info ? lib_info->libraryCount : 0;

   if (gpl_info) {
      gpl->mask |= gpl_info->flags;
   } else if ((info->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) ||
              lib_count > 0) {
      gpl->mask |= 0;
   } else {
      gpl->mask |=
         VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT |
         VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT |
         VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT |
         VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
   }
}

/**
 * Update \a dynamic with the VkDynamicState that the pipeline provides
 * directly (without linking).
 *
 * \a direct_gpl The VkGraphicsPipelineLibraryFlagsEXT that the pipeline sets
 * directly (without linking).
 */
static void
vn_graphics_dynamic_state_update(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_library_state direct_gpl,
   struct vn_graphics_dynamic_state *restrict dynamic)
{
   const VkPipelineDynamicStateCreateInfo *dyn_info = info->pDynamicState;
   if (!dyn_info)
      return;

   struct vn_graphics_dynamic_state raw = { 0 };

   for (uint32_t i = 0; i < dyn_info->dynamicStateCount; i++) {
      switch (dyn_info->pDynamicStates[i]) {
      case VK_DYNAMIC_STATE_VERTEX_INPUT_EXT:
         raw.vertex_input = true;
         break;
      case VK_DYNAMIC_STATE_VIEWPORT:
         raw.viewport = true;
         break;
      case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT:
         raw.viewport_with_count = true;
         break;
      case VK_DYNAMIC_STATE_SAMPLE_MASK_EXT:
         raw.sample_mask = true;
         break;
      case VK_DYNAMIC_STATE_SCISSOR:
         raw.scissor = true;
         break;
      case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT:
         raw.scissor_with_count = true;
         break;
      case VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE:
         raw.rasterizer_discard_enable = true;
         break;
      default:
         break;
      }
   }

   /* We must ignore VkDynamicState unrelated to the
    * VkGraphicsPipelineLibraryFlagsEXT that the pipeline provides directly
    * (without linking).
    *
    * [Vulkan 1.3.252]
    *    Dynamic state values set via pDynamicState must be ignored if the
    *    state they correspond to is not otherwise statically set by one of
    *    the state subsets used to create the pipeline.
    *
    * In general, we must update dynamic state bits with `|=` rather than `=`
    * because multiple GPL state subsets can enable the same dynamic state.
    *
    * [Vulkan 1.3.252]
    *    Any linked library that has dynamic state enabled that same dynamic
    *    state must also be enabled in all the other linked libraries to which
    *    that dynamic state applies.
    */
   if (direct_gpl.vertex_input) {
      dynamic->vertex_input |= raw.vertex_input;
   }
   if (direct_gpl.pre_raster_shaders) {
      dynamic->viewport |= raw.viewport;
      dynamic->viewport_with_count |= raw.viewport_with_count;
      dynamic->scissor |= raw.scissor;
      dynamic->scissor_with_count |= raw.scissor_with_count;
      dynamic->rasterizer_discard_enable |= raw.rasterizer_discard_enable;
   }
   if (direct_gpl.fragment_shader) {
      dynamic->sample_mask |= raw.sample_mask;
   }
   if (direct_gpl.fragment_output) {
      dynamic->sample_mask |= raw.sample_mask;
   }
}

/**
 * Update \a shader_stages with the VkShaderStageFlags that the pipeline
 * provides directly (without linking).
 *
 * \a direct_gpl The VkGraphicsPipelineLibraryFlagsEXT that the pipeline sets
 * directly (without linking).
 */
static void
vn_graphics_shader_stages_update(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_library_state direct_gpl,
   struct vn_graphics_pipeline_fix_desc *restrict valid,
   VkShaderStageFlags *restrict shader_stages)
{
   /* From the Vulkan 1.3.251 spec:
    *
    *    VUID-VkGraphicsPipelineCreateInfo-flags-06640
    *
    *    If VkGraphicsPipelineLibraryCreateInfoEXT::flags includes
    *    VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT or
    *    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT, pStages must be
    *    a valid pointer to an array of stageCount valid
    *    VkPipelineShaderStageCreateInfo structures
    */
   if (!direct_gpl.pre_raster_shaders && !direct_gpl.fragment_shader)
      return;

   valid->self.shader_stages = true;

   for (uint32_t i = 0; i < info->stageCount; i++) {
      /* We do not need to ignore the stages irrelevant to the GPL flags.
       * The following VUs require the app to provide only relevant stages.
       *
       *    VUID-VkGraphicsPipelineCreateInfo-pStages-06894
       *    VUID-VkGraphicsPipelineCreateInfo-pStages-06895
       *    VUID-VkGraphicsPipelineCreateInfo-pStages-06896
       */
      *shader_stages |= info->pStages[i].stage;
   }
}

/**
 * Update the render pass state with the state that the pipeline provides
 * directly (without linking).
 *
 * \a direct_gpl The VkGraphicsPipelineLibraryFlagsEXT that the pipeline sets
 * directly (without linking).
 */
static void
vn_render_pass_state_update(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_library_state direct_gpl,
   struct vn_graphics_pipeline_fix_desc *restrict valid,
   struct vn_render_pass_state *restrict state)
{
   /* We must set validity before early returns, to ensure we don't erase
    * valid info during fixup. We must not erase valid info because, even if
    * we don't read it, the host driver may read it.
    */

   /* VUID-VkGraphicsPipelineCreateInfo-flags-06643
    *
    *    If VkGraphicsPipelineLibraryCreateInfoEXT::flags includes
    *    VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT,
    *    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT, or
    *    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT, and
    *    renderPass is not VK_NULL_HANDLE, renderPass must be a valid
    *    VkRenderPass handle
    */
   valid->self.render_pass |= direct_gpl.pre_raster_shaders ||
                              direct_gpl.fragment_shader ||
                              direct_gpl.fragment_output;

   /* VUID-VkGraphicsPipelineCreateInfo-renderPass-06579
    *
    *    If the pipeline requires fragment output interface state, and
    *    renderPass is VK_NULL_HANDLE, and
    *    VkPipelineRenderingCreateInfo::colorAttachmentCount is not 0,
    *    VkPipelineRenderingCreateInfo::pColorAttachmentFormats must be a
    *    valid pointer to an array of colorAttachmentCount valid VkFormat
    *    values
    *
    * VUID-VkGraphicsPipelineCreateInfo-renderPass-06580
    *
    *    If the pipeline requires fragment output interface state, and
    *    renderPass is VK_NULL_HANDLE, each element of
    *    VkPipelineRenderingCreateInfo::pColorAttachmentFormats must be a
    *    valid VkFormat value
    */
   valid->pnext.rendering_info_formats |=
      direct_gpl.fragment_output && !info->renderPass;

   if (state->attachment_aspects != VK_IMAGE_ASPECT_METADATA_BIT) {
      /* We have previously collected the pipeline's attachment aspects. We
       * do not need to inspect the attachment info again because VUs ensure
       * that all valid render pass info used to create the pipeline and its
       * linked pipelines are compatible. Ignored info is not required to be
       * compatible across linked pipeline libraries. An example of ignored
       * info is VkPipelineRenderingCreateInfo::pColorAttachmentFormats
       * without
       * VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT.
       *
       *    VUID-VkGraphicsPipelineCreateInfo-renderpass-06625
       *    VUID-VkGraphicsPipelineCreateInfo-pLibraries-06628
       */
      return;
   }

   if (valid->self.render_pass && info->renderPass) {
      struct vn_render_pass *pass =
         vn_render_pass_from_handle(info->renderPass);
      state->attachment_aspects =
         pass->subpasses[info->subpass].attachment_aspects;
      return;
   }

   if (valid->pnext.rendering_info_formats) {
      state->attachment_aspects = 0;

      /* From the Vulkan 1.3.255 spec:
       *
       *    When a pipeline is created without a VkRenderPass, if this
       *    structure [VkPipelineRenderingCreateInfo] is present in the pNext
       *    chain of VkGraphicsPipelineCreateInfo, it specifies the view mask
       *    and format of attachments used for rendering. If this structure
       *    is not specified, and the pipeline does not include
       *    a VkRenderPass, viewMask and colorAttachmentCount are 0, and
       *    depthAttachmentFormat and stencilAttachmentFormat are
       *    VK_FORMAT_UNDEFINED. If a graphics pipeline is created with
       *    a valid VkRenderPass, parameters of this structure are ignored.
       *
       * However, other spec text clearly states that the format members of
       * VkPipelineRenderingCreateInfo are ignored unless the pipeline
       * provides fragment output interface state directly (without linking).
       */
      const VkPipelineRenderingCreateInfo *r_info =
         vk_find_struct_const(info->pNext, PIPELINE_RENDERING_CREATE_INFO);

      if (r_info) {
         for (uint32_t i = 0; i < r_info->colorAttachmentCount; i++) {
            if (r_info->pColorAttachmentFormats[i]) {
               state->attachment_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
               break;
            }
         }
         if (r_info->depthAttachmentFormat)
            state->attachment_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         if (r_info->stencilAttachmentFormat)
            state->attachment_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
      }

      return;
   }

   /* Aspects remain invalid. */
   assert(state->attachment_aspects == VK_IMAGE_ASPECT_METADATA_BIT);
}

static void
vn_graphics_pipeline_state_merge(
   struct vn_graphics_pipeline_state *restrict dst,
   const struct vn_graphics_pipeline_state *restrict src)
{
   /* The Vulkan 1.3.251 spec says:
    *    VUID-VkGraphicsPipelineCreateInfo-pLibraries-06611
    *
    *    Any pipeline libraries included via
    *    VkPipelineLibraryCreateInfoKHR::pLibraries must not include any state
    *    subset already defined by this structure or defined by any other
    *    pipeline library in VkPipelineLibraryCreateInfoKHR::pLibraries
    */
   assert(!(dst->gpl.mask & src->gpl.mask));

   dst->gpl.mask |= src->gpl.mask;
   dst->dynamic.mask |= src->dynamic.mask;
   dst->shader_stages |= src->shader_stages;

   VkImageAspectFlags src_aspects = src->render_pass.attachment_aspects;
   VkImageAspectFlags *dst_aspects = &dst->render_pass.attachment_aspects;

   if (src_aspects != VK_IMAGE_ASPECT_METADATA_BIT) {
      if (*dst_aspects != VK_IMAGE_ASPECT_METADATA_BIT) {
         /* All linked pipelines must have compatible render pass info. */
         assert(*dst_aspects == src_aspects);
      } else {
         *dst_aspects = src_aspects;
      }
   }

   /* rasterizer_discard_enable is valid only when the pre-raster shaders
    * subset is provided, so take it from the library that provides that
    * subset. (Checking dst here would let a later library without pre-raster
    * state clobber a previously merged value.)
    */
   if (src->gpl.pre_raster_shaders)
      dst->rasterizer_discard_enable = src->rasterizer_discard_enable;
}

/**
 * Fill \a state by reading the VkGraphicsPipelineCreateInfo pNext chain,
 * including any linked pipeline libraries. Return in \a out_fix_desc
 * a description of required fixes to the VkGraphicsPipelineCreateInfo chain.
 *
 * \pre state is zero-filled
 *
 * The logic for choosing which struct members to ignore, and which members
 * have valid values, is derived from the Vulkan spec sections for
 * VkGraphicsPipelineCreateInfo, VkGraphicsPipelineLibraryCreateInfoEXT, and
 * VkPipelineLibraryCreateInfoKHR. As of Vulkan 1.3.255, the spec text and VUs
 * still contain inconsistencies regarding the validity of struct members, so
 * read it carefully. Many of the VUs were written before
 * VK_EXT_graphics_pipeline_library and never updated. (Lina's advice: Focus
 * primarily on understanding the non-VU text, and use VUs to verify your
 * comprehension).
 */
static void
vn_graphics_pipeline_state_fill(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_state *restrict state,
   struct vn_graphics_pipeline_fix_desc *out_fix_desc)
{
   /* Assume that state is already zero-filled.
    *
    * Invalidate attachment_aspects.
    */
   state->render_pass.attachment_aspects = VK_IMAGE_ASPECT_METADATA_BIT;

   const VkPipelineRenderingCreateInfo *rendering_info =
      vk_find_struct_const(info->pNext, PIPELINE_RENDERING_CREATE_INFO);
   const VkPipelineLibraryCreateInfoKHR *lib_info =
      vk_find_struct_const(info->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
   const uint32_t lib_count = lib_info ? lib_info->libraryCount : 0;

   /* This tracks which fields have valid values in the
    * VkGraphicsPipelineCreateInfo pNext chain.
    *
    * We initially assume that all fields are invalid. We flip fields from
    * invalid to valid as we dig through the pNext chain.
    *
    * A single field may be updated at multiple locations, therefore we update
    * with `|=` instead of `=`.
    *
    * If `valid.foo` is set, then foo has a valid value if foo exists in the
    * pNext chain. Even though NULL is not a valid pointer, NULL is considered
    * a valid *value* for a pointer-typed variable. Same for VK_NULL_HANDLE
    * and Vulkan handle-typed variables.
    *
    * Conversely, if `valid.foo` remains false at the end of this function,
    * then the Vulkan spec permits foo to have any value. If foo has a pointer
    * type, it may be an invalid pointer. If foo has a Vulkan handle type, it
    * may be an invalid handle.
    */
   struct vn_graphics_pipeline_fix_desc valid = { 0 };

   /* Merge the linked pipeline libraries. */
   for (uint32_t i = 0; i < lib_count; i++) {
      struct vn_graphics_pipeline *p =
         vn_graphics_pipeline_from_handle(lib_info->pLibraries[i]);
      vn_graphics_pipeline_state_merge(state, &p->state);
   }

   /* The VkGraphicsPipelineLibraryFlagsEXT that this pipeline provides
    * directly (without linking).
    */
   struct vn_graphics_pipeline_library_state direct_gpl = { 0 };
   vn_graphics_pipeline_library_state_update(info, &direct_gpl);

   /* From the Vulkan 1.3.251 spec:
    *    VUID-VkGraphicsPipelineCreateInfo-pLibraries-06611
    *
    *    Any pipeline libraries included via
    *    VkPipelineLibraryCreateInfoKHR::pLibraries must not include any state
    *    subset already defined by this structure or defined by any other
    *    pipeline library in VkPipelineLibraryCreateInfoKHR::pLibraries
    */
   assert(!(direct_gpl.mask & state->gpl.mask));

   /* Collect orthogonal state that is common to multiple GPL state subsets. */
   vn_graphics_dynamic_state_update(info, direct_gpl, &state->dynamic);
   vn_graphics_shader_stages_update(info, direct_gpl, &valid,
                                    &state->shader_stages);
   vn_render_pass_state_update(info, direct_gpl, &valid, &state->render_pass);

   /* Collect remaining pre-raster shaders state.
    *
    * Of the remaining state, we must first collect the pre-raster shaders
    * state because it influences how the other state is collected.
    */
   if (direct_gpl.pre_raster_shaders) {
      valid.self.tessellation_state |=
         (bool)(state->shader_stages &
                (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
                 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
      valid.self.rasterization_state = true;
      valid.self.pipeline_layout = true;

      if (info->pRasterizationState) {
         state->rasterizer_discard_enable =
            info->pRasterizationState->rasterizerDiscardEnable;
      }

      const bool is_raster_statically_disabled =
         !state->dynamic.rasterizer_discard_enable &&
         state->rasterizer_discard_enable;

      if (!is_raster_statically_disabled) {
         valid.self.viewport_state = true;

         valid.self.viewport_state_viewports =
            !state->dynamic.viewport && !state->dynamic.viewport_with_count;

         valid.self.viewport_state_scissors =
            !state->dynamic.scissor && !state->dynamic.scissor_with_count;
      }

      /* Defer setting the flag until all its state is filled. */
      state->gpl.pre_raster_shaders = true;
   }

   /* Collect remaining vertex input interface state.
    *
    * TODO(VK_EXT_mesh_shader): Update.
    */
   if (direct_gpl.vertex_input) {
      const bool may_have_vertex_shader =
         !state->gpl.pre_raster_shaders ||
         (state->shader_stages & VK_SHADER_STAGE_VERTEX_BIT);

      valid.self.vertex_input_state |=
         may_have_vertex_shader && !state->dynamic.vertex_input;

      valid.self.input_assembly_state |= may_have_vertex_shader;

      /* Defer setting the flag until all its state is filled. */
      state->gpl.vertex_input = true;
   }

   /* Does this pipeline have rasterization statically disabled? If disabled,
    * then this pipeline does not directly provide fragment shader state nor
    * fragment output state.
    *
    * About fragment shader state, the Vulkan 1.3.254 spec says:
    *
    *    If a pipeline specifies pre-rasterization state either directly or by
    *    including it as a pipeline library and rasterizerDiscardEnable is set
    *    to VK_FALSE or VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE is used,
    *    this state must be specified to create a complete graphics pipeline.
    *
    *    If a pipeline includes
    *    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT in
    *    VkGraphicsPipelineLibraryCreateInfoEXT::flags either explicitly or as
    *    a default, and either the conditions requiring this state for
    *    a complete graphics pipeline are met or this pipeline does not
    *    specify pre-rasterization state in any way, that pipeline must
    *    specify this state directly.
    *
    * About fragment output state, the Vulkan 1.3.254 spec says the same, but
    * with VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT.
    */
   const bool is_raster_statically_disabled =
      state->gpl.pre_raster_shaders &&
      !state->dynamic.rasterizer_discard_enable &&
      state->rasterizer_discard_enable;

   /* Collect remaining fragment shader state. */
   if (direct_gpl.fragment_shader) {
      if (!is_raster_statically_disabled) {
         /* Validity of pMultisampleState is easy here.
          *
          *    VUID-VkGraphicsPipelineCreateInfo-pMultisampleState-06629
          *
          *    If the pipeline requires fragment shader state
          *    pMultisampleState must be NULL or a valid pointer to a valid
          *    VkPipelineMultisampleStateCreateInfo structure
          */
         valid.self.multisample_state = true;

         valid.self.multisample_state_sample_mask =
            !state->dynamic.sample_mask;

         if ((state->render_pass.attachment_aspects &
              (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
            valid.self.depth_stencil_state = true;
         } else if (state->render_pass.attachment_aspects ==
                       VK_IMAGE_ASPECT_METADATA_BIT &&
                    (info->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)) {
            /* The app has not yet provided render pass info, neither directly
             * in this VkGraphicsPipelineCreateInfo nor in any linked pipeline
             * libraries. Therefore we do not know if the final complete
             * pipeline will have any depth or stencil attachments. If the
             * final complete pipeline does have depth or stencil attachments,
             * then the pipeline will use
             * VkPipelineDepthStencilStateCreateInfo. Therefore, we must not
             * ignore it.
             */
            valid.self.depth_stencil_state = true;
         }

         valid.self.pipeline_layout = true;
      }

      /* Defer setting the flag until all its state is filled. */
      state->gpl.fragment_shader = true;
   }

   /* Collect remaining fragment output interface state. */
   if (direct_gpl.fragment_output) {
      if (!is_raster_statically_disabled) {
         /* Validity of pMultisampleState is easy here.
          *
          *    VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
          *
          *    If the pipeline requires fragment output interface state,
          *    pMultisampleState must be a valid pointer to a valid
          *    VkPipelineMultisampleStateCreateInfo structure
          */
         valid.self.multisample_state = true;

         valid.self.multisample_state_sample_mask =
            !state->dynamic.sample_mask;

         valid.self.color_blend_state |=
            (bool)(state->render_pass.attachment_aspects &
                   VK_IMAGE_ASPECT_COLOR_BIT);
         valid.self.depth_stencil_state |=
            (bool)(state->render_pass.attachment_aspects &
                   (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
      }

      /* Defer setting the flag until all its state is filled. */
      state->gpl.fragment_output = true;
   }

   /* After collecting the directly provided state subsets, check the final
    * linked state to decide whether layout must be a valid handle for this
    * pipeline.
    *
    * From the Vulkan 1.3.275 spec:
    *    VUID-VkGraphicsPipelineCreateInfo-layout-06602
    *
    *    If the pipeline requires fragment shader state or pre-rasterization
    *    shader state, layout must be a valid VkPipelineLayout handle
    */
   if ((state->gpl.fragment_shader && !is_raster_statically_disabled) ||
       state->gpl.pre_raster_shaders)
      valid.self.pipeline_layout = true;

   /* Pipeline Derivatives
    *
    * VUID-VkGraphicsPipelineCreateInfo-flags-07984
    *
    *    If flags contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, and
    *    basePipelineIndex is -1, basePipelineHandle must be a valid graphics
    *    VkPipeline handle
    */
   if ((info->flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) &&
       info->basePipelineIndex == -1)
      valid.self.base_pipeline_handle = true;

   *out_fix_desc = (struct vn_graphics_pipeline_fix_desc) {
      .self = {
         /* clang-format off */
         .shader_stages =
            !valid.self.shader_stages &&
            info->pStages,
         .vertex_input_state =
            !valid.self.vertex_input_state &&
            info->pVertexInputState,
         .input_assembly_state =
            !valid.self.input_assembly_state &&
            info->pInputAssemblyState,
         .tessellation_state =
            !valid.self.tessellation_state &&
            info->pTessellationState,
         .viewport_state =
            !valid.self.viewport_state &&
            info->pViewportState,
         .viewport_state_viewports =
            !valid.self.viewport_state_viewports &&
            valid.self.viewport_state &&
            info->pViewportState &&
            info->pViewportState->pViewports &&
            info->pViewportState->viewportCount,
         .viewport_state_scissors =
            !valid.self.viewport_state_scissors &&
            valid.self.viewport_state &&
            info->pViewportState &&
            info->pViewportState->pScissors &&
            info->pViewportState->scissorCount,
         .rasterization_state =
            !valid.self.rasterization_state &&
            info->pRasterizationState,
         .multisample_state =
            !valid.self.multisample_state &&
            info->pMultisampleState,
         .multisample_state_sample_mask =
            !valid.self.multisample_state_sample_mask &&
            valid.self.multisample_state &&
            info->pMultisampleState &&
            info->pMultisampleState->pSampleMask,
         .depth_stencil_state =
            !valid.self.depth_stencil_state &&
            info->pDepthStencilState,
         .color_blend_state =
            !valid.self.color_blend_state &&
            info->pColorBlendState,
         .pipeline_layout =
            !valid.self.pipeline_layout &&
            info->layout,
         .render_pass =
            !valid.self.render_pass &&
            info->renderPass,
         .base_pipeline_handle =
            !valid.self.base_pipeline_handle &&
            info->basePipelineHandle,
         /* clang-format on */
      },
      .pnext = {
         /* clang-format off */
         .rendering_info_formats =
            !valid.pnext.rendering_info_formats &&
            rendering_info &&
            rendering_info->pColorAttachmentFormats &&
            rendering_info->colorAttachmentCount,
         /* clang-format on */
      },
   };
}

static void
vn_fix_graphics_pipeline_create_info_self(
   const struct vn_graphics_pipeline_info_self *ignore,
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_fix_tmp *fix_tmp,
   uint32_t index)
{
   /* VkGraphicsPipelineCreateInfo */
   if (ignore->shader_stages) {
      fix_tmp->infos[index].stageCount = 0;
      fix_tmp->infos[index].pStages = NULL;
   }
   if (ignore->vertex_input_state)
      fix_tmp->infos[index].pVertexInputState = NULL;
   if (ignore->input_assembly_state)
      fix_tmp->infos[index].pInputAssemblyState = NULL;
   if (ignore->tessellation_state)
      fix_tmp->infos[index].pTessellationState = NULL;
   if (ignore->viewport_state)
      fix_tmp->infos[index].pViewportState = NULL;
   if (ignore->rasterization_state)
      fix_tmp->infos[index].pRasterizationState = NULL;
   if (ignore->multisample_state)
      fix_tmp->infos[index].pMultisampleState = NULL;
   if (ignore->depth_stencil_state)
      fix_tmp->infos[index].pDepthStencilState = NULL;
   if (ignore->color_blend_state)
      fix_tmp->infos[index].pColorBlendState = NULL;
   if (ignore->pipeline_layout)
      fix_tmp->infos[index].layout = VK_NULL_HANDLE;
   if (ignore->base_pipeline_handle)
      fix_tmp->infos[index].basePipelineHandle = VK_NULL_HANDLE;

   /* VkPipelineMultisampleStateCreateInfo */
   if (ignore->multisample_state_sample_mask) {
      /* Swap original pMultisampleState with temporary state. */
      fix_tmp->multisample_state_infos[index] = *info->pMultisampleState;
      fix_tmp->infos[index].pMultisampleState =
         &fix_tmp->multisample_state_infos[index];

      fix_tmp->multisample_state_infos[index].pSampleMask = NULL;
   }

   /* VkPipelineViewportStateCreateInfo */
   if (ignore->viewport_state_viewports || ignore->viewport_state_scissors) {
      /* Swap original pViewportState with temporary state. */
      fix_tmp->viewport_state_infos[index] = *info->pViewportState;
      fix_tmp->infos[index].pViewportState =
         &fix_tmp->viewport_state_infos[index];

      if (ignore->viewport_state_viewports)
         fix_tmp->viewport_state_infos[index].pViewports = NULL;
      if (ignore->viewport_state_scissors)
         fix_tmp->viewport_state_infos[index].pScissors = NULL;
   }
}

static void
vn_graphics_pipeline_create_info_pnext_init(
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_fix_tmp *fix_tmp,
   uint32_t index)
{
   VkGraphicsPipelineLibraryCreateInfoEXT *gpl = &fix_tmp->gpl_infos[index];
   VkPipelineCreationFeedbackCreateInfo *feedback =
      &fix_tmp->feedback_infos[index];
   VkPipelineFragmentShadingRateStateCreateInfoKHR *fsr =
      &fix_tmp->fsr_infos[index];
   VkPipelineLibraryCreateInfoKHR *library = &fix_tmp->library_infos[index];
   VkPipelineRenderingCreateInfo *rendering =
      &fix_tmp->rendering_infos[index];

   VkBaseOutStructure *cur = (void *)&fix_tmp->infos[index];

   vk_foreach_struct_const(src, info->pNext) {
      void *next = NULL;
      switch (src->sType) {
      case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT:
         memcpy(gpl, src, sizeof(*gpl));
         next = gpl;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
         memcpy(feedback, src, sizeof(*feedback));
         next = feedback;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
         memcpy(fsr, src, sizeof(*fsr));
         next = fsr;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR:
         memcpy(library, src, sizeof(*library));
         next = library;
         break;
      case VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO:
         memcpy(rendering, src, sizeof(*rendering));
         next = rendering;
         break;
      default:
         break;
      }

      if (next) {
         cur->pNext = next;
         cur = next;
      }
   }

   cur->pNext = NULL;
}

static void
vn_fix_graphics_pipeline_create_info_pnext(
   const struct vn_graphics_pipeline_info_pnext *ignore,
   const VkGraphicsPipelineCreateInfo *info,
   struct vn_graphics_pipeline_fix_tmp *fix_tmp,
   uint32_t index)
{
   /* initialize pNext chain with allocated tmp storage */
   vn_graphics_pipeline_create_info_pnext_init(info, fix_tmp, index);

   /* VkPipelineRenderingCreateInfo */
   if (ignore->rendering_info_formats) {
      fix_tmp->rendering_infos[index].colorAttachmentCount = 0;
      fix_tmp->rendering_infos[index].pColorAttachmentFormats = NULL;
   }
}

static const VkGraphicsPipelineCreateInfo *
vn_fix_graphics_pipeline_create_infos(
   struct vn_device *dev,
   uint32_t info_count,
   const VkGraphicsPipelineCreateInfo *infos,
   const struct vn_graphics_pipeline_fix_desc fix_descs[info_count],
   struct vn_graphics_pipeline_fix_tmp **out_fix_tmp,
   const VkAllocationCallbacks *alloc)
{
   uint32_t self_mask = 0;
   uint32_t pnext_mask = 0;
   for (uint32_t i = 0; i < info_count; i++) {
      self_mask |= fix_descs[i].self.mask;
      pnext_mask |= fix_descs[i].pnext.mask;
   }

   if (!self_mask && !pnext_mask) {
      /* No fix is needed. */
      *out_fix_tmp = NULL;
      return infos;
   }

   /* tell whether fixes are applied in tracing */
   VN_TRACE_SCOPE("sanitize pipeline");

   struct vn_graphics_pipeline_fix_tmp *fix_tmp =
      vn_graphics_pipeline_fix_tmp_alloc(alloc, info_count, pnext_mask);
   if (!fix_tmp)
      return NULL;

   memcpy(fix_tmp->infos, infos, info_count * sizeof(infos[0]));

   for (uint32_t i = 0; i < info_count; i++) {
      if (fix_descs[i].self.mask) {
         vn_fix_graphics_pipeline_create_info_self(&fix_descs[i].self,
                                                   &infos[i], fix_tmp, i);
      }
      if (fix_descs[i].pnext.mask) {
         vn_fix_graphics_pipeline_create_info_pnext(&fix_descs[i].pnext,
                                                    &infos[i], fix_tmp, i);
      }
   }

   *out_fix_tmp = fix_tmp;
   return fix_tmp->infos;
}
1505
/**
 * We invalidate each VkPipelineCreationFeedback. This is a legal but useless
 * implementation.
 *
 * We invalidate because the venus protocol (as of 2022-08-25) does not know
 * that the VkPipelineCreationFeedback structs in the
 * VkGraphicsPipelineCreateInfo pNext chain are output parameters. Before
 * VK_EXT_pipeline_creation_feedback, the pNext chain was input-only.
 */
static void
vn_invalidate_pipeline_creation_feedback(const VkBaseInStructure *chain)
{
   const VkPipelineCreationFeedbackCreateInfo *feedback_info =
      vk_find_struct_const(chain, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);

   if (!feedback_info)
      return;

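   /* Clearing flags drops VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT, which
    * marks the feedback as invalid.
    */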
   feedback_info->pPipelineCreationFeedback->flags = 0;

   for (uint32_t i = 0; i < feedback_info->pipelineStageCreationFeedbackCount;
        i++)
      feedback_info->pPipelineStageCreationFeedbacks[i].flags = 0;
}

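/**
 * The flow below: fill per-pipeline state and fix descriptions, sanitize the
 * create infos where required, then encode the creation either synchronously
 * or asynchronously depending on the create flags and the target ring.
 */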
VkResult
vn_CreateGraphicsPipelines(VkDevice device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   bool want_sync = false;
   VkResult result;

   /* Silence a -Wmaybe-uninitialized false alarm on release builds with
    * gcc.
    */
   if (!createInfoCount)
      return VK_SUCCESS;

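   /* Zero-initialize so that any handle left uncreated on failure is
    * VK_NULL_HANDLE rather than garbage.
    */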
   memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);

   if (!vn_create_pipeline_handles(dev, VN_PIPELINE_TYPE_GRAPHICS,
                                   createInfoCount, pPipelines, alloc)) {
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   STACK_ARRAY(struct vn_graphics_pipeline_fix_desc, fix_descs,
               createInfoCount);
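   /* Fill each pipeline's graphics state and record, per create info, which
    * fields must be erased before encoding.
    */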
   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_graphics_pipeline *pipeline =
         vn_graphics_pipeline_from_handle(pPipelines[i]);
      vn_graphics_pipeline_state_fill(&pCreateInfos[i], &pipeline->state,
                                      &fix_descs[i]);
   }

   struct vn_graphics_pipeline_fix_tmp *fix_tmp = NULL;
   pCreateInfos = vn_fix_graphics_pipeline_create_infos(
      dev, createInfoCount, pCreateInfos, fix_descs, &fix_tmp, alloc);
   if (!pCreateInfos) {
      vn_destroy_pipeline_handles(dev, createInfoCount, pPipelines, alloc);
      STACK_ARRAY_FINISH(fix_descs);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline = vn_pipeline_from_handle(pPipelines[i]);
      struct vn_pipeline_layout *layout =
         vn_pipeline_layout_from_handle(pCreateInfos[i].layout);
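      /* Keep the layout alive with the pipeline: later command recording may
       * still need it for push descriptors or push constants after the app
       * has destroyed the layout.
       */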
      if (layout && (layout->push_descriptor_set_layout ||
                     layout->has_push_constant_ranges)) {
         pipeline->layout = vn_pipeline_layout_ref(dev, layout);
      }

      if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK))
         want_sync = true;

      vn_invalidate_pipeline_creation_feedback(
         (const VkBaseInStructure *)pCreateInfos[i].pNext);
   }

   struct vn_ring *target_ring = vn_get_target_ring(dev);
   if (!target_ring) {
      vk_free(alloc, fix_tmp);
      vn_destroy_pipeline_handles(dev, createInfoCount, pPipelines, alloc);
      STACK_ARRAY_FINISH(fix_descs);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

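   /* Creation that must report its result, or that targets a secondary
    * ring, goes through a synchronous call; otherwise it is encoded
    * asynchronously on the primary ring and assumed to succeed.
    */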
   if (want_sync || target_ring != dev->primary_ring) {
      if (target_ring == dev->primary_ring) {
         VN_TRACE_SCOPE("want sync");
      }

      result = vn_call_vkCreateGraphicsPipelines(
         target_ring, device, pipelineCache, createInfoCount, pCreateInfos,
         NULL, pPipelines);
      if (result != VK_SUCCESS)
         vn_destroy_failed_pipeline_handles(dev, createInfoCount, pPipelines,
                                            alloc);
   } else {
      vn_async_vkCreateGraphicsPipelines(target_ring, device, pipelineCache,
                                         createInfoCount, pCreateInfos, NULL,
                                         pPipelines);
      result = VK_SUCCESS;
   }

   vk_free(alloc, fix_tmp);
   STACK_ARRAY_FINISH(fix_descs);
   return vn_result(dev->instance, result);
}

VkResult
vn_CreateComputePipelines(VkDevice device,
                          VkPipelineCache pipelineCache,
                          uint32_t createInfoCount,
                          const VkComputePipelineCreateInfo *pCreateInfos,
                          const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   bool want_sync = false;
   VkResult result;

   memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);

   if (!vn_create_pipeline_handles(dev, VN_PIPELINE_TYPE_COMPUTE,
                                   createInfoCount, pPipelines, alloc))
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < createInfoCount; i++) {
      struct vn_pipeline *pipeline = vn_pipeline_from_handle(pPipelines[i]);
      struct vn_pipeline_layout *layout =
         vn_pipeline_layout_from_handle(pCreateInfos[i].layout);
      if (layout->push_descriptor_set_layout ||
          layout->has_push_constant_ranges) {
         pipeline->layout = vn_pipeline_layout_ref(dev, layout);
      }
      if ((pCreateInfos[i].flags & VN_PIPELINE_CREATE_SYNC_MASK))
         want_sync = true;

      vn_invalidate_pipeline_creation_feedback(
         (const VkBaseInStructure *)pCreateInfos[i].pNext);
   }

   struct vn_ring *target_ring = vn_get_target_ring(dev);
   if (!target_ring) {
      vn_destroy_pipeline_handles(dev, createInfoCount, pPipelines, alloc);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

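   /* See vn_CreateGraphicsPipelines for the sync/async dispatch rationale. */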
   if (want_sync || target_ring != dev->primary_ring) {
      result = vn_call_vkCreateComputePipelines(
         target_ring, device, pipelineCache, createInfoCount, pCreateInfos,
         NULL, pPipelines);
      if (result != VK_SUCCESS)
         vn_destroy_failed_pipeline_handles(dev, createInfoCount, pPipelines,
                                            alloc);
   } else {
      vn_async_vkCreateComputePipelines(target_ring, device, pipelineCache,
                                        createInfoCount, pCreateInfos, NULL,
                                        pPipelines);
      result = VK_SUCCESS;
   }

   return vn_result(dev->instance, result);
}

void
vn_DestroyPipeline(VkDevice device,
                   VkPipeline _pipeline,
                   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!pipeline)
      return;

   if (pipeline->layout) {
      vn_pipeline_layout_unref(dev, pipeline->layout);
   }

   vn_async_vkDestroyPipeline(dev->primary_ring, device, _pipeline, NULL);

   vn_object_base_fini(&pipeline->base);
   vk_free(alloc, pipeline);
}