/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include <llvm/Config/llvm-config.h>

#include "util/macros.h"
#include "util/list.h"
#include "util/u_dynarray.h"
#include "util/simple_mtx.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "compiler/shader_enums.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "cso_cache/cso_context.h"
#include "nir.h"

#ifdef HAVE_LIBDRM
#include <drm-uapi/drm.h>
#include "drm-uapi/drm_fourcc.h"
#endif

#if DETECT_OS_ANDROID
#include <vndk/hardware_buffer.h>
#endif

/* Forward declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>

#include "lvp_entrypoints.h"
#include "vk_acceleration_structure.h"
#include "vk_buffer.h"
#include "vk_buffer_view.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_image.h"
#include "vk_log.h"
#include "vk_physical_device.h"
#include "vk_shader_module.h"
#include "vk_util.h"
#include "vk_format.h"
#include "vk_cmd_queue.h"
#include "vk_command_buffer.h"
#include "vk_command_pool.h"
#include "vk_descriptor_set_layout.h"
#include "vk_graphics_state.h"
#include "vk_pipeline_layout.h"
#include "vk_queue.h"
#include "vk_sampler.h"
#include "vk_sync.h"
#include "vk_sync_timeline.h"
#include "vk_ycbcr_conversion.h"
#include "lp_jit.h"

#include "wsi_common.h"

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_SETS 8
#define MAX_DESCRIPTORS 1000000 /* Required by vkd3d-proton */
#define MAX_PUSH_CONSTANTS_SIZE 256
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DESCRIPTOR_UNIFORM_BLOCK_SIZE 4096
#define MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS 8
#define MAX_DGC_STREAMS 16
#define MAX_DGC_TOKENS 16
/* Currently lavapipe does not support more than 1 image plane */
#define LVP_MAX_PLANE_COUNT 1

#ifdef _WIN32
#define lvp_printflike(a, b)
#else
#define lvp_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
#endif

#define LVP_DEBUG_ALL_ENTRYPOINTS (1 << 0)

void __lvp_finishme(const char *file, int line, const char *format, ...)
   lvp_printflike(3, 4);

#define lvp_finishme(format, ...) \
   __lvp_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)

#define stub_return(v) \
   do { \
      lvp_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      lvp_finishme("stub %s", __func__); \
      return; \
   } while (0)
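
/*
 * A minimal usage sketch for the stub helpers above; the entrypoint name and
 * result code are hypothetical and only illustrate the pattern of flagging an
 * unimplemented path while still returning a defined VkResult:
 *
 *    VKAPI_ATTR VkResult VKAPI_CALL
 *    lvp_SomeUnimplementedEntrypoint(VkDevice _device)
 *    {
 *       stub_return(VK_ERROR_FEATURE_NOT_PRESENT);
 *    }
 */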

#define LVP_SHADER_STAGES (MESA_SHADER_CALLABLE + 1)
#define LVP_STAGE_MASK BITFIELD_MASK(LVP_SHADER_STAGES)
#define LVP_STAGE_MASK_GFX (BITFIELD_MASK(PIPE_SHADER_MESH_TYPES) & ~BITFIELD_BIT(MESA_SHADER_COMPUTE))

#define lvp_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & LVP_STAGE_MASK); \
        stage = ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))

#define lvp_forall_stage(stage) \
   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < LVP_SHADER_STAGES; stage++)

#define lvp_forall_gfx_stage(stage) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)(LVP_STAGE_MASK_GFX); \
        stage = ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
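
/*
 * Usage sketch for the iterators above (the mask and loop body are
 * illustrative, not taken from the driver): lvp_foreach_stage() walks only
 * the gl_shader_stage bits set in a mask, lowest stage first, while
 * lvp_forall_stage() visits every stage the driver knows about:
 *
 *    uint32_t mask = BITFIELD_BIT(MESA_SHADER_VERTEX) |
 *                    BITFIELD_BIT(MESA_SHADER_FRAGMENT);
 *    lvp_foreach_stage(s, mask) {
 *       // s is MESA_SHADER_VERTEX, then MESA_SHADER_FRAGMENT
 *    }
 */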

struct lvp_physical_device {
   struct vk_physical_device vk;

   struct pipe_loader_device *pld;
   struct pipe_screen *pscreen;
   const nir_shader_compiler_options *drv_options[LVP_SHADER_STAGES];
   uint32_t max_images;
   bool snorm_blend;

   struct vk_sync_timeline_type sync_timeline_type;
   const struct vk_sync_type *sync_types[3];

   struct wsi_device wsi_device;
};

struct lvp_instance {
   struct vk_instance vk;

   uint32_t apiVersion;

   uint64_t debug_flags;

   struct pipe_loader_device *devs;
   int num_devices;
};

VkResult lvp_init_wsi(struct lvp_physical_device *physical_device);
void lvp_finish_wsi(struct lvp_physical_device *physical_device);

bool lvp_physical_device_extension_supported(struct lvp_physical_device *dev,
                                             const char *name);

struct lvp_queue {
   struct vk_queue vk;
   struct lvp_device *device;
   struct pipe_context *ctx;
   struct cso_context *cso;
   struct u_upload_mgr *uploader;
   struct pipe_fence_handle *last_fence;
   void *state;
   struct util_dynarray pipeline_destroys;
   simple_mtx_t lock;
};

struct lvp_pipeline_cache {
   struct vk_object_base base;
   struct lvp_device *device;
   VkAllocationCallbacks alloc;
};

struct lvp_device {
   struct vk_device vk;

   struct lvp_queue queue;
   struct lvp_instance *instance;
   struct lvp_physical_device *physical_device;
   struct pipe_screen *pscreen;
   void *noop_fs;
   simple_mtx_t bda_lock;
   struct hash_table bda;
   struct pipe_resource *zero_buffer; /* for zeroed bda */
   bool poison_mem;
   bool print_cmds;

   struct lp_texture_handle *null_texture_handle;
   struct lp_texture_handle *null_image_handle;
   struct util_dynarray bda_texture_handles;
   struct util_dynarray bda_image_handles;

   uint32_t group_handle_alloc;
};

void lvp_device_get_cache_uuid(void *uuid);

enum lvp_device_memory_type {
   LVP_DEVICE_MEMORY_TYPE_DEFAULT,
   LVP_DEVICE_MEMORY_TYPE_USER_PTR,
   LVP_DEVICE_MEMORY_TYPE_OPAQUE_FD,
   LVP_DEVICE_MEMORY_TYPE_DMA_BUF,
};

struct lvp_device_memory {
   struct vk_object_base base;
   struct pipe_memory_allocation *pmem;
   struct llvmpipe_memory_allocation mem_alloc;
   uint32_t type_index;
   VkDeviceSize map_size;
   VkDeviceSize size;
   void *map;
   enum lvp_device_memory_type memory_type;
   int backed_fd;
#ifdef PIPE_MEMORY_FD
   struct llvmpipe_memory_allocation *alloc;
#endif
#if DETECT_OS_ANDROID
   struct AHardwareBuffer *android_hardware_buffer;
#endif
};

struct lvp_pipe_sync {
   struct vk_sync base;

   mtx_t lock;
   cnd_t changed;

   bool signaled;
   struct pipe_fence_handle *fence;
};

extern const struct vk_sync_type lvp_pipe_sync_type;

void lvp_pipe_sync_signal_with_fence(struct lvp_device *device,
                                     struct lvp_pipe_sync *sync,
                                     struct pipe_fence_handle *fence);

static inline struct lvp_pipe_sync *
vk_sync_as_lvp_pipe_sync(struct vk_sync *sync)
{
   assert(sync->type == &lvp_pipe_sync_type);
   return container_of(sync, struct lvp_pipe_sync, base);
}

struct lvp_image_plane {
   struct pipe_resource *bo;
   struct pipe_memory_allocation *pmem;
   VkDeviceSize plane_offset;
   VkDeviceSize memory_offset;
   VkDeviceSize size;
};

struct lvp_image {
   struct vk_image vk;
   VkDeviceSize offset;
   VkDeviceSize size;
   uint32_t alignment;
   bool disjoint;
   uint8_t plane_count;
   struct lvp_image_plane planes[3];
};

struct lvp_image_view {
   struct vk_image_view vk;
   const struct lvp_image *image; /**< VkImageViewCreateInfo::image */

   enum pipe_format pformat;

   struct pipe_surface *surface; /* have we created a pipe surface for this? */
   struct lvp_image_view *multisample; // VK_EXT_multisampled_render_to_single_sampled

   uint8_t plane_count;
   struct {
      unsigned image_plane;
      struct pipe_sampler_view *sv;
      struct pipe_image_view iv;
      struct lp_texture_handle *texture_handle;
      struct lp_texture_handle *image_handle;
   } planes[3];
};

struct lvp_sampler {
   struct vk_sampler vk;
   struct lp_descriptor desc;

   struct lp_texture_handle *texture_handle;
};

struct lvp_descriptor_set_binding_layout {
   uint32_t descriptor_index;
   VkDescriptorType type;
   uint32_t stride; /* used for planar samplers */
   /* Number of array elements in this binding */
   uint32_t array_size;
   bool valid;

   uint32_t dynamic_index;

   uint32_t uniform_block_offset;
   uint32_t uniform_block_size;

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct lvp_sampler **immutable_samplers;
};

struct lvp_descriptor_set_layout {
   struct vk_descriptor_set_layout vk;

   /* add new members after this */

   uint32_t immutable_sampler_count;

   /* Number of bindings in this descriptor set */
   uint32_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint32_t size;

   /* Shader stages affected by this descriptor set */
   uint32_t shader_stages;

   /* Number of dynamic offsets used by this descriptor set */
   uint32_t dynamic_offset_count;

   /* If this layout consists solely of immutable samplers, this will be a bindable set */
   struct lvp_descriptor_set *immutable_set;

   /* Bindings in this descriptor set */
   struct lvp_descriptor_set_binding_layout binding[0];
};

static inline const struct lvp_descriptor_set_layout *
vk_to_lvp_descriptor_set_layout(const struct vk_descriptor_set_layout *layout)
{
   return container_of(layout, const struct lvp_descriptor_set_layout, vk);
}

struct lvp_descriptor_set {
   struct vk_object_base base;
   struct lvp_descriptor_set_layout *layout;
   struct list_head link;

   /* Buffer holding the descriptors. */
   struct pipe_memory_allocation *pmem;
   struct pipe_resource *bo;
   void *map;
};

struct lvp_descriptor_pool {
   struct vk_object_base base;
   VkDescriptorPoolCreateFlags flags;
   uint32_t max_sets;

   struct list_head sets;
};

uint32_t lvp_descriptor_update_template_entry_size(VkDescriptorType type);

VkResult
lvp_descriptor_set_create(struct lvp_device *device,
                          struct lvp_descriptor_set_layout *layout,
                          struct lvp_descriptor_set **out_set);

void
lvp_descriptor_set_destroy(struct lvp_device *device,
                           struct lvp_descriptor_set *set);

void
lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descriptorSet,
                                        VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                        const void *pData);

struct lvp_pipeline_layout {
   struct vk_pipeline_layout vk;

   uint32_t push_constant_size;
   VkShaderStageFlags push_constant_stages;
};

struct lvp_pipeline_layout *
lvp_pipeline_layout_create(struct lvp_device *device,
                           const VkPipelineLayoutCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *pAllocator);

struct lvp_pipeline_nir {
   int ref_cnt;
   nir_shader *nir;
};

struct lvp_pipeline_nir *
lvp_create_pipeline_nir(nir_shader *nir);

static inline void
lvp_pipeline_nir_ref(struct lvp_pipeline_nir **dst, struct lvp_pipeline_nir *src)
{
   struct lvp_pipeline_nir *old_dst = *dst;
   if (old_dst == src || (old_dst && src && old_dst->nir == src->nir))
      return;

   if (old_dst && p_atomic_dec_zero(&old_dst->ref_cnt)) {
      ralloc_free(old_dst->nir);
      ralloc_free(old_dst);
   }
   if (src)
      p_atomic_inc(&src->ref_cnt);
   *dst = src;
}
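
/*
 * Reference-counting sketch for lvp_pipeline_nir_ref() above (variable names
 * are illustrative): pointing the destination at a new source takes a
 * reference on it and drops the old one, freeing the shader when the count
 * reaches zero:
 *
 *    struct lvp_pipeline_nir *ref = NULL;
 *    lvp_pipeline_nir_ref(&ref, nir);    // ref now holds a reference on nir
 *    lvp_pipeline_nir_ref(&ref, NULL);   // reference dropped; freed at zero
 */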

struct lvp_inline_variant {
   uint32_t mask;
   uint32_t vals[PIPE_MAX_CONSTANT_BUFFERS][MAX_INLINABLE_UNIFORMS];
   void *cso;
};

struct lvp_shader {
   struct vk_object_base base;
   struct lvp_pipeline_layout *layout;
   struct lvp_pipeline_nir *pipeline_nir;
   struct lvp_pipeline_nir *tess_ccw;
   void *shader_cso;
   void *tess_ccw_cso;
   struct {
      uint32_t uniform_offsets[PIPE_MAX_CONSTANT_BUFFERS][MAX_INLINABLE_UNIFORMS];
      uint8_t count[PIPE_MAX_CONSTANT_BUFFERS];
      bool must_inline;
      uint32_t can_inline; // bitmask
      struct set variants;
   } inlines;
   struct pipe_stream_output_info stream_output;
   struct blob blob; // preserved for GetShaderBinaryDataEXT
};

enum lvp_pipeline_type {
   LVP_PIPELINE_GRAPHICS,
   LVP_PIPELINE_COMPUTE,
   LVP_PIPELINE_RAY_TRACING,
   LVP_PIPELINE_EXEC_GRAPH,
   LVP_PIPELINE_TYPE_COUNT,
};

static inline enum lvp_pipeline_type
lvp_pipeline_type_from_bind_point(VkPipelineBindPoint bind_point)
{
   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS: return LVP_PIPELINE_GRAPHICS;
   case VK_PIPELINE_BIND_POINT_COMPUTE: return LVP_PIPELINE_COMPUTE;
   case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: return LVP_PIPELINE_RAY_TRACING;
#ifdef VK_ENABLE_BETA_EXTENSIONS
   case VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX: return LVP_PIPELINE_EXEC_GRAPH;
#endif
   default: unreachable("Unsupported VkPipelineBindPoint");
   }
}

#define LVP_RAY_TRACING_STAGES (VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR | \
                                VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR | \
                                VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR)

static inline uint32_t
lvp_pipeline_types_from_shader_stages(VkShaderStageFlags stageFlags)
{
   uint32_t types = 0;
#ifdef VK_ENABLE_BETA_EXTENSIONS
   if (stageFlags & MESA_VK_SHADER_STAGE_WORKGRAPH_HACK_BIT_FIXME)
      types |= BITFIELD_BIT(LVP_PIPELINE_EXEC_GRAPH);
#endif
   if (stageFlags & LVP_RAY_TRACING_STAGES)
      types |= BITFIELD_BIT(LVP_PIPELINE_RAY_TRACING);
   if (stageFlags & VK_SHADER_STAGE_COMPUTE_BIT)
      types |= BITFIELD_BIT(LVP_PIPELINE_COMPUTE);
   if (stageFlags & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_MESH_BIT_EXT | VK_SHADER_STAGE_TASK_BIT_EXT))
      types |= BITFIELD_BIT(LVP_PIPELINE_GRAPHICS);
   return types;
}
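
/*
 * A worked example for the mapping above (values illustrative): a stage mask
 * combining compute and fragment work maps to two pipeline-type bits, since
 * VK_SHADER_STAGE_FRAGMENT_BIT is part of VK_SHADER_STAGE_ALL_GRAPHICS:
 *
 *    uint32_t t = lvp_pipeline_types_from_shader_stages(
 *       VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
 *    // t == BITFIELD_BIT(LVP_PIPELINE_COMPUTE) |
 *    //      BITFIELD_BIT(LVP_PIPELINE_GRAPHICS)
 */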

#define LVP_RAY_TRACING_GROUP_HANDLE_SIZE 32
#define LVP_RAY_HIT_ATTRIBS_SIZE 32

struct lvp_ray_tracing_group_handle {
   uint32_t index;
};

struct lvp_ray_tracing_group {
   struct lvp_ray_tracing_group_handle handle;
   uint32_t recursive_index;
   uint32_t ahit_index;
   uint32_t isec_index;
};

struct lvp_pipeline {
   struct vk_object_base base;
   struct lvp_device *device;
   struct lvp_pipeline_layout *layout;

   enum lvp_pipeline_type type;
   VkPipelineCreateFlags2KHR flags;

   void *state_data;
   bool force_min_sample;
   struct lvp_shader shaders[LVP_SHADER_STAGES];
   gl_shader_stage last_vertex;
   struct vk_graphics_pipeline_state graphics_state;
   VkGraphicsPipelineLibraryFlagsEXT stages;
   bool line_smooth;
   bool disable_multisample;
   bool line_rectangular;
   bool library;
   bool compiled;
   bool used;

   struct {
      const char *name;
      const char *next_name;
      uint32_t index;
      uint32_t scratch_size;
   } exec_graph;

   struct {
      struct lvp_pipeline_nir **stages;
      struct lvp_ray_tracing_group *groups;
      uint32_t stage_count;
      uint32_t group_count;
   } rt;

   unsigned num_groups;
   unsigned num_groups_total;
   VkPipeline groups[0];
};

/* Minimum required by the spec. */
#define LVP_MAX_EXEC_GRAPH_PAYLOADS 256

struct lvp_exec_graph_shader_output {
   uint32_t payload_count;
   uint32_t node_index;
};

struct lvp_exec_graph_internal_data {
   /* inputs */
   void *payload_in;
   void *payloads;
   /* outputs */
   struct lvp_exec_graph_shader_output outputs[LVP_MAX_EXEC_GRAPH_PAYLOADS];
};

bool
lvp_lower_exec_graph(struct lvp_pipeline *pipeline, nir_shader *nir);

void
lvp_pipeline_shaders_compile(struct lvp_pipeline *pipeline, bool locked);

struct lvp_event {
   struct vk_object_base base;
   volatile uint64_t event_storage;
};

struct lvp_buffer {
   struct vk_buffer vk;

   struct lvp_device_memory *mem;
   struct pipe_resource *bo;
   uint64_t total_size;
   uint64_t offset;
   void *map;
   struct pipe_transfer *transfer;
};

struct lvp_buffer_view {
   struct vk_buffer_view vk;
   enum pipe_format pformat;
   struct pipe_sampler_view *sv;
   struct pipe_image_view iv;

   struct lp_texture_handle *texture_handle;
   struct lp_texture_handle *image_handle;
};

#define LVP_QUERY_ACCELERATION_STRUCTURE_COMPACTED_SIZE (PIPE_QUERY_TYPES)
#define LVP_QUERY_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE (PIPE_QUERY_TYPES + 1)
#define LVP_QUERY_ACCELERATION_STRUCTURE_SIZE (PIPE_QUERY_TYPES + 2)
#define LVP_QUERY_ACCELERATION_STRUCTURE_INSTANCE_COUNT (PIPE_QUERY_TYPES + 3)

struct lvp_query_pool {
   struct vk_object_base base;
   VkQueryType type;
   uint32_t count;
   VkQueryPipelineStatisticFlags pipeline_stats;
   enum pipe_query_type base_type;
   void *data; /* Used by queries that are not implemented by pipe_query */
   struct pipe_query *queries[0];
};

struct lvp_cmd_buffer {
   struct vk_command_buffer vk;

   struct lvp_device *device;

   uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
};

struct lvp_indirect_command_layout_nv {
   struct vk_object_base base;
   uint8_t stream_count;
   uint8_t token_count;
   uint16_t stream_strides[MAX_DGC_STREAMS];
   VkPipelineBindPoint bind_point;
   VkIndirectCommandsLayoutUsageFlagsNV flags;
   VkIndirectCommandsLayoutTokenNV tokens[0];
};

extern const struct vk_command_buffer_ops lvp_cmd_buffer_ops;

static inline const struct lvp_descriptor_set_layout *
get_set_layout(const struct lvp_pipeline_layout *layout, uint32_t set)
{
   return container_of(layout->vk.set_layouts[set],
                       const struct lvp_descriptor_set_layout, vk);
}

static inline const struct lvp_descriptor_set_binding_layout *
get_binding_layout(const struct lvp_pipeline_layout *layout,
                   uint32_t set, uint32_t binding)
{
   return &get_set_layout(layout, set)->binding[binding];
}

#define LVP_FROM_HANDLE(__lvp_type, __name, __handle) \
   struct __lvp_type *__name = __lvp_type ## _from_handle(__handle)
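
/*
 * Expansion sketch: given the handle casts declared below,
 *
 *    LVP_FROM_HANDLE(lvp_device, device, _device);
 *
 * expands to
 *
 *    struct lvp_device *device = lvp_device_from_handle(_device);
 */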

VK_DEFINE_HANDLE_CASTS(lvp_cmd_buffer, vk.base, VkCommandBuffer,
                       VK_OBJECT_TYPE_COMMAND_BUFFER)
VK_DEFINE_HANDLE_CASTS(lvp_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
VK_DEFINE_HANDLE_CASTS(lvp_instance, vk.base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
VK_DEFINE_HANDLE_CASTS(lvp_physical_device, vk.base, VkPhysicalDevice,
                       VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(lvp_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)

VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer, vk.base, VkBuffer,
                               VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer_view, vk.base, VkBufferView,
                               VK_OBJECT_TYPE_BUFFER_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_pool, base, VkDescriptorPool,
                               VK_OBJECT_TYPE_DESCRIPTOR_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set, base, VkDescriptorSet,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_descriptor_set_layout, vk.base, VkDescriptorSetLayout,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_device_memory, base, VkDeviceMemory,
                               VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_image_view, vk.base, VkImageView,
                               VK_OBJECT_TYPE_IMAGE_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_cache, base, VkPipelineCache,
                               VK_OBJECT_TYPE_PIPELINE_CACHE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline, base, VkPipeline,
                               VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_shader, base, VkShaderEXT,
                               VK_OBJECT_TYPE_SHADER_EXT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_layout, vk.base, VkPipelineLayout,
                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_query_pool, base, VkQueryPool,
                               VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_sampler, vk.base, VkSampler,
                               VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_indirect_command_layout_nv, base, VkIndirectCommandsLayoutNV,
                               VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV)

void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp);

VkResult lvp_buffer_bind_sparse(struct lvp_device *device,
                                struct lvp_queue *queue,
                                VkSparseBufferMemoryBindInfo *bind);
VkResult lvp_image_bind_opaque_sparse(struct lvp_device *device,
                                      struct lvp_queue *queue,
                                      VkSparseImageOpaqueMemoryBindInfo *bind);
VkResult lvp_image_bind_sparse(struct lvp_device *device,
                               struct lvp_queue *queue,
                               VkSparseImageMemoryBindInfo *bind);

VkResult lvp_execute_cmds(struct lvp_device *device,
                          struct lvp_queue *queue,
                          struct lvp_cmd_buffer *cmd_buffer);
size_t
lvp_get_rendering_state_size(void);
struct lvp_image *lvp_swapchain_get_image(VkSwapchainKHR swapchain,
                                          uint32_t index);

static inline enum pipe_format
lvp_vk_format_to_pipe_format(VkFormat format)
{
   /* Some formats cause problems with CTS right now. */
   if (format == VK_FORMAT_R4G4B4A4_UNORM_PACK16 ||
       format == VK_FORMAT_R8_SRGB ||
       format == VK_FORMAT_R8G8_SRGB ||
       format == VK_FORMAT_R64G64B64A64_SFLOAT ||
       format == VK_FORMAT_R64_SFLOAT ||
       format == VK_FORMAT_R64G64_SFLOAT ||
       format == VK_FORMAT_R64G64B64_SFLOAT ||
       format == VK_FORMAT_A2R10G10B10_SINT_PACK32 ||
       format == VK_FORMAT_A2B10G10R10_SINT_PACK32 ||
       format == VK_FORMAT_D16_UNORM_S8_UINT)
      return PIPE_FORMAT_NONE;

   return vk_format_to_pipe_format(format);
}

static inline uint8_t
lvp_image_aspects_to_plane(ASSERTED const struct lvp_image *image,
                           VkImageAspectFlags aspectMask)
{
   /* If we are requesting the first plane of an image that has only one plane, return it */
   if (image->vk.aspects == VK_IMAGE_ASPECT_COLOR_BIT && aspectMask == VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT)
      return 0;

   /* Verify that the aspects are actually in the image */
   assert(!(aspectMask & ~image->vk.aspects));

   /* There must be exactly one aspect, unless it's combined depth/stencil */
   assert(aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
                         VK_IMAGE_ASPECT_STENCIL_BIT) ||
          util_bitcount(aspectMask) == 1);

   switch (aspectMask) {
   case VK_IMAGE_ASPECT_PLANE_1_BIT: return 1;
   case VK_IMAGE_ASPECT_PLANE_2_BIT: return 2;
   default: return 0;
   }
}

void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, bool locked);

void
queue_thread_noop(void *data, void *gdata, int thread_index);

VkResult
lvp_spirv_to_nir(struct lvp_pipeline *pipeline, const VkPipelineShaderStageCreateInfo *sinfo,
                 nir_shader **out_nir);

void
lvp_shader_init(struct lvp_shader *shader, nir_shader *nir);

void
lvp_shader_optimize(nir_shader *nir);
bool
lvp_find_inlinable_uniforms(struct lvp_shader *shader, nir_shader *nir);
void
lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint32_t *uniform_values, uint32_t ubo);
void *
lvp_shader_compile(struct lvp_device *device, struct lvp_shader *shader, nir_shader *nir, bool locked);
bool
lvp_nir_lower_ray_queries(struct nir_shader *shader);
bool
lvp_nir_lower_sparse_residency(struct nir_shader *shader);
enum vk_cmd_type
lvp_nv_dgc_token_to_cmd_type(const VkIndirectCommandsLayoutTokenNV *token);

#if DETECT_OS_ANDROID
VkResult
lvp_import_ahb_memory(struct lvp_device *device, struct lvp_device_memory *mem,
                      const VkImportAndroidHardwareBufferInfoANDROID *info);
VkResult
lvp_create_ahb_memory(struct lvp_device *device, struct lvp_device_memory *mem,
                      const VkMemoryAllocateInfo *pAllocateInfo);
#endif

#ifdef __cplusplus
}
#endif