/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "lvp_conv.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_sampler.h"
#include "util/u_surface.h"
#include "pipe/p_state.h"
#include "frontend/winsys_handle.h"
#include "vk_android.h"

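/* Create an lvp_image: translate the VkImageCreateInfo into one unbacked
 * gallium pipe_resource per plane. No memory is allocated here; backing is
 * attached later (at bind time, or via the Android native-buffer import
 * below).
 */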
static VkResult
lvp_image_create(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *alloc,
                 VkImage *pImage)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_image *image;
   VkResult result = VK_SUCCESS;
   bool android_surface = false;
   const VkSubresourceLayout *layouts = NULL;
   uint64_t modifier;
   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);

#ifdef HAVE_LIBDRM
   unsigned num_layouts = 1;
   enum pipe_format pipe_format = lvp_vk_format_to_pipe_format(pCreateInfo->format);
   const VkImageDrmFormatModifierExplicitCreateInfoEXT *modinfo =
      (void *)vk_find_struct_const(pCreateInfo->pNext,
                                   IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);

   if (modinfo && pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      assert(modinfo->drmFormatModifier == DRM_FORMAT_MOD_LINEAR);
      assert(modinfo->drmFormatModifierPlaneCount == util_format_get_num_planes(pipe_format));
      num_layouts = modinfo->drmFormatModifierPlaneCount;
      layouts = modinfo->pPlaneLayouts;
   }

   /* planar not supported yet */
   assert(num_layouts == 1);
   if (num_layouts > 1) {
      mesa_loge("lavapipe: planar drm formats are not supported");
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   }

   modifier = DRM_FORMAT_MOD_LINEAR;
#endif

   image = vk_image_create(&device->vk, pCreateInfo, alloc, sizeof(*image));
   if (image == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

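   /* Base alignment for plane sizes; sparse images use 64 KiB, which appears
    * to match the page granularity used by the sparse binding code below.
    */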
   image->alignment = 64;
   if (image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
      image->alignment = 64 * 1024;

   image->plane_count = vk_format_get_plane_count(pCreateInfo->format);
   image->disjoint = image->plane_count > 1 &&
                     (pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT);

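   /* For Android native buffers the plane layouts and DRM modifier come from
    * the gralloc allocation, so query them and take the imported-layout path.
    */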
   /* This section is removed by the optimizer for non-ANDROID builds */
   VkImageDrmFormatModifierExplicitCreateInfoEXT eci;
   VkSubresourceLayout a_plane_layouts[LVP_MAX_PLANE_COUNT];
   if (vk_image_is_android_native_buffer(&image->vk)) {
      result = vk_android_get_anb_layout(
         pCreateInfo, &eci, a_plane_layouts, LVP_MAX_PLANE_COUNT);
      if (result != VK_SUCCESS)
         goto fail;

      modifier = eci.drmFormatModifier;
      layouts = a_plane_layouts;
      android_surface = true;
   }

   const struct vk_format_ycbcr_info *ycbcr_info =
      vk_format_get_ycbcr_info(pCreateInfo->format);
   for (unsigned p = 0; p < image->plane_count; p++) {
      struct pipe_resource template;
      VkFormat format = ycbcr_info ?
         ycbcr_info->planes[p].format : pCreateInfo->format;
      const uint8_t width_scale = ycbcr_info ?
         ycbcr_info->planes[p].denominator_scales[0] : 1;
      const uint8_t height_scale = ycbcr_info ?
         ycbcr_info->planes[p].denominator_scales[1] : 1;
      memset(&template, 0, sizeof(template));

      template.screen = device->pscreen;
      switch (pCreateInfo->imageType) {
      case VK_IMAGE_TYPE_1D:
         template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_1D_ARRAY : PIPE_TEXTURE_1D;
         break;
      default:
      case VK_IMAGE_TYPE_2D:
         template.target = pCreateInfo->arrayLayers > 1 ? PIPE_TEXTURE_2D_ARRAY : PIPE_TEXTURE_2D;
         break;
      case VK_IMAGE_TYPE_3D:
         template.target = PIPE_TEXTURE_3D;
         break;
      }

      template.format = lvp_vk_format_to_pipe_format(format);

      bool is_ds = util_format_is_depth_or_stencil(template.format);

      if (pCreateInfo->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
         template.bind |= PIPE_BIND_RENDER_TARGET;
         /* sampler view is needed for resolve blits */
         if (pCreateInfo->samples > 1)
            template.bind |= PIPE_BIND_SAMPLER_VIEW;
      }

      if (pCreateInfo->usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
         if (!is_ds)
            template.bind |= PIPE_BIND_RENDER_TARGET;
         else
            template.bind |= PIPE_BIND_DEPTH_STENCIL;
      }

      if (pCreateInfo->usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
         template.bind |= PIPE_BIND_DEPTH_STENCIL;

      if (pCreateInfo->usage & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
         template.bind |= PIPE_BIND_SAMPLER_VIEW;

      if (pCreateInfo->usage & (VK_IMAGE_USAGE_STORAGE_BIT |
                                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
                                VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
         template.bind |= PIPE_BIND_SHADER_IMAGE;

      if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
         template.flags |= PIPE_RESOURCE_FLAG_SPARSE;

      template.width0 = pCreateInfo->extent.width / width_scale;
      template.height0 = pCreateInfo->extent.height / height_scale;
      template.depth0 = pCreateInfo->extent.depth;
      template.array_size = pCreateInfo->arrayLayers;
      template.last_level = pCreateInfo->mipLevels - 1;
      template.nr_samples = pCreateInfo->samples;
      template.nr_storage_samples = pCreateInfo->samples;

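      /* When explicit plane layouts were provided (DRM modifier or ANB),
       * import via an unbacked winsys handle so the resource picks up the
       * caller's pitches and offsets instead of computing its own.
       */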
#ifdef HAVE_LIBDRM
      if (android_surface || (modinfo && pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)) {
         struct winsys_handle whandle;
         whandle.type = WINSYS_HANDLE_TYPE_UNBACKED;
         whandle.layer = 0;
         whandle.plane = p;
         whandle.handle = 0;
         whandle.stride = layouts[p].rowPitch;
         whandle.array_stride = layouts[p].arrayPitch;
         whandle.image_stride = layouts[p].depthPitch;
         image->offset = layouts[p].offset;
         whandle.format = pCreateInfo->format;
         whandle.modifier = modifier;
         image->planes[p].bo = device->pscreen->resource_from_handle(device->pscreen,
                                                                     &template,
                                                                     &whandle,
                                                                     PIPE_HANDLE_USAGE_EXPLICIT_FLUSH);
         image->planes[p].size = whandle.size;
      } else
#endif
      {
         image->planes[p].bo = device->pscreen->resource_create_unbacked(device->pscreen,
                                                                         &template,
                                                                         &image->planes[p].size);
      }
      if (!image->planes[p].bo) {
         result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      image->planes[p].size = align64(image->planes[p].size, image->alignment);

      image->size += image->planes[p].size;
   }

   /* This section is removed by the optimizer for non-ANDROID builds */
   if (vk_image_is_android_native_buffer(&image->vk)) {
      result = vk_android_import_anb(&device->vk, pCreateInfo, alloc,
                                     &image->vk);
      if (result != VK_SUCCESS) {
         mesa_logw("Failed to import memory");
         goto fail;
      }
   }

   *pImage = lvp_image_to_handle(image);

   return VK_SUCCESS;
fail:
   for (unsigned p = 0; p < image->plane_count; p++)
      pipe_resource_reference(&image->planes[p].bo, NULL);
   vk_image_destroy(&device->vk, alloc, &image->vk);
   return result;
}

struct lvp_image *
lvp_swapchain_get_image(VkSwapchainKHR swapchain,
                        uint32_t index)
{
   VkImage image = wsi_common_get_image(swapchain, index);
   return lvp_image_from_handle(image);
}

static VkResult
lvp_image_from_swapchain(VkDevice device,
                         const VkImageCreateInfo *pCreateInfo,
                         const VkImageSwapchainCreateInfoKHR *swapchain_info,
                         const VkAllocationCallbacks *pAllocator,
                         VkImage *pImage)
{
   ASSERTED struct lvp_image *swapchain_image = lvp_swapchain_get_image(swapchain_info->swapchain, 0);
   assert(swapchain_image);

   assert(swapchain_image->vk.image_type == pCreateInfo->imageType);

   VkImageCreateInfo local_create_info;
   local_create_info = *pCreateInfo;
   local_create_info.pNext = NULL;
   /* The following parameters are implicitly selected by the wsi code. */
   local_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
   local_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
   local_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   assert(!(local_create_info.usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT));
   return lvp_image_create(device, &local_create_info, pAllocator,
                           pImage);
}

VKAPI_ATTR VkResult VKAPI_CALL
lvp_CreateImage(VkDevice device,
                const VkImageCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkImage *pImage)
{
#if !DETECT_OS_ANDROID
   const VkImageSwapchainCreateInfoKHR *swapchain_info =
      vk_find_struct_const(pCreateInfo->pNext, IMAGE_SWAPCHAIN_CREATE_INFO_KHR);
   if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE)
      return lvp_image_from_swapchain(device, pCreateInfo, swapchain_info,
                                      pAllocator, pImage);
#endif
   return lvp_image_create(device, pCreateInfo, pAllocator,
                           pImage);
}

VKAPI_ATTR void VKAPI_CALL
lvp_DestroyImage(VkDevice _device, VkImage _image,
                 const VkAllocationCallbacks *pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, image, _image);

   if (!_image)
      return;
   for (unsigned p = 0; p < image->plane_count; p++)
      pipe_resource_reference(&image->planes[p].bo, NULL);
   vk_image_destroy(&device->vk, pAllocator, &image->vk);
}

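/* Collapse swizzles for depth/stencil sampling: only the R channel carries
 * data, so G/B read back 0 and A reads back 1.
 */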
static inline char conv_depth_swiz(char swiz) {
   switch (swiz) {
   case PIPE_SWIZZLE_Y:
   case PIPE_SWIZZLE_Z:
      return PIPE_SWIZZLE_0;
   case PIPE_SWIZZLE_W:
      return PIPE_SWIZZLE_1;
   default:
      return swiz;
   }
}

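/* Create the gallium sampler view backing one plane of a Vulkan image view,
 * using the queue's pipe_context.
 */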
static struct pipe_sampler_view *
lvp_create_samplerview(struct pipe_context *pctx, struct lvp_image_view *iv,
                       VkFormat plane_format, unsigned image_plane)
{
   if (!iv)
      return NULL;

   struct pipe_sampler_view templ;
   enum pipe_format pformat;
   if (iv->vk.aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
      pformat = util_format_stencil_only(lvp_vk_format_to_pipe_format(plane_format));
   else
      pformat = lvp_vk_format_to_pipe_format(plane_format);
   u_sampler_view_default_template(&templ,
                                   iv->image->planes[image_plane].bo,
                                   pformat);
   if (iv->vk.view_type == VK_IMAGE_VIEW_TYPE_1D)
      templ.target = PIPE_TEXTURE_1D;
   if (iv->vk.view_type == VK_IMAGE_VIEW_TYPE_2D)
      templ.target = PIPE_TEXTURE_2D;
   if (iv->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE)
      templ.target = PIPE_TEXTURE_CUBE;
   if (iv->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
      templ.target = PIPE_TEXTURE_CUBE_ARRAY;
   templ.u.tex.first_layer = iv->vk.base_array_layer;
   templ.u.tex.last_layer = iv->vk.base_array_layer + iv->vk.layer_count - 1;
   templ.u.tex.first_level = iv->vk.base_mip_level;
   templ.u.tex.last_level = iv->vk.base_mip_level + iv->vk.level_count - 1;
   templ.swizzle_r = vk_conv_swizzle(iv->vk.swizzle.r, PIPE_SWIZZLE_X);
   templ.swizzle_g = vk_conv_swizzle(iv->vk.swizzle.g, PIPE_SWIZZLE_Y);
   templ.swizzle_b = vk_conv_swizzle(iv->vk.swizzle.b, PIPE_SWIZZLE_Z);
   templ.swizzle_a = vk_conv_swizzle(iv->vk.swizzle.a, PIPE_SWIZZLE_W);

   /* Depth/stencil swizzles need special handling to pass the VK CTS,
    * and also for zink GL tests: piping the A swizzle into R fixes
    * GL_ALPHA depth-texture mode, while swizzling only from R/0/1 (for
    * alpha) fixes the VK CTS tests and a bunch of zink tests.
    */
   if (iv->vk.aspects == VK_IMAGE_ASPECT_DEPTH_BIT ||
       iv->vk.aspects == VK_IMAGE_ASPECT_STENCIL_BIT) {
      templ.swizzle_r = conv_depth_swiz(templ.swizzle_r);
      templ.swizzle_g = conv_depth_swiz(templ.swizzle_g);
      templ.swizzle_b = conv_depth_swiz(templ.swizzle_b);
      templ.swizzle_a = conv_depth_swiz(templ.swizzle_a);
   }

   return pctx->create_sampler_view(pctx, iv->image->planes[image_plane].bo, &templ);
}

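/* Build the pipe_image_view used for storage-image (shader image) access to
 * one plane of an image view.
 */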
static struct pipe_image_view
lvp_create_imageview(const struct lvp_image_view *iv,
                     VkFormat plane_format, unsigned image_plane)
{
   struct pipe_image_view view = {0};
   if (!iv)
      return view;

   view.resource = iv->image->planes[image_plane].bo;
   if (iv->vk.aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
      view.format = util_format_stencil_only(lvp_vk_format_to_pipe_format(plane_format));
   else
      view.format = lvp_vk_format_to_pipe_format(plane_format);

   if (iv->vk.view_type == VK_IMAGE_VIEW_TYPE_3D) {
      view.u.tex.first_layer = iv->vk.storage.z_slice_offset;
      view.u.tex.last_layer = view.u.tex.first_layer + iv->vk.storage.z_slice_count - 1;
   } else {
      view.u.tex.first_layer = iv->vk.base_array_layer;
      view.u.tex.last_layer = iv->vk.base_array_layer + iv->vk.layer_count - 1;

      if (view.resource->target == PIPE_TEXTURE_3D)
         view.u.tex.is_2d_view_of_3d = true;
   }
   view.u.tex.level = iv->vk.base_mip_level;
   return view;
}

VKAPI_ATTR VkResult VKAPI_CALL
lvp_CreateImageView(VkDevice _device,
                    const VkImageViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkImageView *pView)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, image, pCreateInfo->image);
   struct lvp_image_view *view;

   view = vk_image_view_create(&device->vk, false, pCreateInfo,
                               pAllocator, sizeof(*view));
   if (view == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   view->pformat = lvp_vk_format_to_pipe_format(view->vk.format);
   view->image = image;
   view->surface = NULL;

   if (image->vk.aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
                            VK_IMAGE_ASPECT_STENCIL_BIT)) {
      assert(image->plane_count == 1);
      assert(lvp_image_aspects_to_plane(image, view->vk.aspects) == 0);
      view->plane_count = 1;
      view->planes[0].image_plane = 0;
   } else {
      /* For other formats, retrieve the plane count from the aspect mask
       * and then walk through the aspect mask to map each image plane
       * to its corresponding view plane.
       */
      assert(util_bitcount(view->vk.aspects) ==
             vk_format_get_plane_count(view->vk.format));
      view->plane_count = 0;
      u_foreach_bit(aspect_bit, view->vk.aspects) {
         uint8_t image_plane = lvp_image_aspects_to_plane(image, 1u << aspect_bit);
         view->planes[view->plane_count++].image_plane = image_plane;
      }
   }

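   /* Bindless texture/image handles are created on the queue's pipe_context,
    * so serialize against the queue thread with its lock.
    */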
   simple_mtx_lock(&device->queue.lock);

   for (unsigned view_plane = 0; view_plane < view->plane_count; view_plane++) {
      const uint8_t image_plane = view->planes[view_plane].image_plane;
      const struct vk_format_ycbcr_info *ycbcr_info =
         vk_format_get_ycbcr_info(view->vk.format);
      assert(ycbcr_info || view_plane == 0);
      VkFormat plane_format = ycbcr_info ?
         ycbcr_info->planes[view_plane].format : view->vk.format;

      if (image->planes[image_plane].bo->bind & PIPE_BIND_SHADER_IMAGE) {
         view->planes[view_plane].iv = lvp_create_imageview(view, plane_format, image_plane);
         view->planes[view_plane].image_handle = (void *)(uintptr_t)
            device->queue.ctx->create_image_handle(device->queue.ctx, &view->planes[view_plane].iv);
      }

      if (image->planes[image_plane].bo->bind & PIPE_BIND_SAMPLER_VIEW) {
         view->planes[view_plane].sv = lvp_create_samplerview(device->queue.ctx, view, plane_format, image_plane);
         view->planes[view_plane].texture_handle = (void *)(uintptr_t)
            device->queue.ctx->create_texture_handle(device->queue.ctx, view->planes[view_plane].sv, NULL);
      }
   }

   simple_mtx_unlock(&device->queue.lock);

   *pView = lvp_image_view_to_handle(view);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
lvp_DestroyImageView(VkDevice _device, VkImageView _iview,
                     const VkAllocationCallbacks *pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image_view, iview, _iview);

   if (!_iview)
      return;

   simple_mtx_lock(&device->queue.lock);

   for (uint8_t plane = 0; plane < iview->plane_count; plane++) {
      device->queue.ctx->delete_image_handle(device->queue.ctx, (uint64_t)(uintptr_t)iview->planes[plane].image_handle);

      pipe_sampler_view_reference(&iview->planes[plane].sv, NULL);
      device->queue.ctx->delete_texture_handle(device->queue.ctx, (uint64_t)(uintptr_t)iview->planes[plane].texture_handle);
   }
   simple_mtx_unlock(&device->queue.lock);

   pipe_surface_reference(&iview->surface, NULL);
   vk_image_view_destroy(&device->vk, pAllocator, &iview->vk);
}

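/* Subresource layouts are answered straight from the gallium resource via
 * resource_get_param queries (stride, offset, layer stride).
 */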
VKAPI_ATTR void VKAPI_CALL lvp_GetImageSubresourceLayout(
    VkDevice                                    _device,
    VkImage                                     _image,
    const VkImageSubresource*                   pSubresource,
    VkSubresourceLayout*                        pLayout)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, image, _image);
   uint64_t value;

   const uint8_t p = lvp_image_aspects_to_plane(image, pSubresource->aspectMask);
   const struct lvp_image_plane *plane = &image->planes[p];

   device->pscreen->resource_get_param(device->pscreen,
                                       NULL,
                                       plane->bo,
                                       0,
                                       pSubresource->arrayLayer,
                                       pSubresource->mipLevel,
                                       PIPE_RESOURCE_PARAM_STRIDE,
                                       0, &value);

   pLayout->rowPitch = value;

   device->pscreen->resource_get_param(device->pscreen,
                                       NULL,
                                       plane->bo,
                                       0,
                                       pSubresource->arrayLayer,
                                       pSubresource->mipLevel,
                                       PIPE_RESOURCE_PARAM_OFFSET,
                                       0, &value);

   pLayout->offset = value;

   device->pscreen->resource_get_param(device->pscreen,
                                       NULL,
                                       plane->bo,
                                       0,
                                       pSubresource->arrayLayer,
                                       pSubresource->mipLevel,
                                       PIPE_RESOURCE_PARAM_LAYER_STRIDE,
                                       0, &value);

   if (plane->bo->target == PIPE_TEXTURE_3D) {
      pLayout->depthPitch = value;
      pLayout->arrayPitch = 0;
   } else {
      pLayout->depthPitch = 0;
      pLayout->arrayPitch = value;
   }
   pLayout->offset += plane->plane_offset;
   pLayout->size = plane->size;
}

VKAPI_ATTR void VKAPI_CALL lvp_GetImageSubresourceLayout2KHR(
    VkDevice                                    _device,
    VkImage                                     _image,
    const VkImageSubresource2KHR*               pSubresource,
    VkSubresourceLayout2KHR*                    pLayout)
{
   lvp_GetImageSubresourceLayout(_device, _image, &pSubresource->imageSubresource, &pLayout->subresourceLayout);
   VkSubresourceHostMemcpySizeEXT *size = vk_find_struct(pLayout, SUBRESOURCE_HOST_MEMCPY_SIZE_EXT);
   if (size)
      size->size = pLayout->subresourceLayout.size;
}

VKAPI_ATTR void VKAPI_CALL lvp_GetDeviceImageSubresourceLayoutKHR(
    VkDevice                                    _device,
    const VkDeviceImageSubresourceInfoKHR*      pInfo,
    VkSubresourceLayout2KHR*                    pLayout)
{
   VkImage image;
   /* technically supposed to be able to do this without creating an image, but that's harder */
   if (lvp_image_create(_device, pInfo->pCreateInfo, NULL, &image) != VK_SUCCESS)
      return;
   lvp_GetImageSubresourceLayout2KHR(_device, image, pInfo->pSubresource, pLayout);
   lvp_DestroyImage(_device, image, NULL);
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkBuffer*                                   pBuffer)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   struct lvp_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   /* gallium has max 32-bit buffer sizes */
   if (pCreateInfo->size > UINT32_MAX)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   buffer = vk_buffer_create(&device->vk, pCreateInfo,
                             pAllocator, sizeof(*buffer));
   if (buffer == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   {
      struct pipe_resource template;
      memset(&template, 0, sizeof(struct pipe_resource));

      if (pCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
         template.bind |= PIPE_BIND_CONSTANT_BUFFER;

      template.screen = device->pscreen;
      template.target = PIPE_BUFFER;
      template.format = PIPE_FORMAT_R8_UNORM;
      template.width0 = buffer->vk.size;
      template.height0 = 1;
      template.depth0 = 1;
      template.array_size = 1;
      if (buffer->vk.usage & VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR)
         template.bind |= PIPE_BIND_SAMPLER_VIEW;
      if (buffer->vk.usage & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
         template.bind |= PIPE_BIND_SHADER_BUFFER;
      if (buffer->vk.usage & VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR)
         template.bind |= PIPE_BIND_SHADER_IMAGE;
      template.flags = PIPE_RESOURCE_FLAG_DONT_OVER_ALLOCATE;
      if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
         template.flags |= PIPE_RESOURCE_FLAG_SPARSE;
      buffer->bo = device->pscreen->resource_create_unbacked(device->pscreen,
                                                             &template,
                                                             &buffer->total_size);
      if (!buffer->bo) {
         vk_buffer_destroy(&device->vk, pAllocator, &buffer->vk);
         return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
      }

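      /* Sparse buffers get a persistent CPU mapping up front; the pointer
       * doubles as the buffer device address below and stays valid while
       * backing pages are bound and unbound.
       */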
      if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
         buffer->map = device->queue.ctx->buffer_map(device->queue.ctx, buffer->bo, 0,
                                                     PIPE_MAP_READ | PIPE_MAP_WRITE | PIPE_MAP_PERSISTENT,
                                                     &(struct pipe_box){ 0 }, &buffer->transfer);
      }
   }
   *pBuffer = lvp_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL lvp_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer,
    const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);

   if (!_buffer)
      return;

   if (buffer->map) {
      simple_mtx_lock(&device->bda_lock);
      struct hash_entry *he = _mesa_hash_table_search(&device->bda, buffer->map);
      if (he)
         _mesa_hash_table_remove(&device->bda, he);
      simple_mtx_unlock(&device->bda_lock);

      if (buffer->bo->flags & PIPE_RESOURCE_FLAG_SPARSE)
         device->queue.ctx->buffer_unmap(device->queue.ctx, buffer->transfer);
   }
   pipe_resource_reference(&buffer->bo, NULL);
   vk_buffer_destroy(&device->vk, pAllocator, &buffer->vk);
}

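/* lavapipe is a CPU rasterizer, so the "device address" of a buffer is just
 * its CPU mapping; record it in the bda table so the address can be resolved
 * back to the buffer later.
 */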
VKAPI_ATTR VkDeviceAddress VKAPI_CALL lvp_GetBufferDeviceAddress(
    VkDevice                                    _device,
    const VkBufferDeviceAddressInfo*            pInfo)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_buffer, buffer, pInfo->buffer);
   simple_mtx_lock(&device->bda_lock);
   _mesa_hash_table_insert(&device->bda, buffer->map, buffer);
   simple_mtx_unlock(&device->bda_lock);

   return (VkDeviceAddress)(uintptr_t)buffer->map;
}

VKAPI_ATTR uint64_t VKAPI_CALL lvp_GetBufferOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkBufferDeviceAddressInfo*            pInfo)
{
   return 0;
}

VKAPI_ATTR uint64_t VKAPI_CALL lvp_GetDeviceMemoryOpaqueCaptureAddress(
    VkDevice                                    device,
    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
   return 0;
}

static struct pipe_sampler_view *
lvp_create_samplerview_buffer(struct pipe_context *pctx, struct lvp_buffer_view *bv)
{
   if (!bv)
      return NULL;

   struct pipe_resource *bo = ((struct lvp_buffer *)bv->vk.buffer)->bo;
   struct pipe_sampler_view templ;
   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.swizzle_r = PIPE_SWIZZLE_X;
   templ.swizzle_g = PIPE_SWIZZLE_Y;
   templ.swizzle_b = PIPE_SWIZZLE_Z;
   templ.swizzle_a = PIPE_SWIZZLE_W;
   templ.format = bv->pformat;
   templ.u.buf.offset = bv->vk.offset;
   templ.u.buf.size = bv->vk.range;
   templ.texture = bo;
   templ.context = pctx;
   return pctx->create_sampler_view(pctx, bo, &templ);
}

static struct pipe_image_view
lvp_create_imageview_buffer(const struct lvp_buffer_view *bv)
{
   struct pipe_image_view view = {0};
   if (!bv)
      return view;
   view.resource = ((struct lvp_buffer *)bv->vk.buffer)->bo;
   view.format = bv->pformat;
   view.u.buf.offset = bv->vk.offset;
   view.u.buf.size = bv->vk.range;
   return view;
}

VKAPI_ATTR VkResult VKAPI_CALL
lvp_CreateBufferView(VkDevice _device,
                     const VkBufferViewCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkBufferView *pView)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_buffer, buffer, pCreateInfo->buffer);
   struct lvp_buffer_view *view;

   view = vk_buffer_view_create(&device->vk,
                                pCreateInfo,
                                pAllocator,
                                sizeof(*view));
   if (!view)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   view->pformat = lvp_vk_format_to_pipe_format(pCreateInfo->format);

   simple_mtx_lock(&device->queue.lock);

   if (buffer->bo->bind & PIPE_BIND_SAMPLER_VIEW) {
      view->sv = lvp_create_samplerview_buffer(device->queue.ctx, view);
      view->texture_handle = (void *)(uintptr_t)
         device->queue.ctx->create_texture_handle(device->queue.ctx, view->sv, NULL);
   }

   if (buffer->bo->bind & PIPE_BIND_SHADER_IMAGE) {
      view->iv = lvp_create_imageview_buffer(view);
      view->image_handle = (void *)(uintptr_t)
         device->queue.ctx->create_image_handle(device->queue.ctx, &view->iv);
   }

   simple_mtx_unlock(&device->queue.lock);

   *pView = lvp_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
lvp_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
                      const VkAllocationCallbacks *pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_buffer_view, view, bufferView);

   if (!bufferView)
      return;

   simple_mtx_lock(&device->queue.lock);

   pipe_sampler_view_reference(&view->sv, NULL);
   device->queue.ctx->delete_texture_handle(device->queue.ctx, (uint64_t)(uintptr_t)view->texture_handle);

   device->queue.ctx->delete_image_handle(device->queue.ctx, (uint64_t)(uintptr_t)view->image_handle);

   simple_mtx_unlock(&device->queue.lock);

   vk_buffer_view_destroy(&device->vk, pAllocator, &view->vk);
}

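/* VK_EXT_host_image_copy: since lavapipe images live in host memory anyway,
 * memory-to-image copies reduce to a texture_subdata upload per region.
 */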
VKAPI_ATTR VkResult VKAPI_CALL
lvp_CopyMemoryToImageEXT(VkDevice _device, const VkCopyMemoryToImageInfoEXT *pCopyMemoryToImageInfo)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, image, pCopyMemoryToImageInfo->dstImage);
   for (unsigned i = 0; i < pCopyMemoryToImageInfo->regionCount; i++) {
      const VkMemoryToImageCopyEXT *copy = &pCopyMemoryToImageInfo->pRegions[i];
      const VkImageAspectFlagBits aspects = copy->imageSubresource.aspectMask;
      uint8_t plane = lvp_image_aspects_to_plane(image, aspects);
      struct pipe_box box = {
         .x = copy->imageOffset.x,
         .y = copy->imageOffset.y,
         .width = copy->imageExtent.width,
         .height = copy->imageExtent.height,
         .depth = 1,
      };
      switch (image->planes[plane].bo->target) {
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         /* these use layer */
         box.z = copy->imageSubresource.baseArrayLayer;
         box.depth = copy->imageSubresource.layerCount;
         break;
      case PIPE_TEXTURE_3D:
         /* this uses depth */
         box.z = copy->imageOffset.z;
         box.depth = copy->imageExtent.depth;
         break;
      default:
         break;
      }

      unsigned stride = util_format_get_stride(image->planes[plane].bo->format, copy->memoryRowLength ? copy->memoryRowLength : box.width);
      unsigned layer_stride = util_format_get_2d_size(image->planes[plane].bo->format, stride, copy->memoryImageHeight ? copy->memoryImageHeight : box.height);
      device->queue.ctx->texture_subdata(device->queue.ctx, image->planes[plane].bo, copy->imageSubresource.mipLevel, 0,
                                         &box, copy->pHostPointer, stride, layer_stride);
   }
   return VK_SUCCESS;
}

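/* The image-to-memory direction maps the subresource and lets util_copy_box
 * repack it into the caller's row/image pitches.
 */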
VKAPI_ATTR VkResult VKAPI_CALL
lvp_CopyImageToMemoryEXT(VkDevice _device, const VkCopyImageToMemoryInfoEXT *pCopyImageToMemoryInfo)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, image, pCopyImageToMemoryInfo->srcImage);

   for (unsigned i = 0; i < pCopyImageToMemoryInfo->regionCount; i++) {
      const VkImageToMemoryCopyEXT *copy = &pCopyImageToMemoryInfo->pRegions[i];

      const VkImageAspectFlagBits aspects = copy->imageSubresource.aspectMask;
      uint8_t plane = lvp_image_aspects_to_plane(image, aspects);

      struct pipe_box box = {
         .x = copy->imageOffset.x,
         .y = copy->imageOffset.y,
         .width = copy->imageExtent.width,
         .height = copy->imageExtent.height,
         .depth = 1,
      };
      switch (image->planes[plane].bo->target) {
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         /* these use layer */
         box.z = copy->imageSubresource.baseArrayLayer;
         box.depth = copy->imageSubresource.layerCount;
         break;
      case PIPE_TEXTURE_3D:
         /* this uses depth */
         box.z = copy->imageOffset.z;
         box.depth = copy->imageExtent.depth;
         break;
      default:
         break;
      }
      struct pipe_transfer *xfer;
      uint8_t *data = device->queue.ctx->texture_map(device->queue.ctx, image->planes[plane].bo, copy->imageSubresource.mipLevel,
                                                     PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_THREAD_SAFE, &box, &xfer);
      if (!data)
         return VK_ERROR_MEMORY_MAP_FAILED;

      unsigned stride = util_format_get_stride(image->planes[plane].bo->format, copy->memoryRowLength ? copy->memoryRowLength : box.width);
      unsigned layer_stride = util_format_get_2d_size(image->planes[plane].bo->format, stride, copy->memoryImageHeight ? copy->memoryImageHeight : box.height);
      util_copy_box(copy->pHostPointer, image->planes[plane].bo->format, stride, layer_stride,
                    /* offsets are all zero because texture_map handles the offset */
                    0, 0, 0, box.width, box.height, box.depth, data, xfer->stride, xfer->layer_stride, 0, 0, 0);
      pipe_texture_unmap(device->queue.ctx, xfer);
   }
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
lvp_CopyImageToImageEXT(VkDevice _device, const VkCopyImageToImageInfoEXT *pCopyImageToImageInfo)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_image, src_image, pCopyImageToImageInfo->srcImage);
   LVP_FROM_HANDLE(lvp_image, dst_image, pCopyImageToImageInfo->dstImage);

   /* basically the same as handle_copy_image() */
   for (unsigned i = 0; i < pCopyImageToImageInfo->regionCount; i++) {

      const VkImageAspectFlagBits src_aspects = pCopyImageToImageInfo->pRegions[i].srcSubresource.aspectMask;
      uint8_t src_plane = lvp_image_aspects_to_plane(src_image, src_aspects);
      const VkImageAspectFlagBits dst_aspects = pCopyImageToImageInfo->pRegions[i].dstSubresource.aspectMask;
      uint8_t dst_plane = lvp_image_aspects_to_plane(dst_image, dst_aspects);

      struct pipe_box src_box;
      src_box.x = pCopyImageToImageInfo->pRegions[i].srcOffset.x;
      src_box.y = pCopyImageToImageInfo->pRegions[i].srcOffset.y;
      src_box.width = pCopyImageToImageInfo->pRegions[i].extent.width;
      src_box.height = pCopyImageToImageInfo->pRegions[i].extent.height;
      if (src_image->planes[src_plane].bo->target == PIPE_TEXTURE_3D) {
         src_box.depth = pCopyImageToImageInfo->pRegions[i].extent.depth;
         src_box.z = pCopyImageToImageInfo->pRegions[i].srcOffset.z;
      } else {
         src_box.depth = pCopyImageToImageInfo->pRegions[i].srcSubresource.layerCount;
         src_box.z = pCopyImageToImageInfo->pRegions[i].srcSubresource.baseArrayLayer;
      }

      unsigned dstz = dst_image->planes[dst_plane].bo->target == PIPE_TEXTURE_3D ?
                      pCopyImageToImageInfo->pRegions[i].dstOffset.z :
                      pCopyImageToImageInfo->pRegions[i].dstSubresource.baseArrayLayer;
      device->queue.ctx->resource_copy_region(device->queue.ctx, dst_image->planes[dst_plane].bo,
                                              pCopyImageToImageInfo->pRegions[i].dstSubresource.mipLevel,
                                              pCopyImageToImageInfo->pRegions[i].dstOffset.x,
                                              pCopyImageToImageInfo->pRegions[i].dstOffset.y,
                                              dstz,
                                              src_image->planes[src_plane].bo,
                                              pCopyImageToImageInfo->pRegions[i].srcSubresource.mipLevel,
                                              &src_box);
   }
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
lvp_TransitionImageLayoutEXT(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfoEXT *pTransitions)
{
   /* no-op */
   return VK_SUCCESS;
}

VkResult
lvp_buffer_bind_sparse(struct lvp_device *device,
                       struct lvp_queue *queue,
                       VkSparseBufferMemoryBindInfo *bind)
{
   LVP_FROM_HANDLE(lvp_buffer, buffer, bind->buffer);

   for (uint32_t i = 0; i < bind->bindCount; i++) {
      LVP_FROM_HANDLE(lvp_device_memory, mem, bind->pBinds[i].memory);
      device->pscreen->resource_bind_backing(device->pscreen,
                                             buffer->bo,
                                             mem ? mem->pmem : NULL,
                                             bind->pBinds[i].memoryOffset,
                                             bind->pBinds[i].size,
                                             bind->pBinds[i].resourceOffset);
   }

   return VK_SUCCESS;
}

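/* Opaque binds address the image as one flat range: resourceOffset walks the
 * planes in order, since image->size was accumulated from the aligned
 * per-plane sizes at creation time.
 */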
VkResult
lvp_image_bind_opaque_sparse(struct lvp_device *device,
                             struct lvp_queue *queue,
                             VkSparseImageOpaqueMemoryBindInfo *bind_info)
{
   LVP_FROM_HANDLE(lvp_image, image, bind_info->image);

   for (uint32_t i = 0; i < bind_info->bindCount; i++) {
      const VkSparseMemoryBind *bind = &bind_info->pBinds[i];
      LVP_FROM_HANDLE(lvp_device_memory, mem, bind->memory);

      uint32_t plane_index;
      uint32_t offset;
      if (bind->resourceOffset < image->planes[0].size) {
         plane_index = 0;
         offset = bind->resourceOffset;
      } else if (bind->resourceOffset < image->planes[0].size + image->planes[1].size) {
         plane_index = 1;
         offset = bind->resourceOffset - image->planes[0].size;
      } else {
         plane_index = 2;
         offset = bind->resourceOffset - image->planes[0].size - image->planes[1].size;
      }

      device->pscreen->resource_bind_backing(device->pscreen,
                                             image->planes[plane_index].bo,
                                             mem ? mem->pmem : NULL,
                                             bind->memoryOffset,
                                             bind->size,
                                             offset);
   }

   return VK_SUCCESS;
}

VkResult
lvp_image_bind_sparse(struct lvp_device *device,
                      struct lvp_queue *queue,
                      VkSparseImageMemoryBindInfo *bind_info)
{
   LVP_FROM_HANDLE(lvp_image, image, bind_info->image);

   enum pipe_format format = vk_format_to_pipe_format(image->vk.format);

   for (uint32_t i = 0; i < bind_info->bindCount; i++) {
      const VkSparseImageMemoryBind *bind = &bind_info->pBinds[i];
      LVP_FROM_HANDLE(lvp_device_memory, mem, bind->memory);

      uint8_t plane = lvp_image_aspects_to_plane(image, bind->subresource.aspectMask);

      uint32_t depth = 1;
      uint32_t z = 0;
      uint32_t dimensions = 2;
      switch (image->planes[plane].bo->target) {
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         /* these use layer */
         z = bind->subresource.arrayLayer;
         break;
      case PIPE_TEXTURE_3D:
         /* this uses depth */
         z = bind->offset.z;
         depth = bind->extent.depth;
         dimensions = 3;
         break;
      default:
         break;
      }

      uint32_t sparse_tile_size[3] = {
         util_format_get_tilesize(format, dimensions, image->vk.samples, 0),
         util_format_get_tilesize(format, dimensions, image->vk.samples, 1),
         util_format_get_tilesize(format, dimensions, image->vk.samples, 2),
      };

      uint32_t sparse_block_base[3] = {
         bind->offset.x / (sparse_tile_size[0] * util_format_get_blockwidth(format)),
         bind->offset.y / (sparse_tile_size[1] * util_format_get_blockheight(format)),
         z / (sparse_tile_size[2] * util_format_get_blockdepth(format)),
      };

      uint32_t sparse_block_counts[3] = {
         DIV_ROUND_UP(bind->extent.width, sparse_tile_size[0] * util_format_get_blockwidth(format)),
         DIV_ROUND_UP(bind->extent.height, sparse_tile_size[1] * util_format_get_blockheight(format)),
         DIV_ROUND_UP(depth, sparse_tile_size[2] * util_format_get_blockdepth(format)),
      };

      uint32_t sparse_block_count = sparse_block_counts[0] * sparse_block_counts[1] * sparse_block_counts[2];

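      /* Walk the bound region tile by tile in x -> y -> z order; each sparse
       * block corresponds to one 64 KiB page of backing memory, bound at the
       * texel offset llvmpipe computes for the tile's origin.
       */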
      for (uint32_t block = 0; block < sparse_block_count; block++) {
         uint32_t start_x = (sparse_block_base[0] + block % sparse_block_counts[0]) * sparse_tile_size[0];
         uint32_t start_y = (sparse_block_base[1] + (block / sparse_block_counts[0]) % sparse_block_counts[1]) *
                            sparse_tile_size[1];
         uint32_t start_z = (sparse_block_base[2] + (block / sparse_block_counts[0] / sparse_block_counts[1]) % sparse_block_counts[2]) *
                            sparse_tile_size[2];

         uint64_t offset = llvmpipe_get_texel_offset(image->planes[plane].bo, bind->subresource.mipLevel, start_x, start_y, start_z);
         device->pscreen->resource_bind_backing(device->pscreen,
                                                image->planes[plane].bo,
                                                mem ? mem->pmem : NULL,
                                                bind->memoryOffset + block * 64 * 1024,
                                                64 * 1024,
                                                offset);
      }
   }

   return VK_SUCCESS;
}