/*
 * Copyright © 2022 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <vulkan/vulkan.h>
#include <xf86drm.h>

#include "drm-uapi/pvr_drm.h"
#include "pvr_device_info.h"
#include "pvr_drm.h"
#include "pvr_drm_bo.h"
#include "pvr_drm_job_compute.h"
#include "pvr_drm_job_null.h"
#include "pvr_drm_job_render.h"
#include "pvr_drm_job_transfer.h"
#include "pvr_drm_public.h"
#include "pvr_private.h"
#include "pvr_winsys.h"
#include "pvr_winsys_helper.h"
#include "vk_alloc.h"
#include "vk_drm_syncobj.h"
#include "vk_log.h"

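/**
 * Tear down all winsys heaps, logging an error for any heap that is still in
 * use. The region header heap is optional and is only finished when present.
 */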
static void pvr_drm_finish_heaps(struct pvr_drm_winsys *const drm_ws)
{
   if (!pvr_winsys_helper_winsys_heap_finish(
          &drm_ws->transfer_frag_heap.base)) {
      vk_errorf(NULL,
                VK_ERROR_UNKNOWN,
                "Transfer fragment heap in use, can't deinit");
   }

   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->vis_test_heap.base)) {
      vk_errorf(NULL,
                VK_ERROR_UNKNOWN,
                "Visibility test heap in use, can't deinit");
   }

   if (drm_ws->rgn_hdr_heap_present) {
      if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->rgn_hdr_heap.base)) {
         vk_errorf(NULL,
                   VK_ERROR_UNKNOWN,
                   "Region header heap in use, can't deinit");
      }
   }

   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->usc_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "USC heap in use, can't deinit");

   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->pds_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "PDS heap in use, can't deinit");

   if (!pvr_winsys_helper_winsys_heap_finish(&drm_ws->general_heap.base))
      vk_errorf(NULL, VK_ERROR_UNKNOWN, "General heap in use, can't deinit");
}

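/**
 * Destroy the DRM winsys: free the static memory allocations, finish the
 * heaps, destroy the kernel VM context and free the winsys object itself.
 */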
static void pvr_drm_winsys_destroy(struct pvr_winsys *ws)
{
   struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(ws);
   struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = {
      .handle = drm_ws->vm_context,
   };

   pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
                                        drm_ws->pds_vma,
                                        drm_ws->usc_vma);

   pvr_drm_finish_heaps(drm_ws);

   pvr_ioctl(ws->render_fd,
             DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
             &destroy_vm_context_args,
             VK_ERROR_UNKNOWN);

   vk_free(ws->alloc, drm_ws);
}

/**
 * Linearly search a uint32_t array for a value.
 *
 * \param array Pointer to the start of the array.
 * \param len Number of uint32_t elements to compare.
 * \param val The value to search for.
 * \return true if \p val is found, false otherwise.
 */
static bool
pvr_u32_in_array(const uint32_t *array, const size_t len, const uint32_t val)
{
   for (size_t i = 0; i < len; i++) {
      if (array[i] == val)
         return true;
   }

   return false;
}

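/**
 * Query the kernel for the quirks (BRNs) that apply to this GPU and override
 * the corresponding dev_info flags with the values reported by the kernel.
 *
 * \return VK_ERROR_INCOMPATIBLE_DRIVER if the kernel reports a "must have"
 * quirk that this driver does not support.
 */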
static VkResult pvr_drm_override_quirks(struct pvr_drm_winsys *drm_ws,
                                        struct pvr_device_info *dev_info)
{
   struct drm_pvr_dev_query_quirks query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query,
   };

   /* clang-format off */
#define PVR_QUIRKS(x) \
   x(48545)           \
   x(49927)           \
   x(51764)           \
   x(62269)
   /* clang-format on */

#define PVR_QUIRK_EXPAND_COMMA(number) number,

   const uint32_t supported_quirks[] = { PVR_QUIRKS(PVR_QUIRK_EXPAND_COMMA) };

#undef PVR_QUIRK_EXPAND_COMMA

   VkResult result;

   /* Get the length and allocate enough for it */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out;

   /* It's possible there are no quirks, so we can skip the rest. */
   if (!query.count) {
      result = VK_SUCCESS;
      goto out;
   }

   query.quirks = (__u64)vk_zalloc(drm_ws->base.alloc,
                                   sizeof(uint32_t) * query.count,
                                   8,
                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!query.quirks) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* Get the data */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out_free_quirks;

#define PVR_QUIRK_EXPAND_SET(number)  \
   dev_info->quirks.has_brn##number = \
      pvr_u32_in_array((uint32_t *)query.quirks, query.count, number);

   /* For each quirk known to the driver, override the dev_info flag with the
    * value reported by the kernel.
    */
   PVR_QUIRKS(PVR_QUIRK_EXPAND_SET);

#undef PVR_QUIRK_EXPAND_SET
#undef PVR_QUIRKS

   /* Check that all must-have quirks reported by the kernel are supported. */
   for (uint32_t i = 0; i < query.musthave_count; i++) {
      if (!pvr_u32_in_array(supported_quirks,
                            ARRAY_SIZE(supported_quirks),
                            ((uint32_t *)query.quirks)[i])) {
         result = VK_ERROR_INCOMPATIBLE_DRIVER;
         goto out_free_quirks;
      }
   }

   result = VK_SUCCESS;

out_free_quirks:
   vk_free(drm_ws->base.alloc, (__u64 *)query.quirks);

out:
   return result;
}

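/**
 * Query the kernel for the enhancements (ERNs) that apply to this GPU and
 * override the corresponding dev_info flags with the values reported by the
 * kernel.
 */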
static VkResult pvr_drm_override_enhancements(struct pvr_drm_winsys *drm_ws,
                                              struct pvr_device_info *dev_info)
{
   struct drm_pvr_dev_query_enhancements query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };

   VkResult result;

   /* Get the length and allocate enough for it */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out;

   /* It's possible there are no enhancements, so we can skip the rest. */
   if (!query.count) {
      result = VK_SUCCESS;
      goto out;
   }

   query.enhancements = (__u64)vk_zalloc(drm_ws->base.alloc,
                                         sizeof(uint32_t) * query.count,
                                         8,
                                         VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!query.enhancements) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   /* Get the data */
   result = pvr_ioctl(drm_ws->base.render_fd,
                      DRM_IOCTL_PVR_DEV_QUERY,
                      &args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto out_free_enhancements;

   /* clang-format off */
#define PVR_ENHANCEMENT_SET(number)         \
   dev_info->enhancements.has_ern##number = \
      pvr_u32_in_array((uint32_t *)query.enhancements, query.count, number)
   /* clang-format on */

   PVR_ENHANCEMENT_SET(35421);

#undef PVR_ENHANCEMENT_SET

   result = VK_SUCCESS;

out_free_enhancements:
   vk_free(drm_ws->base.alloc, (__u64 *)query.enhancements);

out:
   return result;
}

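/** Fetch the kernel's runtime info block via DRM_IOCTL_PVR_DEV_QUERY. */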
static VkResult
pvr_drm_get_runtime_info(struct pvr_drm_winsys *drm_ws,
                         struct drm_pvr_dev_query_runtime_info *const value)
{
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
      .size = sizeof(*value),
      .pointer = (__u64)value
   };

   return pvr_ioctl(drm_ws->base.render_fd,
                    DRM_IOCTL_PVR_DEV_QUERY,
                    &args,
                    VK_ERROR_INITIALIZATION_FAILED);
}

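/**
 * Fetch the GPU info block (packed BVNC id, number of phantoms) via
 * DRM_IOCTL_PVR_DEV_QUERY.
 */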
static VkResult
pvr_drm_get_gpu_info(struct pvr_drm_winsys *drm_ws,
                     struct drm_pvr_dev_query_gpu_info *const value)
{
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
      .size = sizeof(*value),
      .pointer = (__u64)value
   };

   return pvr_ioctl(drm_ws->base.render_fd,
                    DRM_IOCTL_PVR_DEV_QUERY,
                    &args,
                    VK_ERROR_INITIALIZATION_FAILED);
}

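/**
 * Initialise dev_info and runtime_info for this device from the BVNC and the
 * kernel-reported quirks, enhancements, GPU info and runtime info.
 */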
static VkResult
pvr_drm_winsys_device_info_init(struct pvr_winsys *ws,
                                struct pvr_device_info *dev_info,
                                struct pvr_device_runtime_info *runtime_info)
{
   struct drm_pvr_dev_query_runtime_info kmd_runtime_info = { 0 };
   struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   VkResult result;
   int ret;

   ret = pvr_device_info_init(dev_info, drm_ws->bvnc);
   if (ret) {
      result = vk_errorf(NULL,
                         VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Unsupported BVNC: %u.%u.%u.%u\n",
                         PVR_BVNC_UNPACK_B(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_V(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_N(drm_ws->bvnc),
                         PVR_BVNC_UNPACK_C(drm_ws->bvnc));
      goto err_out;
   }

   result = pvr_drm_override_quirks(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get quirks for this GPU\n");
      goto err_out;
   }

   result = pvr_drm_override_enhancements(drm_ws, dev_info);
   if (result != VK_SUCCESS) {
      mesa_logw("Failed to get enhancements for this GPU\n");
      goto err_out;
   }

   /* TODO: When kernel support is added, fetch the actual core count. */
   if (PVR_HAS_FEATURE(dev_info, gpu_multicore_support))
      mesa_logw("Core count fetching is unimplemented. Setting 1 for now.");
   runtime_info->core_count = 1;

   result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
   if (result != VK_SUCCESS)
      goto err_out;

   runtime_info->num_phantoms = gpu_info.num_phantoms;

   result = pvr_drm_get_runtime_info(drm_ws, &kmd_runtime_info);
   if (result != VK_SUCCESS)
      goto err_out;

   runtime_info->min_free_list_size = kmd_runtime_info.free_list_min_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->max_free_list_size = kmd_runtime_info.free_list_max_pages
                                      << ROGUE_BIF_PM_PHYSICAL_PAGE_SHIFT;
   runtime_info->reserved_shared_size =
      kmd_runtime_info.common_store_alloc_region_size;
   runtime_info->total_reserved_partition_size =
      kmd_runtime_info.common_store_partition_space_size;
   runtime_info->max_coeffs = kmd_runtime_info.max_coeffs;
   runtime_info->cdm_max_local_mem_size_regs =
      kmd_runtime_info.cdm_max_local_mem_size_regs;

   return VK_SUCCESS;

err_out:
   return result;
}

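/**
 * Fill in the heaps structure with this winsys' heaps. When the region header
 * heap is not present, the general heap is used in its place.
 */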
static void pvr_drm_winsys_get_heaps_info(struct pvr_winsys *ws,
                                          struct pvr_winsys_heaps *heaps)
{
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);

   heaps->general_heap = &drm_ws->general_heap.base;
   heaps->pds_heap = &drm_ws->pds_heap.base;
   heaps->transfer_frag_heap = &drm_ws->transfer_frag_heap.base;
   heaps->usc_heap = &drm_ws->usc_heap.base;
   heaps->vis_test_heap = &drm_ws->vis_test_heap.base;

   if (drm_ws->rgn_hdr_heap_present)
      heaps->rgn_hdr_heap = &drm_ws->rgn_hdr_heap.base;
   else
      heaps->rgn_hdr_heap = &drm_ws->general_heap.base;
}

static const struct pvr_winsys_ops drm_winsys_ops = {
   .destroy = pvr_drm_winsys_destroy,
   .device_info_init = pvr_drm_winsys_device_info_init,
   .get_heaps_info = pvr_drm_winsys_get_heaps_info,
   .buffer_create = pvr_drm_winsys_buffer_create,
   .buffer_create_from_fd = pvr_drm_winsys_buffer_create_from_fd,
   .buffer_destroy = pvr_drm_winsys_buffer_destroy,
   .buffer_get_fd = pvr_drm_winsys_buffer_get_fd,
   .buffer_map = pvr_drm_winsys_buffer_map,
   .buffer_unmap = pvr_drm_winsys_buffer_unmap,
   .heap_alloc = pvr_drm_winsys_heap_alloc,
   .heap_free = pvr_drm_winsys_heap_free,
   .vma_map = pvr_drm_winsys_vma_map,
   .vma_unmap = pvr_drm_winsys_vma_unmap,
   .free_list_create = pvr_drm_winsys_free_list_create,
   .free_list_destroy = pvr_drm_winsys_free_list_destroy,
   .render_target_dataset_create = pvr_drm_render_target_dataset_create,
   .render_target_dataset_destroy = pvr_drm_render_target_dataset_destroy,
   .render_ctx_create = pvr_drm_winsys_render_ctx_create,
   .render_ctx_destroy = pvr_drm_winsys_render_ctx_destroy,
   .render_submit = pvr_drm_winsys_render_submit,
   .compute_ctx_create = pvr_drm_winsys_compute_ctx_create,
   .compute_ctx_destroy = pvr_drm_winsys_compute_ctx_destroy,
   .compute_submit = pvr_drm_winsys_compute_submit,
   .transfer_ctx_create = pvr_drm_winsys_transfer_ctx_create,
   .transfer_ctx_destroy = pvr_drm_winsys_transfer_ctx_destroy,
   .transfer_submit = pvr_drm_winsys_transfer_submit,
   .null_job_submit = pvr_drm_winsys_null_job_submit,
};

struct pvr_static_data_area_description {
   struct pvr_winsys_static_data_offsets offsets;
   size_t total_size;
};

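/**
 * Query the static data areas from the kernel and accumulate, per heap, the
 * offset of each area and the total (carveout) size they occupy.
 */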
static VkResult pvr_drm_get_heap_static_data_descriptions(
   struct pvr_drm_winsys *const drm_ws,
   struct pvr_static_data_area_description desc_out[DRM_PVR_HEAP_COUNT])
{
   struct drm_pvr_dev_query_static_data_areas query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   struct drm_pvr_static_data_area *array;
   VkResult result;

   /* Get the array length */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area array size");
   if (result != VK_SUCCESS)
      goto out;

   array = vk_alloc(drm_ws->base.alloc,
                    sizeof(*array) * query.static_data_areas.count,
                    8,
                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!array) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   VG(VALGRIND_MAKE_MEM_DEFINED(array,
                                sizeof(*array) *
                                   query.static_data_areas.count));

   query.static_data_areas.array = (__u64)array;

   /* Get the array */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch static area offset array");
   if (result != VK_SUCCESS)
      goto out_free_array;

   for (size_t i = 0; i < query.static_data_areas.count; i++) {
      /* Skip unknown heaps; they would otherwise cause a write outside the
       * bounds of desc_out.
       */
      if (array[i].location_heap_id >= DRM_PVR_HEAP_COUNT)
         continue;

      switch (array[i].area_usage) {
      case DRM_PVR_STATIC_DATA_AREA_EOT:
         desc_out[array[i].location_heap_id].offsets.eot = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_FENCE:
         desc_out[array[i].location_heap_id].offsets.fence = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_VDM_SYNC:
         desc_out[array[i].location_heap_id].offsets.vdm_sync = array[i].offset;
         break;

      case DRM_PVR_STATIC_DATA_AREA_YUV_CSC:
         desc_out[array[i].location_heap_id].offsets.yuv_csc = array[i].offset;
         break;

      default:
         mesa_logd("Unknown drm static area id. ID: %d.", array[i].area_usage);
         continue;
      }

      desc_out[array[i].location_heap_id].total_size += array[i].size;
   }

   result = VK_SUCCESS;

out_free_array:
   vk_free(drm_ws->base.alloc, array);

out:
   return result;
}

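/**
 * Query the heap layout from the kernel and initialise each winsys heap: a
 * static data carveout at the start of the heap and a VMA allocator covering
 * the remainder. The region header heap is optional; all others are required.
 */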
static VkResult pvr_drm_setup_heaps(struct pvr_drm_winsys *const drm_ws)
{
   struct pvr_winsys_heap *const winsys_heaps[DRM_PVR_HEAP_COUNT] = {
      [DRM_PVR_HEAP_GENERAL] = &drm_ws->general_heap.base,
      [DRM_PVR_HEAP_PDS_CODE_DATA] = &drm_ws->pds_heap.base,
      [DRM_PVR_HEAP_USC_CODE] = &drm_ws->usc_heap.base,
      [DRM_PVR_HEAP_RGNHDR] = &drm_ws->rgn_hdr_heap.base,
      [DRM_PVR_HEAP_VIS_TEST] = &drm_ws->vis_test_heap.base,
      [DRM_PVR_HEAP_TRANSFER_FRAG] = &drm_ws->transfer_frag_heap.base,
   };
   struct pvr_static_data_area_description
      static_data_descriptions[DRM_PVR_HEAP_COUNT] = { 0 };
   struct drm_pvr_dev_query_heap_info query = { 0 };
   struct drm_pvr_ioctl_dev_query_args args = {
      .type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
      .size = sizeof(query),
      .pointer = (__u64)&query
   };
   struct drm_pvr_heap *array;
   VkResult result;
   int i = 0;

   /* Get the array length */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch heap info array size");
   if (result != VK_SUCCESS)
      goto out;

   array = vk_alloc(drm_ws->base.alloc,
                    sizeof(*array) * query.heaps.count,
                    8,
                    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!array) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto out;
   }

   VG(VALGRIND_MAKE_MEM_DEFINED(array, sizeof(*array) * query.heaps.count));

   query.heaps.array = (__u64)array;

   /* Get the array */
   result = pvr_ioctlf(drm_ws->base.render_fd,
                       DRM_IOCTL_PVR_DEV_QUERY,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to fetch heap info array");
   if (result != VK_SUCCESS)
      goto out_free_array;

   result = pvr_drm_get_heap_static_data_descriptions(drm_ws,
                                                      static_data_descriptions);
   if (result != VK_SUCCESS)
      goto out_free_array;

   for (; i < query.heaps.count; i++) {
      const bool present = array[i].size;
      const pvr_dev_addr_t base_addr = PVR_DEV_ADDR(array[i].base);
      const pvr_dev_addr_t vma_heap_begin_addr =
         PVR_DEV_ADDR_OFFSET(base_addr, static_data_descriptions[i].total_size);
      const uint64_t vma_heap_size =
         array[i].size - static_data_descriptions[i].total_size;

      /* Optional heaps */
      switch (i) {
      case DRM_PVR_HEAP_RGNHDR:
         drm_ws->rgn_hdr_heap_present = present;
         if (!present)
            continue;
         break;
      default:
         break;
      }

      /* Required heaps */
      if (!present) {
         result = vk_errorf(NULL,
                            VK_ERROR_INITIALIZATION_FAILED,
                            "Required heap not present: %d.",
                            i);
         goto err_pvr_drm_heap_finish_all_heaps;
      }

      assert(base_addr.addr);
      assert(static_data_descriptions[i].total_size <= array[i].size);

      winsys_heaps[i]->ws = &drm_ws->base;
      winsys_heaps[i]->base_addr = base_addr;
      winsys_heaps[i]->static_data_carveout_addr = base_addr;
      winsys_heaps[i]->size = array[i].size;
      winsys_heaps[i]->static_data_carveout_size =
         static_data_descriptions[i].total_size;
      winsys_heaps[i]->page_size = 1 << array[i].page_size_log2;
      winsys_heaps[i]->log2_page_size = array[i].page_size_log2;

      /* For now we don't support the heap page size being different from the
       * host page size.
       */
      assert(winsys_heaps[i]->page_size == drm_ws->base.page_size);
      assert(winsys_heaps[i]->log2_page_size == drm_ws->base.log2_page_size);

      winsys_heaps[i]->static_data_offsets =
         static_data_descriptions[i].offsets;

      util_vma_heap_init(&winsys_heaps[i]->vma_heap,
                         vma_heap_begin_addr.addr,
                         vma_heap_size);

      winsys_heaps[i]->vma_heap.alloc_high = false;

      /* It's expected that the heap destroy function will be the last thing
       * called, so we start the ref_count at 0.
       */
      p_atomic_set(&winsys_heaps[i]->ref_count, 0);

      if (pthread_mutex_init(&winsys_heaps[i]->lock, NULL)) {
         result = vk_error(NULL, VK_ERROR_INITIALIZATION_FAILED);
         goto err_pvr_drm_heap_finish_all_heaps;
      }
   }

   result = VK_SUCCESS;
   goto out_free_array;

err_pvr_drm_heap_finish_all_heaps:
   /* Undo from where we left off */
   while (--i >= 0) {
      /* Optional heaps */
      switch (i) {
      case DRM_PVR_HEAP_RGNHDR:
         if (drm_ws->rgn_hdr_heap_present)
            break;
         continue;
      default:
         break;
      }

      pvr_winsys_helper_winsys_heap_finish(winsys_heaps[i]);
   }

out_free_array:
   vk_free(drm_ws->base.alloc, array);

out:
   return result;
}

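/**
 * Create a DRM-backed winsys: allocate the winsys object, query the GPU info
 * for the BVNC, create a VM context, set up the heaps, then allocate and fill
 * the static (carveout) memory.
 */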
VkResult pvr_drm_winsys_create(const int render_fd,
                               const int display_fd,
                               const VkAllocationCallbacks *alloc,
                               struct pvr_winsys **const ws_out)
{
   struct drm_pvr_ioctl_create_vm_context_args create_vm_context_args = { 0 };
   struct drm_pvr_ioctl_destroy_vm_context_args destroy_vm_context_args = { 0 };
   struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };

   struct pvr_drm_winsys *drm_ws;
   VkResult result;

   drm_ws =
      vk_zalloc(alloc, sizeof(*drm_ws), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_ws) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   drm_ws->base.ops = &drm_winsys_ops;
   drm_ws->base.render_fd = render_fd;
   drm_ws->base.display_fd = display_fd;
   drm_ws->base.alloc = alloc;

   os_get_page_size(&drm_ws->base.page_size);
   drm_ws->base.log2_page_size = util_logbase2(drm_ws->base.page_size);

   drm_ws->base.syncobj_type = vk_drm_syncobj_get_type(render_fd);
   drm_ws->base.sync_types[0] = &drm_ws->base.syncobj_type;
   drm_ws->base.sync_types[1] = NULL;

   result = pvr_drm_get_gpu_info(drm_ws, &gpu_info);
   if (result != VK_SUCCESS)
      goto err_vk_free_drm_ws;

   drm_ws->bvnc = gpu_info.gpu_id;

   result = pvr_ioctl(render_fd,
                      DRM_IOCTL_PVR_CREATE_VM_CONTEXT,
                      &create_vm_context_args,
                      VK_ERROR_INITIALIZATION_FAILED);
   if (result != VK_SUCCESS)
      goto err_vk_free_drm_ws;

   drm_ws->vm_context = create_vm_context_args.handle;

   result = pvr_drm_setup_heaps(drm_ws);
   if (result != VK_SUCCESS)
      goto err_pvr_destroy_vm_context;

   result =
      pvr_winsys_helper_allocate_static_memory(&drm_ws->base,
                                               pvr_drm_heap_alloc_carveout,
                                               &drm_ws->general_heap.base,
                                               &drm_ws->pds_heap.base,
                                               &drm_ws->usc_heap.base,
                                               &drm_ws->general_vma,
                                               &drm_ws->pds_vma,
                                               &drm_ws->usc_vma);
   if (result != VK_SUCCESS)
      goto err_pvr_heap_finish;

   result = pvr_winsys_helper_fill_static_memory(&drm_ws->base,
                                                 drm_ws->general_vma,
                                                 drm_ws->pds_vma,
                                                 drm_ws->usc_vma);
   if (result != VK_SUCCESS)
      goto err_pvr_free_static_memory;

   *ws_out = &drm_ws->base;

   return VK_SUCCESS;

err_pvr_free_static_memory:
   pvr_winsys_helper_free_static_memory(drm_ws->general_vma,
                                        drm_ws->pds_vma,
                                        drm_ws->usc_vma);

err_pvr_heap_finish:
   pvr_drm_finish_heaps(drm_ws);

err_pvr_destroy_vm_context:
   destroy_vm_context_args.handle = drm_ws->vm_context;
   pvr_ioctl(render_fd,
             DRM_IOCTL_PVR_DESTROY_VM_CONTEXT,
             &destroy_vm_context_args,
             VK_ERROR_UNKNOWN);

err_vk_free_drm_ws:
   vk_free(alloc, drm_ws);

err_out:
   return result;
}