1 /*
2 * Copyright © 2022 Imagination Technologies Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24 #include <errno.h>
25 #include <stdbool.h>
26 #include <stddef.h>
27 #include <stdint.h>
28 #include <string.h>
29 #include <vulkan/vulkan.h>
30 #include <xf86drm.h>
31
32 #include "drm-uapi/pvr_drm.h"
33 #include "pvr_drm.h"
34 #include "pvr_drm_bo.h"
35 #include "pvr_drm_job_common.h"
36 #include "pvr_drm_job_render.h"
37 #include "pvr_private.h"
38 #include "pvr_winsys.h"
39 #include "pvr_winsys_helper.h"
40 #include "util/macros.h"
41 #include "vk_alloc.h"
42 #include "vk_drm_syncobj.h"
43 #include "vk_log.h"
44 #include "vk_util.h"
45 #include "vk_sync.h"
46
/* Indices into drm_pvr_ioctl_create_hwrt_dataset_args::free_list_handles:
 * slot 0 is the renderer's local free list, slot 1 the (optional) global
 * parent free list.
 */
#define PVR_DRM_FREE_LIST_LOCAL 0U
#define PVR_DRM_FREE_LIST_GLOBAL 1U
#define PVR_DRM_FREE_LIST_MAX 2U
50
/* DRM winsys wrapper around a kernel free list object. */
struct pvr_drm_winsys_free_list {
   struct pvr_winsys_free_list base;

   /* Kernel handle returned by DRM_IOCTL_PVR_CREATE_FREE_LIST. */
   uint32_t handle;

   /* Optional parent (global) free list; NULL when this list has none. */
   struct pvr_drm_winsys_free_list *parent;
};

#define to_pvr_drm_winsys_free_list(free_list) \
   container_of(free_list, struct pvr_drm_winsys_free_list, base)
61
/* DRM winsys wrapper around a kernel HWRT (hardware render target) dataset. */
struct pvr_drm_winsys_rt_dataset {
   struct pvr_winsys_rt_dataset base;
   /* Kernel handle returned by DRM_IOCTL_PVR_CREATE_HWRT_DATASET. */
   uint32_t handle;
};

#define to_pvr_drm_winsys_rt_dataset(rt_dataset) \
   container_of(rt_dataset, struct pvr_drm_winsys_rt_dataset, base)
69
pvr_drm_winsys_free_list_create(struct pvr_winsys * const ws,struct pvr_winsys_vma * const free_list_vma,uint32_t initial_num_pages,uint32_t max_num_pages,uint32_t grow_num_pages,uint32_t grow_threshold,struct pvr_winsys_free_list * const parent_free_list,struct pvr_winsys_free_list ** const free_list_out)70 VkResult pvr_drm_winsys_free_list_create(
71 struct pvr_winsys *const ws,
72 struct pvr_winsys_vma *const free_list_vma,
73 uint32_t initial_num_pages,
74 uint32_t max_num_pages,
75 uint32_t grow_num_pages,
76 uint32_t grow_threshold,
77 struct pvr_winsys_free_list *const parent_free_list,
78 struct pvr_winsys_free_list **const free_list_out)
79 {
80 struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
81 struct drm_pvr_ioctl_create_free_list_args free_list_args = {
82 .free_list_gpu_addr = free_list_vma->dev_addr.addr,
83 .initial_num_pages = initial_num_pages,
84 .max_num_pages = max_num_pages,
85 .grow_num_pages = grow_num_pages,
86 .grow_threshold = grow_threshold,
87 .vm_context_handle = drm_ws->vm_context,
88 };
89 struct pvr_drm_winsys_free_list *drm_free_list;
90 VkResult result;
91
92 drm_free_list = vk_zalloc(ws->alloc,
93 sizeof(*drm_free_list),
94 8,
95 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
96 if (!drm_free_list) {
97 result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
98 goto err_out;
99 }
100
101 drm_free_list->base.ws = ws;
102
103 if (parent_free_list)
104 drm_free_list->parent = to_pvr_drm_winsys_free_list(parent_free_list);
105
106 /* Returns VK_ERROR_INITIALIZATION_FAILED to match pvrsrv. */
107 result = pvr_ioctlf(ws->render_fd,
108 DRM_IOCTL_PVR_CREATE_FREE_LIST,
109 &free_list_args,
110 VK_ERROR_INITIALIZATION_FAILED,
111 "Failed to create free list");
112 if (result != VK_SUCCESS)
113 goto err_free_free_list;
114
115 drm_free_list->handle = free_list_args.handle;
116
117 *free_list_out = &drm_free_list->base;
118
119 return VK_SUCCESS;
120
121 err_free_free_list:
122 vk_free(ws->alloc, drm_free_list);
123
124 err_out:
125 return result;
126 }
127
pvr_drm_winsys_free_list_destroy(struct pvr_winsys_free_list * free_list)128 void pvr_drm_winsys_free_list_destroy(struct pvr_winsys_free_list *free_list)
129 {
130 struct pvr_drm_winsys_free_list *const drm_free_list =
131 to_pvr_drm_winsys_free_list(free_list);
132 struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(free_list->ws);
133 struct drm_pvr_ioctl_destroy_free_list_args args = {
134 .handle = drm_free_list->handle,
135 };
136
137 pvr_ioctlf(drm_ws->base.render_fd,
138 DRM_IOCTL_PVR_DESTROY_FREE_LIST,
139 &args,
140 VK_ERROR_UNKNOWN,
141 "Error destroying free list");
142
143 vk_free(drm_ws->base.alloc, free_list);
144 }
145
/* Serialize the render context static state into the KMD stream format
 * consumed by DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * The stream is written as 64-bit words: a KMD_STREAM_HDR, the VDM and
 * geometry context state base addresses, then store/resume task words for
 * each geometry state. On return *stream_len_ptr holds the total stream
 * length in bytes (header included), which is also packed into the
 * header's length field.
 */
static void pvr_drm_render_ctx_static_state_init(
   struct pvr_winsys_render_ctx_create_info *create_info,
   uint8_t *stream_ptr_start,
   uint32_t *stream_len_ptr)
{
   struct pvr_winsys_render_ctx_static_state *ws_static_state =
      &create_info->static_state;
   uint64_t *stream_ptr = (uint64_t *)stream_ptr_start;

   /* Leave space for stream header. */
   stream_ptr += pvr_cmd_length(KMD_STREAM_HDR) / 2;

   *stream_ptr++ = ws_static_state->vdm_ctx_state_base_addr;
   /* geom_reg_vdm_context_state_resume_addr is unused and zeroed. */
   *stream_ptr++ = 0;
   *stream_ptr++ = ws_static_state->geom_ctx_state_base_addr;

   for (uint32_t i = 0; i < ARRAY_SIZE(ws_static_state->geom_state); i++) {
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task0;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task1;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_store_task2;
      /* {store, resume}_task{3, 4} are unused and zeroed. */
      *stream_ptr++ = 0;
      *stream_ptr++ = 0;

      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task0;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task1;
      *stream_ptr++ = ws_static_state->geom_state[i].vdm_ctx_resume_task2;
      /* {store, resume}_task{3, 4} are unused and zeroed. */
      *stream_ptr++ = 0;
      *stream_ptr++ = 0;
   }

   /* Total length in bytes, including the header space skipped above. */
   *stream_len_ptr = ((uint8_t *)stream_ptr - stream_ptr_start);

   /* Length is now known, so the header can be written at the start. */
   pvr_csb_pack ((uint64_t *)stream_ptr_start, KMD_STREAM_HDR, value) {
      value.length = *stream_len_ptr;
   }
}
185
/* DRM winsys wrapper around a kernel render context. */
struct pvr_drm_winsys_render_ctx {
   struct pvr_winsys_render_ctx base;

   /* Handle to kernel context. */
   uint32_t handle;

   /* Fallback syncobj used at submit time to order the geometry job
    * before the partial render job when the caller provides no geometry
    * signal sync (see pvr_drm_winsys_render_submit()).
    */
   uint32_t geom_to_pr_syncobj;
};

#define to_pvr_drm_winsys_render_ctx(ctx) \
   container_of(ctx, struct pvr_drm_winsys_render_ctx, base)
197
/* Create a kernel render context.
 *
 * Allocates the winsys context wrapper, creates the geom->PR fallback
 * syncobj, serializes the static context state and issues
 * DRM_IOCTL_PVR_CREATE_CONTEXT. On success *ctx_out holds the new
 * context; on failure everything acquired here is released again.
 */
VkResult pvr_drm_winsys_render_ctx_create(
   struct pvr_winsys *ws,
   struct pvr_winsys_render_ctx_create_info *create_info,
   struct pvr_winsys_render_ctx **const ctx_out)
{
   /* Scratch buffer for the serialized static context state; its address
    * and (filled in below) length are passed to the kernel via ctx_args.
    */
   uint8_t static_ctx_state_fw_stream[192];
   struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ws);
   struct drm_pvr_ioctl_create_context_args ctx_args = {
      .type = DRM_PVR_CTX_TYPE_RENDER,
      .priority = pvr_drm_from_winsys_priority(create_info->priority),
      .static_context_state = (uint64_t)&static_ctx_state_fw_stream,
      .callstack_addr = create_info->vdm_callstack_addr.addr,
      .vm_context_handle = drm_ws->vm_context,
   };

   struct pvr_drm_winsys_render_ctx *drm_ctx;
   uint32_t geom_to_pr_syncobj;
   VkResult result;
   int ret;

   drm_ctx = vk_alloc(ws->alloc,
                      sizeof(*drm_ctx),
                      8,
                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_ctx) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   /* Syncobj used as the implicit geom->PR dependency at submit time when
    * no geometry signal sync is provided by the caller.
    */
   ret = drmSyncobjCreate(ws->render_fd, 0, &geom_to_pr_syncobj);
   if (ret < 0) {
      result = vk_errorf(NULL,
                         VK_ERROR_OUT_OF_HOST_MEMORY,
                         "DRM_IOCTL_SYNCOBJ_CREATE failed: %s",
                         strerror(errno));
      goto err_free_ctx;
   }

   /* Fills static_ctx_state_fw_stream and sets static_context_state_len. */
   pvr_drm_render_ctx_static_state_init(create_info,
                                        static_ctx_state_fw_stream,
                                        &ctx_args.static_context_state_len);

   result = pvr_ioctlf(ws->render_fd,
                       DRM_IOCTL_PVR_CREATE_CONTEXT,
                       &ctx_args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to create render context");
   if (result != VK_SUCCESS)
      goto err_destroy_syncobj;

   *drm_ctx = (struct pvr_drm_winsys_render_ctx) {
      .base = {
         .ws = ws,
      },
      .handle = ctx_args.handle,
      .geom_to_pr_syncobj = geom_to_pr_syncobj,
   };

   *ctx_out = &drm_ctx->base;

   return VK_SUCCESS;

err_destroy_syncobj:
   /* Best-effort cleanup: a failed destroy is only logged. */
   ret = drmSyncobjDestroy(ws->render_fd, geom_to_pr_syncobj);
   if (ret < 0) {
      mesa_loge("DRM_IOCTL_SYNCOBJ_DESTROY failed: %s - leaking it",
                strerror(errno));
   }

err_free_ctx:
   vk_free(ws->alloc, drm_ctx);

err_out:
   return result;
}
273
pvr_drm_winsys_render_ctx_destroy(struct pvr_winsys_render_ctx * ctx)274 void pvr_drm_winsys_render_ctx_destroy(struct pvr_winsys_render_ctx *ctx)
275 {
276 struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
277 struct pvr_drm_winsys_render_ctx *drm_ctx =
278 to_pvr_drm_winsys_render_ctx(ctx);
279 struct drm_pvr_ioctl_destroy_context_args args = {
280 .handle = drm_ctx->handle,
281 };
282 int ret;
283
284 ret = drmSyncobjDestroy(ctx->ws->render_fd, drm_ctx->geom_to_pr_syncobj);
285 if (ret < 0) {
286 mesa_loge("DRM_IOCTL_SYNCOBJ_DESTROY failed: %s - leaking it",
287 strerror(errno));
288 }
289
290 pvr_ioctlf(drm_ws->base.render_fd,
291 DRM_IOCTL_PVR_DESTROY_CONTEXT,
292 &args,
293 VK_ERROR_UNKNOWN,
294 "Error destroying render context");
295
296 vk_free(drm_ws->base.alloc, drm_ctx);
297 }
298
/* Create a kernel HWRT dataset from the winsys description in create_info.
 *
 * The ioctl args mirror the drm_pvr uapi layout: geometry data addresses,
 * per-rt-data PM/macrotile/region-header addresses, the local free list
 * handle (plus its parent's handle when present), render dimensions and
 * the ISP merge parameters. On success *rt_dataset_out holds the new
 * dataset.
 */
VkResult pvr_drm_render_target_dataset_create(
   struct pvr_winsys *const ws,
   const struct pvr_winsys_rt_dataset_create_info *const create_info,
   UNUSED const struct pvr_device_info *dev_info,
   struct pvr_winsys_rt_dataset **const rt_dataset_out)
{
   struct pvr_drm_winsys_free_list *drm_free_list =
      to_pvr_drm_winsys_free_list(create_info->local_free_list);

   /* 0 is just a placeholder. It doesn't indicate an invalid handle. */
   uint32_t parent_free_list_handle =
      drm_free_list->parent ? drm_free_list->parent->handle : 0;

   struct drm_pvr_ioctl_create_hwrt_dataset_args args = {
      .geom_data_args = {
         .tpc_dev_addr = create_info->tpc_dev_addr.addr,
         .tpc_size = create_info->tpc_size,
         .tpc_stride = create_info->tpc_stride,
         .vheap_table_dev_addr = create_info->vheap_table_dev_addr.addr,
         .rtc_dev_addr = create_info->rtc_dev_addr.addr,
      },

      .rt_data_args = {
         [0] = {
            .pm_mlist_dev_addr =
               create_info->rt_datas[0].pm_mlist_dev_addr.addr,
            .macrotile_array_dev_addr =
               create_info->rt_datas[0].macrotile_array_dev_addr.addr,
            .region_header_dev_addr =
               create_info->rt_datas[0].rgn_header_dev_addr.addr,
         },
         [1] = {
            .pm_mlist_dev_addr =
               create_info->rt_datas[1].pm_mlist_dev_addr.addr,
            .macrotile_array_dev_addr =
               create_info->rt_datas[1].macrotile_array_dev_addr.addr,
            .region_header_dev_addr =
               create_info->rt_datas[1].rgn_header_dev_addr.addr,
         },
      },

      .free_list_handles = {
         [PVR_DRM_FREE_LIST_LOCAL] = drm_free_list->handle,
         [PVR_DRM_FREE_LIST_GLOBAL] = parent_free_list_handle,
      },

      .width = create_info->width,
      .height = create_info->height,
      .samples = create_info->samples,
      .layers = create_info->layers,

      .isp_merge_lower_x = create_info->isp_merge_lower_x,
      .isp_merge_lower_y = create_info->isp_merge_lower_y,
      .isp_merge_scale_x = create_info->isp_merge_scale_x,
      .isp_merge_scale_y = create_info->isp_merge_scale_y,
      .isp_merge_upper_x = create_info->isp_merge_upper_x,
      .isp_merge_upper_y = create_info->isp_merge_upper_y,

      .region_header_size = create_info->rgn_header_size,
   };

   struct pvr_drm_winsys_rt_dataset *drm_rt_dataset;
   VkResult result;

   /* The uapi and winsys rt data arrays must stay in lockstep for the
    * [0]/[1] initializers above to be exhaustive.
    */
   STATIC_ASSERT(ARRAY_SIZE(args.rt_data_args) ==
                 ARRAY_SIZE(create_info->rt_datas));

   drm_rt_dataset = vk_zalloc(ws->alloc,
                              sizeof(*drm_rt_dataset),
                              8,
                              VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!drm_rt_dataset) {
      result = vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      goto err_out;
   }

   /* Returns VK_ERROR_INITIALIZATION_FAILED to match pvrsrv. */
   result = pvr_ioctlf(ws->render_fd,
                       DRM_IOCTL_PVR_CREATE_HWRT_DATASET,
                       &args,
                       VK_ERROR_INITIALIZATION_FAILED,
                       "Failed to create render target dataset");
   if (result != VK_SUCCESS)
      goto err_free_dataset;

   drm_rt_dataset->handle = args.handle;
   drm_rt_dataset->base.ws = ws;

   *rt_dataset_out = &drm_rt_dataset->base;

   return VK_SUCCESS;

err_free_dataset:
   vk_free(ws->alloc, drm_rt_dataset);

err_out:
   return result;
}
397
pvr_drm_render_target_dataset_destroy(struct pvr_winsys_rt_dataset * const rt_dataset)398 void pvr_drm_render_target_dataset_destroy(
399 struct pvr_winsys_rt_dataset *const rt_dataset)
400 {
401 struct pvr_drm_winsys_rt_dataset *const drm_rt_dataset =
402 to_pvr_drm_winsys_rt_dataset(rt_dataset);
403 struct pvr_drm_winsys *const drm_ws = to_pvr_drm_winsys(rt_dataset->ws);
404 struct drm_pvr_ioctl_destroy_hwrt_dataset_args args = {
405 .handle = drm_rt_dataset->handle,
406 };
407
408 pvr_ioctlf(drm_ws->base.render_fd,
409 DRM_IOCTL_PVR_DESTROY_HWRT_DATASET,
410 &args,
411 VK_ERROR_UNKNOWN,
412 "Error destroying render target dataset");
413
414 vk_free(drm_ws->base.alloc, drm_rt_dataset);
415 }
416
pvr_winsys_geom_flags_to_drm(const struct pvr_winsys_geometry_state_flags * const ws_flags)417 static uint32_t pvr_winsys_geom_flags_to_drm(
418 const struct pvr_winsys_geometry_state_flags *const ws_flags)
419 {
420 uint32_t flags = 0U;
421
422 if (ws_flags->is_first_geometry)
423 flags |= DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST;
424
425 if (ws_flags->is_last_geometry)
426 flags |= DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST;
427
428 if (ws_flags->use_single_core)
429 flags |= DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE;
430
431 return flags;
432 }
433
pvr_winsys_frag_flags_to_drm(const struct pvr_winsys_fragment_state_flags * const ws_flags)434 static uint32_t pvr_winsys_frag_flags_to_drm(
435 const struct pvr_winsys_fragment_state_flags *const ws_flags)
436 {
437 uint32_t flags = 0U;
438
439 if (ws_flags->use_single_core)
440 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE;
441
442 if (ws_flags->has_depth_buffer)
443 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER;
444
445 if (ws_flags->has_stencil_buffer)
446 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER;
447
448 if (ws_flags->prevent_cdm_overlap)
449 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP;
450
451 if (ws_flags->get_vis_results)
452 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS;
453
454 if (ws_flags->has_spm_scratch_buffer)
455 flags |= DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER;
456
457 return flags;
458 }
459
/* Submit a render: a geometry job, a partial render (PR) job and,
 * when submit_info->has_fragment_job is set, a fragment job — all in a
 * single DRM_IOCTL_PVR_SUBMIT_JOBS call.
 *
 * Sync setup: the geometry job waits on submit_info->geometry.wait and
 * signals either signal_sync_geom (if provided) or the context's private
 * geom_to_pr_syncobj; the PR job waits on whichever of those was
 * signaled, establishing the geom -> PR ordering. The fragment job waits
 * on submit_info->fragment.wait and signals signal_sync_frag.
 *
 * Sync op array capacities: geom_sync_ops[2] (wait + one signal — the
 * signal_sync_geom and geom_to_pr_syncobj branches are mutually
 * exclusive), pr_sync_ops[1] (single wait), frag_sync_ops[3] (wait +
 * signal used; one spare slot).
 */
VkResult pvr_drm_winsys_render_submit(
   const struct pvr_winsys_render_ctx *ctx,
   const struct pvr_winsys_render_submit_info *submit_info,
   UNUSED const struct pvr_device_info *dev_info,
   struct vk_sync *signal_sync_geom,
   struct vk_sync *signal_sync_frag)

{
   const struct pvr_drm_winsys *drm_ws = to_pvr_drm_winsys(ctx->ws);
   const struct pvr_drm_winsys_render_ctx *drm_ctx =
      to_pvr_drm_winsys_render_ctx(ctx);
   const struct pvr_winsys_geometry_state *const geom_state =
      &submit_info->geometry;
   const struct pvr_winsys_fragment_state *const frag_state =
      &submit_info->fragment;
   const struct pvr_winsys_fragment_state *const pr_state =
      &submit_info->fragment_pr;
   const struct pvr_drm_winsys_rt_dataset *drm_rt_dataset =
      to_pvr_drm_winsys_rt_dataset(submit_info->rt_dataset);

   struct drm_pvr_sync_op geom_sync_ops[2], pr_sync_ops[1], frag_sync_ops[3];
   unsigned num_geom_syncs = 0, num_pr_syncs = 0, num_frag_syncs = 0;
   uint32_t geom_to_pr_syncobj;

   /* jobs_args[0] = geometry, [1] = PR; [2] is filled in below when a
    * fragment job is present. Sync op counts are patched in at the end.
    */
   struct drm_pvr_job jobs_args[3] = {
      [0] = {
         .type = DRM_PVR_JOB_TYPE_GEOMETRY,
         .cmd_stream = (__u64)&geom_state->fw_stream[0],
         .cmd_stream_len = geom_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = pvr_winsys_geom_flags_to_drm(&geom_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, geom_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      },
      [1] = {
         .type = DRM_PVR_JOB_TYPE_FRAGMENT,
         .cmd_stream = (__u64)&pr_state->fw_stream[0],
         .cmd_stream_len = pr_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER |
                  pvr_winsys_frag_flags_to_drm(&pr_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, pr_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      }
   };

   struct drm_pvr_ioctl_submit_jobs_args args = {
      .jobs = DRM_PVR_OBJ_ARRAY(2, jobs_args),
   };

   /* Geom syncs */

   if (submit_info->geometry.wait) {
      struct vk_sync *sync = submit_info->geometry.wait;

      assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(sync)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   if (signal_sync_geom) {
      assert(!(signal_sync_geom->flags & VK_SYNC_IS_TIMELINE));
      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = vk_sync_as_drm_syncobj(signal_sync_geom)->syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   /* PR syncs */

   /* Reuse the caller's geometry signal syncobj for the geom -> PR
    * dependency when there is one; otherwise signal the context's
    * private syncobj from the geometry job instead.
    */
   if (signal_sync_geom) {
      assert(!(signal_sync_geom->flags & VK_SYNC_IS_TIMELINE));
      geom_to_pr_syncobj = vk_sync_as_drm_syncobj(signal_sync_geom)->syncobj;
   } else {
      geom_to_pr_syncobj = drm_ctx->geom_to_pr_syncobj;

      geom_sync_ops[num_geom_syncs++] = (struct drm_pvr_sync_op){
         .handle = geom_to_pr_syncobj,
         .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                  DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
         .value = 0,
      };
   }

   pr_sync_ops[num_pr_syncs++] = (struct drm_pvr_sync_op){
      .handle = geom_to_pr_syncobj,
      .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
               DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
      .value = 0,
   };

   /* Frag job */

   if (submit_info->has_fragment_job) {
      jobs_args[args.jobs.count++] = (struct drm_pvr_job) {
         .type = DRM_PVR_JOB_TYPE_FRAGMENT,
         .cmd_stream = (__u64)&frag_state->fw_stream[0],
         .cmd_stream_len = frag_state->fw_stream_len,
         .context_handle = drm_ctx->handle,
         .flags = pvr_winsys_frag_flags_to_drm(&frag_state->flags),
         .sync_ops = DRM_PVR_OBJ_ARRAY(0, frag_sync_ops),
         .hwrt = {
            .set_handle = drm_rt_dataset->handle,
            .data_index = submit_info->rt_data_idx,
         },
      };

      /* There's no need to setup a geom -> frag dependency here, as we always
       * setup a geom -> pr dependency (a PR just being a frag job) and the KMD
       * respects submission order for jobs of the same type.
       *
       * Note that, in the case where PRs aren't needed, because we didn't run
       * out of PB space during the geometry phase, the PR job will still be
       * scheduled after the geometry job, but no PRs will be performed, as
       * they aren't needed.
       */

      if (submit_info->fragment.wait) {
         struct vk_sync *sync = submit_info->fragment.wait;

         assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
         frag_sync_ops[num_frag_syncs++] = (struct drm_pvr_sync_op){
            .handle = vk_sync_as_drm_syncobj(sync)->syncobj,
            .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
                     DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
            .value = 0,
         };
      }

      if (signal_sync_frag) {
         assert(!(signal_sync_frag->flags & VK_SYNC_IS_TIMELINE));
         frag_sync_ops[num_frag_syncs++] = (struct drm_pvr_sync_op){
            .handle = vk_sync_as_drm_syncobj(signal_sync_frag)->syncobj,
            .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
                     DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
            .value = 0,
         };
      }
   }

   /* Patch in the final sync op counts now that they're known. */
   jobs_args[0].sync_ops.count = num_geom_syncs;
   jobs_args[1].sync_ops.count = num_pr_syncs;

   if (submit_info->has_fragment_job)
      jobs_args[2].sync_ops.count = num_frag_syncs;

   /* Returns VK_ERROR_OUT_OF_DEVICE_MEMORY to match pvrsrv. */
   return pvr_ioctlf(drm_ws->base.render_fd,
                     DRM_IOCTL_PVR_SUBMIT_JOBS,
                     &args,
                     VK_ERROR_OUT_OF_DEVICE_MEMORY,
                     "Failed to submit render job");
}
625