Lines matching the identifier `job` (whole-word search) in drivers/gpu/drm/imagination/pvr_job.c
25 struct pvr_job *job = container_of(kref, struct pvr_job, ref_count); in pvr_job_release()
27 xa_erase(&job->pvr_dev->job_ids, job->id); in pvr_job_release()
29 pvr_hwrt_data_put(job->hwrt); in pvr_job_release()
30 pvr_context_put(job->ctx); in pvr_job_release()
32 WARN_ON(job->paired_job); in pvr_job_release()
34 pvr_queue_job_cleanup(job); in pvr_job_release()
35 pvr_job_release_pm_ref(job); in pvr_job_release()
37 kfree(job->cmd); in pvr_job_release()
38 kfree(job); in pvr_job_release()
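The matched lines above cover nearly the whole release callback. A sketch of the complete function, reconstructed from those lines; only the braces, blank lines, and comments are added, and the `static void` signature is the standard shape of a kref release callback rather than something shown in the matches:

static void pvr_job_release(struct kref *kref)
{
	struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);

	/* Drop the ID first so no new lookup can find the dying job. */
	xa_erase(&job->pvr_dev->job_ids, job->id);

	pvr_hwrt_data_put(job->hwrt);
	pvr_context_put(job->ctx);

	/* Any paired geometry/fragment job should be gone by now. */
	WARN_ON(job->paired_job);

	pvr_queue_job_cleanup(job);
	pvr_job_release_pm_ref(job);

	kfree(job->cmd);
	kfree(job);
}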
42 * pvr_job_put() - Release reference on job
43 * @job: Target job.
46 pvr_job_put(struct pvr_job *job) in pvr_job_put()
48 if (job) in pvr_job_put()
49 kref_put(&job->ref_count, pvr_job_release); in pvr_job_put()
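pvr_job_put() is the usual NULL-tolerant put wrapper; a sketch combining the doc comment and body shown above:

/**
 * pvr_job_put() - Release reference on job
 * @job: Target job.
 */
void
pvr_job_put(struct pvr_job *job)
{
	if (job)
		kref_put(&job->ref_count, pvr_job_release);
}

Because NULL is accepted, error paths can call it unconditionally on a partially built job, as create_job() does further down.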
53 * pvr_job_process_stream() - Build job FW structure from stream
58 * @job: Pointer to job.
69 void *stream, u32 stream_size, struct pvr_job *job) in pvr_job_process_stream()
73 job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL); in pvr_job_process_stream()
74 if (!job->cmd) in pvr_job_process_stream()
77 job->cmd_len = cmd_defs->dest_size; in pvr_job_process_stream()
79 err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd); in pvr_job_process_stream()
81 kfree(job->cmd); in pvr_job_process_stream()
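These lines show the allocate/fill/free-on-error pattern used to build the FW command buffer. A sketch, assuming the pvr_stream_cmd_defs parameter type and the -ENOMEM return, neither of which appears in the matches:

static int
pvr_job_process_stream(struct pvr_device *pvr_dev,
		       const struct pvr_stream_cmd_defs *cmd_defs,
		       void *stream, u32 stream_size, struct pvr_job *job)
{
	int err;

	/* Destination size is fixed by the command definition. */
	job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
	if (!job->cmd)
		return -ENOMEM;

	job->cmd_len = cmd_defs->dest_size;

	err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd);
	if (err)
		kfree(job->cmd);	/* don't leak the half-built command */

	return err;
}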
86 static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, in pvr_fw_cmd_init()
97 err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job); in pvr_fw_cmd_init()
142 pvr_geom_job_fw_cmd_init(struct pvr_job *job, in pvr_geom_job_fw_cmd_init()
151 if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER) in pvr_geom_job_fw_cmd_init()
154 if (!job->hwrt) in pvr_geom_job_fw_cmd_init()
157 job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM; in pvr_geom_job_fw_cmd_init()
158 err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream, in pvr_geom_job_fw_cmd_init()
163 cmd = job->cmd; in pvr_geom_job_fw_cmd_init()
166 pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr); in pvr_geom_job_fw_cmd_init()
171 pvr_frag_job_fw_cmd_init(struct pvr_job *job, in pvr_frag_job_fw_cmd_init()
180 if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER) in pvr_frag_job_fw_cmd_init()
183 if (!job->hwrt) in pvr_frag_job_fw_cmd_init()
186 job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ? in pvr_frag_job_fw_cmd_init()
189 err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream, in pvr_frag_job_fw_cmd_init()
194 cmd = job->cmd; in pvr_frag_job_fw_cmd_init()
197 pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr); in pvr_frag_job_fw_cmd_init()
215 pvr_compute_job_fw_cmd_init(struct pvr_job *job, in pvr_compute_job_fw_cmd_init()
224 if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE) in pvr_compute_job_fw_cmd_init()
227 job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM; in pvr_compute_job_fw_cmd_init()
228 err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream, in pvr_compute_job_fw_cmd_init()
233 cmd = job->cmd; in pvr_compute_job_fw_cmd_init()
251 pvr_transfer_job_fw_cmd_init(struct pvr_job *job, in pvr_transfer_job_fw_cmd_init()
260 if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG) in pvr_transfer_job_fw_cmd_init()
263 job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D; in pvr_transfer_job_fw_cmd_init()
264 err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream, in pvr_transfer_job_fw_cmd_init()
269 cmd = job->cmd; in pvr_transfer_job_fw_cmd_init()
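The four job-type helpers above share one shape: validate the context type (and, for render jobs, the HWRT), pick the FW CCB command type, build the FW command from the user stream, then patch FW addresses into the result. A sketch for the geometry case; the -EINVAL codes, the rogue_fwif_cmd_geom type name, and the args->cmd_stream/args->cmd_stream_len field names are assumptions:

static int
pvr_geom_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args)
{
	struct rogue_fwif_cmd_geom *cmd;
	int err;

	/* Geometry jobs only make sense on a render context with a HWRT. */
	if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
		return -EINVAL;

	if (!job->hwrt)
		return -EINVAL;

	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
			      args->cmd_stream, args->cmd_stream_len);
	if (err)
		return err;

	cmd = job->cmd;
	/* Patch in the FW address of the HWRT data this job renders into. */
	pvr_fw_object_get_fw_addr(job->hwrt->fw_obj,
				  &cmd->cmd_shared.hwrt_data_fw_addr);
	return 0;
}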
276 pvr_job_fw_cmd_init(struct pvr_job *job, in pvr_job_fw_cmd_init()
281 return pvr_geom_job_fw_cmd_init(job, args); in pvr_job_fw_cmd_init()
284 return pvr_frag_job_fw_cmd_init(job, args); in pvr_job_fw_cmd_init()
287 return pvr_compute_job_fw_cmd_init(job, args); in pvr_job_fw_cmd_init()
290 return pvr_transfer_job_fw_cmd_init(job, args); in pvr_job_fw_cmd_init()
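The dispatcher these returns belong to is a plain switch on the job type; the case labels and the default arm are inferred from the callees, not shown in the matches:

static int
pvr_job_fw_cmd_init(struct pvr_job *job, struct drm_pvr_job *args)
{
	switch (args->type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return pvr_geom_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return pvr_frag_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_COMPUTE:
		return pvr_compute_job_fw_cmd_init(job, args);

	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
		return pvr_transfer_job_fw_cmd_init(job, args);

	default:
		return -EINVAL;	/* assumed fallback for unknown types */
	}
}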
302 /** @job: Pointer to the job. */
303 struct pvr_job *job;
305 /** @sync_ops: Pointer to the sync_ops associated with @job. */
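The member lines above sit in the per-submission descriptor that pairs a job with its sync operations; the struct name and the sync_op_count companion field are assumptions:

struct pvr_job_data {
	/** @job: Pointer to the job. */
	struct pvr_job *job;

	/** @sync_ops: Pointer to the sync_ops associated with @job. */
	struct drm_pvr_sync_op *sync_ops;

	/** @sync_op_count: Number of entries in @sync_ops. */
	u32 sync_op_count;
};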
313 * prepare_job_syncs() - Prepare all sync objects for a single job.
315 * @job_data: Precreated job and sync_ops array.
338 err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base, in prepare_job_syncs()
344 if (job_data->job->hwrt) { in prepare_job_syncs()
345 /* The geometry job writes the HWRT region headers, which are in prepare_job_syncs()
346 * then read by the fragment job. in prepare_job_syncs()
349 gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem); in prepare_job_syncs()
351 dma_resv_usage_rw(job_data->job->type == in prepare_job_syncs()
355 err = drm_sched_job_add_resv_dependencies(&job_data->job->base, in prepare_job_syncs()
362 /* We need to arm the job to get the job done fence. */ in prepare_job_syncs()
363 done_fence = pvr_queue_job_arm(job_data->job); in prepare_job_syncs()
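Stitched together, the implicit-dependency step reads as follows: a geometry job writes the HWRT region headers and a fragment job reads them, so dependencies are pulled from the HWRT GEM object's reservation with the usage picked by job type. A sketch of that fragment, with the surrounding locals (obj, usage, err, done_fence) assumed:

	struct drm_gem_object *obj =
		gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
	enum dma_resv_usage usage =
		dma_resv_usage_rw(job_data->job->type ==
				  DRM_PVR_JOB_TYPE_GEOMETRY);

	/* Writers (geometry) wait on readers and writers; readers
	 * (fragment) only wait on writers.
	 */
	err = drm_sched_job_add_resv_dependencies(&job_data->job->base,
						  obj->resv, usage);
	if (err)
		return err;

	/* Arming the job materialises the "job done" fence that
	 * out-syncs will signal on.
	 */
	done_fence = pvr_queue_job_arm(job_data->job);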
407 struct pvr_job *job = NULL; in create_job()
418 job = kzalloc(sizeof(*job), GFP_KERNEL); in create_job()
419 if (!job) in create_job()
422 kref_init(&job->ref_count); in create_job()
423 job->type = args->type; in create_job()
424 job->pvr_dev = pvr_dev; in create_job()
426 err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL); in create_job()
430 job->ctx = pvr_context_lookup(pvr_file, args->context_handle); in create_job()
431 if (!job->ctx) { in create_job()
437 job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle, in create_job()
439 if (!job->hwrt) { in create_job()
445 err = pvr_job_fw_cmd_init(job, args); in create_job()
449 err = pvr_queue_job_init(job); in create_job()
453 return job; in create_job()
456 pvr_job_put(job); in create_job()
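create_job() leans on the NULL-tolerant teardown shown earlier: every failure funnels into a single label that just drops the reference, and pvr_job_release() copes with whatever was or wasn't set up because the struct started out zeroed. A sketch stitched from the matched lines; the goto labels, the -EINVAL codes, and the args->hwrt.data_index field are assumptions (in the real driver the HWRT lookup is presumably gated on the job type):

static struct pvr_job *
create_job(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
	   struct drm_pvr_job *args)
{
	struct pvr_job *job = NULL;
	int err;

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	kref_init(&job->ref_count);
	job->type = args->type;
	job->pvr_dev = pvr_dev;

	/* Make the job discoverable by ID; undone in pvr_job_release(). */
	err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_put_job;

	job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
	if (!job->ctx) {
		err = -EINVAL;
		goto err_put_job;
	}

	/* Render-type jobs also resolve their HWRT data here. */
	job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
					 args->hwrt.data_index);
	if (!job->hwrt) {
		err = -EINVAL;
		goto err_put_job;
	}

	err = pvr_job_fw_cmd_init(job, args);
	if (err)
		goto err_put_job;

	err = pvr_queue_job_init(job);
	if (err)
		goto err_put_job;

	return job;

err_put_job:
	pvr_job_put(job);
	return ERR_PTR(err);
}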
461 * pvr_job_data_fini() - Cleanup all allocs used to set up job submission.
462 * @job_data: Job data array.
469 pvr_job_put(job_data[i].job); in pvr_job_data_fini()
479 * @job_args: Job args array copied from user.
481 * @job_data_out: Job data array.
492 job_data_out[i].job = in pvr_job_data_init()
494 err = PTR_ERR_OR_ZERO(job_data_out[i].job); in pvr_job_data_init()
498 job_data_out[i].job = NULL; in pvr_job_data_init()
507 /* Ensure the job created above is also cleaned up. */ in pvr_job_data_init()
527 pvr_queue_job_push(job_data[i].job); in push_jobs()
541 struct pvr_job *job = job_data[i].job; in jobs_lock_all_objs()
547 gem_from_pvr_gem(job->ctx->fw_obj->gem)); in jobs_lock_all_objs()
552 if (job->hwrt) { in jobs_lock_all_objs()
554 job->hwrt->fw_obj); in jobs_lock_all_objs()
579 update_job_resvs(struct pvr_job *job) in update_job_resvs()
581 if (job->hwrt) { in update_job_resvs()
582 enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? in update_job_resvs()
584 struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem); in update_job_resvs()
586 dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage); in update_job_resvs()
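update_job_resvs() is the mirror image of the dependency step: once the job is armed, its finished fence is published on the HWRT object so later submissions see it. A sketch; the two arms of the truncated ternary are assumptions, chosen to be consistent with the earlier dma_resv_usage_rw() call:

static void
update_job_resvs(struct pvr_job *job)
{
	if (job->hwrt) {
		/* Geometry writes the HWRT data, fragment only reads it. */
		enum dma_resv_usage usage =
			job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
			DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
		struct drm_gem_object *obj =
			gem_from_pvr_gem(job->hwrt->fw_obj->gem);

		dma_resv_add_fence(obj->resv, &job->base.s_fence->finished,
				   usage);
	}
}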
594 update_job_resvs(job_data[i].job); in update_job_resvs_for_each()
626 /* We iterate over the current job array in reverse order to grab the in get_last_queued_job_scheduled_fence()
627 * last to-be-queued job targeting the same queue. in get_last_queued_job_scheduled_fence()
630 struct pvr_job *job = job_data[i - 1].job; in get_last_queued_job_scheduled_fence()
632 if (job->ctx == queue->ctx && job->type == queue->type) in get_last_queued_job_scheduled_fence()
633 return dma_fence_get(&job->base.s_fence->scheduled); in get_last_queued_job_scheduled_fence()
636 /* If we didn't find any, we just return the last queued job scheduled in get_last_queued_job_scheduled_fence()
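A sketch of the reverse walk; the parameter list and the final fallback are assumptions, since the matches only show the loop body and the truncated comment about returning the last queued job's scheduled fence:

static struct dma_fence *
get_last_queued_job_scheduled_fence(struct pvr_queue *queue,
				    struct pvr_job_data *job_data,
				    u32 cur_job_pos)
{
	u32 i;

	/* Iterate in reverse to find the last to-be-queued job that
	 * targets the same queue (same context and job type).
	 */
	for (i = cur_job_pos; i > 0; i--) {
		struct pvr_job *job = job_data[i - 1].job;

		if (job->ctx == queue->ctx && job->type == queue->type)
			return dma_fence_get(&job->base.s_fence->scheduled);
	}

	/* Otherwise fall back to the job most recently queued on the
	 * queue itself, per the comment above (details not shown).
	 */
	return NULL;
}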
646 struct pvr_job *geom_job = job_data[i].job; in pvr_jobs_link_geom_frag()
647 struct pvr_job *frag_job = job_data[i + 1].job; in pvr_jobs_link_geom_frag()
651 if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job)) in pvr_jobs_link_geom_frag()
654 /* The fragment job will be submitted by the geometry queue. We in pvr_jobs_link_geom_frag()
671 /* The KCCB slot will be reserved by the geometry job, so we can in pvr_jobs_link_geom_frag()
672 * drop the KCCB fence on the fragment job. in pvr_jobs_link_geom_frag()
680 /* The geometry job pvr_job structure is used when the fragment in pvr_jobs_link_geom_frag()
681 * job is being prepared by the GPU scheduler. Have the fragment in pvr_jobs_link_geom_frag()
682 * job hold a reference on the geometry job to prevent it being in pvr_jobs_link_geom_frag()
683 * freed until the fragment job has finished with it. in pvr_jobs_link_geom_frag()
687 /* Skip the fragment job we just paired to the geometry job. */ in pvr_jobs_link_geom_frag()
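Reading the comments together, pairing a geometry job with its trailing fragment job does three things: route the fragment through the geometry queue, drop the fragment's now-redundant KCCB reservation fence, and pin the geometry job until the fragment is done with it. A sketch of that last step; taking the reference via kref_get() on the ref_count field shown earlier is an assumption about how it is done:

	/* Fragment holds a reference on geometry: the scheduler may
	 * still dereference geom_job while preparing frag_job.
	 */
	kref_get(&geom_job->ref_count);
	frag_job->paired_job = geom_job;
	geom_job->paired_job = frag_job;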
766 /* Anything after that point must succeed because we start exposing job in pvr_submit_jobs()