Lines Matching full:job
238 * Call this function to allocate job CCCB and done fences. This only
303 * pvr_queue_job_fence_init() - Initializes a job done fence object.
308 * a job.
350 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count) in job_cmds_size() argument
353 * and a command for the job itself. in job_cmds_size()
356 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); in job_cmds_size()
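The two fragments above describe the whole calculation: a job's command sequence is the UFO wait commands for its remaining native dependencies plus the job command itself, each wrapped with a CCCB command header. Below is a minimal sketch of that accounting; only pvr_cccb_get_size_of_cmd_with_hdr() and job->cmd_len are taken from the listing, and the use of struct rogue_fwif_ufo to size the wait entries is an assumption.

static u32 job_cmds_size_sketch(struct pvr_job *job, u32 ufo_wait_count)
{
	/* Assumed accounting: one fence command carrying ufo_wait_count UFO
	 * entries, plus the job command itself with its CCCB header.
	 */
	return pvr_cccb_get_size_of_cmd_with_hdr(ufo_wait_count *
						 sizeof(struct rogue_fwif_ufo)) +
	       pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
}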
361 * @job: Job to operate on.
365 static unsigned long job_count_remaining_native_deps(struct pvr_job *job) in job_count_remaining_native_deps() argument
371 xa_for_each(&job->base.dependencies, index, fence) { in job_count_remaining_native_deps()
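The xa_for_each() fragment above is the core of the walk: the scheduler job's dependency xarray is scanned and only unsignaled fences belonging to this driver ("native" fences) are counted, since only those become UFO waits in the CCCB. A hedged sketch of that walk, assuming to_pvr_queue_job_fence() (seen later in this listing) returns NULL for foreign fences:

static unsigned long count_remaining_native_deps_sketch(struct pvr_job *job)
{
	unsigned long remaining_count = 0;
	struct dma_fence *fence = NULL;
	unsigned long index;

	xa_for_each(&job->base.dependencies, index, fence) {
		struct pvr_queue_fence *jfence = to_pvr_queue_job_fence(fence);

		/* Foreign fences are waited on by the scheduler itself and
		 * never reach the CCCB, so they are not counted here.
		 */
		if (!jfence)
			continue;

		if (!dma_fence_is_signaled(&jfence->base))
			remaining_count++;
	}

	return remaining_count;
}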
386 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
387 * @queue: The queue this job will be submitted to.
388 * @job: The job to get the CCCB fence on.
390 * The CCCB fence is a synchronization primitive allowing us to delay job
391 * submission until there's enough space in the CCCB to submit the job.
394 * * NULL if there's enough space in the CCCB to submit this job, or
398 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_cccb_fence() argument
404 * enough space in the CCCB for our job. in pvr_queue_get_job_cccb_fence()
406 if (!job->cccb_fence) in pvr_queue_get_job_cccb_fence()
411 /* Count remaining native dependencies and check if the job fits in the CCCB. */ in pvr_queue_get_job_cccb_fence()
412 native_deps_remaining = job_count_remaining_native_deps(job); in pvr_queue_get_job_cccb_fence()
413 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
414 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
415 job->cccb_fence = NULL; in pvr_queue_get_job_cccb_fence()
419 /* There should be no job attached to the CCCB fence context: in pvr_queue_get_job_cccb_fence()
422 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
423 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
425 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
428 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_get_job_cccb_fence()
430 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
435 return dma_fence_get(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
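Put together, the fragments above describe a simple contract: the CCCB fence was pre-allocated in pvr_queue_job_init(); if the command sequence fits, the fence is dropped and NULL is returned, otherwise the job is parked on the queue's CCCB fence context and the fence is returned as an internal dependency. A condensed sketch under that reading, with the locking and sanity checks around cccb_fence_ctx omitted:

static struct dma_fence *
get_job_cccb_fence_sketch(struct pvr_queue *queue, struct pvr_job *job)
{
	/* Fence already consumed on a previous attempt: nothing to wait on. */
	if (!job->cccb_fence)
		return NULL;

	if (pvr_cccb_cmdseq_fits(&queue->cccb,
				 job_cmds_size(job, job_count_remaining_native_deps(job)))) {
		/* Enough room: drop the pre-allocated fence and submit now. */
		pvr_queue_fence_put(job->cccb_fence);
		job->cccb_fence = NULL;
		return NULL;
	}

	/* Not enough room: park the job on the queue and hand the CCCB fence
	 * back so the scheduler waits until space is freed.
	 */
	queue->cccb_fence_ctx.job = pvr_job_get(job);
	pvr_queue_cccb_fence_init(job->cccb_fence, queue);

	return dma_fence_get(job->cccb_fence);
}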
439 * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
440 * @queue: The queue this job will be submitted to.
441 * @job: The job to get the KCCB fence on.
443 * The KCCB fence is a synchronization primitive allowing us to delay job
444 * submission until there's enough space in the KCCB to submit the job.
447 * * NULL if there's enough space in the KCCB to submit this job, or
451 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_kccb_fence() argument
457 * enough space in the KCCB for our job. in pvr_queue_get_job_kccb_fence()
459 if (!job->kccb_fence) in pvr_queue_get_job_kccb_fence()
462 if (!WARN_ON(job->kccb_fence->ops)) { in pvr_queue_get_job_kccb_fence()
463 kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); in pvr_queue_get_job_kccb_fence()
464 job->kccb_fence = NULL; in pvr_queue_get_job_kccb_fence()
471 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_paired_frag_job_dep() argument
473 struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? in pvr_queue_get_paired_frag_job_dep()
474 job->paired_job : NULL; in pvr_queue_get_paired_frag_job_dep()
487 if (f == &job->base.s_fence->scheduled) in pvr_queue_get_paired_frag_job_dep()
498 * @sched_job: The job to query the next internal dependency on
499 * @s_entity: The entity this job is queued on.
508 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_prepare_job() local
514 * here because otherwise by the time of run_job() the job will end up in pvr_queue_prepare_job()
517 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_prepare_job()
519 * This will be called on a paired fragment job after being in pvr_queue_prepare_job()
522 * geometry job, which would issue a pm ref. in pvr_queue_prepare_job()
524 if (job->paired_job->has_pm_ref) in pvr_queue_prepare_job()
528 * In this case we need to use the job's own ctx to initialise in pvr_queue_prepare_job()
530 * paired geometry job. in pvr_queue_prepare_job()
532 pvr_queue_job_fence_init(job->done_fence, in pvr_queue_prepare_job()
533 job->ctx->queues.fragment); in pvr_queue_prepare_job()
535 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
541 internal_dep = pvr_queue_get_job_cccb_fence(queue, job); in pvr_queue_prepare_job()
547 internal_dep = pvr_queue_get_job_kccb_fence(queue, job); in pvr_queue_prepare_job()
553 * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job); in pvr_queue_prepare_job()
556 /* The paired job fence should come last, when everything else is ready. */ in pvr_queue_prepare_job()
558 internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); in pvr_queue_prepare_job()
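The prepare_job fragments above chain these helpers in a fixed order: initialize the done fence, then take the CCCB fence, then the KCCB fence, and only once everything else is ready the paired-fragment dependency. A hedged sketch of that chain (the done-fence initialization shown above happens before it); returning NULL tells the scheduler there is nothing left to wait on:

static struct dma_fence *
prepare_job_deps_sketch(struct pvr_queue *queue, struct pvr_job *job)
{
	struct dma_fence *internal_dep;

	internal_dep = pvr_queue_get_job_cccb_fence(queue, job);
	if (!internal_dep)
		internal_dep = pvr_queue_get_job_kccb_fence(queue, job);

	/* The paired-job fence comes last, when everything else is ready. */
	if (!internal_dep)
		internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);

	return internal_dep;
}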
599 * This function should be called any time a job is submitted or its done
611 static void pvr_queue_submit_job_to_cccb(struct pvr_job *job) in pvr_queue_submit_job_to_cccb() argument
613 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb()
628 xa_for_each(&job->base.dependencies, index, fence) { in pvr_queue_submit_job_to_cccb()
634 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job && in pvr_queue_submit_job_to_cccb()
635 &job->paired_job->base.s_fence->scheduled == fence) in pvr_queue_submit_job_to_cccb()
653 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_submit_job_to_cccb()
654 jfence = to_pvr_queue_job_fence(job->paired_job->done_fence); in pvr_queue_submit_job_to_cccb()
658 ufos[ufo_count++].value = job->paired_job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
667 if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) { in pvr_queue_submit_job_to_cccb()
668 struct rogue_fwif_cmd_geom *cmd = job->cmd; in pvr_queue_submit_job_to_cccb()
675 cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1; in pvr_queue_submit_job_to_cccb()
678 /* Submit job to FW */ in pvr_queue_submit_job_to_cccb()
679 pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd, in pvr_queue_submit_job_to_cccb()
680 job->id, job->id); in pvr_queue_submit_job_to_cccb()
682 /* Signal the job fence. */ in pvr_queue_submit_job_to_cccb()
684 ufos[0].value = job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
690 * pvr_queue_run_job() - Submit a job to the FW.
691 * @sched_job: The job to submit.
694 * when the commands resulting from this job are guaranteed to fit in the CCCB.
698 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_run_job() local
699 struct pvr_device *pvr_dev = job->pvr_dev; in pvr_queue_run_job()
702 /* The fragment job is issued along with the geometry job when we use combined in pvr_queue_run_job()
706 if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT && in pvr_queue_run_job()
707 job->done_fence->ops) { in pvr_queue_run_job()
708 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
712 * we bail out early if we see a fragment job that's paired with a geometry in pvr_queue_run_job()
713 * job. in pvr_queue_run_job()
717 if (WARN_ON(job->paired_job && in pvr_queue_run_job()
718 (job->type != DRM_PVR_JOB_TYPE_GEOMETRY || in pvr_queue_run_job()
719 job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT || in pvr_queue_run_job()
720 job->hwrt != job->paired_job->hwrt || in pvr_queue_run_job()
721 job->ctx != job->paired_job->ctx))) in pvr_queue_run_job()
724 err = pvr_job_get_pm_ref(job); in pvr_queue_run_job()
728 if (job->paired_job) { in pvr_queue_run_job()
729 err = pvr_job_get_pm_ref(job->paired_job); in pvr_queue_run_job()
734 /* Submit our job to the CCCB */ in pvr_queue_run_job()
735 pvr_queue_submit_job_to_cccb(job); in pvr_queue_run_job()
737 if (job->paired_job) { in pvr_queue_run_job()
738 struct pvr_job *geom_job = job; in pvr_queue_run_job()
739 struct pvr_job *frag_job = job->paired_job; in pvr_queue_run_job()
740 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
741 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
743 /* Submit the fragment job along with the geometry job and send a combined kick. */ in pvr_queue_run_job()
751 job->hwrt, in pvr_queue_run_job()
755 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job()
759 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
760 job->hwrt); in pvr_queue_run_job()
763 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
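A hedged sketch of the run_job() control flow the fragments above imply: a fragment that was already kicked together with its geometry job only returns its done fence, everything else takes PM references, is written to its CCCB, and is kicked. The pairing sanity checks and the FW kick helpers are omitted because they are not fully visible in this listing.

static struct dma_fence *run_job_sketch(struct drm_sched_job *sched_job)
{
	struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
	int err;

	/* A paired fragment whose done fence is already initialized was
	 * submitted along with its geometry job; just hand the fence back.
	 */
	if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT &&
	    job->done_fence->ops)
		return dma_fence_get(job->done_fence);

	err = pvr_job_get_pm_ref(job);
	if (WARN_ON(err))
		return ERR_PTR(err);

	if (job->paired_job) {
		err = pvr_job_get_pm_ref(job->paired_job);
		if (WARN_ON(err))
			return ERR_PTR(err);
	}

	pvr_queue_submit_job_to_cccb(job);

	if (job->paired_job) {
		/* Submit the paired fragment to its own CCCB and send a
		 * combined geometry+fragment kick (kick helper not shown in
		 * this listing).
		 */
		pvr_queue_submit_job_to_cccb(job->paired_job);
	}

	return dma_fence_get(job->done_fence);
}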
773 struct pvr_job *job; in pvr_queue_start() local
780 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
781 if (dma_fence_is_signaled(job->done_fence)) { in pvr_queue_start()
785 WARN_ON(job->base.s_fence->parent); in pvr_queue_start()
786 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_start()
789 * new job can be submitted. in pvr_queue_start()
799 * pvr_queue_timedout_job() - Handle a job timeout event.
800 * @s_job: The job this timeout occurred on.
814 struct pvr_job *job; in pvr_queue_timedout_job() local
817 dev_err(sched->dev, "Job timeout\n"); in pvr_queue_timedout_job()
835 /* Re-assign job parent fences. */ in pvr_queue_timedout_job()
836 list_for_each_entry(job, &sched->pending_list, base.list) { in pvr_queue_timedout_job()
837 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_timedout_job()
861 * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
862 * @sched_job: Job object to free.
866 struct pvr_job *job = container_of(sched_job, struct pvr_job, base); in pvr_queue_free_job() local
870 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) in pvr_queue_free_job()
871 pvr_job_put(job->paired_job); in pvr_queue_free_job()
873 job->paired_job = NULL; in pvr_queue_free_job()
874 pvr_job_put(job); in pvr_queue_free_job()
918 struct pvr_job *job, *tmp_job; in pvr_queue_signal_done_fences() local
923 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
924 if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0) in pvr_queue_signal_done_fences()
927 if (!dma_fence_is_signaled(job->done_fence)) { in pvr_queue_signal_done_fences()
928 dma_fence_signal(job->done_fence); in pvr_queue_signal_done_fences()
929 pvr_job_release_pm_ref(job); in pvr_queue_signal_done_fences()
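The signed comparison in pvr_queue_signal_done_fences() above is wraparound-safe: the 32-bit UFO counter eventually wraps, so the difference is interpreted as a signed value instead of comparing the raw numbers. A standalone illustration (not driver code):

/* "Has the queue's counter reached this fence's seqno yet?"
 * Interpreting the unsigned difference as signed keeps the answer correct
 * across the 32-bit wrap point.
 */
static bool seqno_reached_sketch(u32 cur_seqno, u32 fence_seqno)
{
	return (int)(cur_seqno - fence_seqno) >= 0;
}

/* Example: cur_seqno = 0x00000002 just after a wrap and fence_seqno =
 * 0xfffffffe give a signed difference of 4, so the fence is considered
 * reached even though 2 < 0xfffffffe numerically.
 */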
937 * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space
942 * If we have a job waiting for CCCB space, and this job now fits in the CCCB, we signal
950 struct pvr_job *job; in pvr_queue_check_job_waiting_for_cccb_space() local
953 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
954 if (!job) in pvr_queue_check_job_waiting_for_cccb_space()
957 /* If we have a job attached to the CCCB fence context, its CCCB fence in pvr_queue_check_job_waiting_for_cccb_space()
960 if (WARN_ON(!job->cccb_fence)) { in pvr_queue_check_job_waiting_for_cccb_space()
961 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
966 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_check_job_waiting_for_cccb_space()
968 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
973 * If the job fits, signal the CCCB fence; this should unblock in pvr_queue_check_job_waiting_for_cccb_space()
976 native_deps_remaining = job_count_remaining_native_deps(job); in pvr_queue_check_job_waiting_for_cccb_space()
977 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
978 job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
982 dma_fence_signal(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
983 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
984 job->cccb_fence = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
985 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
990 pvr_job_put(job); in pvr_queue_check_job_waiting_for_cccb_space()
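The fragments above are scattered across several early-exit paths; condensed, the logic is: if a job is parked on the CCCB fence context and its command sequence now fits, signal and drop its CCCB fence, detach it from the queue, and release the reference taken when it was parked. A sketch under that reading, with the locking omitted:

static void check_cccb_space_sketch(struct pvr_queue *queue)
{
	struct pvr_job *job = queue->cccb_fence_ctx.job;

	/* No job waiting, or its CCCB fence was already consumed. */
	if (!job || !job->cccb_fence)
		return;

	/* Only unblock once the full command sequence fits. */
	if (!pvr_cccb_cmdseq_fits(&queue->cccb,
				  job_cmds_size(job, job_count_remaining_native_deps(job))))
		return;

	dma_fence_signal(job->cccb_fence);
	pvr_queue_fence_put(job->cccb_fence);
	job->cccb_fence = NULL;
	queue->cccb_fence_ctx.job = NULL;

	/* Drop the reference taken when the job was parked on the queue. */
	pvr_job_put(job);
}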
997 * Signal job fences and check if jobs waiting for CCCB space can be unblocked.
1075 * @job: The job to initialize.
1077 * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
1079 * valid and the job can fit in the CCCB.
1085 int pvr_queue_job_init(struct pvr_job *job) in pvr_queue_job_init() argument
1087 /* Fragment jobs need at least one native fence wait on the geometry job fence. */ in pvr_queue_job_init()
1088 u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0; in pvr_queue_job_init()
1092 if (atomic_read(&job->ctx->faulty)) in pvr_queue_job_init()
1095 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1099 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1102 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
1106 job->cccb_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1107 job->kccb_fence = pvr_kccb_fence_alloc(); in pvr_queue_job_init()
1108 job->done_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1109 if (!job->cccb_fence || !job->kccb_fence || !job->done_fence) in pvr_queue_job_init()
1116 * pvr_queue_job_arm() - Arm a job object.
1117 * @job: The job to arm.
1121 * make sure the job is pushed using pvr_queue_job_push(), or guarantee that
1123 * we do multi-job submission, and something failed when creating/initializing
1124 * a job. In that case, we know the fence didn't leave the driver, and we
1130 struct dma_fence *pvr_queue_job_arm(struct pvr_job *job) in pvr_queue_job_arm() argument
1132 drm_sched_job_arm(&job->base); in pvr_queue_job_arm()
1134 return &job->base.s_fence->finished; in pvr_queue_job_arm()
1138 * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
1139 * @job: The job to cleanup.
1141 * Should be called in the job release path.
1143 void pvr_queue_job_cleanup(struct pvr_job *job) in pvr_queue_job_cleanup() argument
1145 pvr_queue_fence_put(job->done_fence); in pvr_queue_job_cleanup()
1146 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_job_cleanup()
1147 pvr_kccb_fence_put(job->kccb_fence); in pvr_queue_job_cleanup()
1149 if (job->base.s_fence) in pvr_queue_job_cleanup()
1150 drm_sched_job_cleanup(&job->base); in pvr_queue_job_cleanup()
1154 * pvr_queue_job_push() - Push a job to its queue.
1155 * @job: The job to push.
1158 * have been added to the job. This will effectively queue the job to
1160 * the job object, so the caller is free to drop its reference when it's
1161 * done accessing the job object.
1163 void pvr_queue_job_push(struct pvr_job *job) in pvr_queue_job_push() argument
1165 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push()
1167 /* Keep track of the last queued job scheduled fence for combined submit. */ in pvr_queue_job_push()
1169 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
1171 pvr_job_get(job); in pvr_queue_job_push()
1172 drm_sched_entity_push_job(&job->base); in pvr_queue_job_push()
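Taken together, the last few entry points describe the submission lifecycle as seen by the rest of the driver: init binds the job to a queue and pre-allocates its fences, arm produces the finished fence, push hands the job to the DRM scheduler, and cleanup runs in the release path. A hedged sketch of how a caller might chain them, with the multi-job submit logic and error unwinding omitted:

static struct dma_fence *submit_one_job_sketch(struct pvr_job *job)
{
	struct dma_fence *finished;
	int err;

	err = pvr_queue_job_init(job);
	if (err)
		return ERR_PTR(err);

	/* The finished fence can be installed in syncobjs / returned to
	 * userspace as the out-fence once the job is armed.
	 */
	finished = pvr_queue_job_arm(job);

	/* Push takes its own reference on the job, so the caller is free to
	 * drop its reference once it no longer touches the job object.
	 */
	pvr_queue_job_push(job);

	return finished;
}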