Lines matching "vdev": search hits from the ivpu (Intel NPU) accelerator driver's job-submission code, apparently drivers/accel/ivpu/ivpu_job.c. Each entry gives the source line number, the matching line, and the enclosing function, with vdev's role noted (argument, local, or member).
27 static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) in ivpu_cmdq_ring_db() argument
29 ivpu_hw_db_set(vdev, cmdq->db_id); in ivpu_cmdq_ring_db()
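The two fragments above are essentially all of ivpu_cmdq_ring_db(): ringing a doorbell is a single ivpu_hw_db_set() call with the queue's doorbell id, which ultimately writes a device register to tell the firmware that this queue's tail has advanced. A minimal user-space sketch of that pattern; the register index and semantics here are hypothetical, not the ivpu register map:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a mapped register file; a real driver would ioremap()
 * a BAR and use writel() with the proper memory barriers. */
static volatile uint32_t fake_regs[1];

static void db_set(volatile uint32_t *regs, uint32_t db_id)
{
	regs[0] = db_id;	/* hypothetical doorbell trigger register */
}

int main(void)
{
	db_set(fake_regs, 5);
	printf("doorbell register now holds %u\n", (unsigned)fake_regs[0]);
	return 0;
}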
32 static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, in ivpu_preemption_buffers_create() argument
35 u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); in ivpu_preemption_buffers_create()
36 u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); in ivpu_preemption_buffers_create()
38 if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || in ivpu_preemption_buffers_create()
42 cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, in ivpu_preemption_buffers_create()
45 ivpu_err(vdev, "Failed to create primary preemption buffer\n"); in ivpu_preemption_buffers_create()
49 cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, in ivpu_preemption_buffers_create()
52 ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); in ivpu_preemption_buffers_create()
64 static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, in ivpu_preemption_buffers_free() argument
67 if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) in ivpu_preemption_buffers_free()
78 struct ivpu_device *vdev = file_priv->vdev; in ivpu_cmdq_alloc() local
86 ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next, in ivpu_cmdq_alloc()
89 ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret); in ivpu_cmdq_alloc()
96 ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret); in ivpu_cmdq_alloc()
100 cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); in ivpu_cmdq_alloc()
104 ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); in ivpu_cmdq_alloc()
106 ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); in ivpu_cmdq_alloc()
113 xa_erase(&vdev->db_xa, cmdq->db_id); in ivpu_cmdq_alloc()
124 ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq); in ivpu_cmdq_free()
126 xa_erase(&file_priv->vdev->db_xa, cmdq->db_id); in ivpu_cmdq_free()
133 struct ivpu_device *vdev = file_priv->vdev; in ivpu_hws_cmdq_init() local
136 ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id, in ivpu_hws_cmdq_init()
142 ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id, in ivpu_hws_cmdq_init()
152 struct ivpu_device *vdev = file_priv->vdev; in ivpu_register_db() local
155 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) in ivpu_register_db()
156 ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id, in ivpu_register_db()
159 ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, in ivpu_register_db()
163 ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n", in ivpu_register_db()
172 struct ivpu_device *vdev = file_priv->vdev; in ivpu_cmdq_init() local
190 ivpu_dbg(vdev, JOB, "Turbo mode enabled"); in ivpu_cmdq_init()
196 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { in ivpu_cmdq_init()
213 struct ivpu_device *vdev = file_priv->vdev; in ivpu_cmdq_fini() local
223 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { in ivpu_cmdq_fini()
224 ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id); in ivpu_cmdq_fini()
226 ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id); in ivpu_cmdq_fini()
229 ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id); in ivpu_cmdq_fini()
231 ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id); in ivpu_cmdq_fini()
295 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev) in ivpu_cmdq_reset_all_contexts() argument
300 mutex_lock(&vdev->context_list_lock); in ivpu_cmdq_reset_all_contexts()
302 xa_for_each(&vdev->context_xa, ctx_id, file_priv) in ivpu_cmdq_reset_all_contexts()
305 mutex_unlock(&vdev->context_list_lock); in ivpu_cmdq_reset_all_contexts()
319 struct ivpu_device *vdev = file_priv->vdev; in ivpu_context_abort_locked() local
325 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) in ivpu_context_abort_locked()
326 ivpu_jsm_context_release(vdev, file_priv->ctx.id); in ivpu_context_abort_locked()
331 struct ivpu_device *vdev = job->vdev; in ivpu_cmdq_push_job() local
339 ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n", in ivpu_cmdq_push_job()
351 if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { in ivpu_cmdq_push_job()
374 struct ivpu_device *vdev; member
391 return dev_name(ivpu_fence->vdev->drm.dev); in ivpu_fence_get_timeline_name()
399 static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev) in ivpu_fence_create() argument
407 fence->vdev = vdev; in ivpu_fence_create()
416 struct ivpu_device *vdev = job->vdev; in ivpu_job_destroy() local
419 ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d", in ivpu_job_destroy()
434 struct ivpu_device *vdev = file_priv->vdev; in ivpu_job_create() local
441 job->vdev = vdev; in ivpu_job_create()
444 job->done_fence = ivpu_fence_create(vdev); in ivpu_job_create()
446 ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); in ivpu_job_create()
453 ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx); in ivpu_job_create()
461 static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id) in ivpu_job_remove_from_submitted_jobs() argument
465 xa_lock(&vdev->submitted_jobs_xa); in ivpu_job_remove_from_submitted_jobs()
466 job = __xa_erase(&vdev->submitted_jobs_xa, job_id); in ivpu_job_remove_from_submitted_jobs()
468 if (xa_empty(&vdev->submitted_jobs_xa) && job) { in ivpu_job_remove_from_submitted_jobs()
469 vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts), in ivpu_job_remove_from_submitted_jobs()
470 vdev->busy_time); in ivpu_job_remove_from_submitted_jobs()
473 xa_unlock(&vdev->submitted_jobs_xa); in ivpu_job_remove_from_submitted_jobs()
478 static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status) in ivpu_job_signal_and_destroy() argument
482 job = ivpu_job_remove_from_submitted_jobs(vdev, job_id); in ivpu_job_signal_and_destroy()
493 ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n", in ivpu_job_signal_and_destroy()
497 ivpu_stop_job_timeout_detection(vdev); in ivpu_job_signal_and_destroy()
499 ivpu_rpm_put(vdev); in ivpu_job_signal_and_destroy()
503 void ivpu_jobs_abort_all(struct ivpu_device *vdev) in ivpu_jobs_abort_all() argument
508 xa_for_each(&vdev->submitted_jobs_xa, id, job) in ivpu_jobs_abort_all()
509 ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED); in ivpu_jobs_abort_all()
515 struct ivpu_device *vdev = job->vdev; in ivpu_job_submit() local
520 ret = ivpu_rpm_get(vdev); in ivpu_job_submit()
528 ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n", in ivpu_job_submit()
534 xa_lock(&vdev->submitted_jobs_xa); in ivpu_job_submit()
535 is_first_job = xa_empty(&vdev->submitted_jobs_xa); in ivpu_job_submit()
536 ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit, in ivpu_job_submit()
539 ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n", in ivpu_job_submit()
549 ivpu_start_job_timeout_detection(vdev); in ivpu_job_submit()
555 ivpu_cmdq_ring_db(vdev, cmdq); in ivpu_job_submit()
557 vdev->busy_start_ts = ktime_get(); in ivpu_job_submit()
561 ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n", in ivpu_job_submit()
565 xa_unlock(&vdev->submitted_jobs_xa); in ivpu_job_submit()
570 ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS); in ivpu_job_submit()
575 __xa_erase(&vdev->submitted_jobs_xa, job->job_id); in ivpu_job_submit()
577 xa_unlock(&vdev->submitted_jobs_xa); in ivpu_job_submit()
580 ivpu_rpm_put(vdev); in ivpu_job_submit()
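ivpu_job_submit() illustrates the kernel's goto-unwinding idiom: the runtime-PM reference taken at line 520 and the job id allocated at line 536 are released again on the error path (lines 575-580), undoing acquisitions in reverse order. A compact illustration of that shape; every helper here is a fake for illustration:

#include <stdio.h>

/* Goto-based unwinding: each later failure jumps to a label that
 * releases everything acquired so far, in reverse order. */
static int  fake_rpm_get(void)    { return 0; }
static void fake_rpm_put(void)    { puts("  undo: rpm put"); }
static int  fake_alloc_id(void)   { return 7; }
static void fake_erase_id(int id) { printf("  undo: id %d erased\n", id); }

static int submit(int fail_ring)
{
	int ret, id;

	ret = fake_rpm_get();
	if (ret)
		return ret;		/* nothing to undo yet */

	id = fake_alloc_id();
	if (id < 0) {
		ret = id;
		goto err_rpm_put;
	}

	if (fail_ring) {		/* simulate doorbell/queue failure */
		ret = -5;
		goto err_erase_id;
	}
	return 0;

err_erase_id:
	fake_erase_id(id);
err_rpm_put:
	fake_rpm_put();
	return ret;
}

int main(void)
{
	printf("submit -> %d\n", submit(1));
	return 0;
}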
589 struct ivpu_device *vdev = file_priv->vdev; in ivpu_job_prepare_bos_for_submit() local
611 ivpu_warn(vdev, "Buffer is already in use\n"); in ivpu_job_prepare_bos_for_submit()
616 ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); in ivpu_job_prepare_bos_for_submit()
625 ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); in ivpu_job_prepare_bos_for_submit()
632 ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); in ivpu_job_prepare_bos_for_submit()
661 struct ivpu_device *vdev = file_priv->vdev; in ivpu_submit_ioctl() local
698 if (!drm_dev_enter(&vdev->drm, &idx)) { in ivpu_submit_ioctl()
703 ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", in ivpu_submit_ioctl()
708 ivpu_err(vdev, "Failed to create job\n"); in ivpu_submit_ioctl()
716 ivpu_err(vdev, "Failed to prepare job: %d\n", ret); in ivpu_submit_ioctl()
722 down_read(&vdev->pm->reset_lock); in ivpu_submit_ioctl()
724 up_read(&vdev->pm->reset_lock); in ivpu_submit_ioctl()
744 ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, in ivpu_job_done_callback() argument
751 ivpu_err(vdev, "IPC message has no JSM payload\n"); in ivpu_job_done_callback()
756 ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result); in ivpu_job_done_callback()
761 ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); in ivpu_job_done_callback()
762 if (!ret && !xa_empty(&vdev->submitted_jobs_xa)) in ivpu_job_done_callback()
763 ivpu_start_job_timeout_detection(vdev); in ivpu_job_done_callback()
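The done callback re-arms the job timeout watchdog only while submitted_jobs_xa is still non-empty, and ivpu_job_signal_and_destroy() stops detection (line 497) once the queue drains, so each remaining job effectively gets a fresh deadline after every completion. The decision in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Stop the timer when the queue drains; restart it when a job
 * completes but others remain outstanding. */
static int outstanding = 2;
static bool watchdog_armed = true;

static void on_job_done(void)
{
	outstanding--;
	watchdog_armed = outstanding > 0;	/* re-arm only if jobs remain */
	printf("%d left, watchdog %s\n", outstanding,
	       watchdog_armed ? "armed" : "off");
}

int main(void)
{
	on_job_done();
	on_job_done();
	return 0;
}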
766 void ivpu_job_done_consumer_init(struct ivpu_device *vdev) in ivpu_job_done_consumer_init() argument
768 ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer, in ivpu_job_done_consumer_init()
772 void ivpu_job_done_consumer_fini(struct ivpu_device *vdev) in ivpu_job_done_consumer_fini() argument
774 ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer); in ivpu_job_done_consumer_fini()