
Searched full:job (Results 1 – 25 of 806) sorted by relevance


/linux-6.14.4/drivers/gpu/drm/nouveau/
nouveau_sched.c
26 nouveau_job_init(struct nouveau_job *job, in nouveau_job_init() argument
32 INIT_LIST_HEAD(&job->entry); in nouveau_job_init()
34 job->file_priv = args->file_priv; in nouveau_job_init()
35 job->cli = nouveau_cli(args->file_priv); in nouveau_job_init()
36 job->sched = sched; in nouveau_job_init()
38 job->sync = args->sync; in nouveau_job_init()
39 job->resv_usage = args->resv_usage; in nouveau_job_init()
41 job->ops = args->ops; in nouveau_job_init()
43 job->in_sync.count = args->in_sync.count; in nouveau_job_init()
44 if (job->in_sync.count) { in nouveau_job_init()
[all …]
nouveau_exec.c
65 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
67 * A VM_BIND job can be executed either synchronously or asynchronously. If
68 * executed asynchronously, userspace may provide a list of syncobjs this job
70 * VM_BIND job finished execution. If executed synchronously the ioctl will
71 * block until the bind job is finished. For synchronous jobs the kernel will
80 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
87 nouveau_exec_job_submit(struct nouveau_job *job, in nouveau_exec_job_submit() argument
90 struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); in nouveau_exec_job_submit()
91 struct nouveau_cli *cli = job->cli; in nouveau_exec_job_submit()
118 nouveau_exec_job_armed_submit(struct nouveau_job *job, in nouveau_exec_job_armed_submit() argument
[all …]
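
The doc-comment in this result distinguishes synchronous from asynchronous VM_BIND jobs. Below is a minimal userspace sketch of the asynchronous path, assuming a hypothetical submit_vm_bind_async() wrapper around the VM_BIND ioctl; only the drmSyncobj* calls are real libdrm API. The job signals a syncobj when it finishes, which the caller can wait on or hand to a later DRM_NOUVEAU_EXEC as an in-fence.

#include <stdint.h>
#include <xf86drm.h>        /* drmSyncobjCreate(), drmSyncobjWait() */

/* Hypothetical wrapper around the VM_BIND ioctl: submits one asynchronous
 * bind and asks the kernel to signal out_syncobj on completion. */
int submit_vm_bind_async(int fd, uint64_t va, uint64_t size, uint32_t out_syncobj);

int bind_then_wait(int fd, uint64_t va, uint64_t size)
{
        uint32_t syncobj;
        int ret;

        ret = drmSyncobjCreate(fd, 0, &syncobj);
        if (ret)
                return ret;

        ret = submit_vm_bind_async(fd, va, size, syncobj);
        if (ret)
                return ret;

        /* Block until the VM_BIND job has executed; alternatively, pass
         * the syncobj to a subsequent exec job as a wait dependency. */
        return drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);
}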
/linux-6.14.4/drivers/gpu/drm/xe/
xe_sched_job.c
68 static void job_free(struct xe_sched_job *job) in job_free() argument
70 struct xe_exec_queue *q = job->q; in job_free()
73 kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ? in job_free()
74 xe_sched_job_parallel_slab : xe_sched_job_slab, job); in job_free()
77 static struct xe_device *job_to_xe(struct xe_sched_job *job) in job_to_xe() argument
79 return gt_to_xe(job->q->gt); in job_to_xe()
83 static void xe_sched_job_free_fences(struct xe_sched_job *job) in xe_sched_job_free_fences() argument
87 for (i = 0; i < job->q->width; ++i) { in xe_sched_job_free_fences()
88 struct xe_job_ptrs *ptrs = &job->ptrs[i]; in xe_sched_job_free_fences()
100 struct xe_sched_job *job; in xe_sched_job_create() local
[all …]
xe_sched_job.h
26 * xe_sched_job_get - get reference to XE schedule job
27 * @job: XE schedule job object
29 * Increment XE schedule job's reference count
31 static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job) in xe_sched_job_get() argument
33 kref_get(&job->refcount); in xe_sched_job_get()
34 return job; in xe_sched_job_get()
38 * xe_sched_job_put - put reference to XE schedule job
39 * @job: XE schedule job object
41 * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
44 static inline void xe_sched_job_put(struct xe_sched_job *job) in xe_sched_job_put() argument
[all …]
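
The get/put pair documented in this result is plain kref reference counting. A short sketch of the usage pattern, assuming a hypothetical asynchronous consumer; only xe_sched_job_get()/xe_sched_job_put() come from the header above.

/* Keep the job alive across an asynchronous completion path; the
 * start_hw_work()/on_hw_done() pair is hypothetical. */
static void submit_tracked(struct xe_sched_job *job)
{
        xe_sched_job_get(job);          /* reference for the completion path */
        start_hw_work(job);
}

static void on_hw_done(struct xe_sched_job *job)
{
        /* ... consume the job's results ... */
        xe_sched_job_put(job);          /* last put ends in xe_sched_job_destroy() */
}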
xe_ring_ops.c
174 static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i) in emit_render_cache_flush() argument
176 struct xe_gt *gt = job->q->gt; in emit_render_cache_flush()
192 else if (job->q->class == XE_ENGINE_CLASS_COMPUTE) in emit_render_cache_flush()
222 static u32 get_ppgtt_flag(struct xe_sched_job *job) in get_ppgtt_flag() argument
224 if (job->q->vm && !job->ggtt) in get_ppgtt_flag()
244 static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc, in __emit_job_gen12_simple() argument
248 u32 ppgtt_flag = get_ppgtt_flag(job); in __emit_job_gen12_simple()
249 struct xe_gt *gt = job->q->gt; in __emit_job_gen12_simple()
253 if (job->ring_ops_flush_tlb) { in __emit_job_gen12_simple()
265 if (job->user_fence.used) { in __emit_job_gen12_simple()
[all …]
/linux-6.14.4/drivers/gpu/host1x/
job.c
3 * Tegra host1x Job
21 #include "job.h"
30 struct host1x_job *job = NULL; in host1x_job_alloc() local
51 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
52 if (!job) in host1x_job_alloc()
55 job->enable_firewall = enable_firewall; in host1x_job_alloc()
57 kref_init(&job->ref); in host1x_job_alloc()
58 job->channel = ch; in host1x_job_alloc()
62 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
64 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
[all …]
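
host1x_job_alloc() in this result uses the single-allocation idiom: one kzalloc() sized for the struct plus all trailing arrays, with interior pointers carved out of the same block so a single kfree() releases everything. An illustrative reduction (all names here are made up):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_reloc {
        u64 target;
};

struct demo_job {
        struct demo_reloc *relocs;
        unsigned int num_relocs;
};

static struct demo_job *demo_job_alloc(unsigned int num_relocs)
{
        struct demo_job *job;
        void *mem;

        mem = job = kzalloc(sizeof(*job) +
                            num_relocs * sizeof(*job->relocs), GFP_KERNEL);
        if (!job)
                return NULL;

        mem += sizeof(*job);                    /* arrays start right after the struct */
        job->relocs = num_relocs ? mem : NULL;  /* NULL when the array is empty */
        job->num_relocs = num_relocs;

        return job;                             /* freed with a single kfree() */
}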
cdma.c
23 #include "job.h"
270 * Start timer that tracks the time spent by the job.
274 struct host1x_job *job) in cdma_start_timer_locked() argument
281 cdma->timeout.client = job->client; in cdma_start_timer_locked()
282 cdma->timeout.syncpt = job->syncpt; in cdma_start_timer_locked()
283 cdma->timeout.syncpt_val = job->syncpt_end; in cdma_start_timer_locked()
287 msecs_to_jiffies(job->timeout)); in cdma_start_timer_locked()
313 struct host1x_job *job, *n; in update_cdma_locked() local
319 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { in update_cdma_locked()
320 struct host1x_syncpt *sp = job->syncpt; in update_cdma_locked()
[all …]
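
update_cdma_locked() in this result walks the sync queue with list_for_each_entry_safe() so finished jobs can be unlinked mid-iteration. A minimal sketch of that sweep; the demo_job type and its done flag are illustrative:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_job {
        struct list_head list;
        bool done;
};

static void demo_reap(struct list_head *sync_queue)
{
        struct demo_job *job, *n;

        list_for_each_entry_safe(job, n, sync_queue, list) {
                if (!job->done)
                        break;          /* queue is ordered; stop at first pending job */
                list_del(&job->list);
                kfree(job);
        }
}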
/linux-6.14.4/drivers/gpu/drm/imagination/
pvr_job.c
25 struct pvr_job *job = container_of(kref, struct pvr_job, ref_count); in pvr_job_release() local
27 xa_erase(&job->pvr_dev->job_ids, job->id); in pvr_job_release()
29 pvr_hwrt_data_put(job->hwrt); in pvr_job_release()
30 pvr_context_put(job->ctx); in pvr_job_release()
32 WARN_ON(job->paired_job); in pvr_job_release()
34 pvr_queue_job_cleanup(job); in pvr_job_release()
35 pvr_job_release_pm_ref(job); in pvr_job_release()
37 kfree(job->cmd); in pvr_job_release()
38 kfree(job); in pvr_job_release()
42 * pvr_job_put() - Release reference on job
[all …]
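
pvr_job_release() in this result is a kref release callback: container_of() recovers the enclosing job from the embedded refcount, and the final put triggers it. A minimal sketch of the pairing, with illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_job {
        struct kref ref_count;
        /* ... payload ... */
};

static void demo_job_release(struct kref *kref)
{
        struct demo_job *job = container_of(kref, struct demo_job, ref_count);

        /* tear down resources in reverse order of setup, then free */
        kfree(job);
}

static void demo_job_put(struct demo_job *job)
{
        if (job)
                kref_put(&job->ref_count, demo_job_release);
}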
pvr_queue.c
238 * Call this function to allocate job CCCB and done fences. This only
303 * pvr_queue_job_fence_init() - Initializes a job done fence object.
308 * a job.
350 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count) in job_cmds_size() argument
353 * and a command for the job itself. in job_cmds_size()
356 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); in job_cmds_size()
361 * @job: Job to operate on.
365 static unsigned long job_count_remaining_native_deps(struct pvr_job *job) in job_count_remaining_native_deps() argument
371 xa_for_each(&job->base.dependencies, index, fence) { in job_count_remaining_native_deps()
386 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
[all …]
pvr_job.h
34 /** @ref_count: Refcount for job. */
37 /** @type: Type of job. */
40 /** @id: Job ID number. */
44 * @paired_job: Job paired to this job.
50 * fragment job to execute when the Parameter Manager runs out of memory.
52 * The geometry job should point to the fragment job it's paired with,
53 * and the fragment job should point to the geometry job it's paired with.
63 /** @done_fence: Fence to signal when the job is done. */
87 * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
88 * the job is done.
[all …]
/linux-6.14.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
37 struct amdgpu_job *job) in amdgpu_job_do_core_dump() argument
48 amdgpu_coredump(adev, true, false, job); in amdgpu_job_do_core_dump()
52 struct amdgpu_job *job) in amdgpu_job_core_dump() argument
80 amdgpu_job_do_core_dump(tmp_adev, job); in amdgpu_job_core_dump()
91 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
101 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
106 * Do the coredump immediately after a job timeout to get a very in amdgpu_job_timedout()
109 * before job timeout in amdgpu_job_timedout()
112 amdgpu_job_core_dump(adev, job); in amdgpu_job_timedout()
115 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
[all …]
amdgpu_ids.c
169 struct amdgpu_job *job) in amdgpu_vmid_gds_switch_needed() argument
171 return id->gds_base != job->gds_base || in amdgpu_vmid_gds_switch_needed()
172 id->gds_size != job->gds_size || in amdgpu_vmid_gds_switch_needed()
173 id->gws_base != job->gws_base || in amdgpu_vmid_gds_switch_needed()
174 id->gws_size != job->gws_size || in amdgpu_vmid_gds_switch_needed()
175 id->oa_base != job->oa_base || in amdgpu_vmid_gds_switch_needed()
176 id->oa_size != job->oa_size; in amdgpu_vmid_gds_switch_needed()
179 /* Check if the id is compatible with the job */
181 struct amdgpu_job *job) in amdgpu_vmid_compatible() argument
183 return id->pd_gpu_addr == job->vm_pd_addr && in amdgpu_vmid_compatible()
[all …]
/linux-6.14.4/drivers/md/
dm-kcopyd.c
41 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
353 * Error state of the job.
373 * Set this to ensure you are notified when the job has
380 * These fields are only used if the job has been split
414 * Functions to push and pop a job onto the head of a given job
420 struct kcopyd_job *job; in pop_io_job() local
426 list_for_each_entry(job, jobs, list) { in pop_io_job()
427 if (job->op == REQ_OP_READ || in pop_io_job()
428 !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { in pop_io_job()
429 list_del(&job->list); in pop_io_job()
[all …]
/linux-6.14.4/drivers/gpu/drm/v3d/
v3d_submit.c
14 * we can attach fences and update the reservations after pushing the job
22 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
27 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
31 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
32 ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); in v3d_lock_bo_reservations()
36 ret = drm_sched_job_add_implicit_dependencies(&job->base, in v3d_lock_bo_reservations()
37 job->bo[i], true); in v3d_lock_bo_reservations()
45 drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
50 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
51 * referenced by the job.
[all …]
v3d_sched.c
13 * jobs when bulk background jobs are queued up, we submit a new job
71 struct v3d_job *job = to_v3d_job(sched_job); in v3d_sched_job_free() local
73 v3d_job_cleanup(job); in v3d_sched_job_free()
109 struct v3d_cpu_job *job = to_cpu_job(sched_job); in v3d_cpu_job_free() local
111 v3d_timestamp_query_info_free(&job->timestamp_query, in v3d_cpu_job_free()
112 job->timestamp_query.count); in v3d_cpu_job_free()
114 v3d_performance_query_info_free(&job->performance_query, in v3d_cpu_job_free()
115 job->performance_query.count); in v3d_cpu_job_free()
117 v3d_job_cleanup(&job->base); in v3d_cpu_job_free()
121 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) in v3d_switch_perfmon() argument
[all …]
/linux-6.14.4/drivers/gpu/host1x/hw/
channel_hw.c
17 #include "../job.h"
50 static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, in submit_wait() argument
53 struct host1x_cdma *cdma = &job->channel->cdma; in submit_wait()
63 if (job->memory_context) in submit_wait()
64 stream_id = job->memory_context->stream_id; in submit_wait()
66 stream_id = job->engine_fallback_streamid; in submit_wait()
79 host1x_cdma_push_wide(&job->channel->cdma, in submit_wait()
80 host1x_opcode_setclass(job->class, 0, 0), in submit_wait()
82 host1x_opcode_setstreamid(job->engine_streamid_offset / 4), in submit_wait()
113 static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base) in submit_gathers() argument
[all …]
/linux-6.14.4/block/
bsg-lib.c
31 struct bsg_job *job; in bsg_transport_sg_io_fn() local
49 job = blk_mq_rq_to_pdu(rq); in bsg_transport_sg_io_fn()
50 reply = job->reply; in bsg_transport_sg_io_fn()
51 memset(job, 0, sizeof(*job)); in bsg_transport_sg_io_fn()
52 job->reply = reply; in bsg_transport_sg_io_fn()
53 job->reply_len = SCSI_SENSE_BUFFERSIZE; in bsg_transport_sg_io_fn()
54 job->dd_data = job + 1; in bsg_transport_sg_io_fn()
56 job->request_len = hdr->request_len; in bsg_transport_sg_io_fn()
57 job->request = memdup_user(uptr64(hdr->request), hdr->request_len); in bsg_transport_sg_io_fn()
58 if (IS_ERR(job->request)) { in bsg_transport_sg_io_fn()
[all …]
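
The job->dd_data = job + 1 line in this result relies on the blk-mq pdu layout: the per-request pdu is sized for the bsg_job plus the driver-data bytes requested at queue setup, so driver data sits directly behind the job struct. An illustrative view (the demo struct is hypothetical):

#include <linux/bsg-lib.h>
#include <linux/types.h>

/* What the pdu effectively looks like: the bsg_job first, then the
 * driver-data area that job->dd_data = job + 1 points at. */
struct demo_pdu {
        struct bsg_job job;
        u8 dd_data[];           /* stands in for dd_job_size bytes */
};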
/linux-6.14.4/drivers/gpu/drm/panfrost/
panfrost_job.c
106 int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
112 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
117 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
118 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
119 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
121 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
146 panfrost_get_job_chain_flag(const struct panfrost_job *job) in panfrost_get_job_chain_flag() argument
148 struct panfrost_fence *f = to_panfrost_fence(job->done_fence); in panfrost_get_job_chain_flag()
150 if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) in panfrost_get_job_chain_flag()
159 struct panfrost_job *job = pfdev->jobs[slot][0]; in panfrost_dequeue_job() local
[all …]
/linux-6.14.4/drivers/accel/ivpu/
ivpu_job.c
163 ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n", in ivpu_register_db()
190 ivpu_dbg(vdev, JOB, "Turbo mode enabled"); in ivpu_cmdq_init()
226 ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id); in ivpu_cmdq_fini()
231 ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id); in ivpu_cmdq_fini()
279 * and FW loses job queue state. The next time job queue is used it
329 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) in ivpu_cmdq_push_job() argument
331 struct ivpu_device *vdev = job->vdev; in ivpu_cmdq_push_job()
337 /* Check if there is space left in job queue */ in ivpu_cmdq_push_job()
339 ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n", in ivpu_cmdq_push_job()
340 job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail); in ivpu_cmdq_push_job()
[all …]
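
The "Job queue full" message in this result comes from a standard ring-buffer fullness test: advancing the tail by one slot must not collide with the head, which sacrifices one slot to distinguish full from empty. A hedged sketch with illustrative names (the real code reads head and tail from the queue header shared with firmware):

#include <linux/types.h>

static bool demo_jobq_full(u32 head, u32 tail, u32 entry_count)
{
        return ((tail + 1) % entry_count) == head;
}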
/linux-6.14.4/drivers/gpu/drm/scheduler/
sched_main.c
32 * backend operations to the scheduler like submitting a job to hardware run queue,
33 * returning the dependencies of a job etc.
46 * Note that once a job was taken from the entities queue and pushed to the
61 * Once a job is executed (but not yet finished), the job's credits contribute
62 * to the scheduler's credit count until the job is finished. If by executing
63 * one more job the scheduler's credit count would exceed the scheduler's
64 * credit limit, the job won't be executed. Instead, the scheduler will wait
69 * struct drm_sched_backend_ops to update the job's credits dynamically. The
70 * scheduler executes this callback every time the scheduler considers a job for
71 * execution and subsequently checks whether the job fits the scheduler's credit
[all …]
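
The credit scheme described in this result can be made dynamic through the optional update_job_credits hook of struct drm_sched_backend_ops, which the scheduler calls each time it considers a job. A hedged sketch, assuming a hypothetical driver job type and cost model:

#include <drm/gpu_scheduler.h>

struct demo_job {
        struct drm_sched_job base;
        u32 remaining_ring_dwords;      /* hypothetical cost metric */
};

#define to_demo_job(j) container_of(j, struct demo_job, base)

/* Recost the job on every scheduling decision, e.g. by the ring space
 * it still needs; returning fewer credits lets it squeeze in sooner. */
static u32 demo_update_job_credits(struct drm_sched_job *sched_job)
{
        return to_demo_job(sched_job)->remaining_ring_dwords;
}

static const struct drm_sched_backend_ops demo_sched_ops = {
        /* .run_job, .timedout_job and .free_job elided for brevity */
        .update_job_credits = demo_update_job_credits,
};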
/linux-6.14.4/drivers/accel/amdxdna/
aie2_ctx.c
33 struct amdxdna_sched_job *job; in aie2_job_release() local
35 job = container_of(ref, struct amdxdna_sched_job, refcnt); in aie2_job_release()
36 amdxdna_sched_job_cleanup(job); in aie2_job_release()
37 if (job->out_fence) in aie2_job_release()
38 dma_fence_put(job->out_fence); in aie2_job_release()
39 kfree(job); in aie2_job_release()
42 static void aie2_job_put(struct amdxdna_sched_job *job) in aie2_job_put() argument
44 kref_put(&job->refcnt, aie2_job_release); in aie2_job_put()
172 aie2_sched_notify(struct amdxdna_sched_job *job) in aie2_sched_notify() argument
174 struct dma_fence *fence = job->fence; in aie2_sched_notify()
[all …]
/linux-6.14.4/drivers/scsi/lpfc/
lpfc_bsg.c
71 /* driver data associated with the job */
96 struct bsg_job *set_job; /* job waiting for this iocb to finish */
305 struct bsg_job *job; in lpfc_bsg_send_mgmt_cmd_cmp() local
316 /* Determine if job has been aborted */ in lpfc_bsg_send_mgmt_cmd_cmp()
318 job = dd_data->set_job; in lpfc_bsg_send_mgmt_cmd_cmp()
319 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
320 bsg_reply = job->reply; in lpfc_bsg_send_mgmt_cmd_cmp()
321 /* Prevent timeout handling from trying to abort job */ in lpfc_bsg_send_mgmt_cmd_cmp()
322 job->dd_data = NULL; in lpfc_bsg_send_mgmt_cmd_cmp()
342 if (job) { in lpfc_bsg_send_mgmt_cmd_cmp()
[all …]
/linux-6.14.4/drivers/gpu/drm/tegra/
submit.c
28 "%s: job submission failed: " fmt "\n", \
327 static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job, in submit_get_syncpt() argument
337 /* Syncpt ref will be dropped on job release */ in submit_get_syncpt()
344 job->syncpt = host1x_syncpt_get(sp); in submit_get_syncpt()
345 job->syncpt_incrs = args->syncpt.increments; in submit_get_syncpt()
350 static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context, in submit_job_add_gather() argument
370 SUBMIT_ERR(context, "too many total words in job"); in submit_job_add_gather()
381 SUBMIT_ERR(context, "job was rejected by firewall"); in submit_job_add_gather()
385 host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4); in submit_job_add_gather()
399 struct host1x_job *job; in submit_create_job() local
[all …]
/linux-6.14.4/drivers/gpu/drm/
drm_writeback.c
306 int drm_writeback_prepare_job(struct drm_writeback_job *job) in drm_writeback_prepare_job() argument
308 struct drm_writeback_connector *connector = job->connector; in drm_writeback_prepare_job()
314 ret = funcs->prepare_writeback_job(connector, job); in drm_writeback_prepare_job()
319 job->prepared = true; in drm_writeback_prepare_job()
325 * drm_writeback_queue_job - Queue a writeback job for later signalling
326 * @wb_connector: The writeback connector to queue a job on
327 * @conn_state: The connector state containing the job to queue
329 * This function adds the job contained in @conn_state to the job_queue for a
330 * writeback connector. It takes ownership of the writeback job and sets the
331 * @conn_state->writeback_job to NULL, and so no access to the job may be
[all …]
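
drm_writeback_queue_job() in this result transfers ownership of the job out of the connector state. A minimal sketch of the commit-side handoff; the queue/signal calls are the real drm_writeback API, the surrounding function is hypothetical:

#include <drm/drm_writeback.h>

static void demo_commit_writeback(struct drm_connector *conn,
                                  struct drm_connector_state *state)
{
        struct drm_writeback_connector *wb =
                container_of(conn, struct drm_writeback_connector, base);

        if (state->writeback_job) {
                drm_writeback_queue_job(wb, state);
                /* state->writeback_job is now NULL; the framework owns the
                 * job until drm_writeback_signal_completion() is called,
                 * typically from the write-out done interrupt. */
        }
}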
/linux-6.14.4/include/drm/
gpu_scheduler.h
74 /* Used to choose between FIFO and RR job-scheduling */
81 * struct drm_sched_entity - A wrapper around a job queue (typically
179 * The dependency fence of the job which is on the top of the job queue.
200 * Points to the finished fence of the last scheduled job. Only written
207 * @last_user: last group leader pushing a job into the entity.
231 * Marks earliest job waiting in SW queue
268 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
273 * when the job is scheduled.
279 * when the job is completed.
281 * When setting up an out fence for the job, you should use
[all …]
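
The scheduled/finished fence pair documented in this result becomes valid once a job is armed. A hedged sketch of the submit lifecycle around it, assuming a driver-embedded job and entity; the drm_sched_job_* calls and the s_fence->finished out-fence convention are from gpu_scheduler.h:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/err.h>

static struct dma_fence *demo_submit(struct drm_sched_job *job,
                                     struct drm_sched_entity *entity)
{
        struct dma_fence *out_fence;
        int ret;

        ret = drm_sched_job_init(job, entity, 1 /* credits */, NULL);
        if (ret)
                return ERR_PTR(ret);

        drm_sched_job_arm(job);         /* initializes the drm_sched_fence pair */
        out_fence = dma_fence_get(&job->s_fence->finished);
        drm_sched_entity_push_job(job);

        return out_fence;               /* signals when the job has completed */
}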
