Lines Matching full:slice

161 struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count); in free_slice() local
163 slice->bo->total_slice_nents -= slice->nents; in free_slice()
164 list_del(&slice->slice); in free_slice()
165 drm_gem_object_put(&slice->bo->base); in free_slice()
166 sg_free_table(slice->sgt); in free_slice()
167 kfree(slice->sgt); in free_slice()
168 kfree(slice->reqs); in free_slice()
169 kfree(slice); in free_slice()
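
Taken together, the free_slice() matches above release everything a slice owns: the nents accounting in the parent BO, the list linkage, the GEM reference, the scatter-gather table, and the request array. A minimal sketch of struct bo_slice, reconstructed only from the fields these lines touch (member order, exact types, and any fields not visible here are assumptions):

    struct bo_slice {
            struct sg_table *sgt;    /* slice's sg view of the BO pages; sg_free_table() + kfree() on free */
            struct dbc_req *reqs;    /* one DBC request per sg entry, nents of them */
            u32 nents;               /* subtracted from bo->total_slice_nents in free_slice() */
            int dir;                 /* DMA_TO_DEVICE or DMA_FROM_DEVICE, copied from the BO */
            u64 size;                /* bytes this slice transfers; 0 means no_xfer */
            u64 offset;              /* byte offset of the slice within the BO */
            bool no_xfer;            /* semaphore/doorbell-only slice, no DMA payload */
            struct qaic_bo *bo;      /* parent BO; drm_gem_object_put(&bo->base) on free */
            struct kref ref_count;   /* the final kref_put() invokes free_slice() */
            struct list_head slice;  /* entry on bo->slices, removed with list_del() */
    };
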
255 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, in encode_reqs() argument
267 if (!slice->no_xfer) in encode_reqs()
268 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER); in encode_reqs()
298 * When we end up splitting up a single request (ie a buf slice) into in encode_reqs()
307 for_each_sgtable_dma_sg(slice->sgt, sg, i) { in encode_reqs()
308 slice->reqs[i].cmd = cmd; in encode_reqs()
309 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
311 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
319 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg)); in encode_reqs()
322 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, in encode_reqs()
329 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, in encode_reqs()
336 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, in encode_reqs()
343 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, in encode_reqs()
354 slice->reqs[i].cmd |= GEN_COMPLETION; in encode_reqs()
355 slice->reqs[i].db_addr = db_addr; in encode_reqs()
356 slice->reqs[i].db_len = db_len; in encode_reqs()
357 slice->reqs[i].db_data = db_data; in encode_reqs()
372 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ? in encode_reqs()
374 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index, in encode_reqs()
377 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index, in encode_reqs()
380 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index, in encode_reqs()
383 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index, in encode_reqs()
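
Read in order, the encode_reqs() matches give the encoding scheme: one dbc_req per DMA scatterlist entry, the transfer direction decides which side of the request holds the host address, and only the last request of the slice carries GEN_COMPLETION, the doorbell and the post-transfer semaphores (the comment at line 298 explains why the sync data is split this way). A condensed sketch of that loop shape; the base command value, the device-address bookkeeping and the semaphore encoding details are assumptions, not the driver's exact code:

    u32 cmd = base_cmd;                               /* hypothetical: driver's base transfer opcode */

    if (!slice->no_xfer)
            cmd |= slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER;

    for_each_sgtable_dma_sg(slice->sgt, sg, i) {
            slice->reqs[i].cmd = cmd;
            /* host buffer on one side of the copy, device address on the other */
            slice->reqs[i].src_addr  = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
                                                   sg_dma_address(sg) : dev_addr);
            slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
                                                   dev_addr : sg_dma_address(sg));
            slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
            /* any presync semaphore is encoded (sem_cmd0..sem_cmd3) into each request
             * so no DMA starts before the other side is ready */
            dev_addr += sg_dma_len(sg);               /* assumed: advance the device-side cursor */
    }

    i--;                                              /* last request of the slice */
    slice->reqs[i].cmd    |= GEN_COMPLETION;          /* completion, doorbell and post-transfer */
    slice->reqs[i].db_addr = db_addr;                 /* semaphores are attached only here */
    slice->reqs[i].db_len  = db_len;
    slice->reqs[i].db_data = db_data;
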
394 struct bo_slice *slice; in qaic_map_one_slice() local
401 slice = kmalloc(sizeof(*slice), GFP_KERNEL); in qaic_map_one_slice()
402 if (!slice) { in qaic_map_one_slice()
407 slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); in qaic_map_one_slice()
408 if (!slice->reqs) { in qaic_map_one_slice()
413 slice->no_xfer = !slice_ent->size; in qaic_map_one_slice()
414 slice->sgt = sgt; in qaic_map_one_slice()
415 slice->nents = sgt->nents; in qaic_map_one_slice()
416 slice->dir = bo->dir; in qaic_map_one_slice()
417 slice->bo = bo; in qaic_map_one_slice()
418 slice->size = slice_ent->size; in qaic_map_one_slice()
419 slice->offset = slice_ent->offset; in qaic_map_one_slice()
421 ret = encode_reqs(qdev, slice, slice_ent); in qaic_map_one_slice()
426 kref_init(&slice->ref_count); in qaic_map_one_slice()
428 list_add_tail(&slice->slice, &bo->slices); in qaic_map_one_slice()
433 kfree(slice->reqs); in qaic_map_one_slice()
435 kfree(slice); in qaic_map_one_slice()
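
qaic_map_one_slice() is the constructor side of the lifecycle above: allocate the slice and its request array, record the sgt and geometry, encode the requests, take the initial reference, and only then link the slice onto bo->slices; the error path at lines 433 and 435 unwinds in reverse. A condensed sketch of that flow, with the error labels, the sgt hand-off and the GEM reference taking assumed rather than copied from the driver:

    slice = kmalloc(sizeof(*slice), GFP_KERNEL);
    if (!slice)
            return -ENOMEM;                        /* the driver also unwinds the slice's sgt here (not shown) */

    slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
    if (!slice->reqs) {
            ret = -ENOMEM;
            goto free_slice;
    }

    slice->no_xfer = !slice_ent->size;             /* a zero-sized slice carries no DMA payload */
    slice->sgt     = sgt;
    slice->nents   = sgt->nents;
    slice->dir     = bo->dir;
    slice->bo      = bo;
    slice->size    = slice_ent->size;
    slice->offset  = slice_ent->offset;

    ret = encode_reqs(qdev, slice, slice_ent);     /* fill reqs[] before publishing the slice */
    if (ret)
            goto free_reqs;

    kref_init(&slice->ref_count);                  /* dropped later via kref_put() -> free_slice() */
    drm_gem_object_get(&bo->base);                 /* assumed: balances drm_gem_object_put() in free_slice() */
    list_add_tail(&slice->slice, &bo->slices);     /* publish only once fully built */
    return 0;

    free_reqs:
            kfree(slice->reqs);                    /* line 433 */
    free_slice:
            kfree(slice);                          /* line 435 */
    return ret;
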
909 struct bo_slice *slice, *temp; in qaic_free_slices_bo() local
911 list_for_each_entry_safe(slice, temp, &bo->slices, slice) in qaic_free_slices_bo()
912 kref_put(&slice->ref_count, free_slice); in qaic_free_slices_bo()
1075 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id, in copy_exec_reqs() argument
1079 struct dbc_req *reqs = slice->reqs; in copy_exec_reqs()
1084 if (avail < slice->nents) in copy_exec_reqs()
1087 if (tail + slice->nents > dbc->nelem) { in copy_exec_reqs()
1089 avail = min_t(u32, avail, slice->nents); in copy_exec_reqs()
1092 avail = slice->nents - avail; in copy_exec_reqs()
1096 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents); in copy_exec_reqs()
1099 *ptail = (tail + slice->nents) % dbc->nelem; in copy_exec_reqs()
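
copy_exec_reqs() stages a slice's pre-encoded requests into the DBC request FIFO: give up if fewer than slice->nents free slots are available, and split the memcpy in two when the block would run past the end of the ring; the tail then advances modulo nelem. A self-contained sketch of that wrap-around copy in plain C; struct fifo and fifo_put() are hypothetical, and fifo_at() here is a simplified stand-in for the driver's helper of the same name:

    #include <string.h>

    struct fifo {
            void *base;              /* request queue memory, nelem fixed-size slots */
            unsigned int nelem;
            size_t elem_sz;
    };

    static void *fifo_at(struct fifo *f, unsigned int idx)
    {
            return (char *)f->base + (size_t)idx * f->elem_sz;
    }

    /* Copy n elements at *ptail, wrapping at most once; head/tail follow the
     * usual one-slot-gap convention (head == tail means empty). */
    static int fifo_put(struct fifo *f, unsigned int head, unsigned int *ptail,
                        const void *elems, unsigned int n)
    {
            unsigned int tail = *ptail;
            unsigned int avail = head > tail ? head - tail - 1
                                             : f->nelem - tail + head - 1;

            if (avail < n)
                    return -1;                          /* not enough room for this slice */

            if (tail + n > f->nelem) {
                    /* block runs past the end: copy what fits, then wrap to slot 0 */
                    unsigned int first = f->nelem - tail;

                    memcpy(fifo_at(f, tail), elems, (size_t)first * f->elem_sz);
                    memcpy(f->base, (const char *)elems + (size_t)first * f->elem_sz,
                           (size_t)(n - first) * f->elem_sz);
            } else {
                    memcpy(fifo_at(f, tail), elems, (size_t)n * f->elem_sz);
            }

            *ptail = (tail + n) % f->nelem;
            return 0;
    }
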
1104 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, in copy_partial_exec_reqs() argument
1108 struct dbc_req *reqs = slice->reqs; in copy_partial_exec_reqs()
1119 * of the last DMA request of this slice that needs to be in copy_partial_exec_reqs()
1124 for (first_n = 0; first_n < slice->nents; first_n++) in copy_partial_exec_reqs()
1152 memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs)); in copy_partial_exec_reqs()
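
copy_partial_exec_reqs() handles a resized (partial) execution: only the requests whose payload falls entirely inside the new length are copied as-is, and one trailing entry is cloned from the slice's last request so it keeps GEN_COMPLETION, the doorbell and the post-transfer semaphores, with its addresses and length trimmed to the leftover bytes. A rough sketch of the first_n walk under those assumptions; the driver's exact accounting may differ:

    u64 remaining = resize;                  /* bytes of this slice still to transfer (hypothetical name) */

    /* count the requests that fit completely within the resized length */
    for (first_n = 0; first_n < slice->nents; first_n++) {
            u32 req_len = le32_to_cpu(reqs[first_n].len);

            if (remaining < req_len)
                    break;
            remaining -= req_len;
    }

    /* requests 0..first_n-1 go into the FIFO unchanged (wrap handling as in copy_exec_reqs) */

    /* clone the slice's final request to keep its completion/doorbell/semaphore bits */
    memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
    if (remaining) {
            /* ...but point it at the partially-covered request's buffer, trimmed */
            last_req->src_addr  = reqs[first_n].src_addr;
            last_req->dest_addr = reqs[first_n].dest_addr;
            last_req->len       = cpu_to_le32((u32)remaining);
    } else {
            last_req->len = 0;               /* resize 0: doorbell/semaphores only, no payload */
    }
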
1178 struct bo_slice *slice; in send_bo_list_to_device() local
1220 list_for_each_entry(slice, &bo->slices, slice) { in send_bo_list_to_device()
1221 for (j = 0; j < slice->nents; j++) in send_bo_list_to_device()
1222 slice->reqs[j].req_id = cpu_to_le16(bo->req_id); in send_bo_list_to_device()
1224 if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset)) in send_bo_list_to_device()
1225 /* Configure the slice for no DMA transfer */ in send_bo_list_to_device()
1226 ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail); in send_bo_list_to_device()
1227 else if (is_partial && pexec[i].resize < slice->offset + slice->size) in send_bo_list_to_device()
1228 /* Configure the slice to be partially DMA transferred */ in send_bo_list_to_device()
1229 ret = copy_partial_exec_reqs(qdev, slice, in send_bo_list_to_device()
1230 pexec[i].resize - slice->offset, dbc, in send_bo_list_to_device()
1233 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); in send_bo_list_to_device()
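
send_bo_list_to_device() stamps the BO's req_id into every request of every slice, then chooses the copy routine per slice from the partial-execute resize: a slice that starts at or beyond the resized length is sent with no payload, a slice straddling the boundary is sent partially, and anything fully covered (or a normal execute) takes the copy_exec_reqs() path. The same three-way decision, restated with comments; the surrounding loop context and the early exit on error are abbreviated assumptions:

    list_for_each_entry(slice, &bo->slices, slice) {
            for (j = 0; j < slice->nents; j++)
                    slice->reqs[j].req_id = cpu_to_le16(bo->req_id);

            if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
                    /* resize ends before this slice starts: doorbell/semaphores only */
                    ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
            else if (is_partial && pexec[i].resize < slice->offset + slice->size)
                    /* resize ends inside this slice: send only the leading bytes */
                    ret = copy_partial_exec_reqs(qdev, slice, pexec[i].resize - slice->offset,
                                                 dbc, head, tail);
            else
                    /* resize covers the whole slice, or this is a normal execute */
                    ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);

            if (ret)
                    break;                   /* assumed: stop on the first slice that fails */
    }
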