Lines matching "ccode" and "map" in the Cavium CPT VF request manager (drivers/crypto/cavium/cpt/cptvf_reqmanager.c). Only the matching lines of each function were captured; "..." marks elided code, and each fragment is headed by the function it belongs to.
// SPDX-License-Identifier: GPL-2.0-only
/*
 * get_free_pending_entry - get free entry from pending queue
 */
	ent = &q->head[q->rear];
	if (unlikely(ent->busy)) {
		...
	}

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;
/* pending_queue_inc_front() */
	struct pending_queue *queue = &pqinfo->queue[qno];

	queue->front++;
	if (unlikely(queue->front == pqinfo->qlen))
		queue->front = 0;
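
The two helpers above implement one discipline: each pending queue is a fixed-length ring in which rear is where new entries are claimed and front is where finished entries are retired, and both indices wrap to zero when they reach qlen. A minimal userspace sketch of that wrap logic follows; the entry and ring types are illustrative stand-ins, not the driver's structures.

#include <stdbool.h>
#include <stdio.h>

struct entry { bool busy; };

struct ring {
	struct entry ents[4];	/* qlen == 4 for the demo */
	unsigned int front;	/* next entry to retire */
	unsigned int rear;	/* next entry to claim */
	unsigned int qlen;
};

/* Claim the slot at rear, as get_free_pending_entry() does: refuse if the
 * slot is still busy (the ring is full), then advance rear with wraparound. */
static struct entry *ring_claim(struct ring *q)
{
	struct entry *ent = &q->ents[q->rear];

	if (ent->busy)
		return NULL;
	ent->busy = true;	/* the driver sets busy at submit time */
	if (++q->rear == q->qlen)
		q->rear = 0;
	return ent;
}

/* Retire the slot at front, as pending_queue_inc_front() does. */
static void ring_retire(struct ring *q)
{
	q->ents[q->front].busy = false;	/* the driver clears busy first */
	if (++q->front == q->qlen)
		q->front = 0;
}

int main(void)
{
	struct ring q = { .qlen = 4 };
	int claimed = 0;

	while (ring_claim(&q))
		claimed++;
	printf("claimed %d slots before the ring filled\n", claimed); /* 4 */
	ring_retire(&q);
	printf("claim after one retire: %s\n",
	       ring_claim(&q) ? "ok" : "full");
	return 0;
}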
/* setup_sgio_components() */
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}
	...
		list[i].dma_addr = dma_map_single(&pdev->dev,
						  list[i].vptr,
						  list[i].size,
						  DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(&pdev->dev,
					       list[i].dma_addr))) {
			dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
				i);
			ret = -EIO;
			...
		}
	...
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
	...
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
	...
		dma_unmap_single(&pdev->dev, list[i].dma_addr,
				 list[i].size, DMA_BIDIRECTIONAL);
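
So each sglist_component describes up to four buffers: four big-endian 16-bit lengths followed by four big-endian 64-bit bus addresses. Full groups of four are packed in the loop, and the one to three leftover buffers by the fall-through switch. Below is a standalone sketch of the same packing; the struct shape is inferred from the accessors above, htobe16/htobe64 stand in for cpu_to_be16/cpu_to_be64, and the i/4, i%4 indexing collapses the loop and the switch into one pass.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Inferred shape of one component: four lengths, then four pointers. */
struct sg_comp {
	uint16_t len[4];	/* big-endian sizes */
	uint64_t ptr[4];	/* big-endian bus addresses */
};

struct buf {
	uint64_t dma_addr;
	uint16_t size;
};

/* Pack n buffers, four per component: i/4 picks the component and i%4 the
 * slot within it, covering both the whole-group loop and the remainder. */
static void pack_sg(const struct buf *bufs, int n, struct sg_comp *out)
{
	for (int i = 0; i < n; i++) {
		out[i / 4].len[i % 4] = htobe16(bufs[i].size);
		out[i / 4].ptr[i % 4] = htobe64(bufs[i].dma_addr);
	}
}

int main(void)
{
	struct buf bufs[5] = {
		{ 0x1000, 64 }, { 0x2000, 128 }, { 0x3000, 32 },
		{ 0x4000, 16 }, { 0x5000, 256 },  /* 5 bufs -> 2 components */
	};
	struct sg_comp comps[2] = { 0 };

	pack_sg(bufs, 5, comps);
	/* On a little-endian host the stored length reads byte-swapped. */
	printf("comps[1].len[0] raw = 0x%04x\n", comps[1].len[0]);
	return 0;
}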
/* setup_sgio_list() */
	struct pci_dev *pdev = cptvf->pdev;

	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
		ret = -EINVAL;
		...
	}
	...
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		...
	}

	ret = setup_sgio_components(cptvf, req->in,
				    req->incnt,
				    info->gather_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		ret = -EFAULT;
		...
	}
	...
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		...
	}

	ret = setup_sgio_components(cptvf, req->out,
				    req->outcnt,
				    info->scatter_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		ret = -EFAULT;
		...
	}
	...
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		...
	}

	((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
	((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
	((__be16 *)info->in_buffer)[2] = 0;
	((__be16 *)info->in_buffer)[3] = 0;

	memcpy(&info->in_buffer[8], info->gather_components,
	       g_sz_bytes);
	memcpy(&info->in_buffer[8 + g_sz_bytes],
	       info->scatter_components, s_sz_bytes);

	info->dptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->in_buffer,
					  info->dlen,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
		ret = -EIO;
		...
	}
	...
	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		...
	}

	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
	info->alternate_caddr = (u64 *)info->out_buffer;
	info->rptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->out_buffer,
					  COMPLETION_CODE_SIZE,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
			COMPLETION_CODE_SIZE);
		ret = -EIO;
		...
	}
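
The DPTR buffer handed to the engine is thus framed as an 8-byte header carrying the big-endian scatter (output) count, then the gather (input) count, then two zero halfwords — assuming SG_LIST_HDR_SIZE is exactly those 8 bytes — followed by the packed gather components and then the scatter components. A userspace sketch of that framing under the same assumptions:

#include <endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SG_HDR_SIZE 8	/* assumed value of SG_LIST_HDR_SIZE */

/* Build [header | gather components | scatter components] in one buffer,
 * mirroring the halfword stores and the two memcpy()s above. */
static uint8_t *build_dptr(uint16_t incnt, uint16_t outcnt,
			   const void *gather, size_t g_sz,
			   const void *scatter, size_t s_sz,
			   size_t *dlen)
{
	uint8_t *buf;
	uint16_t *hdr;

	*dlen = SG_HDR_SIZE + g_sz + s_sz;
	buf = calloc(1, *dlen);
	if (!buf)
		return NULL;

	hdr = (uint16_t *)buf;
	hdr[0] = htobe16(outcnt);	/* scatter (output) count first */
	hdr[1] = htobe16(incnt);	/* then gather (input) count */
	/* hdr[2] and hdr[3] stay zero */

	memcpy(buf + SG_HDR_SIZE, gather, g_sz);
	memcpy(buf + SG_HDR_SIZE + g_sz, scatter, s_sz);
	return buf;
}

int main(void)
{
	uint8_t gather[40] = { 0 }, scatter[40] = { 0 };
	size_t dlen;
	uint8_t *dptr = build_dptr(1, 1, gather, sizeof(gather),
				   scatter, sizeof(scatter), &dlen);

	if (!dptr)
		return 1;
	free(dptr);
	return dlen == 88 ? 0 : 1;	/* 8 + 40 + 40 */
}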
/* send_cpt_command() */
	struct pci_dev *pdev = cptvf->pdev;
	...
	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];

	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
			if (chunk == queue->qhead) {
				continue;
			} else {
				queue->qhead = chunk;
				break;
			}
		}
		queue->idx = 0;
	}
	...
	spin_unlock(&queue->lock);
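
The command queue itself is a list of buffer chunks, each holding size/64 fixed 64-byte instruction slots; when idx runs off the end of the current chunk, the code advances qhead to the next chunk and resets idx. A simplified sketch of that chunked append, using a plain singly linked list in place of the kernel hlist (chunk and cmd_queue here are hypothetical types):

#include <stdint.h>
#include <string.h>

#define CMD_SIZE 64	/* one instruction slot, per the "size / 64" above */

/* Hypothetical chunk: a buffer holding size/CMD_SIZE instruction slots. */
struct chunk {
	struct chunk *next;
	uint32_t size;		/* usable bytes in buf */
	uint8_t *buf;
};

struct cmd_queue {
	struct chunk *head;	/* first chunk */
	struct chunk *cur;	/* chunk being filled, like queue->qhead */
	uint32_t idx;		/* next free slot in cur */
};

/* Copy one command into the current slot; when the chunk fills, advance to
 * the next chunk (wrapping to the first) and reset idx — the same walk
 * send_cpt_command() performs over its list of chunks. */
static void queue_append(struct cmd_queue *q, const void *cmd)
{
	memcpy(q->cur->buf + (size_t)q->idx * CMD_SIZE, cmd, CMD_SIZE);
	if (++q->idx >= q->cur->size / CMD_SIZE) {
		q->cur = q->cur->next ? q->cur->next : q->head;
		q->idx = 0;
	}
}

int main(void)
{
	uint8_t b0[2 * CMD_SIZE], b1[2 * CMD_SIZE], cmd[CMD_SIZE] = { 0 };
	struct chunk c1 = { NULL, sizeof(b1), b1 };
	struct chunk c0 = { &c1, sizeof(b0), b0 };
	struct cmd_queue q = { &c0, &c0, 0 };

	for (int i = 0; i < 5; i++)	/* wraps c0 -> c1 -> back to c0 */
		queue_append(&q, cmd);
	return (q.cur == &c0 && q.idx == 1) ? 0 : 1;
}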
/* do_request_cleanup() */
	struct pci_dev *pdev = cptvf->pdev;
	...
	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dlen, DMA_BIDIRECTIONAL);

	if (info->rptr_baddr)
		dma_unmap_single(&pdev->dev, info->rptr_baddr,
				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

	if (info->comp_baddr)
		dma_unmap_single(&pdev->dev, info->comp_baddr,
				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}

	kfree_sensitive(info->scatter_components);
	kfree_sensitive(info->gather_components);
	kfree_sensitive(info->out_buffer);
	kfree_sensitive(info->in_buffer);
	kfree_sensitive((void *)info->completion_addr);
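
This unwinds everything the submission path set up: the DPTR, RPTR, and completion-word DMA mappings, every per-buffer mapping recorded in the request, and finally the intermediate buffers, which are released with kfree_sensitive() so that plaintext or key material is zeroed before the memory returns to the allocator. A userspace analogue of that zero-before-free idiom follows; note that kfree_sensitive() learns the size from the allocator while user code must pass it, and explicit_bzero() is a glibc/BSD extension.

#include <stdlib.h>
#include <string.h>

/* Zero a buffer before freeing it, like kfree_sensitive(). explicit_bzero()
 * (glibc >= 2.25, also on the BSDs) resists being optimized away the way a
 * plain memset() before free() can be. */
static void free_sensitive(void *p, size_t len)
{
	if (!p)
		return;
	explicit_bzero(p, len);
	free(p);
}

int main(void)
{
	char *key = malloc(32);

	if (!key)
		return 1;
	memset(key, 0xA5, 32);		/* pretend this held key material */
	free_sensitive(key, 32);	/* wipe, then free */
	return 0;
}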
/* do_post_process() */
	struct pci_dev *pdev = cptvf->pdev;
	...
		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
/* process_pending_queue() */
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	...
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			...
			spin_unlock_bh(&pqueue->lock);
			...
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA err" : "SW err");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec((&pqueue->pending_count));
			pentry->post_arg = NULL;
			...
			spin_unlock_bh(&pqueue->lock);
			...
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			if (time_after_eq(jiffies,
					  (info->time_in +
					   (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec((&pqueue->pending_count));
				pentry->post_arg = NULL;
				...
				spin_unlock_bh(&pqueue->lock);
				...
			} else if ((*info->alternate_caddr ==
				    (~COMPLETION_CODE_INIT)) &&
				   (info->extra_time < TIME_IN_RESET_COUNT)) {
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				...
			}
		}
		...
		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec((&pqueue->pending_count));
		...
		spin_unlock_bh(&pqueue->lock);
		...
		do_post_process(info->cptvf, info);
		...
		pentry->callback(ccode, pentry->callback_arg);
	}
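
Each pending entry is resolved by polling two host-visible words: the completion code the engine writes at completion_addr, and the output word at alternate_caddr, which the host pre-set to ~COMPLETION_CODE_INIT. That yields four outcomes: hard failure (FAULT/SWERR), still pending with a jiffies-based timeout while the code is untouched, a bounded grace period (extra_time capped at TIME_IN_RESET_COUNT) when a code has landed but the output word has not, and normal completion. A condensed userspace sketch of that ladder — the constants and the time(2)-based clock are illustrative:

#include <stdint.h>
#include <time.h>

#define CODE_INIT	0x0	/* stand-in for COMPLETION_CODE_INIT */
#define CODE_FAULT	0x2	/* stand-in for CPT_COMP_E_FAULT */
#define CODE_SWERR	0x3	/* stand-in for CPT_COMP_E_SWERR */
#define TIMEOUT_SEC	4
#define RESET_GRACE_MAX	5	/* stand-in for TIME_IN_RESET_COUNT */

enum poll_result { DONE, FAILED, TIMED_OUT, STILL_PENDING };

struct pending {
	volatile uint8_t  *compcode;	/* device-written completion code */
	volatile uint64_t *alt_caddr;	/* host pre-sets this to ~CODE_INIT */
	time_t time_in;
	int extra_time;
};

static enum poll_result poll_entry(struct pending *p)
{
	uint8_t ccode = *p->compcode;

	if (ccode == CODE_FAULT || ccode == CODE_SWERR)
		return FAILED;			/* hard engine error */
	if (ccode == CODE_INIT)			/* nothing written yet */
		return (time(NULL) >= p->time_in + TIMEOUT_SEC) ?
			TIMED_OUT : STILL_PENDING;
	/* A code landed but the output word still shows the host marker:
	 * extend the deadline a bounded number of times, like extra_time. */
	if (*p->alt_caddr == ~(uint64_t)CODE_INIT &&
	    p->extra_time < RESET_GRACE_MAX) {
		p->time_in = time(NULL);
		p->extra_time++;
		return STILL_PENDING;
	}
	return DONE;
}

int main(void)
{
	uint8_t code = CODE_INIT;
	uint64_t alt = ~(uint64_t)CODE_INIT;
	struct pending p = { &code, &alt, time(NULL), 0 };

	code = 0x40;	/* pretend the engine reported success */
	alt = 0;	/* and wrote the output word */
	return poll_entry(&p) == DONE ? 0 : 1;
}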
/* process_request() */
	struct pci_dev *pdev = cptvf->pdev;
	...
	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Setting up SG list failed");
		...
	}

	cpt_req->dlen = info->dlen;
	...
	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		...
	}

	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					  (void *)info->completion_addr,
					  sizeof(union cpt_res_s),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		...
	}
	...
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	...
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		...
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
		...
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		...
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);
	...
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;
	...
	cptinst.s.res_addr = (u64)info->comp_baddr;
	...
	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		...
	}
	...
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
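
Submission therefore boils down to: build the VQ command with the opcode, both parameters, and the DPTR length as big-endian 16-bit fields, point it at the dptr/rptr/completion DMA addresses, claim a pending-queue entry, and hand the 64-byte instruction to send_cpt_command(). A small sketch of just the header serialization, with the field layout assumed from the cpu_to_be16() stores above:

#include <endian.h>
#include <stdint.h>

/* Assumed command header, matching the four cpu_to_be16() stores. */
struct vq_cmd_hdr {
	uint16_t opcode;
	uint16_t param1;
	uint16_t param2;
	uint16_t dlen;		/* bytes the engine fetches from dptr */
};

static struct vq_cmd_hdr make_cmd(uint16_t opcode, uint16_t param1,
				  uint16_t param2, uint16_t dlen)
{
	struct vq_cmd_hdr c = {
		.opcode = htobe16(opcode),
		.param1 = htobe16(param1),
		.param2 = htobe16(param2),
		.dlen   = htobe16(dlen),
	};
	return c;
}

int main(void)
{
	/* e.g. an 8-byte SG header plus one 40-byte component => dlen 48 */
	struct vq_cmd_hdr c = make_cmd(0x0102, 0, 0, 48);

	return c.dlen == htobe16(48) ? 0 : 1;
}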
/* vq_post_process() */
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno > cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
/* cptvf_do_request() */
	struct pci_dev *pdev = cptvf->pdev;
	...
		dev_err(&pdev->dev, "CPT Device is not ready");
		return -ENODEV;
	...
	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
			cptvf->vfid);
		return -EINVAL;
	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
			cptvf->vfid);
		return -EINVAL;
	}
	...
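
cptvf_do_request() is the public entry point: it rejects work while the device is not ready, and it enforces that a symmetric-engine (SE) VF only accepts requests flagged se_req while an asymmetric-engine (AE) VF only accepts ones without it, before handing off to process_request(). The pair of checks reduces to a single equivalence, sketched here with illustrative enum values rather than the driver's SE_TYPES/AE_TYPES:

#include <stdbool.h>

enum vf_type { AE_TYPE, SE_TYPE };	/* illustrative values */

/* A request may be queued iff its se_req flag matches the VF's engine
 * type — the same pair of rejections cptvf_do_request() applies. */
static bool request_routable(enum vf_type vftype, bool se_req)
{
	return (vftype == SE_TYPE) == se_req;
}

int main(void)
{
	return (request_routable(SE_TYPE, true) &&
		!request_routable(AE_TYPE, true)) ? 0 : 1;
}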