Lines Matching +full:needs +full:- +full:double +full:- +full:reset

1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/io-64-nonatomic-lo-hi.h>
35 struct device *dev = &idxd->pdev->dev; in idxd_device_reinit()
47 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_reinit()
48 if (test_bit(i, idxd->wq_enable_map)) { in idxd_device_reinit()
49 struct idxd_wq *wq = idxd->wqs[i]; in idxd_device_reinit()
53 clear_bit(i, idxd->wq_enable_map); in idxd_device_reinit()
54 dev_warn(dev, "Unable to re-enable wq %s\n", in idxd_device_reinit()
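Read together, the fragments above are the software-reset recovery path: after the device is brought back up, the driver walks wq_enable_map and re-enables only the workqueues that were enabled before the halt, dropping any that fail. A minimal sketch of that loop, reconstructed from these lines (idxd_wq_enable() and wq_confdev() are driver helpers assumed here, not shown in the listing):

        /* Sketch: bring previously enabled wqs back after a device reset. */
        for (i = 0; i < idxd->max_wqs; i++) {
                if (test_bit(i, idxd->wq_enable_map)) {
                        struct idxd_wq *wq = idxd->wqs[i];

                        rc = idxd_wq_enable(wq);
                        if (rc < 0) {
                                /* Forget the wq if it cannot be re-enabled. */
                                clear_bit(i, idxd->wq_enable_map);
                                dev_warn(dev, "Unable to re-enable wq %s\n",
                                         dev_name(wq_confdev(wq)));
                        }
                }
        }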
74 struct idxd_device *idxd = wq->idxd; in idxd_int_handle_revoke_drain()
75 struct device *dev = &idxd->pdev->dev; in idxd_int_handle_revoke_drain()
85 if (ie->pasid != IOMMU_PASID_INVALID) in idxd_int_handle_revoke_drain()
86 desc.pasid = ie->pasid; in idxd_int_handle_revoke_drain()
87 desc.int_handle = ie->int_handle; in idxd_int_handle_revoke_drain()
101 dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id); in idxd_int_handle_revoke_drain()
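These lines build and submit a drain descriptor directly from the irq entry, carrying the entry's PASID and interrupt handle. A hedged sketch of that submission, assuming the uapi DSA descriptor layout and the driver's portal helpers (idxd_wq_portal_addr(), wq_dedicated(), idxd_enqcmds(), iosubmit_cmds512()):

        struct dsa_hw_desc desc = {};
        void __iomem *portal;

        /* A bare drain op that raises an interrupt, no completion record. */
        desc.opcode = DSA_OPCODE_DRAIN;
        desc.flags = IDXD_OP_FLAG_RCI;
        if (ie->pasid != IOMMU_PASID_INVALID)
                desc.pasid = ie->pasid;
        desc.int_handle = ie->int_handle;

        portal = idxd_wq_portal_addr(wq);
        wmb();          /* descriptor must be fully written before it is issued */
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, &desc, 1);
        } else if (idxd_enqcmds(wq, portal, &desc) < 0) {
                dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);
        }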
111 spin_lock(&ie->list_lock); in idxd_abort_invalid_int_handle_descs()
112 head = llist_del_all(&ie->pending_llist); in idxd_abort_invalid_int_handle_descs()
115 list_add_tail(&d->list, &ie->work_list); in idxd_abort_invalid_int_handle_descs()
118 list_for_each_entry_safe(d, t, &ie->work_list, list) { in idxd_abort_invalid_int_handle_descs()
119 if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL) in idxd_abort_invalid_int_handle_descs()
120 list_move_tail(&d->list, &flist); in idxd_abort_invalid_int_handle_descs()
122 spin_unlock(&ie->list_lock); in idxd_abort_invalid_int_handle_descs()
125 list_del(&d->list); in idxd_abort_invalid_int_handle_descs()
134 struct idxd_device *idxd = revoke->idxd; in idxd_int_handle_revoke()
135 struct pci_dev *pdev = idxd->pdev; in idxd_int_handle_revoke()
136 struct device *dev = &pdev->dev; in idxd_int_handle_revoke()
139 if (!idxd->request_int_handles) { in idxd_int_handle_revoke()
154 for (i = 1; i < idxd->irq_cnt; i++) { in idxd_int_handle_revoke()
158 if (ie->int_handle == INVALID_INT_HANDLE) in idxd_int_handle_revoke()
169 ie->int_handle = INVALID_INT_HANDLE; in idxd_int_handle_revoke()
175 /* No change in interrupt handle, nothing needs to be done */ in idxd_int_handle_revoke()
176 if (ie->int_handle == new_handle) in idxd_int_handle_revoke()
179 if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) { in idxd_int_handle_revoke()
185 ie->int_handle = new_handle; in idxd_int_handle_revoke()
189 mutex_lock(&wq->wq_lock); in idxd_int_handle_revoke()
190 reinit_completion(&wq->wq_resurrect); in idxd_int_handle_revoke()
193 percpu_ref_kill(&wq->wq_active); in idxd_int_handle_revoke()
196 wait_for_completion(&wq->wq_dead); in idxd_int_handle_revoke()
198 ie->int_handle = new_handle; in idxd_int_handle_revoke()
201 percpu_ref_reinit(&wq->wq_active); in idxd_int_handle_revoke()
202 complete_all(&wq->wq_resurrect); in idxd_int_handle_revoke()
203 mutex_unlock(&wq->wq_lock); in idxd_int_handle_revoke()
209 * needs to ensure that the drain descriptor issued does not pass in idxd_int_handle_revoke()
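The second half of the revoke handler is a quiesce/resume sequence: killing wq_active blocks new submitters, wq_dead signals that in-flight references have drained, the stale interrupt handle is swapped for the new one, and then the percpu ref is revived and submitters parked on wq_resurrect are released. The later comment (line 209) adds that a drain descriptor is issued afterwards so nothing queued with the old handle is left behind. A minimal sketch of the ordering, with the surrounding revoke bookkeeping omitted:

        mutex_lock(&wq->wq_lock);
        reinit_completion(&wq->wq_resurrect);

        /* Stop new submitters; they will wait on wq_resurrect. */
        percpu_ref_kill(&wq->wq_active);
        /* Wait for submitters already holding a reference to finish. */
        wait_for_completion(&wq->wq_dead);

        /* Safe to switch the interrupt handle now: nothing is mid-submit. */
        ie->int_handle = new_handle;

        /* Re-open the wq and release anyone parked on the resurrect. */
        percpu_ref_reinit(&wq->wq_active);
        complete_all(&wq->wq_resurrect);
        mutex_unlock(&wq->wq_lock);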
225 struct idxd_wq *wq = fault->wq; in idxd_evl_fault_work()
226 struct idxd_device *idxd = wq->idxd; in idxd_evl_fault_work()
227 struct device *dev = &idxd->pdev->dev; in idxd_evl_fault_work()
228 struct idxd_evl *evl = idxd->evl; in idxd_evl_fault_work()
229 struct __evl_entry *entry_head = fault->entry; in idxd_evl_fault_work()
230 void *cr = (void *)entry_head + idxd->data->evl_cr_off; in idxd_evl_fault_work()
231 int cr_size = idxd->data->compl_size; in idxd_evl_fault_work()
232 u8 *status = (u8 *)cr + idxd->data->cr_status_off; in idxd_evl_fault_work()
233 u8 *result = (u8 *)cr + idxd->data->cr_result_off; in idxd_evl_fault_work()
237 switch (fault->status) { in idxd_evl_fault_work()
239 if (entry_head->batch && entry_head->first_err_in_batch) in idxd_evl_fault_work()
240 evl->batch_fail[entry_head->batch_id] = false; in idxd_evl_fault_work()
243 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); in idxd_evl_fault_work()
246 bf = &evl->batch_fail[entry_head->batch_id]; in idxd_evl_fault_work()
248 copy_size = entry_head->rcr || *bf ? cr_size : 0; in idxd_evl_fault_work()
255 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULTS); in idxd_evl_fault_work()
262 dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status); in idxd_evl_fault_work()
273 copied = idxd_copy_cr(wq, entry_head->pasid, entry_head->fault_addr, in idxd_evl_fault_work()
284 switch (fault->status) { in idxd_evl_fault_work()
287 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); in idxd_evl_fault_work()
290 if (entry_head->batch) in idxd_evl_fault_work()
291 evl->batch_fail[entry_head->batch_id] = true; in idxd_evl_fault_work()
296 idxd_user_counter_increment(wq, entry_head->pasid, COUNTER_FAULT_FAILS); in idxd_evl_fault_work()
308 kmem_cache_free(idxd->evl_cache, fault); in idxd_evl_fault_work()
314 struct device *dev = &idxd->pdev->dev; in process_evl_entry()
315 struct idxd_evl *evl = idxd->evl; in process_evl_entry()
318 if (test_bit(index, evl->bmap)) { in process_evl_entry()
319 clear_bit(index, evl->bmap); in process_evl_entry()
321 status = DSA_COMP_STATUS(entry_head->error); in process_evl_entry()
328 if (entry_head->rci) in process_evl_entry()
331 if (!entry_head->rcr && status == DSA_COMP_DRAIN_EVL) in process_evl_entry()
334 fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC); in process_evl_entry()
336 struct idxd_wq *wq = idxd->wqs[entry_head->wq_idx]; in process_evl_entry()
338 fault->wq = wq; in process_evl_entry()
339 fault->status = status; in process_evl_entry()
340 memcpy(&fault->entry, entry_head, ent_size); in process_evl_entry()
341 INIT_WORK(&fault->work, idxd_evl_fault_work); in process_evl_entry()
342 queue_work(wq->wq, &fault->work); in process_evl_entry()
348 status, entry_head->operation, in process_evl_entry()
349 entry_head->fault_addr); in process_evl_entry()
358 struct idxd_evl *evl = idxd->evl; in process_evl_entries()
366 mutex_lock(&evl->lock); in process_evl_entries()
369 idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32)); in process_evl_entries()
370 evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); in process_evl_entries()
373 size = idxd->evl->size; in process_evl_entries()
376 entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); in process_evl_entries()
382 iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET); in process_evl_entries()
383 mutex_unlock(&evl->lock); in process_evl_entries()
392 * IDXD device requires a Function Level Reset (FLR). in idxd_device_flr()
393 * pci_reset_function() will reset the device with FLR. in idxd_device_flr()
395 rc = pci_reset_function(idxd->pdev); in idxd_device_flr()
397 dev_err(&idxd->pdev->dev, "FLR failed\n"); in idxd_device_flr()
404 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); in idxd_halt()
406 idxd->state = IDXD_DEV_HALTED; in idxd_halt()
409 * If we need a software reset, we will throw the work in idxd_halt()
413 INIT_WORK(&idxd->work, idxd_device_reinit); in idxd_halt()
414 queue_work(idxd->wq, &idxd->work); in idxd_halt()
416 idxd->state = IDXD_DEV_HALTED; in idxd_halt()
418 dev_dbg(&idxd->pdev->dev, in idxd_halt()
420 INIT_WORK(&idxd->work, idxd_device_flr); in idxd_halt()
421 queue_work(idxd->wq, &idxd->work); in idxd_halt()
424 idxd->state = IDXD_DEV_HALTED; in idxd_halt()
428 dev_err(&idxd->pdev->dev, in idxd_halt()
429 "idxd halted, need system reset"); in idxd_halt()
431 return -ENXIO; in idxd_halt()
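Taken together, the halt fragments read GENSTS, mark the device halted, and pick a recovery action from the reset type that hardware reports: a software reset is recoverable through the reinit work shown earlier, an FLR is deferred to a work item that calls pci_reset_function(), and anything else is fatal until a system reset. A sketch of that decision, with the gensts_reg fields and enum names (state, reset_type, IDXD_DEVICE_RESET_SOFTWARE/FLR) taken from the driver's register definitions as an assumption:

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
        if (gensts.state == IDXD_DEVICE_STATE_HALT) {
                idxd->state = IDXD_DEV_HALTED;
                if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
                        /* Recoverable: re-initialize from a work item so device
                         * command completions can still be interrupt driven. */
                        INIT_WORK(&idxd->work, idxd_device_reinit);
                        queue_work(idxd->wq, &idxd->work);
                } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
                        /* Needs a Function Level Reset via pci_reset_function(). */
                        INIT_WORK(&idxd->work, idxd_device_flr);
                        queue_work(idxd->wq, &idxd->work);
                } else {
                        /* Neither helps: only a system reset can recover. */
                        dev_err(&idxd->pdev->dev, "idxd halted, need system reset");
                        return -ENXIO;
                }
        }
        return 0;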
442 struct device *dev = &idxd->pdev->dev; in idxd_misc_thread()
447 cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); in idxd_misc_thread()
451 iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); in idxd_misc_thread()
457 spin_lock(&idxd->dev_lock); in idxd_misc_thread()
459 idxd->sw_err.bits[i] = ioread64(idxd->reg_base + in idxd_misc_thread()
462 iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK, in idxd_misc_thread()
463 idxd->reg_base + IDXD_SWERR_OFFSET); in idxd_misc_thread()
465 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { in idxd_misc_thread()
466 int id = idxd->sw_err.wq_idx; in idxd_misc_thread()
467 struct idxd_wq *wq = idxd->wqs[id]; in idxd_misc_thread()
469 if (wq->type == IDXD_WQT_USER) in idxd_misc_thread()
470 wake_up_interruptible(&wq->err_queue); in idxd_misc_thread()
474 for (i = 0; i < idxd->max_wqs; i++) { in idxd_misc_thread()
475 struct idxd_wq *wq = idxd->wqs[i]; in idxd_misc_thread()
477 if (wq->type == IDXD_WQT_USER) in idxd_misc_thread()
478 wake_up_interruptible(&wq->err_queue); in idxd_misc_thread()
482 spin_unlock(&idxd->dev_lock); in idxd_misc_thread()
487 i, idxd->sw_err.bits[i]); in idxd_misc_thread()
497 revoke->idxd = idxd; in idxd_misc_thread()
498 INIT_WORK(&revoke->work, idxd_int_handle_revoke); in idxd_misc_thread()
499 queue_work(idxd->wq, &revoke->work); in idxd_misc_thread()
509 complete(idxd->cmd_done); in idxd_misc_thread()
538 struct idxd_desc *desc = irw->desc; in idxd_int_handle_resubmit_work()
539 struct idxd_wq *wq = desc->wq; in idxd_int_handle_resubmit_work()
542 desc->completion->status = 0; in idxd_int_handle_resubmit_work()
545 dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n", in idxd_int_handle_resubmit_work()
546 desc->id, wq->id); in idxd_int_handle_resubmit_work()
548 * If the error is not -EAGAIN, it means the submission failed because the wq in idxd_int_handle_resubmit_work()
549 * has been killed rather than by an ENQCMDS failure. Here the driver needs to in idxd_int_handle_resubmit_work()
552 * -EAGAIN comes from ENQCMDS failure. idxd_submit_desc() will handle the in idxd_int_handle_resubmit_work()
555 if (rc != -EAGAIN) { in idxd_int_handle_resubmit_work()
556 desc->completion->status = IDXD_COMP_DESC_ABORT; in idxd_int_handle_resubmit_work()
566 struct idxd_wq *wq = desc->wq; in idxd_queue_int_handle_resubmit()
567 struct idxd_device *idxd = wq->idxd; in idxd_queue_int_handle_resubmit()
574 irw->desc = desc; in idxd_queue_int_handle_resubmit()
575 INIT_WORK(&irw->work, idxd_int_handle_resubmit_work); in idxd_queue_int_handle_resubmit()
576 queue_work(idxd->wq, &irw->work); in idxd_queue_int_handle_resubmit()
585 head = llist_del_all(&irq_entry->pending_llist); in irq_process_pending_llist()
590 u8 status = desc->completion->status & DSA_COMP_STATUS_MASK; in irq_process_pending_llist()
597 if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { in irq_process_pending_llist()
604 spin_lock(&irq_entry->list_lock); in irq_process_pending_llist()
605 list_add_tail(&desc->list, in irq_process_pending_llist()
606 &irq_entry->work_list); in irq_process_pending_llist()
607 spin_unlock(&irq_entry->list_lock); in irq_process_pending_llist()
621 spin_lock(&irq_entry->list_lock); in irq_process_work_list()
622 if (list_empty(&irq_entry->work_list)) { in irq_process_work_list()
623 spin_unlock(&irq_entry->list_lock); in irq_process_work_list()
627 list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) { in irq_process_work_list()
628 if (desc->completion->status) { in irq_process_work_list()
629 list_move_tail(&desc->list, &flist); in irq_process_work_list()
633 spin_unlock(&irq_entry->list_lock); in irq_process_work_list()
640 list_del(&desc->list); in irq_process_work_list()
642 if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { in irq_process_work_list()
659 * is the common linux double linked list. We are in a scenario of in idxd_wq_thread()
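The truncated comment at line 659 describes the design these two passes implement: many kernel submitters act as producers on the lockless pending_llist, while the interrupt thread is the single consumer that moves still-pending work onto the ordinary doubly linked work_list. A condensed sketch of the work_list pass that pairs with the fragments above (helper names as in the previous sketch):

        /* Second pass: pick up descriptors parked earlier that have since finished. */
        spin_lock(&irq_entry->list_lock);
        list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
                if (desc->completion->status)
                        list_move_tail(&desc->list, &flist);
        }
        spin_unlock(&irq_entry->list_lock);

        /* Complete them with the lock dropped. */
        list_for_each_entry_safe(desc, n, &flist, list) {
                list_del(&desc->list);
                if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
                        continue;
                }
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
        }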