Lines matching "wq" (full-identifier matches)

42 	struct idxd_wq *wq;  member
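
The member at line 42 is reached throughout the matches below as ctx->wq (per-open context) and as idxd_cdev->wq (line 162). A minimal sketch of the per-open pairing, with illustrative names; only wq, pasid, and the counters are grounded in the matches that follow:

    /* Hypothetical per-open context -- field set is illustrative,
     * not the driver's actual layout. */
    struct example_user_context {
            struct idxd_wq *wq;     /* bound work queue (the line 42 member) */
            u32 pasid;              /* PASID registered at open, see line 269 */
            u64 counters[2];        /* per-PASID counters, see line 201 */
    };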
54 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
100 struct idxd_wq *wq = ctx->wq; in cdev_file_attr_visible() local
102 if (!wq_pasid_enabled(wq)) in cdev_file_attr_visible()
121 struct idxd_wq *wq = ctx->wq; in idxd_file_dev_release() local
122 struct idxd_device *idxd = wq->idxd; in idxd_file_dev_release()
128 if (wq_shared(wq)) { in idxd_file_dev_release()
132 /* The wq disable in the disable pasid function will drain the wq */ in idxd_file_dev_release()
133 rc = idxd_wq_disable_pasid(wq); in idxd_file_dev_release()
135 dev_err(dev, "wq disable pasid failed.\n"); in idxd_file_dev_release()
137 idxd_wq_drain(wq); in idxd_file_dev_release()
142 idxd_cdev_evl_drain_pasid(wq, ctx->pasid); in idxd_file_dev_release()
147 mutex_lock(&wq->wq_lock); in idxd_file_dev_release()
148 idxd_wq_put(wq); in idxd_file_dev_release()
149 mutex_unlock(&wq->wq_lock); in idxd_file_dev_release()
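
Read together, lines 128-149 give the release ordering: a shared wq tears down its PASID binding (the wq disable inside that path drains it), a dedicated wq is drained explicitly, the event log is then drained for this PASID, and only afterwards is the open reference dropped under wq_lock. Condensed from the matches above (error handling trimmed):

    if (wq_shared(wq)) {
            /* the wq disable in the pasid teardown drains the wq */
            if (idxd_wq_disable_pasid(wq))
                    dev_err(dev, "wq disable pasid failed.\n");
    } else {
            idxd_wq_drain(wq);
    }
    idxd_cdev_evl_drain_pasid(wq, ctx->pasid);

    mutex_lock(&wq->wq_lock);
    idxd_wq_put(wq);                /* drop the reference taken at open */
    mutex_unlock(&wq->wq_lock);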
162 struct idxd_wq *wq = idxd_cdev->wq; in idxd_cdev_dev_release() local
164 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_cdev_dev_release()
185 return idxd_cdev->wq; in inode_wq()
190 struct idxd_wq *wq = ctx->wq; in idxd_xa_pasid_remove() local
193 mutex_lock(&wq->uc_lock); in idxd_xa_pasid_remove()
194 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); in idxd_xa_pasid_remove()
196 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", in idxd_xa_pasid_remove()
198 mutex_unlock(&wq->uc_lock); in idxd_xa_pasid_remove()
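
idxd_xa_pasid_remove() clears the PASID slot with xa_cmpxchg() rather than xa_erase(), so the entry is removed only if it still maps this context; a slot that was already replaced is left alone and merely warned about. The pattern in generic, self-contained form (names are assumptions):

    #include <linux/xarray.h>
    #include <linux/mutex.h>

    /* Remove the entry at 'pasid' only if it still points at ctx. */
    static void pasid_entry_remove(struct xarray *xa, struct mutex *lock,
                                   unsigned long pasid, void *ctx)
    {
            void *ptr;

            mutex_lock(lock);
            ptr = xa_cmpxchg(xa, pasid, ctx, NULL, GFP_KERNEL);
            if (ptr != ctx)         /* slot was re-owned by someone else */
                    pr_warn("xarray cmpxchg failed for pasid %lu\n", pasid);
            mutex_unlock(lock);
    }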
201 void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) in idxd_user_counter_increment() argument
208 mutex_lock(&wq->uc_lock); in idxd_user_counter_increment()
209 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_user_counter_increment()
211 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
215 mutex_unlock(&wq->uc_lock); in idxd_user_counter_increment()
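
idxd_user_counter_increment() holds wq->uc_lock across both the xa_load() and the update (note the two unlock sites, lines 211 and 215, one for the not-found early return): the lock is what keeps the context from being removed and freed between lookup and increment. A sketch reusing the hypothetical example_user_context from the first block:

    void counter_increment(struct xarray *xa, struct mutex *lock,
                           u32 pasid, int index)
    {
            struct example_user_context *ctx;

            mutex_lock(lock);
            ctx = xa_load(xa, pasid);
            if (!ctx) {                     /* PASID already torn down */
                    mutex_unlock(lock);
                    return;
            }
            ctx->counters[index]++;         /* safe: removal takes the same lock */
            mutex_unlock(lock);
    }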
222 struct idxd_wq *wq; in idxd_cdev_open() local
229 wq = inode_wq(inode); in idxd_cdev_open()
230 idxd = wq->idxd; in idxd_cdev_open()
233 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); in idxd_cdev_open()
239 mutex_lock(&wq->wq_lock); in idxd_cdev_open()
241 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { in idxd_cdev_open()
246 ctx->wq = wq; in idxd_cdev_open()
268 mutex_lock(&wq->uc_lock); in idxd_cdev_open()
269 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); in idxd_cdev_open()
270 mutex_unlock(&wq->uc_lock); in idxd_cdev_open()
274 if (wq_dedicated(wq)) { in idxd_cdev_open()
275 rc = idxd_wq_set_pasid(wq, pasid); in idxd_cdev_open()
277 dev_err(dev, "wq set pasid failed: %d\n", rc); in idxd_cdev_open()
283 idxd_cdev = wq->idxd_cdev; in idxd_cdev_open()
308 idxd_wq_get(wq); in idxd_cdev_open()
309 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
323 mutex_unlock(&wq->wq_lock); in idxd_cdev_open()
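
The open-path matches (lines 229-323) fix the locking and rollback shape: everything runs under wq->wq_lock, a dedicated wq refuses a second opener, the PASID-to-context mapping goes into upasid_xa under the separate uc_lock, a dedicated wq additionally gets the PASID programmed into its configuration, and the wq reference is taken only once setup succeeds. Condensed sketch (the -EBUSY value and label name are assumptions; unwinding of earlier steps is elided):

    mutex_lock(&wq->wq_lock);
    if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
            rc = -EBUSY;            /* dedicated wq: one opener at a time */
            goto fail;
    }
    ctx->wq = wq;

    mutex_lock(&wq->uc_lock);
    rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
    mutex_unlock(&wq->uc_lock);
    if (rc < 0)
            goto fail;

    if (wq_dedicated(wq)) {
            rc = idxd_wq_set_pasid(wq, pasid);      /* program PASID into the wq */
            if (rc < 0) {
                    dev_err(dev, "wq set pasid failed: %d\n", rc);
                    goto fail;
            }
    }

    idxd_wq_get(wq);                /* paired with idxd_wq_put() in release */
    mutex_unlock(&wq->wq_lock);
    return 0;

    fail:
    mutex_unlock(&wq->wq_lock);
    return rc;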
328 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) in idxd_cdev_evl_drain_pasid() argument
330 struct idxd_device *idxd = wq->idxd; in idxd_cdev_evl_drain_pasid()
348 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id) in idxd_cdev_evl_drain_pasid()
352 drain_workqueue(wq->wq); in idxd_cdev_evl_drain_pasid()
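
idxd_cdev_evl_drain_pasid() has two halves visible here: line 348 identifies event-log entries that belong to the departing PASID on this wq, and line 352 waits for queued event-log work to finish via drain_workqueue(). A sketch of that shape; the ring walk and the bitmap used to mark entries are illustrative, not the driver's exact bookkeeping:

    /* Walk the event log from head to tail, flag entries owned by
     * the departing PASID on this wq, then let in-flight work drain. */
    for (h = head; h != tail; h = (h + 1) % size) {
            entry_head = (struct __evl_entry *)(log + h * ent_size);
            if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
                    set_bit(h, bmap);       /* mark for the handler to skip */
    }
    drain_workqueue(wq->wq);                /* wait for queued fault work */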
359 struct idxd_wq *wq = ctx->wq; in idxd_cdev_release() local
360 struct idxd_device *idxd = wq->idxd; in idxd_cdev_release()
371 static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, in check_vma() argument
374 struct device *dev = &wq->idxd->pdev->dev; in check_vma()
390 struct idxd_wq *wq = ctx->wq; in idxd_cdev_mmap() local
391 struct idxd_device *idxd = wq->idxd; in idxd_cdev_mmap()
410 rc = check_vma(wq, vma, __func__); in idxd_cdev_mmap()
415 pfn = (base + idxd_get_wq_portal_full_offset(wq->id, in idxd_cdev_mmap()
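
In idxd_cdev_mmap(), check_vma() (line 371) screens the request, and line 415 computes the page frame of this wq's portal; what follows is the standard one-page MMIO mapping. A plausible completion of that tail using the usual io_remap_pfn_range() idiom; the portal-type argument is truncated in the match at line 415, so IDXD_PORTAL_LIMITED here is an assumption, and the driver's vma flag setup is omitted:

    pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
                            IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;  /* arg assumed */
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                              vma->vm_page_prot);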
427 struct idxd_wq *wq = ctx->wq; in idxd_submit_user_descriptor() local
428 struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev; in idxd_submit_user_descriptor()
430 void __iomem *portal = idxd_wq_portal_addr(wq); in idxd_submit_user_descriptor()
445 !wq->idxd->user_submission_safe) in idxd_submit_user_descriptor()
455 if (wq_dedicated(wq)) in idxd_submit_user_descriptor()
460 rc = idxd_enqcmds(wq, portal, &descriptor); in idxd_submit_user_descriptor()
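
Lines 445-460 pin down the submission policy: a user-supplied descriptor is screened unless the device is flagged user_submission_safe, and the write itself depends on the wq type, since a dedicated wq takes a posted 64-byte write while a shared wq must use ENQCMDS (wrapped by idxd_enqcmds(), line 460), which can fail when the queue is full. Condensed sketch (iosubmit_cmds512() is the x86 helper for 512-bit posted MMIO writes):

    if (wq_dedicated(wq)) {
            /* dedicated wq: fire-and-forget 64-byte descriptor write */
            iosubmit_cmds512(portal, &descriptor, 1);
    } else {
            /* shared wq: ENQCMDS reports retry if the wq is full */
            rc = idxd_enqcmds(wq, portal, &descriptor);
            if (rc < 0)
                    return rc;
    }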
492 struct idxd_wq *wq = ctx->wq; in idxd_cdev_poll() local
493 struct idxd_device *idxd = wq->idxd; in idxd_cdev_poll()
496 poll_wait(filp, &wq->err_queue, wait); in idxd_cdev_poll()
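
idxd_cdev_poll() is the standard poll shape: register the waiter on wq->err_queue (line 496), then derive the ready mask from device error state. A minimal sketch; the exact wake condition is an assumption, chosen only to be consistent with an error queue:

    static __poll_t example_poll(struct file *filp, poll_table *wait)
    {
            struct example_user_context *ctx = filp->private_data;
            struct idxd_wq *wq = ctx->wq;
            struct idxd_device *idxd = wq->idxd;
            __poll_t out = 0;

            poll_wait(filp, &wq->err_queue, wait);          /* line 496 */
            if (READ_ONCE(idxd->state) == IDXD_DEV_HALTED)  /* assumed condition */
                    out = EPOLLIN | EPOLLRDNORM;
            return out;
    }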
519 int idxd_wq_add_cdev(struct idxd_wq *wq) in idxd_wq_add_cdev() argument
521 struct idxd_device *idxd = wq->idxd; in idxd_wq_add_cdev()
533 idxd_cdev->wq = wq; in idxd_wq_add_cdev()
536 cdev_ctx = &ictx[wq->idxd->data->type]; in idxd_wq_add_cdev()
545 dev->parent = wq_confdev(wq); in idxd_wq_add_cdev()
550 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); in idxd_wq_add_cdev()
554 wq->idxd_cdev = idxd_cdev; in idxd_wq_add_cdev()
558 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); in idxd_wq_add_cdev()
566 wq->idxd_cdev = NULL; in idxd_wq_add_cdev()
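
The registration matches (lines 521-566) follow the combined cdev-and-device idiom: initialize and name the struct device with the wq's confdev as parent, publish wq->idxd_cdev, then register; on failure, put_device() funnels cleanup through idxd_cdev_dev_release() (line 162) and the back-pointer is cleared (line 566). Generic form of that idiom (fops name and labels assumed):

    device_initialize(dev);
    dev->parent = wq_confdev(wq);                   /* line 545 */
    rc = dev_set_name(dev, "%s/wq%u.%u",
                      idxd->data->name_prefix, idxd->id, wq->id);
    if (rc < 0)
            goto err;

    wq->idxd_cdev = idxd_cdev;
    cdev_init(cdev, &example_fops);                 /* fops name assumed */
    rc = cdev_device_add(cdev, dev);                /* cdev + device together */
    if (rc) {
            dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
            goto err;
    }
    return 0;

    err:
    put_device(dev);        /* frees via idxd_cdev_dev_release() */
    wq->idxd_cdev = NULL;
    return rc;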
570 void idxd_wq_del_cdev(struct idxd_wq *wq) in idxd_wq_del_cdev() argument
574 idxd_cdev = wq->idxd_cdev; in idxd_wq_del_cdev()
575 wq->idxd_cdev = NULL; in idxd_wq_del_cdev()
583 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); in idxd_user_drv_probe() local
584 struct idxd_device *idxd = wq->idxd; in idxd_user_drv_probe()
590 mutex_lock(&wq->wq_lock); in idxd_user_drv_probe()
592 if (!idxd_wq_driver_name_match(wq, dev)) { in idxd_user_drv_probe()
599 * User type WQ is enabled only when SVA is enabled for two reasons: in idxd_user_drv_probe()
601 * can directly access physical address through the WQ. in idxd_user_drv_probe()
611 "User type WQ cannot be enabled without SVA.\n"); in idxd_user_drv_probe()
617 wq->wq = create_workqueue(dev_name(wq_confdev(wq))); in idxd_user_drv_probe()
618 if (!wq->wq) { in idxd_user_drv_probe()
623 wq->type = IDXD_WQT_USER; in idxd_user_drv_probe()
624 rc = idxd_drv_enable_wq(wq); in idxd_user_drv_probe()
628 rc = idxd_wq_add_cdev(wq); in idxd_user_drv_probe()
635 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
639 idxd_drv_disable_wq(wq); in idxd_user_drv_probe()
641 destroy_workqueue(wq->wq); in idxd_user_drv_probe()
642 wq->type = IDXD_WQT_NONE; in idxd_user_drv_probe()
644 mutex_unlock(&wq->wq_lock); in idxd_user_drv_probe()
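
The probe matches (lines 590-644) are a textbook goto unwind under wq->wq_lock: driver-name match, SVA requirement, per-wq workqueue creation, wq enable, cdev add; each failure label undoes exactly the steps that succeeded, in reverse order. Reassembled from the fragments above (label names assumed):

    mutex_lock(&wq->wq_lock);

    wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
    if (!wq->wq) {
            rc = -ENOMEM;
            goto err_unlock;
    }
    wq->type = IDXD_WQT_USER;

    rc = idxd_drv_enable_wq(wq);
    if (rc < 0)
            goto err_wq;

    rc = idxd_wq_add_cdev(wq);
    if (rc < 0)
            goto err_cdev;

    mutex_unlock(&wq->wq_lock);
    return 0;

    err_cdev:
    idxd_drv_disable_wq(wq);        /* undo enable */
    err_wq:
    destroy_workqueue(wq->wq);      /* undo create */
    wq->type = IDXD_WQT_NONE;
    err_unlock:
    mutex_unlock(&wq->wq_lock);
    return rc;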
650 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); in idxd_user_drv_remove() local
652 mutex_lock(&wq->wq_lock); in idxd_user_drv_remove()
653 idxd_wq_del_cdev(wq); in idxd_user_drv_remove()
654 idxd_drv_disable_wq(wq); in idxd_user_drv_remove()
655 wq->type = IDXD_WQT_NONE; in idxd_user_drv_remove()
656 destroy_workqueue(wq->wq); in idxd_user_drv_remove()
657 wq->wq = NULL; in idxd_user_drv_remove()
658 mutex_unlock(&wq->wq_lock); in idxd_user_drv_remove()
706 * idxd_copy_cr - copy completion record to user address space found by wq and
708 * @wq: work queue
718 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, in idxd_copy_cr() argument
721 struct device *dev = &wq->idxd->pdev->dev; in idxd_copy_cr()
726 mutex_lock(&wq->uc_lock); in idxd_copy_cr()
728 ctx = xa_load(&wq->upasid_xa, pasid); in idxd_copy_cr()
767 mutex_unlock(&wq->uc_lock); in idxd_copy_cr()
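
idxd_copy_cr() (kernel-doc at line 706) writes a completion record back into user memory on the fault path; the visible matches show it resolving the PASID to its user context under wq->uc_lock before any copy. A heavily simplified sketch of that lookup-then-copy shape; the real driver also validates the owning mm and handles partial copies, none of which is shown here:

    int copy_cr_sketch(struct idxd_wq *wq, ioasid_t pasid,
                       unsigned long addr, void *cr, int len)
    {
            void *ctx;
            int copied = 0;

            mutex_lock(&wq->uc_lock);
            ctx = xa_load(&wq->upasid_xa, pasid);   /* line 728 */
            if (!ctx)               /* PASID no longer registered */
                    goto out;
            /* simplification: assume we run in the owning task's context */
            if (copy_to_user((void __user *)addr, cr, len) == 0)
                    copied = len;
    out:
            mutex_unlock(&wq->uc_lock);
            return copied;
    }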