Lines matching "sub-engines"

1 // SPDX-License-Identifier: GPL-2.0
10 #include <linux/dma-mapping.h>
13 #include <linux/io-64-nonatomic-lo-hi.h>
25 MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common dri…
51 .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
62 .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
72 /* DSA on GNR-D platforms */
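/* idxd_setup_interrupts(): allocate MSI-X vectors, request the "idxd-misc" handler on vector 0, and initialize one irq entry per work queue */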
89 struct pci_dev *pdev = idxd->pdev; in idxd_setup_interrupts()
90 struct device *dev = &pdev->dev; in idxd_setup_interrupts()
97 dev_err(dev, "Not MSI-X interrupt capable.\n"); in idxd_setup_interrupts()
98 return -ENOSPC; in idxd_setup_interrupts()
100 idxd->irq_cnt = msixcnt; in idxd_setup_interrupts()
105 return -ENOSPC; in idxd_setup_interrupts()
111 ie->vector = pci_irq_vector(pdev, 0); in idxd_setup_interrupts()
112 rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie); in idxd_setup_interrupts()
117 dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector); in idxd_setup_interrupts()
119 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_interrupts()
123 ie->id = msix_idx; in idxd_setup_interrupts()
124 ie->int_handle = INVALID_INT_HANDLE; in idxd_setup_interrupts()
125 ie->pasid = IOMMU_PASID_INVALID; in idxd_setup_interrupts()
127 spin_lock_init(&ie->list_lock); in idxd_setup_interrupts()
128 init_llist_head(&ie->pending_llist); in idxd_setup_interrupts()
129 INIT_LIST_HEAD(&ie->work_list); in idxd_setup_interrupts()
144 struct pci_dev *pdev = idxd->pdev; in idxd_cleanup_interrupts()
154 free_irq(ie->vector, ie); in idxd_cleanup_interrupts()
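/* idxd_setup_wqs(): allocate the wq pointer array and enable bitmap, then initialize each wq's conf_dev, locks, completions and wqcfg */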
160 struct device *dev = &idxd->pdev->dev; in idxd_setup_wqs()
165 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), in idxd_setup_wqs()
167 if (!idxd->wqs) in idxd_setup_wqs()
168 return -ENOMEM; in idxd_setup_wqs()
170 idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); in idxd_setup_wqs()
171 if (!idxd->wq_enable_map) { in idxd_setup_wqs()
172 kfree(idxd->wqs); in idxd_setup_wqs()
173 return -ENOMEM; in idxd_setup_wqs()
176 for (i = 0; i < idxd->max_wqs; i++) { in idxd_setup_wqs()
179 rc = -ENOMEM; in idxd_setup_wqs()
183 idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ); in idxd_setup_wqs()
185 wq->id = i; in idxd_setup_wqs()
186 wq->idxd = idxd; in idxd_setup_wqs()
188 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_wqs()
189 conf_dev->bus = &dsa_bus_type; in idxd_setup_wqs()
190 conf_dev->type = &idxd_wq_device_type; in idxd_setup_wqs()
191 rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); in idxd_setup_wqs()
197 mutex_init(&wq->wq_lock); in idxd_setup_wqs()
198 init_waitqueue_head(&wq->err_queue); in idxd_setup_wqs()
199 init_completion(&wq->wq_dead); in idxd_setup_wqs()
200 init_completion(&wq->wq_resurrect); in idxd_setup_wqs()
201 wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; in idxd_setup_wqs()
202 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); in idxd_setup_wqs()
203 wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; in idxd_setup_wqs()
204 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); in idxd_setup_wqs()
205 if (!wq->wqcfg) { in idxd_setup_wqs()
207 rc = -ENOMEM; in idxd_setup_wqs()
211 if (idxd->hw.wq_cap.op_config) { in idxd_setup_wqs()
212 wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); in idxd_setup_wqs()
213 if (!wq->opcap_bmap) { in idxd_setup_wqs()
215 rc = -ENOMEM; in idxd_setup_wqs()
218 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); in idxd_setup_wqs()
220 mutex_init(&wq->uc_lock); in idxd_setup_wqs()
221 xa_init(&wq->upasid_xa); in idxd_setup_wqs()
222 idxd->wqs[i] = wq; in idxd_setup_wqs()
228 while (--i >= 0) { in idxd_setup_wqs()
229 wq = idxd->wqs[i]; in idxd_setup_wqs()
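/* idxd_setup_engines(): allocate the engine array and set up a conf_dev ("engine%d.%d") for each engine */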
239 struct device *dev = &idxd->pdev->dev; in idxd_setup_engines()
243 idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), in idxd_setup_engines()
245 if (!idxd->engines) in idxd_setup_engines()
246 return -ENOMEM; in idxd_setup_engines()
248 for (i = 0; i < idxd->max_engines; i++) { in idxd_setup_engines()
251 rc = -ENOMEM; in idxd_setup_engines()
255 idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE); in idxd_setup_engines()
257 engine->id = i; in idxd_setup_engines()
258 engine->idxd = idxd; in idxd_setup_engines()
260 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_engines()
261 conf_dev->bus = &dsa_bus_type; in idxd_setup_engines()
262 conf_dev->type = &idxd_engine_device_type; in idxd_setup_engines()
263 rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); in idxd_setup_engines()
269 idxd->engines[i] = engine; in idxd_setup_engines()
275 while (--i >= 0) { in idxd_setup_engines()
276 engine = idxd->engines[i]; in idxd_setup_engines()
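/* idxd_setup_groups(): allocate the group array, set up conf_devs, and apply default traffic class and read buffer settings */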
285 struct device *dev = &idxd->pdev->dev; in idxd_setup_groups()
290 idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), in idxd_setup_groups()
292 if (!idxd->groups) in idxd_setup_groups()
293 return -ENOMEM; in idxd_setup_groups()
295 for (i = 0; i < idxd->max_groups; i++) { in idxd_setup_groups()
298 rc = -ENOMEM; in idxd_setup_groups()
302 idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP); in idxd_setup_groups()
304 group->id = i; in idxd_setup_groups()
305 group->idxd = idxd; in idxd_setup_groups()
307 conf_dev->parent = idxd_confdev(idxd); in idxd_setup_groups()
308 conf_dev->bus = &dsa_bus_type; in idxd_setup_groups()
309 conf_dev->type = &idxd_group_device_type; in idxd_setup_groups()
310 rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); in idxd_setup_groups()
316 idxd->groups[i] = group; in idxd_setup_groups()
317 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { in idxd_setup_groups()
318 group->tc_a = 1; in idxd_setup_groups()
319 group->tc_b = 1; in idxd_setup_groups()
321 group->tc_a = -1; in idxd_setup_groups()
322 group->tc_b = -1; in idxd_setup_groups()
328 group->rdbufs_allowed = idxd->max_rdbufs; in idxd_setup_groups()
334 while (--i >= 0) { in idxd_setup_groups()
335 group = idxd->groups[i]; in idxd_setup_groups()
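/* idxd_cleanup_internals(): drop the group, engine and wq conf_dev references and destroy the device workqueue */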
345 for (i = 0; i < idxd->max_groups; i++) in idxd_cleanup_internals()
346 put_device(group_confdev(idxd->groups[i])); in idxd_cleanup_internals()
347 for (i = 0; i < idxd->max_engines; i++) in idxd_cleanup_internals()
348 put_device(engine_confdev(idxd->engines[i])); in idxd_cleanup_internals()
349 for (i = 0; i < idxd->max_wqs; i++) in idxd_cleanup_internals()
350 put_device(wq_confdev(idxd->wqs[i])); in idxd_cleanup_internals()
351 destroy_workqueue(idxd->wq); in idxd_cleanup_internals()
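/* idxd_init_evl(): when gen_cap reports event log support, allocate the evl and its usercopy kmem cache */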
356 struct device *dev = &idxd->pdev->dev; in idxd_init_evl()
361 if (idxd->hw.gen_cap.evl_support == 0) in idxd_init_evl()
366 return -ENOMEM; in idxd_init_evl()
368 mutex_init(&evl->lock); in idxd_init_evl()
369 evl->size = IDXD_EVL_SIZE_MIN; in idxd_init_evl()
378 idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size, in idxd_init_evl()
381 if (!idxd->evl_cache) { in idxd_init_evl()
383 return -ENOMEM; in idxd_init_evl()
386 idxd->evl = evl; in idxd_init_evl()
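/* idxd_setup_internals(): run the wq, engine, group and evl setup and create the per-device workqueue, unwinding on failure */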
392 struct device *dev = &idxd->pdev->dev; in idxd_setup_internals()
395 init_waitqueue_head(&idxd->cmd_waitq); in idxd_setup_internals()
409 idxd->wq = create_workqueue(dev_name(dev)); in idxd_setup_internals()
410 if (!idxd->wq) { in idxd_setup_internals()
411 rc = -ENOMEM; in idxd_setup_internals()
422 destroy_workqueue(idxd->wq); in idxd_setup_internals()
424 for (i = 0; i < idxd->max_groups; i++) in idxd_setup_internals()
425 put_device(group_confdev(idxd->groups[i])); in idxd_setup_internals()
427 for (i = 0; i < idxd->max_engines; i++) in idxd_setup_internals()
428 put_device(engine_confdev(idxd->engines[i])); in idxd_setup_internals()
430 for (i = 0; i < idxd->max_wqs; i++) in idxd_setup_internals()
431 put_device(wq_confdev(idxd->wqs[i])); in idxd_setup_internals()
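/* idxd_read_table_offsets(): read IDXD_TABLE_OFFSET and derive the grpcfg, wqcfg, msix_perm and perfmon offsets */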
439 struct device *dev = &idxd->pdev->dev; in idxd_read_table_offsets()
441 offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); in idxd_read_table_offsets()
442 offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); in idxd_read_table_offsets()
443 idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; in idxd_read_table_offsets()
444 dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); in idxd_read_table_offsets()
445 idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; in idxd_read_table_offsets()
446 dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); in idxd_read_table_offsets()
447 idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; in idxd_read_table_offsets()
448 dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); in idxd_read_table_offsets()
449 idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; in idxd_read_table_offsets()
450 dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); in idxd_read_table_offsets()
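/* idxd_read_caps(): read the GENCAP, CMDCAP, GRPCAP, ENGCAP, WQCAP, OPCAP and IAACAP registers and cache the device limits */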
468 struct device *dev = &idxd->pdev->dev; in idxd_read_caps()
472 idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); in idxd_read_caps()
473 dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); in idxd_read_caps()
475 if (idxd->hw.gen_cap.cmd_cap) { in idxd_read_caps()
476 idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET); in idxd_read_caps()
477 dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); in idxd_read_caps()
481 if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) in idxd_read_caps()
482 idxd->request_int_handles = true; in idxd_read_caps()
484 idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; in idxd_read_caps()
485 dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); in idxd_read_caps()
486 idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); in idxd_read_caps()
487 dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); in idxd_read_caps()
488 if (idxd->hw.gen_cap.config_en) in idxd_read_caps()
489 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); in idxd_read_caps()
492 idxd->hw.group_cap.bits = in idxd_read_caps()
493 ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); in idxd_read_caps()
494 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); in idxd_read_caps()
495 idxd->max_groups = idxd->hw.group_cap.num_groups; in idxd_read_caps()
496 dev_dbg(dev, "max groups: %u\n", idxd->max_groups); in idxd_read_caps()
497 idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; in idxd_read_caps()
498 dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); in idxd_read_caps()
499 idxd->nr_rdbufs = idxd->max_rdbufs; in idxd_read_caps()
502 idxd->hw.engine_cap.bits = in idxd_read_caps()
503 ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); in idxd_read_caps()
504 dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); in idxd_read_caps()
505 idxd->max_engines = idxd->hw.engine_cap.num_engines; in idxd_read_caps()
506 dev_dbg(dev, "max engines: %u\n", idxd->max_engines); in idxd_read_caps()
509 idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); in idxd_read_caps()
510 dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); in idxd_read_caps()
511 idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; in idxd_read_caps()
512 dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); in idxd_read_caps()
513 idxd->max_wqs = idxd->hw.wq_cap.num_wqs; in idxd_read_caps()
514 dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); in idxd_read_caps()
515 idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN); in idxd_read_caps()
516 dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size); in idxd_read_caps()
520 idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + in idxd_read_caps()
522 dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); in idxd_read_caps()
524 multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); in idxd_read_caps()
527 if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) in idxd_read_caps()
528 idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); in idxd_read_caps()
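/* idxd_alloc(): allocate the idxd device, take an ida id and the opcap bitmap, and name its conf_dev on the dsa bus */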
533 struct device *dev = &pdev->dev; in idxd_alloc()
543 idxd->pdev = pdev; in idxd_alloc()
544 idxd->data = data; in idxd_alloc()
545 idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); in idxd_alloc()
546 idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); in idxd_alloc()
547 if (idxd->id < 0) in idxd_alloc()
550 idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); in idxd_alloc()
551 if (!idxd->opcap_bmap) { in idxd_alloc()
552 ida_free(&idxd_ida, idxd->id); in idxd_alloc()
557 conf_dev->parent = dev; in idxd_alloc()
558 conf_dev->bus = &dsa_bus_type; in idxd_alloc()
559 conf_dev->type = idxd->data->dev_type; in idxd_alloc()
560 rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); in idxd_alloc()
566 spin_lock_init(&idxd->dev_lock); in idxd_alloc()
567 spin_lock_init(&idxd->cmd_lock); in idxd_alloc()
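/* idxd_enable_system_pasid()/idxd_disable_system_pasid(): allocate a global PASID and attach/detach it to the device's IOMMU domain for in-kernel use */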
574 struct pci_dev *pdev = idxd->pdev; in idxd_enable_system_pasid()
575 struct device *dev = &pdev->dev; in idxd_enable_system_pasid()
586 return -EPERM; in idxd_enable_system_pasid()
590 return -ENOSPC; in idxd_enable_system_pasid()
594 * types such as DMA-FQ, identity, etc. in idxd_enable_system_pasid()
599 pasid, domain->type); in idxd_enable_system_pasid()
606 idxd->pasid = pasid; in idxd_enable_system_pasid()
613 struct pci_dev *pdev = idxd->pdev; in idxd_disable_system_pasid()
614 struct device *dev = &pdev->dev; in idxd_disable_system_pasid()
621 iommu_detach_device_pasid(domain, dev, idxd->pasid); in idxd_disable_system_pasid()
622 iommu_free_global_pasid(idxd->pasid); in idxd_disable_system_pasid()
625 idxd->sva = NULL; in idxd_disable_system_pasid()
626 idxd->pasid = IOMMU_PASID_INVALID; in idxd_disable_system_pasid()
633 ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); in idxd_enable_sva()
637 ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); in idxd_enable_sva()
639 iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); in idxd_enable_sva()
646 iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); in idxd_disable_sva()
647 iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); in idxd_disable_sva()
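/* idxd_probe(): enable SVA/PASID where available, read capabilities, set up internals and interrupts, and obtain the cdev major */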
652 struct pci_dev *pdev = idxd->pdev; in idxd_probe()
653 struct device *dev = &pdev->dev; in idxd_probe()
667 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); in idxd_probe()
671 dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc); in idxd_probe()
673 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); in idxd_probe()
687 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { in idxd_probe()
698 idxd->major = idxd_cdev_get_major(idxd); in idxd_probe()
704 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); in idxd_probe()
725 idxd_disable_sva(idxd->pdev); in idxd_cleanup()
733 const struct bus_type *bus = drv->bus; in idxd_bind()
735 int err = -ENODEV; in idxd_bind()
751 const struct bus_type *bus = drv->bus; in idxd_unbind()
755 if (dev && dev->driver == drv) in idxd_unbind()
775 idxd_free_saved_configs(saved_groups, idxd->max_groups); in idxd_free_saved()
777 idxd_free_saved_configs(saved_engines, idxd->max_engines); in idxd_free_saved()
779 idxd_free_saved_configs(saved_wqs, idxd->max_wqs); in idxd_free_saved()
783 * Save IDXD device configurations including engines, groups, wqs etc.
789 struct device *dev = &idxd->pdev->dev; in idxd_device_config_save()
792 memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd)); in idxd_device_config_save()
794 if (idxd->evl) { in idxd_device_config_save()
795 memcpy(&idxd_saved->saved_evl, idxd->evl, in idxd_device_config_save()
800 kcalloc_node(idxd->max_groups, in idxd_device_config_save()
804 return -ENOMEM; in idxd_device_config_save()
806 for (i = 0; i < idxd->max_groups; i++) { in idxd_device_config_save()
815 return -ENOMEM; in idxd_device_config_save()
818 memcpy(saved_group, idxd->groups[i], sizeof(*saved_group)); in idxd_device_config_save()
823 kcalloc_node(idxd->max_engines, in idxd_device_config_save()
830 return -ENOMEM; in idxd_device_config_save()
832 for (i = 0; i < idxd->max_engines; i++) { in idxd_device_config_save()
837 /* Free saved groups and engines */ in idxd_device_config_save()
841 return -ENOMEM; in idxd_device_config_save()
844 memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine)); in idxd_device_config_save()
849 bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, in idxd_device_config_save()
852 /* Free saved groups and engines */ in idxd_device_config_save()
855 return -ENOMEM; in idxd_device_config_save()
858 bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs); in idxd_device_config_save()
861 kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), in idxd_device_config_save()
864 /* Free saved groups and engines */ in idxd_device_config_save()
867 return -ENOMEM; in idxd_device_config_save()
870 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_config_save()
877 /* Free saved groups, engines, and wqs */ in idxd_device_config_save()
881 return -ENOMEM; in idxd_device_config_save()
887 wq = idxd->wqs[i]; in idxd_device_config_save()
888 mutex_lock(&wq->wq_lock); in idxd_device_config_save()
891 mutex_unlock(&wq->wq_lock); in idxd_device_config_save()
895 idxd_saved->saved_groups = no_free_ptr(saved_groups); in idxd_device_config_save()
896 idxd_saved->saved_engines = no_free_ptr(saved_engines); in idxd_device_config_save()
897 idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map); in idxd_device_config_save()
898 idxd_saved->saved_wqs = no_free_ptr(saved_wqs); in idxd_device_config_save()
904 * Restore IDXD device configurations including engines, groups, wqs etc
910 struct idxd_evl *saved_evl = &idxd_saved->saved_evl; in idxd_device_config_restore()
913 idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit; in idxd_device_config_restore()
916 idxd->evl->size = saved_evl->size; in idxd_device_config_restore()
918 for (i = 0; i < idxd->max_groups; i++) { in idxd_device_config_restore()
921 saved_group = idxd_saved->saved_groups[i]; in idxd_device_config_restore()
922 group = idxd->groups[i]; in idxd_device_config_restore()
924 group->rdbufs_allowed = saved_group->rdbufs_allowed; in idxd_device_config_restore()
925 group->rdbufs_reserved = saved_group->rdbufs_reserved; in idxd_device_config_restore()
926 group->tc_a = saved_group->tc_a; in idxd_device_config_restore()
927 group->tc_b = saved_group->tc_b; in idxd_device_config_restore()
928 group->use_rdbuf_limit = saved_group->use_rdbuf_limit; in idxd_device_config_restore()
932 kfree(idxd_saved->saved_groups); in idxd_device_config_restore()
934 for (i = 0; i < idxd->max_engines; i++) { in idxd_device_config_restore()
937 saved_engine = idxd_saved->saved_engines[i]; in idxd_device_config_restore()
938 engine = idxd->engines[i]; in idxd_device_config_restore()
940 engine->group = saved_engine->group; in idxd_device_config_restore()
944 kfree(idxd_saved->saved_engines); in idxd_device_config_restore()
946 bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map, in idxd_device_config_restore()
947 idxd->max_wqs); in idxd_device_config_restore()
948 bitmap_free(idxd_saved->saved_wq_enable_map); in idxd_device_config_restore()
950 for (i = 0; i < idxd->max_wqs; i++) { in idxd_device_config_restore()
954 if (!test_bit(i, idxd->wq_enable_map)) in idxd_device_config_restore()
957 saved_wq = idxd_saved->saved_wqs[i]; in idxd_device_config_restore()
958 wq = idxd->wqs[i]; in idxd_device_config_restore()
960 mutex_lock(&wq->wq_lock); in idxd_device_config_restore()
962 wq->group = saved_wq->group; in idxd_device_config_restore()
963 wq->flags = saved_wq->flags; in idxd_device_config_restore()
964 wq->threshold = saved_wq->threshold; in idxd_device_config_restore()
965 wq->size = saved_wq->size; in idxd_device_config_restore()
966 wq->priority = saved_wq->priority; in idxd_device_config_restore()
967 wq->type = saved_wq->type; in idxd_device_config_restore()
968 len = strlen(saved_wq->name) + 1; in idxd_device_config_restore()
969 strscpy(wq->name, saved_wq->name, len); in idxd_device_config_restore()
970 wq->max_xfer_bytes = saved_wq->max_xfer_bytes; in idxd_device_config_restore()
971 wq->max_batch_size = saved_wq->max_batch_size; in idxd_device_config_restore()
972 wq->enqcmds_retries = saved_wq->enqcmds_retries; in idxd_device_config_restore()
973 wq->descs = saved_wq->descs; in idxd_device_config_restore()
974 wq->idxd_chan = saved_wq->idxd_chan; in idxd_device_config_restore()
975 len = strlen(saved_wq->driver_name) + 1; in idxd_device_config_restore()
976 strscpy(wq->driver_name, saved_wq->driver_name, len); in idxd_device_config_restore()
978 mutex_unlock(&wq->wq_lock); in idxd_device_config_restore()
983 kfree(idxd_saved->saved_wqs); in idxd_device_config_restore()
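/* idxd_reset_prepare()/idxd_reset_done(): save the device configuration and PCI state before a reset, then restore them and re-enable the previously enabled wqs */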
989 struct device *dev = &idxd->pdev->dev; in idxd_reset_prepare()
993 dev = &idxd->pdev->dev; in idxd_reset_prepare()
998 dev_to_node(&pdev->dev)); in idxd_reset_prepare()
1013 idxd->idxd_saved = no_free_ptr(idxd_saved); in idxd_reset_prepare()
1016 pci_save_state(idxd->pdev); in idxd_reset_prepare()
1026 if (!idxd->idxd_saved) in idxd_reset_done()
1029 dev = &idxd->pdev->dev; in idxd_reset_done()
1033 pci_restore_state(idxd->pdev); in idxd_reset_done()
1045 idxd_device_config_restore(idxd, idxd->idxd_saved); in idxd_reset_done()
1047 /* Re-configure IDXD device if allowed. */ in idxd_reset_done()
1048 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { in idxd_reset_done()
1064 for (i = 0; i < idxd->max_wqs; i++) { in idxd_reset_done()
1065 if (test_bit(i, idxd->wq_enable_map)) { in idxd_reset_done()
1066 struct idxd_wq *wq = idxd->wqs[i]; in idxd_reset_done()
1069 wq->state = IDXD_WQ_DISABLED; in idxd_reset_done()
1070 sprintf(wq_name, "wq%d.%d", idxd->id, wq->id); in idxd_reset_done()
1077 if (wq->type == IDXD_WQT_USER) in idxd_reset_done()
1080 rc = -EINVAL; in idxd_reset_done()
1082 clear_bit(i, idxd->wq_enable_map); in idxd_reset_done()
1084 "HALT: unable to re-enable wq %s\n", in idxd_reset_done()
1090 kfree(idxd->idxd_saved); in idxd_reset_done()
1103 * configure device without re-allocating and re-configuring idxd data.
1114 pdev = idxd ? idxd->pdev : pdev; in idxd_pci_probe_alloc()
1115 dev = &pdev->dev; in idxd_pci_probe_alloc()
1116 data = id ? (struct idxd_driver_data *)id->driver_data : NULL; in idxd_pci_probe_alloc()
1125 rc = -ENOMEM; in idxd_pci_probe_alloc()
1130 idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); in idxd_pci_probe_alloc()
1131 if (!idxd->reg_base) { in idxd_pci_probe_alloc()
1132 rc = -ENOMEM; in idxd_pci_probe_alloc()
1137 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in idxd_pci_probe_alloc()
1147 idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); in idxd_pci_probe_alloc()
1154 if (data->load_device_defaults) { in idxd_pci_probe_alloc()
1155 rc = data->load_device_defaults(idxd); in idxd_pci_probe_alloc()
1175 /* Re-enable interrupts in the IDXD device. */ in idxd_pci_probe_alloc()
1181 dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", in idxd_pci_probe_alloc()
1182 idxd->hw.version); in idxd_pci_probe_alloc()
1185 idxd->user_submission_safe = data->user_submission_safe; in idxd_pci_probe_alloc()
1192 pci_iounmap(pdev, idxd->reg_base); in idxd_pci_probe_alloc()
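/* idxd_wqs_quiesce(): quiesce all enabled kernel-type wqs */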
1210 for (i = 0; i < idxd->max_wqs; i++) { in idxd_wqs_quiesce()
1211 wq = idxd->wqs[i]; in idxd_wqs_quiesce()
1212 if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) in idxd_wqs_quiesce()
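/* idxd_shutdown()/idxd_remove(): disable the device, free the interrupts, flush and destroy the workqueue, and unmap the MMIO region */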
1225 dev_err(&pdev->dev, "Disabling device failed\n"); in idxd_shutdown()
1227 irq_entry = &idxd->ie; in idxd_shutdown()
1228 synchronize_irq(irq_entry->vector); in idxd_shutdown()
1230 flush_workqueue(idxd->wq); in idxd_shutdown()
1240 * When ->release() is called for the idxd->conf_dev, it frees all the memory related in idxd_remove()
1242 * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref in idxd_remove()
1243 * on the device here to hold off the freeing while allowing the idxd sub-driver in idxd_remove()
1254 free_irq(irq_entry->vector, irq_entry); in idxd_remove()
1256 pci_iounmap(pdev, idxd->reg_base); in idxd_remove()
1260 destroy_workqueue(idxd->wq); in idxd_remove()
1284 return -ENODEV; in idxd_init_module()