Lines Matching +full:riscv +full:- +full:iommu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for RISC-V IOMMU implementations.
5 * Copyright © 2022-2024 Rivos Inc.
6 * Copyright © 2023 FORTH-ICS/CARV
13 #define pr_fmt(fmt) "riscv-iommu: " fmt
18 #include <linux/iommu.h>
23 #include "../iommu-pages.h"
24 #include "iommu-bits.h"
25 #include "iommu.h"
37 /* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */
38 #define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10))
39 #define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12))
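/*
 * Illustrative, standalone sketch (not part of this file): the two macros
 * above move a physical address into, and back out of, the PPN[53:10]
 * register field used by DDTP and the queue base registers. The same
 * shift-and-mask arithmetic, for an assumed page-aligned address:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_phys_to_ppn(uint64_t pa)
{
	return (pa >> 2) & (((1ULL << 44) - 1) << 10);	/* PA[55:12] -> bits [53:10] */
}

static uint64_t demo_ppn_to_phys(uint64_t pn)
{
	return (pn << 2) & (((1ULL << 44) - 1) << 12);	/* bits [53:10] -> PA[55:12] */
}

int main(void)
{
	uint64_t pa = 0x80200000ULL;	/* hypothetical page-aligned address */

	/* Round trip is lossless for page-aligned addresses; low 12 bits drop. */
	assert(demo_ppn_to_phys(demo_phys_to_ppn(pa)) == pa);
	return 0;
}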
42 iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu)
44 /* IOMMU PSCID allocation namespace. */
46 #define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1)
48 /* Device resource-managed allocations */
58 iommu_free_pages(devres->addr, devres->order);
66 return devres->addr == target->addr;
69 static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
74 addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
87 devres->addr = addr;
88 devres->order = order;
90 devres_add(iommu->dev, devres);
95 static void riscv_iommu_free_pages(struct riscv_iommu_device *iommu, void *addr)
99 devres_release(iommu->dev, riscv_iommu_devres_pages_release,
110 _q->qid = RISCV_IOMMU_INTR_ ## name; \
111 _q->qbr = RISCV_IOMMU_REG_ ## name ## B; \
112 _q->qcr = RISCV_IOMMU_REG_ ## name ## CSR; \
113 _q->mask = _q->mask ?: (RISCV_IOMMU_DEF_ ## name ## _COUNT) - 1;\
117 #define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB))
118 #define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB))
119 #define Q_ITEM(q, index) ((q)->mask & (index))
120 #define Q_IPSR(q) BIT((q)->qid)
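/*
 * Illustrative, standalone sketch (not part of this file): Q_HEAD()/Q_TAIL()
 * reuse the fixed CQH/CQT distances from CQB, so one macro set addresses the
 * head/tail registers of every queue. Indices are free-running counters and
 * Q_ITEM() masks them down to a ring slot; with a power-of-two ring the mask
 * makes wrap-around implicit. Q_IPSR() picks the queue's bit in IPSR. The
 * values below are assumptions chosen for the example.
 */
#include <assert.h>

int main(void)
{
	const unsigned int mask = 512 - 1;	/* assumed ring of 512 entries */
	unsigned int tail = 1023;		/* free-running producer counter */

	assert((tail & mask) == 511);		/* Q_ITEM(): slot inside the ring */
	assert(((tail + 1) & mask) == 0);	/* next slot wraps to the start */
	assert((1u << 1) == 0x2);		/* Q_IPSR() for qid 1: bit 1 of IPSR */
	return 0;
}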
123 * Discover queue ring buffer hardware configuration, allocate in-memory
127 * @queue - data structure, configured with RISCV_IOMMU_QUEUE_INIT()
128 * @entry_size - queue single element size in bytes.
130 static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
141 riscv_iommu_writeq(iommu, queue->qbr, RISCV_IOMMU_QUEUE_LOG2SZ_FIELD);
142 qb = riscv_iommu_readq(iommu, queue->qbr);
149 logsz = ilog2(queue->mask);
161 queue->phys = pfn_to_phys(FIELD_GET(RISCV_IOMMU_PPN_FIELD, qb));
162 queue->base = devm_ioremap(iommu->dev, queue->phys, queue_size);
168 queue->base = riscv_iommu_get_pages(iommu, order);
169 queue->phys = __pa(queue->base);
170 } while (!queue->base && logsz-- > 0);
173 if (!queue->base)
174 return -ENOMEM;
176 qb = phys_to_ppn(queue->phys) |
180 riscv_iommu_writeq(iommu, queue->qbr, qb);
181 rb = riscv_iommu_readq(iommu, queue->qbr);
183 dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid);
184 return -ENODEV;
188 queue->mask = (2U << logsz) - 1;
190 dev_dbg(iommu->dev, "queue #%u allocated 2^%u entries",
191 queue->qid, logsz + 1);
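/*
 * Illustrative, standalone sketch (not part of this file): the allocation
 * path above writes all-ones into the WARL LOG2SZ field and reads it back to
 * learn the largest ring the hardware accepts, then halves the request on
 * allocation failure until something fits, folding the final size back into
 * queue->mask. A minimal model of that retry loop, with a hypothetical
 * allocator and an assumed 16-byte record size:
 */
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for riscv_iommu_get_pages(): fails above 64 KiB. */
static void *demo_alloc(size_t size)
{
	return size > 65536 ? NULL : malloc(size);
}

int main(void)
{
	const size_t entry_size = 16;	/* assumed queue record size */
	unsigned int logsz = 12;	/* request 2^13 entries first */
	void *base;

	do {
		base = demo_alloc(entry_size << (logsz + 1));
	} while (!base && logsz-- > 0);

	if (!base)
		return 1;
	printf("allocated ring with %u entries\n", 2u << logsz);
	free(base);
	return 0;
}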
201 if (riscv_iommu_readl(queue->iommu, RISCV_IOMMU_REG_IPSR) & Q_IPSR(queue))
207 static int riscv_iommu_queue_vec(struct riscv_iommu_device *iommu, int n)
210 return (iommu->icvec >> (n * 4)) & RISCV_IOMMU_ICVEC_CIV;
216 * @queue - data structure, already allocated with riscv_iommu_queue_alloc()
217 * @irq_handler - threaded interrupt handler.
219 static int riscv_iommu_queue_enable(struct riscv_iommu_device *iommu,
223 const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)];
227 if (queue->iommu)
228 return -EBUSY;
232 return -ENODEV;
234 queue->iommu = iommu;
237 dev_name(iommu->dev), queue);
239 queue->iommu = NULL;
244 if (queue->qid == RISCV_IOMMU_INTR_CQ)
245 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0);
247 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0);
255 riscv_iommu_writel(iommu, queue->qcr,
260 riscv_iommu_readl_timeout(iommu, queue->qcr,
268 riscv_iommu_writel(iommu, queue->qcr, 0);
270 queue->iommu = NULL;
271 dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid);
272 return -EBUSY;
276 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
287 struct riscv_iommu_device *iommu = queue->iommu;
290 if (!iommu)
293 free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue);
294 riscv_iommu_writel(iommu, queue->qcr, 0);
295 riscv_iommu_readl_timeout(iommu, queue->qcr,
300 dev_err(iommu->dev, "fail to disable hardware queue #%u, csr 0x%x\n",
301 queue->qid, csr);
303 queue->iommu = NULL;
313 unsigned int head = atomic_read(&queue->head);
314 unsigned int tail = atomic_read(&queue->tail);
316 int available = (int)(tail - head);
324 if (riscv_iommu_readl_timeout(queue->iommu, Q_TAIL(queue),
325 tail, (tail & ~queue->mask) == 0,
327 dev_err_once(queue->iommu->dev,
336 return (int)(atomic_add_return((tail - last) & queue->mask, &queue->tail) - head);
344 const unsigned int head = atomic_add_return(count, &queue->head);
346 riscv_iommu_writel(queue->iommu, Q_HEAD(queue), Q_ITEM(queue, head));
352 const unsigned int cons = atomic_read(&queue->head);
356 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head,
357 !(head & ~queue->mask),
361 return cons + ((head - last) & queue->mask);
369 unsigned int cons = atomic_read(&queue->head);
372 if ((int)(cons - index) > 0)
377 (int)(cons - index) > 0, 0, timeout_us);
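/*
 * Illustrative, standalone sketch (not part of this file): the queue helpers
 * above keep full free-running counters, while the hardware head/tail
 * registers only expose the masked slot number. Signed subtraction of two
 * counters gives the occupancy, and "(hw - last) & mask" recovers how far the
 * hardware advanced since the last known position. Values are assumptions.
 */
#include <assert.h>

int main(void)
{
	const unsigned int mask = 256 - 1;	/* assumed ring of 256 entries */
	unsigned int head = 300;		/* free-running consumer counter */
	unsigned int tail = 310;		/* free-running producer counter */

	/* Occupancy: signed difference of the free-running counters. */
	assert((int)(tail - head) == 10);

	/* Hardware reports only the masked slot; recover the advance. */
	unsigned int last = head & mask;	/* slot last seen: 44 */
	unsigned int hw_head = 50;		/* value read back from the register */

	head += (hw_head - last) & mask;	/* hardware consumed 6 more entries */
	assert(head == 306);
	return 0;
}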
382 * Error handling for IOMMU hardware not responding in reasonable time
398 prod = atomic_inc_return(&queue->prod) - 1;
399 head = atomic_read(&queue->head);
402 if ((prod - head) > queue->mask) {
403 if (readx_poll_timeout(atomic_read, &queue->head,
404 head, (prod - head) < queue->mask,
407 } else if ((prod - head) == queue->mask) {
410 if (riscv_iommu_readl_timeout(queue->iommu, Q_HEAD(queue), head,
411 !(head & ~queue->mask) && head != last,
414 atomic_add((head - last) & queue->mask, &queue->head);
418 memcpy(queue->base + Q_ITEM(queue, prod) * entry_size, entry, entry_size);
421 if (readx_poll_timeout(atomic_read, &queue->tail, tail, prod == tail,
431 riscv_iommu_writel(queue->iommu, Q_TAIL(queue), Q_ITEM(queue, prod + 1));
438 atomic_inc(&queue->tail);
447 dev_err_once(queue->iommu->dev, "Hardware error: command enqueue failed\n");
453 * IOMMU Command queue chapter 3.1
463 ctrl = riscv_iommu_readl(queue->iommu, queue->qcr);
466 riscv_iommu_writel(queue->iommu, queue->qcr, ctrl);
467 dev_warn(queue->iommu->dev,
469 queue->qid,
479 riscv_iommu_writel(queue->iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
484 /* Send command to the IOMMU command queue */
485 static void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu,
488 riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd));
492 static void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu,
499 prod = riscv_iommu_queue_send(&iommu->cmdq, &cmd, sizeof(cmd));
504 if (riscv_iommu_queue_wait(&iommu->cmdq, prod, timeout_us))
505 dev_err_once(iommu->dev,
510 * IOMMU Fault/Event queue chapter 3.2
513 static void riscv_iommu_fault(struct riscv_iommu_device *iommu,
516 unsigned int err = FIELD_GET(RISCV_IOMMU_FQ_HDR_CAUSE, event->hdr);
517 unsigned int devid = FIELD_GET(RISCV_IOMMU_FQ_HDR_DID, event->hdr);
521 dev_warn_ratelimited(iommu->dev,
523 err, devid, event->iotval, event->iotval2);
530 struct riscv_iommu_device *iommu = queue->iommu;
535 events = (struct riscv_iommu_fq_record *)queue->base;
538 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_IPSR, Q_IPSR(queue));
543 riscv_iommu_fault(iommu, &events[Q_ITEM(queue, idx)]);
548 ctrl = riscv_iommu_readl(iommu, queue->qcr);
550 riscv_iommu_writel(iommu, queue->qcr, ctrl);
551 dev_warn(iommu->dev,
553 queue->qid,
562 static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iommu,
565 const bool base_format = !(iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT);
573 if (iommu->ddt_mode < RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL ||
574 iommu->ddt_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_3LVL)
579 * DDI[0]: bits 0 - 6 (1st level) (7 bits)
580 * DDI[1]: bits 7 - 15 (2nd level) (9 bits)
581 * DDI[2]: bits 16 - 23 (3rd level) (8 bits)
584 * DDI[0]: bits 0 - 5 (1st level) (6 bits)
585 * DDI[1]: bits 6 - 14 (2nd level) (9 bits)
586 * DDI[2]: bits 15 - 23 (3rd level) (9 bits)
599 depth = iommu->ddt_mode - RISCV_IOMMU_DDTP_IOMMU_MODE_1LVL;
603 /* Get to the level of the non-leaf node that holds the device context */
604 for (ddtp = iommu->ddt_root; depth-- > 0;) {
607 * Each non-leaf node is 64bits wide and on each level
623 ptr = riscv_iommu_get_pages(iommu, 0);
635 /* Race setting DDT detected, re-read and retry. */
636 riscv_iommu_free_pages(iommu, ptr);
643 * is 8 * 64bits, hence the (3 - base_format) below.
645 ddtp += (devid & ((64 << base_format) - 1)) << (3 - base_format);
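/*
 * Illustrative, standalone sketch (not part of this file): per the
 * partitioning comment in riscv_iommu_get_dc() above, the base format splits
 * a 24-bit device ID into 7/9/8-bit indexes and the extended format into
 * 6/9/9-bit indexes, one per directory level. The example device ID is
 * arbitrary.
 */
#include <stdio.h>

static void demo_split_devid(unsigned int devid, int base_format)
{
	unsigned int ddi0, ddi1, ddi2;

	if (base_format) {		/* 7 / 9 / 8 bits */
		ddi0 = devid & 0x7f;
		ddi1 = (devid >> 7) & 0x1ff;
		ddi2 = (devid >> 16) & 0xff;
	} else {			/* extended: 6 / 9 / 9 bits */
		ddi0 = devid & 0x3f;
		ddi1 = (devid >> 6) & 0x1ff;
		ddi2 = (devid >> 15) & 0x1ff;
	}
	printf("devid %#x -> DDI[2]=%u DDI[1]=%u DDI[0]=%u\n", devid, ddi2, ddi1, ddi0);
}

int main(void)
{
	demo_split_devid(0x010203, 1);
	demo_split_devid(0x010203, 0);
	return 0;
}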
651 * This is best effort IOMMU translation shutdown flow.
652 * Disable IOMMU without waiting for hardware response.
654 void riscv_iommu_disable(struct riscv_iommu_device *iommu)
656 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
659 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0);
660 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0);
661 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0);
664 #define riscv_iommu_read_ddtp(iommu) ({ \
666 riscv_iommu_readq_timeout((iommu), RISCV_IOMMU_REG_DDTP, ddtp, \
671 static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
676 ddtp = riscv_iommu_read_ddtp(iommu);
678 return -EBUSY;
688 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP,
690 ddtp = riscv_iommu_read_ddtp(iommu);
692 return -EBUSY;
694 iommu->ddt_phys = ppn_to_phys(ddtp);
695 if (iommu->ddt_phys)
696 iommu->ddt_root = devm_ioremap(iommu->dev,
697 iommu->ddt_phys, PAGE_SIZE);
698 if (iommu->ddt_root)
699 memset(iommu->ddt_root, 0, PAGE_SIZE);
702 if (!iommu->ddt_root) {
703 iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
704 iommu->ddt_phys = __pa(iommu->ddt_root);
707 if (!iommu->ddt_root)
708 return -ENOMEM;
716 * Accepted iommu->ddt_mode is updated on success.
718 static int riscv_iommu_iodir_set_mode(struct riscv_iommu_device *iommu,
721 struct device *dev = iommu->dev;
726 ddtp = riscv_iommu_read_ddtp(iommu);
728 return -EBUSY;
736 return -EINVAL;
741 rq_ddtp |= phys_to_ppn(iommu->ddt_phys);
743 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, rq_ddtp);
744 ddtp = riscv_iommu_read_ddtp(iommu);
748 return -EBUSY;
751 /* Verify IOMMU hardware accepts new DDTP config. */
761 return -EINVAL;
765 * Mode field is WARL, an IOMMU may support a subset of
774 rq_mode--;
779 * We tried all supported modes and IOMMU hardware failed to
785 return -EINVAL;
788 iommu->ddt_mode = mode;
794 riscv_iommu_cmd_send(iommu, &cmd);
798 riscv_iommu_cmd_send(iommu, &cmd);
801 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
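/*
 * Illustrative, standalone sketch (not part of this file): the DDTP mode
 * field is WARL, so riscv_iommu_iodir_set_mode() above requests the richest
 * mode, reads back what the hardware actually latched, and steps down
 * (rq_mode--) until a request is accepted or all modes are exhausted. A
 * simplified model with abstract mode numbers and a hypothetical device that
 * only accepts modes up to 2:
 */
#include <stdio.h>

static unsigned int demo_write_readback(unsigned int rq_mode)
{
	return rq_mode <= 2 ? rq_mode : 2;	/* assumed WARL behaviour */
}

int main(void)
{
	unsigned int rq_mode = 4;	/* request the richest mode first */
	unsigned int mode;

	do {
		mode = demo_write_readback(rq_mode);
		if (mode == rq_mode)
			break;		/* hardware accepted the request */
	} while (--rq_mode > 0);

	printf("negotiated mode %u\n", mode);
	return 0;
}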
806 /* This struct contains protection domain specific IOMMU driver data. */
821 /* Private IOMMU data for managed devices, dev_iommu_priv_* */
833 * Blocking and identity domains are not tracked here, as the IOMMU hardware
837 * The device pointer and IOMMU data remain stable in the bond struct after
838 * _probe_device() where it's attached to the managed IOMMU, up to the
851 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
857 return -ENOMEM;
858 bond->dev = dev;
862 * managed IOMMU device.
865 spin_lock(&domain->lock);
866 list_for_each(bonds, &domain->bonds)
867 if (dev_to_iommu(list_entry(bonds, struct riscv_iommu_bond, list)->dev) == iommu)
869 list_add_rcu(&bond->list, bonds);
870 spin_unlock(&domain->lock);
881 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
889 spin_lock(&domain->lock);
890 list_for_each_entry(bond, &domain->bonds, list) {
893 else if (bond->dev == dev)
895 else if (dev_to_iommu(bond->dev) == iommu)
899 list_del_rcu(&found->list);
900 spin_unlock(&domain->lock);
904 * If this was the last bond between this domain and the IOMMU
909 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
910 riscv_iommu_cmd_send(iommu, &cmd);
912 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
919 * the hardware, when RISC-V IOMMU architecture specification update for
928 struct riscv_iommu_device *iommu, *prev;
930 unsigned long len = end - start + 1;
934 * For each IOMMU linked with this protection domain (via bonds->dev),
938 * bond creation - riscv_iommu_bond_link(), and device directory
939 * update - riscv_iommu_iodir_update().
942 * -------------------------- --------------------------
951 * be configured with already valid page table content. If an IOMMU is
960 list_for_each_entry_rcu(bond, &domain->bonds, list) {
961 iommu = dev_to_iommu(bond->dev);
965 * to the IOMMU for the same PSCID, and with domain->bonds list
966 * arranged based on the device's IOMMU, it's sufficient to check
969 if (iommu == prev)
973 riscv_iommu_cmd_inval_set_pscid(&cmd, domain->pscid);
977 riscv_iommu_cmd_send(iommu, &cmd);
980 riscv_iommu_cmd_send(iommu, &cmd);
982 prev = iommu;
986 list_for_each_entry_rcu(bond, &domain->bonds, list) {
987 iommu = dev_to_iommu(bond->dev);
988 if (iommu == prev)
991 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
992 prev = iommu;
1006 * cached by the IOMMU hardware.
1011 static void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu,
1021 for (i = 0; i < fwspec->num_ids; i++) {
1022 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1023 tc = READ_ONCE(dc->tc);
1027 WRITE_ONCE(dc->tc, tc & ~RISCV_IOMMU_DC_TC_V);
1031 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]);
1032 riscv_iommu_cmd_send(iommu, &cmd);
1037 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
1043 for (i = 0; i < fwspec->num_ids; i++) {
1044 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1045 tc = READ_ONCE(dc->tc);
1048 WRITE_ONCE(dc->fsc, fsc);
1049 WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID);
1052 WRITE_ONCE(dc->tc, tc);
1056 riscv_iommu_cmd_iodir_set_did(&cmd, fwspec->ids[i]);
1057 riscv_iommu_cmd_send(iommu, &cmd);
1060 riscv_iommu_cmd_sync(iommu, RISCV_IOMMU_IOTINVAL_TIMEOUT);
1079 riscv_iommu_iotlb_inval(domain, gather->start, gather->end);
1082 #define PT_SHIFT (PAGE_SHIFT - ilog2(sizeof(pte_t)))
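/*
 * Illustrative, standalone sketch (not part of this file): with 4 KiB pages
 * and 8-byte PTEs, PT_SHIFT is 12 - 3 = 9, so every table level resolves
 * 9 bits of the IOVA (512 entries per table). The walkers that follow start
 * at level pgd_mode - SV39 + 2 (2 for Sv39, 3 for Sv48, 4 for Sv57) and mask
 * the shifted IOVA with PTRS_PER_PTE - 1; the per-level shift below is an
 * assumption consistent with that indexing.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12	/* 4 KiB pages */
#define DEMO_PT_SHIFT	9	/* 12 - log2(8-byte PTE) */

int main(void)
{
	unsigned long long iova = 0x12345678000ULL;	/* arbitrary example IOVA */
	int level;

	/* Sv39-style walk: levels 2, 1, 0; each level consumes 9 bits. */
	for (level = 2; level >= 0; level--) {
		int shift = DEMO_PAGE_SHIFT + DEMO_PT_SHIFT * level;

		printf("level %d index %llu\n", level, (iova >> shift) & 511);
	}
	return 0;
}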
1108 list_add_tail(&virt_to_page(ptr)->lru, freelist);
1117 unsigned long *ptr = domain->pgd_root;
1119 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2;
1125 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
1127 * Note: returned entry might be a non-leaf if there was
1143 * Non-leaf entry is missing, allocate and try to add to the
1147 addr = iommu_alloc_page_node(domain->numa_node, gfp);
1158 } while (level-- > 0);
1166 unsigned long *ptr = domain->pgd_root;
1168 int level = domain->pgd_mode - RISCV_IOMMU_DC_FSC_IOSATP_MODE_SV39 + 2;
1173 ptr += ((iova >> shift) & (PTRS_PER_PTE - 1));
1182 } while (level-- > 0);
1201 else if (domain->amo_enabled)
1209 rc = -ENOMEM;
1223 --pgcount;
1231 * invalidate all levels of page table (i.e. leaf and non-leaf)
1232 * is an invalidate-all-PSCID IOTINVAL.VMA with AV=0.
1234 * capability.NL (non-leaf) IOTINVAL command.
1260 if (iova & (pte_size - 1))
1267 iommu_iotlb_gather_add_page(&domain->domain, gather, iova,
1288 return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1));
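/*
 * Illustrative, standalone sketch (not part of this file): the lookup above
 * returns the leaf's physical address ORed with the IOVA's offset within the
 * mapped page size, so superpage translations keep their low bits. The
 * addresses below are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed 2 MiB leaf mapping: IOVA 0x40000000 -> PA 0x80200000. */
	uint64_t leaf_pa = 0x80200000ULL;
	uint64_t pte_size = 2ULL << 20;
	uint64_t iova = 0x40000000ULL + 0x1234;

	/* Leaf physical base, plus the offset of the IOVA within the mapping. */
	assert((leaf_pa | (iova & (pte_size - 1))) == 0x80201234ULL);
	return 0;
}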
1294 const unsigned long pfn = virt_to_pfn(domain->pgd_root);
1296 WARN_ON(!list_empty(&domain->bonds));
1298 if ((int)domain->pscid > 0)
1299 ida_free(&riscv_iommu_pscids, domain->pscid);
1305 static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_mode)
1309 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39;
1312 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48;
1315 return iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57;
1324 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1328 if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode))
1329 return -ENODEV;
1331 fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) |
1332 FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root));
1333 ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) |
1337 return -ENOMEM;
1339 riscv_iommu_iodir_update(iommu, dev, fsc, ta);
1340 riscv_iommu_bond_unlink(info->domain, dev);
1341 info->domain = domain;
1359 struct riscv_iommu_device *iommu;
1364 iommu = dev_to_iommu(dev);
1365 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV57) {
1368 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV48) {
1371 } else if (iommu->caps & RISCV_IOMMU_CAPABILITIES_SV39) {
1376 return ERR_PTR(-ENODEV);
1381 return ERR_PTR(-ENOMEM);
1383 INIT_LIST_HEAD_RCU(&domain->bonds);
1384 spin_lock_init(&domain->lock);
1385 domain->numa_node = dev_to_node(iommu->dev);
1386 domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
1387 domain->pgd_mode = pgd_mode;
1388 domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
1390 if (!domain->pgd_root) {
1392 return ERR_PTR(-ENOMEM);
1395 domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
1397 if (domain->pscid < 0) {
1398 iommu_free_page(domain->pgd_root);
1400 return ERR_PTR(-ENOMEM);
1404 * Note: RISC-V Privilege spec mandates that virtual addresses
1405 * need to be sign-extended, so if (VA_BITS - 1) is set, all
1411 * limit the available virtual addresses to VA_BITS - 1.
1413 va_mask = DMA_BIT_MASK(va_bits - 1);
1415 domain->domain.geometry.aperture_start = 0;
1416 domain->domain.geometry.aperture_end = va_mask;
1417 domain->domain.geometry.force_aperture = true;
1418 domain->domain.pgsize_bitmap = va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G);
1420 domain->domain.ops = &riscv_iommu_paging_domain_ops;
1422 return &domain->domain;
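/*
 * Illustrative, standalone sketch (not part of this file): per the
 * sign-extension note above, the usable IOVA range is limited to
 * VA_BITS - 1 bits, and masking the page-size bitmap with the aperture drops
 * any page size that no longer fits (e.g. 512 GiB mappings under Sv39). The
 * DEMO_MASK helper is an assumption standing in for DMA_BIT_MASK().
 */
#include <stdio.h>

#define DEMO_MASK(n)	((1ULL << (n)) - 1)
#define SZ_4K		0x1000ULL
#define SZ_2M		0x200000ULL
#define SZ_1G		0x40000000ULL
#define SZ_512G		0x8000000000ULL

int main(void)
{
	int va_bits = 39;	/* e.g. Sv39 */
	unsigned long long va_mask = DEMO_MASK(va_bits - 1);

	/* Aperture ends at 2^38 - 1 for Sv39, so SZ_512G is filtered out. */
	printf("aperture_end=%#llx pgsizes=%#llx\n",
	       va_mask, va_mask & (SZ_4K | SZ_2M | SZ_1G | SZ_512G));
	return 0;
}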
1428 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1432 riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, 0);
1433 riscv_iommu_bond_unlink(info->domain, dev);
1434 info->domain = NULL;
1449 struct riscv_iommu_device *iommu = dev_to_iommu(dev);
1452 riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, RISCV_IOMMU_PC_TA_V);
1453 riscv_iommu_bond_unlink(info->domain, dev);
1454 info->domain = NULL;
1475 return iommu_fwspec_add_ids(dev, args->args, 1);
1481 struct riscv_iommu_device *iommu;
1487 if (!fwspec || !fwspec->iommu_fwnode->dev || !fwspec->num_ids)
1488 return ERR_PTR(-ENODEV);
1490 iommu = dev_get_drvdata(fwspec->iommu_fwnode->dev);
1491 if (!iommu)
1492 return ERR_PTR(-ENODEV);
1495 * IOMMU hardware operating in fail-over BARE mode will provide
1498 if (iommu->ddt_mode <= RISCV_IOMMU_DDTP_IOMMU_MODE_BARE)
1499 return ERR_PTR(-ENODEV);
1503 return ERR_PTR(-ENOMEM);
1505 * Allocate and pre-configure device context entries in
1509 if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD)
1511 for (i = 0; i < fwspec->num_ids; i++) {
1512 dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
1515 return ERR_PTR(-ENODEV);
1517 if (READ_ONCE(dc->tc) & RISCV_IOMMU_DC_TC_V)
1518 dev_warn(dev, "already attached to IOMMU device directory\n");
1519 WRITE_ONCE(dc->tc, tc);
1524 return &iommu->iommu;
1546 static int riscv_iommu_init_check(struct riscv_iommu_device *iommu)
1551 * Make sure the IOMMU is switched off or in pass-through mode during
1555 ddtp = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_DDTP);
1557 return -EBUSY;
1562 return -EBUSY;
1563 riscv_iommu_disable(iommu);
1566 /* Configure accesses to in-memory data structures for CPU-native byte order. */
1568 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE)) {
1569 if (!(iommu->caps & RISCV_IOMMU_CAPABILITIES_END))
1570 return -EINVAL;
1571 riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL,
1572 iommu->fctl ^ RISCV_IOMMU_FCTL_BE);
1573 iommu->fctl = riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL);
1575 !!(iommu->fctl & RISCV_IOMMU_FCTL_BE))
1576 return -EINVAL;
1583 if (!iommu->irqs_count)
1584 return -EINVAL;
1586 iommu->icvec = FIELD_PREP(RISCV_IOMMU_ICVEC_FIV, 1 % iommu->irqs_count) |
1587 FIELD_PREP(RISCV_IOMMU_ICVEC_PIV, 2 % iommu->irqs_count) |
1588 FIELD_PREP(RISCV_IOMMU_ICVEC_PMIV, 3 % iommu->irqs_count);
1589 riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_ICVEC, iommu->icvec);
1590 iommu->icvec = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_ICVEC);
1591 if (max(max(FIELD_GET(RISCV_IOMMU_ICVEC_CIV, iommu->icvec),
1592 FIELD_GET(RISCV_IOMMU_ICVEC_FIV, iommu->icvec)),
1593 max(FIELD_GET(RISCV_IOMMU_ICVEC_PIV, iommu->icvec),
1594 FIELD_GET(RISCV_IOMMU_ICVEC_PMIV, iommu->icvec))) >= iommu->irqs_count)
1595 return -EINVAL;
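/*
 * Illustrative, standalone sketch (not part of this file): the ICVEC setup in
 * riscv_iommu_init_check() above spreads the interrupt sources across the
 * available vectors with a modulo (the command queue stays on vector 0, as
 * CIV is left at its reset value in the fragment), then reads the WARL
 * register back and rejects any assignment beyond irqs_count. With an
 * assumed two wired interrupts:
 */
#include <stdio.h>

int main(void)
{
	unsigned int irqs_count = 2;	/* hypothetical interrupt count */

	/* Mirrors the civ/fiv/piv/pmiv modulo spread above. */
	printf("CQ->%u FQ->%u PQ->%u PM->%u\n",
	       0 % irqs_count, 1 % irqs_count, 2 % irqs_count, 3 % irqs_count);
	return 0;
}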
1600 void riscv_iommu_remove(struct riscv_iommu_device *iommu)
1602 iommu_device_unregister(&iommu->iommu);
1603 iommu_device_sysfs_remove(&iommu->iommu);
1604 riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
1605 riscv_iommu_queue_disable(&iommu->cmdq);
1606 riscv_iommu_queue_disable(&iommu->fltq);
1609 int riscv_iommu_init(struct riscv_iommu_device *iommu)
1613 RISCV_IOMMU_QUEUE_INIT(&iommu->cmdq, CQ);
1614 RISCV_IOMMU_QUEUE_INIT(&iommu->fltq, FQ);
1616 rc = riscv_iommu_init_check(iommu);
1618 return dev_err_probe(iommu->dev, rc, "unexpected device state\n");
1620 rc = riscv_iommu_iodir_alloc(iommu);
1624 rc = riscv_iommu_queue_alloc(iommu, &iommu->cmdq,
1629 rc = riscv_iommu_queue_alloc(iommu, &iommu->fltq,
1634 rc = riscv_iommu_queue_enable(iommu, &iommu->cmdq, riscv_iommu_cmdq_process);
1638 rc = riscv_iommu_queue_enable(iommu, &iommu->fltq, riscv_iommu_fltq_process);
1642 rc = riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_MAX);
1646 rc = iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "riscv-iommu@%s",
1647 dev_name(iommu->dev));
1649 dev_err_probe(iommu->dev, rc, "cannot register sysfs interface\n");
1653 rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
1655 dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
1662 iommu_device_sysfs_remove(&iommu->iommu);
1664 riscv_iommu_iodir_set_mode(iommu, RISCV_IOMMU_DDTP_IOMMU_MODE_OFF);
1666 riscv_iommu_queue_disable(&iommu->fltq);
1667 riscv_iommu_queue_disable(&iommu->cmdq);