Lines Matching +full:iommu +full:-base (drivers/iommu/mtk_iommu_v1.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for MTK architected m4u v1 implementations
5 * Copyright (c) 2015-2016 MediaTek Inc.
8 * Based on driver/iommu/mtk_iommu.c
14 #include <linux/dma-mapping.h>
18 #include <linux/iommu.h>
30 #include <asm/dma-iommu.h>
31 #include <dt-bindings/memory/mtk-memory-port.h>
32 #include <dt-bindings/memory/mt2701-larb-port.h>
77 #define MT2701_M4U_TF_LARB(TF) (6 - (((TF) >> 13) & 0x7))
79 /* MTK generation one iommu HW only support 4K size mapping */
98 void __iomem *base; member
102 phys_addr_t protect_base; /* protect memory base */
105 struct iommu_device iommu; member
124 return component_bind_all(dev, &data->larb_imu); in mtk_iommu_v1_bind()
131 component_unbind_all(dev, &data->larb_imu); in mtk_iommu_v1_unbind()
148 for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--) in mt2701_m4u_to_larb()
159 return id - mt2701_m4u_in_larb[larb]; in mt2701_m4u_to_port()
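
The two helpers above turn the flat port id carried in the "iommus" property into a (larb, port) pair: mt2701_m4u_to_larb() scans a per-larb first-port offset table from the end, and mt2701_m4u_to_port() subtracts that larb's offset. A minimal standalone sketch of the same decode, with made-up offsets standing in for the real values from mt2701-larb-port.h:

#include <stdio.h>

/* Hypothetical first-port offsets per larb; the driver takes the real
 * values from <dt-bindings/memory/mt2701-larb-port.h>. */
static const int in_larb[] = { 0, 11, 21, 44 };
#define LARB_NR ((int)(sizeof(in_larb) / sizeof(in_larb[0])))

static int to_larb(int id)
{
	int i;

	/* Highest larb whose first port id is not greater than id. */
	for (i = LARB_NR - 1; i >= 0; i--)
		if (id >= in_larb[i])
			return i;
	return 0;
}

static int to_port(int id)
{
	return id - in_larb[to_larb(id)];
}

int main(void)
{
	int id = 25;	/* example flat id from an "iommus" phandle argument */

	printf("id %d -> larb %d port %d\n", id, to_larb(id), to_port(id));
	return 0;
}

For id 25 and these sample offsets the program prints larb 2, port 4.
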
165 data->base + REG_MMU_INV_SEL); in mtk_iommu_v1_tlb_flush_all()
166 writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); in mtk_iommu_v1_tlb_flush_all()
177 data->base + REG_MMU_INV_SEL); in mtk_iommu_v1_tlb_flush_range()
179 data->base + REG_MMU_INVLD_START_A); in mtk_iommu_v1_tlb_flush_range()
180 writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK, in mtk_iommu_v1_tlb_flush_range()
181 data->base + REG_MMU_INVLD_END_A); in mtk_iommu_v1_tlb_flush_range()
182 writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); in mtk_iommu_v1_tlb_flush_range()
184 ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, in mtk_iommu_v1_tlb_flush_range()
187 dev_warn(data->dev, in mtk_iommu_v1_tlb_flush_range()
192 writel_relaxed(0, data->base + REG_MMU_CPE_DONE); in mtk_iommu_v1_tlb_flush_range()
198 struct mtk_iommu_v1_domain *dom = data->m4u_dom; in mtk_iommu_v1_isr()
203 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST); in mtk_iommu_v1_isr()
204 fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); in mtk_iommu_v1_isr()
207 fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); in mtk_iommu_v1_isr()
208 regval = readl_relaxed(data->base + REG_MMU_INT_ID); in mtk_iommu_v1_isr()
 213		 * MTK v1 iommu HW could not determine whether the fault is read or write fault, report as read fault. in mtk_iommu_v1_isr()
216 if (report_iommu_fault(&dom->domain, data->dev, fault_iova, in mtk_iommu_v1_isr()
218 dev_err_ratelimited(data->dev, in mtk_iommu_v1_isr()
224 regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL); in mtk_iommu_v1_isr()
226 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL); in mtk_iommu_v1_isr()
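
report_iommu_fault() above forwards the fault to whatever handler a client driver attached to the domain; if there is none, or the handler returns nonzero, the dev_err_ratelimited() path runs instead. A hedged sketch of such a client-side handler, using the iommu_set_fault_handler() hook from <linux/iommu.h> (the handler name and its policy are illustrative):

#include <linux/device.h>
#include <linux/iommu.h>

/* Illustrative fault handler: log the faulting iova and claim the fault
 * so the m4u ISR does not also print its ratelimited error. */
static int demo_m4u_fault(struct iommu_domain *domain, struct device *dev,
			  unsigned long iova, int flags, void *token)
{
	dev_warn(dev, "m4u fault at iova %#lx (%s)\n", iova,
		 (flags & IOMMU_FAULT_WRITE) ? "write" : "read");
	return 0;	/* 0 = handled; nonzero falls back to the default report */
}

/* Somewhere after the client driver obtains its domain: */
/* iommu_set_fault_handler(domain, demo_m4u_fault, NULL); */
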
241 for (i = 0; i < fwspec->num_ids; ++i) { in mtk_iommu_v1_config()
242 larbid = mt2701_m4u_to_larb(fwspec->ids[i]); in mtk_iommu_v1_config()
243 portid = mt2701_m4u_to_port(fwspec->ids[i]); in mtk_iommu_v1_config()
244 larb_mmu = &data->larb_imu[larbid]; in mtk_iommu_v1_config()
246 dev_dbg(dev, "%s iommu port: %d\n", in mtk_iommu_v1_config()
250 larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); in mtk_iommu_v1_config()
252 larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); in mtk_iommu_v1_config()
258 struct mtk_iommu_v1_domain *dom = data->m4u_dom; in mtk_iommu_v1_domain_finalise()
260 spin_lock_init(&dom->pgtlock); in mtk_iommu_v1_domain_finalise()
262 dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE, in mtk_iommu_v1_domain_finalise()
263 &dom->pgt_pa, GFP_KERNEL); in mtk_iommu_v1_domain_finalise()
264 if (!dom->pgt_va) in mtk_iommu_v1_domain_finalise()
265 return -ENOMEM; in mtk_iommu_v1_domain_finalise()
267 writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR); in mtk_iommu_v1_domain_finalise()
269 dom->data = data; in mtk_iommu_v1_domain_finalise()
282 return &dom->domain; in mtk_iommu_v1_domain_alloc_paging()
288 struct mtk_iommu_v1_data *data = dom->data; in mtk_iommu_v1_domain_free()
290 dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE, in mtk_iommu_v1_domain_free()
291 dom->pgt_va, dom->pgt_pa); in mtk_iommu_v1_domain_free()
303 mtk_mapping = data->mapping; in mtk_iommu_v1_attach_device()
304 if (mtk_mapping->domain != domain) in mtk_iommu_v1_attach_device()
307 if (!data->m4u_dom) { in mtk_iommu_v1_attach_device()
308 data->m4u_dom = dom; in mtk_iommu_v1_attach_device()
311 data->m4u_dom = NULL; in mtk_iommu_v1_attach_device()
345 u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); in mtk_iommu_v1_map()
348 spin_lock_irqsave(&dom->pgtlock, flags); in mtk_iommu_v1_map()
356 spin_unlock_irqrestore(&dom->pgtlock, flags); in mtk_iommu_v1_map()
359 mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped); in mtk_iommu_v1_map()
361 return i == pgcount ? 0 : -EEXIST; in mtk_iommu_v1_map()
370 u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); in mtk_iommu_v1_unmap()
373 spin_lock_irqsave(&dom->pgtlock, flags); in mtk_iommu_v1_unmap()
375 spin_unlock_irqrestore(&dom->pgtlock, flags); in mtk_iommu_v1_unmap()
377 mtk_iommu_v1_tlb_flush_range(dom->data, iova, size); in mtk_iommu_v1_unmap()
388 spin_lock_irqsave(&dom->pgtlock, flags); in mtk_iommu_v1_iova_to_phys()
389 pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT)); in mtk_iommu_v1_iova_to_phys()
390 pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1)); in mtk_iommu_v1_iova_to_phys()
391 spin_unlock_irqrestore(&dom->pgtlock, flags); in mtk_iommu_v1_iova_to_phys()
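
map, unmap and iova_to_phys above all index a single flat table of 32-bit descriptors: with 4 KiB pages the index is iova >> MT2701_IOMMU_PAGE_SHIFT, and masking off the low bits of the descriptor recovers the physical page address. A standalone sketch of that lookup, with a hypothetical descriptor value (the real flag bits are not shown in these matches):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)	/* 4 KiB, the only size supported */

/* Translate an iova through a single-level table of u32 descriptors,
 * mirroring what mtk_iommu_v1_iova_to_phys() does under its spinlock. */
static uint32_t iova_to_phys(const uint32_t *pgt, uint32_t iova)
{
	uint32_t desc = pgt[iova >> PAGE_SHIFT];	/* one entry per 4 KiB page */
	uint32_t pa   = desc & ~(PAGE_SIZE - 1);	/* drop the descriptor flag bits */

	return pa | (iova & (PAGE_SIZE - 1));		/* add the in-page offset back */
}

int main(void)
{
	uint32_t pgt[16] = { 0 };

	pgt[2] = 0x40005000u | 0x3;	/* hypothetical flag bits in the low bits */
	printf("iova 0x2084 -> pa 0x%x\n", iova_to_phys(pgt, 0x2084));
	return 0;
}

The program prints pa 0x40005084 for iova 0x2084, i.e. page frame 2 plus the in-page offset.
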
 399	 * MTK generation one iommu HW only support one iommu domain, and all the client sharing the same iova address space.
410 if (args->args_count != 1) { in mtk_iommu_v1_create_mapping()
411 dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n", in mtk_iommu_v1_create_mapping()
412 args->args_count); in mtk_iommu_v1_create_mapping()
413 return -EINVAL; in mtk_iommu_v1_create_mapping()
416 ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np)); in mtk_iommu_v1_create_mapping()
422 m4updev = of_find_device_by_node(args->np); in mtk_iommu_v1_create_mapping()
424 return -EINVAL; in mtk_iommu_v1_create_mapping()
429 ret = iommu_fwspec_add_ids(dev, args->args, 1); in mtk_iommu_v1_create_mapping()
434 mtk_mapping = data->mapping; in mtk_iommu_v1_create_mapping()
436 /* MTK iommu support 4GB iova address space. */ in mtk_iommu_v1_create_mapping()
441 data->mapping = mtk_mapping; in mtk_iommu_v1_create_mapping()
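
The sizes above fit together: covering the 4 GiB iova space with 4 KiB pages takes 1 Mi descriptors, and at one u32 per entry (as in the map path) that is a single 4 MiB table, which is what the M2701_IOMMU_PGT_SIZE allocation in mtk_iommu_v1_domain_finalise() has to provide. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned long long iova_space = 1ULL << 32;	/* 4 GiB of iova */
	unsigned long long page_size  = 1ULL << 12;	/* 4 KiB pages */
	unsigned long long desc_size  = sizeof(unsigned int);	/* one u32 descriptor per page */

	printf("descriptors: %llu, table size: %llu MiB\n",
	       iova_space / page_size,
	       iova_space / page_size * desc_size >> 20);
	return 0;
}

This prints 1048576 descriptors and a 4 MiB table.
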
465 while (!of_parse_phandle_with_args(dev->of_node, "iommus", in mtk_iommu_v1_probe_device()
466 "#iommu-cells", in mtk_iommu_v1_probe_device()
474 /* dev->iommu_fwspec might have changed */ in mtk_iommu_v1_probe_device()
481 /* Link the consumer device with the smi-larb device(supplier) */ in mtk_iommu_v1_probe_device()
482 larbid = mt2701_m4u_to_larb(fwspec->ids[0]); in mtk_iommu_v1_probe_device()
484 return ERR_PTR(-EINVAL); in mtk_iommu_v1_probe_device()
486 for (idx = 1; idx < fwspec->num_ids; idx++) { in mtk_iommu_v1_probe_device()
487 larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]); in mtk_iommu_v1_probe_device()
489 dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n", in mtk_iommu_v1_probe_device()
491 return ERR_PTR(-EINVAL); in mtk_iommu_v1_probe_device()
495 larbdev = data->larb_imu[larbid].dev; in mtk_iommu_v1_probe_device()
497 return ERR_PTR(-EINVAL); in mtk_iommu_v1_probe_device()
504 return &data->iommu; in mtk_iommu_v1_probe_device()
514 mtk_mapping = data->mapping; in mtk_iommu_v1_probe_finalize()
518 dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n"); in mtk_iommu_v1_probe_finalize()
529 larbid = mt2701_m4u_to_larb(fwspec->ids[0]); in mtk_iommu_v1_release_device()
530 larbdev = data->larb_imu[larbid].dev; in mtk_iommu_v1_release_device()
539 ret = clk_prepare_enable(data->bclk); in mtk_iommu_v1_hw_init()
541 dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret); in mtk_iommu_v1_hw_init()
546 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); in mtk_iommu_v1_hw_init()
556 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL); in mtk_iommu_v1_hw_init()
559 writel_relaxed(data->protect_base, in mtk_iommu_v1_hw_init()
560 data->base + REG_MMU_IVRP_PADDR); in mtk_iommu_v1_hw_init()
562 writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM); in mtk_iommu_v1_hw_init()
564 if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0, in mtk_iommu_v1_hw_init()
565 dev_name(data->dev), (void *)data)) { in mtk_iommu_v1_hw_init()
566 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); in mtk_iommu_v1_hw_init()
567 clk_disable_unprepare(data->bclk); in mtk_iommu_v1_hw_init()
568 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq); in mtk_iommu_v1_hw_init()
569 return -ENODEV; in mtk_iommu_v1_hw_init()
594 { .compatible = "mediatek,mt2701-m4u", },
606 struct device *dev = &pdev->dev; in mtk_iommu_v1_probe()
615 return -ENOMEM; in mtk_iommu_v1_probe()
617 data->dev = dev; in mtk_iommu_v1_probe()
623 return -ENOMEM; in mtk_iommu_v1_probe()
624 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN); in mtk_iommu_v1_probe()
627 data->base = devm_ioremap_resource(dev, res); in mtk_iommu_v1_probe()
628 if (IS_ERR(data->base)) in mtk_iommu_v1_probe()
629 return PTR_ERR(data->base); in mtk_iommu_v1_probe()
631 data->irq = platform_get_irq(pdev, 0); in mtk_iommu_v1_probe()
632 if (data->irq < 0) in mtk_iommu_v1_probe()
633 return data->irq; in mtk_iommu_v1_probe()
635 data->bclk = devm_clk_get(dev, "bclk"); in mtk_iommu_v1_probe()
636 if (IS_ERR(data->bclk)) in mtk_iommu_v1_probe()
637 return PTR_ERR(data->bclk); in mtk_iommu_v1_probe()
639 larb_nr = of_count_phandle_with_args(dev->of_node, in mtk_iommu_v1_probe()
648 larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); in mtk_iommu_v1_probe()
650 return -EINVAL; in mtk_iommu_v1_probe()
660 return -ENODEV; in mtk_iommu_v1_probe()
662 if (!plarbdev->dev.driver) { in mtk_iommu_v1_probe()
664 return -EPROBE_DEFER; in mtk_iommu_v1_probe()
666 data->larb_imu[i].dev = &plarbdev->dev; in mtk_iommu_v1_probe()
678 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in mtk_iommu_v1_probe()
679 dev_name(&pdev->dev)); in mtk_iommu_v1_probe()
683 ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev); in mtk_iommu_v1_probe()
693 iommu_device_unregister(&data->iommu); in mtk_iommu_v1_probe()
695 iommu_device_sysfs_remove(&data->iommu); in mtk_iommu_v1_probe()
697 clk_disable_unprepare(data->bclk); in mtk_iommu_v1_probe()
705 iommu_device_sysfs_remove(&data->iommu); in mtk_iommu_v1_remove()
706 iommu_device_unregister(&data->iommu); in mtk_iommu_v1_remove()
708 clk_disable_unprepare(data->bclk); in mtk_iommu_v1_remove()
709 devm_free_irq(&pdev->dev, data->irq, data); in mtk_iommu_v1_remove()
710 component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops); in mtk_iommu_v1_remove()
716 struct mtk_iommu_v1_suspend_reg *reg = &data->reg; in mtk_iommu_v1_suspend()
717 void __iomem *base = data->base; in mtk_iommu_v1_suspend() local
719 reg->standard_axi_mode = readl_relaxed(base + in mtk_iommu_v1_suspend()
721 reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM); in mtk_iommu_v1_suspend()
722 reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); in mtk_iommu_v1_suspend()
723 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL); in mtk_iommu_v1_suspend()
730 struct mtk_iommu_v1_suspend_reg *reg = &data->reg; in mtk_iommu_v1_resume()
731 void __iomem *base = data->base; in mtk_iommu_v1_resume() local
733 writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR); in mtk_iommu_v1_resume()
734 writel_relaxed(reg->standard_axi_mode, in mtk_iommu_v1_resume()
735 base + REG_MMU_STANDARD_AXI_MODE); in mtk_iommu_v1_resume()
736 writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM); in mtk_iommu_v1_resume()
737 writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); in mtk_iommu_v1_resume()
738 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL); in mtk_iommu_v1_resume()
739 writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR); in mtk_iommu_v1_resume()
751 .name = "mtk-iommu-v1",
758 MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");