/* Excerpted lines (matching "iommu" / "ctx") from the OMAP IOMMU driver */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
/*
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 */

/*
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 */
        while (arch_data->iommu_dev) {
                obj = arch_data->iommu_dev;
                p = obj->ctx;
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,

/*
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 */
        while (arch_data->iommu_dev) {
                obj = arch_data->iommu_dev;
                p = obj->ctx;
                dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
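/*
 * Example (not part of the driver): the two exported context helpers above
 * are intended to be called by the MMU's client (for example a remoteproc
 * driver) around an off-mode power transition.  A minimal sketch, assuming
 * "client_dev" is the client device attached to the IOMMU domain; only
 * omap_iommu_save_ctx()/omap_iommu_restore_ctx() are real exported entry
 * points, the surrounding functions are illustrative.
 */
#include <linux/omap-iommu.h>

static void client_mmu_power_off(struct device *client_dev)
{
        /* walks every MMU attached behind client_dev and saves its registers */
        omap_iommu_save_ctx(client_dev);
        /* ... power down the subsystem ... */
}

static void client_mmu_power_on(struct device *client_dev)
{
        /* ... power the subsystem back up ... */
        /* reprograms the previously saved register context */
        omap_iommu_restore_ctx(client_dev);
}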
/* dra7_cfg_dspsys_mmu() */
        if (!obj->syscfg)
        mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
        regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);

/* omap2_iommu_enable() */
        if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
                return -EINVAL;
        pa = virt_to_phys(obj->iopgd);
                return -EINVAL;
        dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
        if (obj->has_bus_err_back)

/* omap2_iommu_disable() */
        dev_dbg(obj->dev, "%s is shutting down\n", obj->name);

/* iommu_enable() */
        ret = pm_runtime_get_sync(obj->dev);
                pm_runtime_put_noidle(obj->dev);

/* iommu_disable() */
        pm_runtime_put_sync(obj->dev);

/* iotlb_cr_to_virt() */
        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
        u32 mask = get_cam_va_mask(cr->cam & page_size);
        return cr->cam & mask;

/* get_iopte_attr() */
        attr = e->mixed << 5;
        attr |= e->endian;
        attr |= e->elsz >> 3;
        attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
                   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
/* iotlb_lock_get() */
        l->base = MMU_LOCK_BASE(val);
        l->vict = MMU_LOCK_VICT(val);

/* iotlb_lock_set() */
        val = (l->base << MMU_LOCK_BASE_SHIFT);
        val |= (l->vict << MMU_LOCK_VICT_SHIFT);

/* iotlb_read_cr() */
        cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
        cr->ram = iommu_read_reg(obj, MMU_READ_RAM);

/* iotlb_load_cr() */
        iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
        iommu_write_reg(obj, cr->ram, MMU_RAM);

/* only used in iotlb iteration for-loop */

/* iotlb_alloc_cr() */
        if (e->da & ~(get_cam_va_mask(e->pgsz))) {
                dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
                        e->da);
                return ERR_PTR(-EINVAL);
                return ERR_PTR(-ENOMEM);
        cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
        cr->ram = e->pa | e->endian | e->elsz | e->mixed;
/*
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 */
        if (!obj || !obj->nr_tlb_entries || !e)
                return -EINVAL;
        pm_runtime_get_sync(obj->dev);
        if (l.base == obj->nr_tlb_entries) {
                dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
                err = -EBUSY;
        if (!e->prsvd) {
                for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
                if (i == obj->nr_tlb_entries) {
                        dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
                        err = -EBUSY;
        pm_runtime_put_sync(obj->dev);
        if (e->prsvd)
        if (++l.vict == obj->nr_tlb_entries)
        pm_runtime_put_sync(obj->dev);
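/*
 * Illustrative sketch (not part of the driver): how a preserved ("locked")
 * TLB entry could be built and loaded with the driver's own static helpers
 * above.  The da/pa values, the wrapper name, and the use of MMU_CAM_P as
 * the preserve bit are assumptions of this example.
 */
static int pin_tlb_entry_example(struct omap_iommu *obj, u32 da, u32 pa)
{
        struct iotlb_entry e;

        /* 4 KiB little-endian, 8-bit element entry mapping da -> pa */
        iotlb_init_entry(&e, da, pa, MMU_CAM_PGSZ_4K);
        e.prsvd = MMU_CAM_P;    /* assumed preserve bit: keep it out of victim rotation */

        /* loads the entry and advances the lock base past preserved entries */
        return load_iotlb_entry(obj, &e);
}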
/*
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 */
        pm_runtime_get_sync(obj->dev);
        for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
                        dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
        pm_runtime_put_sync(obj->dev);
        if (i == obj->nr_tlb_entries)
                dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);

/*
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 */
        pm_runtime_get_sync(obj->dev);
        pm_runtime_put_sync(obj->dev);
/* iopte_free() */
        /* Note: freed iopte's must be clean ready for re-use */
        dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,

/* iopte_alloc() */
        spin_unlock(&obj->page_table_lock);
        spin_lock(&obj->page_table_lock);
                return ERR_PTR(-ENOMEM);
        *pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
        if (dma_mapping_error(obj->dev, *pt_dma)) {
                dev_err(obj->dev, "DMA map error for L2 table\n");
                return ERR_PTR(-ENOMEM);
                dev_err(obj->dev, "DMA translation error for L2 table\n");
                dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
                return ERR_PTR(-ENOMEM);
        flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
        dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
        dev_vdbg(obj->dev,

/* iopgd_alloc_section() */
        dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
        return -EINVAL;
        flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);

/* iopgd_alloc_super() */
        dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
        return -EINVAL;
        flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);

/* iopte_alloc_page() */
        flush_iopte_range(obj->dev, pt_dma, offset, 1);
        dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",

/* iopte_alloc_large() */
        dev_err(obj->dev, "%s: %08x:%08x should aligned on %08lx\n",
        return -EINVAL;
        flush_iopte_range(obj->dev, pt_dma, offset, 16);
/* iopgtable_store_entry_core() */
                return -EINVAL;
        switch (e->pgsz) {
                return -EINVAL;
        spin_lock(&obj->page_table_lock);
        err = fn(obj, e->da, e->pa, prot);
        spin_unlock(&obj->page_table_lock);

/*
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 */
        flush_iotlb_page(obj, e->da);
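/*
 * Illustrative sketch of the store path used by the driver's map operation:
 * initialize a struct iotlb_entry for the requested page size, then let
 * omap_iopgtable_store_entry() flush the stale TLB entry for 'da' and
 * install the page table entry.  The wrapper and variable names are this
 * example's own; the helper signatures follow their use in this excerpt.
 */
static int map_one_page_example(struct omap_iommu *oiommu, u32 da,
                                phys_addr_t pa, int pgsz)
{
        struct iotlb_entry e;
        int ret;

        /* fills da/pa/page size plus default endian and element-size attributes */
        iotlb_init_entry(&e, da, (u32)pa, pgsz);

        /* flushes the old TLB entry for 'da' and writes the PGD/PTE entry */
        ret = omap_iopgtable_store_entry(oiommu, &e);
        if (ret)
                dev_err(oiommu->dev, "store entry failed: %d\n", ret);

        return ret;
}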
/*
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 */

/* iopgtable_clear_entry_core() */
        flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
        flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);

/*
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 */
        spin_lock(&obj->page_table_lock);
        spin_unlock(&obj->page_table_lock);

/* iopgtable_clear_entry_all() */
        spin_lock(&obj->page_table_lock);
        flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
        spin_unlock(&obj->page_table_lock);
/*
 * Device IOMMU generic operations
 */

/* iommu_fault_handler() */
        struct iommu_domain *domain = obj->domain;
        if (!omap_domain->dev)
        if (!report_iommu_fault(domain, obj->dev, da, 0))
        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
                obj->name, errs, da, iopgd, *iopgd);
        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
                obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
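/*
 * Example (not part of the driver): the fault ISR above funnels MMU faults
 * into report_iommu_fault(), which calls whatever handler the domain owner
 * registered through the generic IOMMU API.  Judging from the check above,
 * a 0 return from the handler lets the driver treat the fault as handled.
 * The callback and registration helper below are illustrative.
 */
#include <linux/iommu.h>

static int client_mmu_fault(struct iommu_domain *domain, struct device *dev,
                            unsigned long iova, int flags, void *token)
{
        dev_err(dev, "MMU fault at iova 0x%lx (flags 0x%x)\n", iova, flags);
        /* e.g. schedule recovery of the remote processor behind this MMU */
        return -ENOSYS;         /* not handled here: let the driver print its diagnostics */
}

static void client_install_fault_handler(struct iommu_domain *domain,
                                         void *client_token)
{
        iommu_set_fault_handler(domain, client_mmu_fault, client_token);
}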
/*
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 */
        spin_lock(&obj->iommu_lock);
        obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
        if (dma_mapping_error(obj->dev, obj->pd_dma)) {
                dev_err(obj->dev, "DMA map error for L1 table\n");
                err = -ENOMEM;
        obj->iopgd = iopgd;
        spin_unlock(&obj->iommu_lock);
        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
        spin_unlock(&obj->iommu_lock);

/*
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 */
        spin_lock(&obj->iommu_lock);
        dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
        obj->pd_dma = 0;
        obj->iopgd = NULL;
        spin_unlock(&obj->iommu_lock);
        dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

/* omap_iommu_save_tlb_entries() */
        obj->num_cr_ctx = lock.base;
        if (!obj->num_cr_ctx)
        tmp = obj->cr_ctx;
        for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)

/* omap_iommu_restore_tlb_entries() */
        if (!obj->num_cr_ctx)
        tmp = obj->cr_ctx;
        for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
        l.base = obj->num_cr_ctx;
/*
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 */
        struct omap_iommu_device *iommu;
        if (!omap_domain->dev)
        iommu = omap_domain->iommus;
        iommu += (omap_domain->num_iommus - 1);
        for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
                oiommu = iommu->iommu_dev;
                pm_runtime_put_sync(oiommu->dev);

/*
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 */
        struct omap_iommu_device *iommu;
        if (!omap_domain->dev)
        iommu = omap_domain->iommus;
        for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
                oiommu = iommu->iommu_dev;
                pm_runtime_get_sync(oiommu->dev);
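/*
 * Example (not part of the driver): these two exported helpers let the
 * domain owner (typically the OMAP remoteproc driver) runtime-suspend and
 * resume every MMU attached to its domain without detaching; the driver's
 * runtime-PM hooks take care of register and TLB save/restore.  A minimal
 * client-side sketch; the function names are illustrative.
 */
#include <linux/iommu.h>
#include <linux/omap-iommu.h>

static int client_mmu_suspend(struct iommu_domain *domain)
{
        /* drops the runtime-PM reference on every attached MMU, back to front */
        return omap_iommu_domain_deactivate(domain);
}

static int client_mmu_resume(struct iommu_domain *domain)
{
        /* powers the MMUs back up in attachment order */
        return omap_iommu_domain_activate(domain);
}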
/*
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * IOMMU device, either during final detachment from a client
 * includes programming all the appropriate IOMMU registers, and
 */
        if (obj->domain && obj->iopgd)
        if (pdata && pdata->device_idle)
                pdata->device_idle(pdev);
        if (pdata && pdata->assert_reset)
                pdata->assert_reset(pdev, pdata->reset_name);
        if (pdata && pdata->set_pwrdm_constraint) {
                ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
                        dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",

/*
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * IOMMU device, either during initial attachment to a client
 * includes programming all the appropriate IOMMU registers, and
 */
        if (pdata && pdata->set_pwrdm_constraint) {
                ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
                        dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
        if (pdata && pdata->deassert_reset) {
                ret = pdata->deassert_reset(pdev, pdata->reset_name);
        if (pdata && pdata->device_enable)
                pdata->device_enable(pdev);
        if (obj->domain)
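/*
 * Sketch (not from the driver): the runtime-PM callbacks above drive
 * optional board hooks through platform data from
 * <linux/platform_data/iommu-omap.h>, included at the top of the file.
 * The struct name iommu_platform_data comes from that header; the hook
 * prototypes and names below are inferred only from how the callbacks
 * are invoked in this excerpt and are otherwise assumptions.
 */
#include <linux/platform_device.h>
#include <linux/platform_data/iommu-omap.h>

static int my_mmu_enable(struct platform_device *pdev)  { /* clocks on */  return 0; }
static int my_mmu_idle(struct platform_device *pdev)    { /* clocks off */ return 0; }
static int my_mmu_assert_reset(struct platform_device *pdev, const char *name)   { return 0; }
static int my_mmu_deassert_reset(struct platform_device *pdev, const char *name) { return 0; }

static struct iommu_platform_data my_mmu_pdata = {
        .reset_name     = "mmu",                /* assumed reset line name */
        .device_enable  = my_mmu_enable,        /* called from runtime resume */
        .device_idle    = my_mmu_idle,          /* called from runtime suspend */
        .assert_reset   = my_mmu_assert_reset,
        .deassert_reset = my_mmu_deassert_reset,
        /* .set_pwrdm_constraint left unset: optional, as the NULL checks above show */
};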
/*
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 */

/* omap_iommu_can_register() */
        struct device_node *np = pdev->dev.of_node;
        if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
        /* restrict IOMMU core registration only for processor-port MDMA MMUs */
        if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
            (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))

/* omap_iommu_dra7_get_dsp_system_cfg() */
        struct device_node *np = pdev->dev.of_node;
        if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
        if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
                dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
                return -EINVAL;
        obj->syscfg =
                syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
        if (IS_ERR(obj->syscfg)) {
                /* can fail with -EPROBE_DEFER */
                ret = PTR_ERR(obj->syscfg);
        if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
                                       &obj->id)) {
                dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
                return -EINVAL;
        if (obj->id != 0 && obj->id != 1) {
                dev_err(&pdev->dev, "invalid IOMMU instance id\n");
                return -EINVAL;
/*
 * OMAP Device MMU(IOMMU) detection
 */

/* omap_iommu_probe() */
        int err = -ENODEV;
        struct device_node *of = pdev->dev.of_node;
                pr_err("%s: only DT-based devices are supported\n", __func__);
                return -ENODEV;
        obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
                return -ENOMEM;
        /* self-manage the ordering dependencies between omap_device_enable/idle */
        if (pdev->dev.pm_domain) {
                dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
                pdev->dev.pm_domain = NULL;
        obj->name = dev_name(&pdev->dev);
        obj->nr_tlb_entries = 32;
        err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
        if (err && err != -EINVAL)
        if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
                return -EINVAL;
        if (of_property_read_bool(of, "ti,iommu-bus-err-back"))
                obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
        obj->dev = &pdev->dev;
        obj->ctx = (void *)obj + sizeof(*obj);
        obj->cr_ctx = devm_kzalloc(&pdev->dev,
                                   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
        if (!obj->cr_ctx)
                return -ENOMEM;
        spin_lock_init(&obj->iommu_lock);
        spin_lock_init(&obj->page_table_lock);
        obj->regbase = devm_ioremap_resource(obj->dev, res);
        if (IS_ERR(obj->regbase))
                return PTR_ERR(obj->regbase);
                return -ENODEV;
        err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
                               dev_name(obj->dev), obj);
        err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
                                     obj->name);
        obj->has_iommu_driver = true;
        err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
        pm_runtime_enable(obj->dev);
        dev_info(&pdev->dev, "%s registered\n", obj->name);
        if (obj->has_iommu_driver)
                iommu_device_sysfs_remove(&obj->iommu);

/* omap_iommu_remove() */
        if (obj->has_iommu_driver)
                iommu_device_sysfs_remove(&obj->iommu);
        iommu_device_unregister(&obj->iommu);
        pm_runtime_disable(obj->dev);
        dev_info(&pdev->dev, "%s removed\n", obj->name);

/* of_device_id match table */
        { .compatible = "ti,omap2-iommu" },
        { .compatible = "ti,omap4-iommu" },
        { .compatible = "ti,dra7-iommu" },
        { .compatible = "ti,dra7-dsp-iommu" },

/* platform driver */
                .name   = "omap-iommu",
/* iotlb_init_entry() */
        e->da = da;
        e->pa = pa;
        e->valid = MMU_CAM_V;
        e->pgsz = pgsz;
        e->endian = MMU_RAM_ENDIAN_LITTLE;
        e->elsz = MMU_RAM_ELSZ_8;
        e->mixed = 0;
        return iopgsz_to_bytes(e->pgsz);

/* omap_iommu_map() */
        struct device *dev = omap_domain->dev;
        struct omap_iommu_device *iommu;
        u32 ret = -EINVAL;
                return -EINVAL;
        iommu = omap_domain->iommus;
        for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
                oiommu = iommu->iommu_dev;
        while (i--) {
                iommu--;
                oiommu = iommu->iommu_dev;

/* omap_iommu_unmap() */
        struct device *dev = omap_domain->dev;
        struct omap_iommu_device *iommu;
        iommu = omap_domain->iommus;
        for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
                oiommu = iommu->iommu_dev;
        /* simplify return - we are only checking if any of the iommus */
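/*
 * Example (not part of the driver): at the generic IOMMU API level these
 * ops are reached through iommu_map()/iommu_unmap() on an attached domain.
 * A hedged client-side sketch; the gfp argument to iommu_map() exists on
 * recent kernels (older trees take one argument less), and the iova/pa
 * values are placeholders.
 */
#include <linux/iommu.h>
#include <linux/sizes.h>

static int client_map_example(struct iommu_domain *domain, phys_addr_t pa)
{
        const unsigned long iova = 0x80000000;
        int ret;

        ret = iommu_map(domain, iova, pa, SZ_4K, IOMMU_READ | IOMMU_WRITE,
                        GFP_KERNEL);    /* gfp argument present on recent kernels */
        if (ret)
                return ret;

        /* omap_iommu_iova_to_phys() answers this by walking the first MMU's pagetable */
        if (iommu_iova_to_phys(domain, iova) != pa) {
                iommu_unmap(domain, iova, SZ_4K);
                return -EFAULT;
        }

        return 0;
}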
/* omap_iommu_count() */
        while (arch_data->iommu_dev) {

/* omap_iommu_attach_init() */
        struct omap_iommu_device *iommu;
        odomain->num_iommus = omap_iommu_count(dev);
        if (!odomain->num_iommus)
                return -ENODEV;
        odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
        if (!odomain->iommus)
                return -ENOMEM;
        iommu = odomain->iommus;
        for (i = 0; i < odomain->num_iommus; i++, iommu++) {
                iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
                if (!iommu->pgtable)
                        return -ENOMEM;
                if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
                        return -EINVAL;

/* omap_iommu_detach_fini() */
        struct omap_iommu_device *iommu = odomain->iommus;
        for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
                kfree(iommu->pgtable);
        kfree(odomain->iommus);
        odomain->num_iommus = 0;
        odomain->iommus = NULL;
/* omap_iommu_attach_dev() */
        struct omap_iommu_device *iommu;
        if (!arch_data || !arch_data->iommu_dev) {
                dev_err(dev, "device doesn't have an associated iommu\n");
                return -ENODEV;
        spin_lock(&omap_domain->lock);
        if (omap_domain->dev) {
                dev_err(dev, "iommu domain is already attached\n");
                ret = -EINVAL;
                dev_err(dev, "failed to allocate required iommu data %d\n",
        iommu = omap_domain->iommus;
        for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
                /* configure and enable the omap iommu */
                oiommu = arch_data->iommu_dev;
                ret = omap_iommu_attach(oiommu, iommu->pgtable);
                        dev_err(dev, "can't get omap iommu: %d\n", ret);
                oiommu->domain = domain;
                iommu->iommu_dev = oiommu;
        omap_domain->dev = dev;
        while (i--) {
                iommu--;
                arch_data--;
                oiommu = iommu->iommu_dev;
                iommu->iommu_dev = NULL;
                oiommu->domain = NULL;
        spin_unlock(&omap_domain->lock);

/* _omap_iommu_detach_dev() */
        struct omap_iommu_device *iommu = omap_domain->iommus;
        if (!omap_domain->dev) {
        if (omap_domain->dev != dev) {
        /* cleanup in the reverse order of attachment - this addresses */
        iommu += (omap_domain->num_iommus - 1);
        arch_data += (omap_domain->num_iommus - 1);
        for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
                oiommu = iommu->iommu_dev;
                iommu->iommu_dev = NULL;
                oiommu->domain = NULL;
        omap_domain->dev = NULL;
/* omap_iommu_identity_attach() */
        spin_lock(&omap_domain->lock);
        spin_unlock(&omap_domain->lock);

/* omap_iommu_domain_alloc_paging() */
        spin_lock_init(&omap_domain->lock);
        omap_domain->domain.geometry.aperture_start = 0;
        omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
        omap_domain->domain.geometry.force_aperture = true;
        return &omap_domain->domain;
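/*
 * Example (not part of the driver): the paging domain above advertises a
 * full 32-bit aperture with force_aperture set, so the core rejects
 * mappings outside it.  A client can sanity-check a range against the
 * published geometry before mapping; this helper is illustrative and uses
 * only generic struct iommu_domain fields.
 */
static bool client_iova_in_aperture(struct iommu_domain *domain,
                                    unsigned long iova, size_t size)
{
        const struct iommu_domain_geometry *geo = &domain->geometry;

        if (!geo->force_aperture)
                return true;    /* no enforced aperture to check against */

        return iova >= geo->aperture_start &&
               iova + size - 1 <= geo->aperture_end;
}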
/* omap_iommu_domain_free() */
        /* An iommu device is still attached */
        if (omap_domain->dev)
                _omap_iommu_detach_dev(omap_domain, omap_domain->dev);

/* omap_iommu_iova_to_phys() */
        struct omap_iommu_device *iommu = omap_domain->iommus;
        struct omap_iommu *oiommu = iommu->iommu_dev;
        struct device *dev = oiommu->dev;
        /* so perform the lookup using just the first iommu */

/* omap_iommu_probe_device() */
        /*
         * Allocate the per-device iommu structure for DT-based devices.
         * TODO: Simplify this when removing non-DT support completely from the
         * IOMMU users.
         */
        if (!dev->of_node)
                return ERR_PTR(-ENODEV);
        /*
         * retrieve the count of IOMMU nodes using phandle size as element size
         * since #iommu-cells = 0 for OMAP
         */
        num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
                return ERR_PTR(-ENODEV);
                return ERR_PTR(-ENOMEM);
                np = of_parse_phandle(dev->of_node, "iommus", i);
                        return ERR_PTR(-EINVAL);
                        return ERR_PTR(-ENODEV);
                        return ERR_PTR(-EINVAL);
                tmp->iommu_dev = oiommu;
                tmp->dev = &pdev->dev;
        /* use the first IOMMU alone for the sysfs device linking. */
        oiommu = arch_data->iommu_dev;
        return &oiommu->iommu;

/* omap_iommu_release_device() */
        if (!dev->of_node || !arch_data)

/* omap_iommu_of_xlate() */
        /* TODO: collect args->np to save re-parsing in probe above */

/* omap_iommu_init() */
                return -ENOMEM;