Lines Matching +full:iommu +full:- +full:base
1 // SPDX-License-Identifier: GPL-2.0-only
3 * IOMMU API for Rockchip
5 * Module Authors: Simon Xue <xxm@rock-chips.com>
13 #include <linux/dma-mapping.h>
17 #include <linux/iommu.h>
30 #include "iommu-pages.h"
39 #define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
63 #define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */
95 /* list of clocks required by IOMMU */
116 struct iommu_device iommu; member
118 struct iommu_domain *domain; /* domain to which iommu is attached */
122 struct device_link *link; /* runtime PM link from IOMMU to master */
123 struct rk_iommu *iommu; member
144 * The Rockchip rk3288 iommu uses a 2-level page table.
146 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
149 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
152 * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries; see
153 * the sizing sketch after the diagram). Each iommu device has an
153 * MMU_DTE_ADDR register that contains the physical
159 * MMU_DTE_ADDR -> +-----+
161 *                 +-----+     PT
162 *                 | DTE | -> +-----+
163 *                 +-----+    |     |     Memory
164 *                 |     |    +-----+     Page
165 *                 |     |    | PTE | -> +-----+
166 *                 +-----+    +-----+    |     |
169 *                            +-----+    |     |
172 *                                       +-----+
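For orientation, the arithmetic implied by the layout above, as a minimal sketch (the macro names here are illustrative, not necessarily the driver's):

	#define RK_NUM_DT_ENTRIES	1024	/* DTEs in the DT */
	#define RK_NUM_PT_ENTRIES	1024	/* PTEs in each PT */
	#define RK_SPAGE_SIZE		4096	/* one small page */

	/*
	 * One table = 1024 entries * 4 bytes = 4 KiB, i.e. exactly one page.
	 * One DTE therefore spans 1024 * 4 KiB = 4 MiB of IOVA space, which is
	 * why a single map/unmap call is bounded to 4 MiB further down.
	 * The whole DT spans 1024 * 4 MiB = 4 GiB, matching the 32-bit
	 * aperture set up in rk_iommu_domain_alloc_paging().
	 */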
177 * +---------------------+-----------+-+
179 * +---------------------+-----------+-+
180 * 31:12 - PT address (PTs always start on a 4 KB boundary)
181 * 11: 1 - Reserved
182 * 0 - 1 if PT @ PT address is valid
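A minimal sketch of packing and unpacking such a DTE per the bit layout above; the helper names are illustrative and the driver's own helpers may differ:

	#define RK_DTE_PT_ADDRESS_MASK	0xfffff000	/* bits 31:12 */
	#define RK_DTE_PT_VALID		BIT(0)

	static inline phys_addr_t example_dte_pt_address(u32 dte)
	{
		return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
	}

	static inline bool example_dte_is_pt_valid(u32 dte)
	{
		return dte & RK_DTE_PT_VALID;
	}

	static inline u32 example_mk_dte(dma_addr_t pt_dma)
	{
		return (u32)(pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
	}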
194 * 31:12 - PT address bits 31:12 (bits 11:0 are implicitly 0, PTs are 4 KB aligned)
195 * 11: 8 - PT address bits 35:32
196 * 7: 4 - PT address bits 39:36
197 * 3: 1 - Reserved
198 * 0 - 1 if PT @ PT address is valid
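A minimal sketch of assembling the 40-bit PT address from the v2 fields above (bit positions taken from the table; the function name is illustrative):

	static inline phys_addr_t example_dte_pt_address_v2(u32 dte)
	{
		u64 pt_address = dte;

		/* bits 31:12 hold PT address bits 31:12 (PTs are 4 KB aligned) */
		/* bits 11:8  hold PT address bits 35:32, so shift left by 24   */
		/* bits  7:4  hold PT address bits 39:36, so shift left by 32   */
		return (pt_address & GENMASK_ULL(31, 12)) |
		       ((pt_address & GENMASK_ULL(11, 8)) << 24) |
		       ((pt_address & GENMASK_ULL(7, 4)) << 32);
	}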
240 * +---------------------+---+-------+-+
242 * +---------------------+---+-------+-+
243 * 31:12 - Page address (Pages always start on a 4 KB boundary)
244 * 11: 9 - Reserved
245 * 8: 1 - Flags
246 * 8 - Read allocate - allocate cache space on read misses
247 * 7 - Read cache - enable cache & prefetch of data
248 * 6 - Write buffer - enable delaying writes on their way to memory
249 * 5 - Write allocate - allocate cache space on write misses
250 * 4 - Write cache - different writes can be merged together
251 * 3 - Override cache attributes
252 * if 1, bits 4-8 control cache attributes
254 * 2 - Writable
255 * 1 - Readable
256 * 0 - 1 if Page @ Page address is valid
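A minimal sketch of building an rk3288 PTE per the layout above, mapping the generic IOMMU_READ/IOMMU_WRITE prot bits onto the Readable/Writable bits; names are illustrative:

	#define RK_PTE_PAGE_ADDRESS_MASK	0xfffff000	/* bits 31:12 */
	#define RK_PTE_PAGE_WRITABLE		BIT(2)
	#define RK_PTE_PAGE_READABLE		BIT(1)
	#define RK_PTE_PAGE_VALID		BIT(0)

	static inline u32 example_mk_pte(phys_addr_t page, int prot)
	{
		u32 flags = 0;

		flags |= (prot & IOMMU_READ)  ? RK_PTE_PAGE_READABLE : 0;
		flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;

		return (u32)(page & RK_PTE_PAGE_ADDRESS_MASK) | flags |
		       RK_PTE_PAGE_VALID;
	}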
281 * 31:12 - Page address bits 31:12 (bits 11:0 are implicitly 0, pages are 4 KB aligned)
282 * 11: 8 - Page address bits 35:32
283 * 7: 4 - Page address bits 39:36
284 * 3 - Security
285 * 2 - Writable
286 * 1 - Readable
287 * 0 - 1 if Page @ Page address is valid
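The v2 page address is packed the same way as the v2 PT address above; a minimal sketch (illustrative name, assuming a 64-bit phys_addr_t):

	static inline u32 example_mk_pte_v2(phys_addr_t page, int prot)
	{
		u64 pa = page;
		u32 flags = 0;

		flags |= (prot & IOMMU_READ)  ? BIT(1) : 0;	/* Readable */
		flags |= (prot & IOMMU_WRITE) ? BIT(2) : 0;	/* Writable */

		return (u32)(pa & GENMASK_ULL(31, 12)) |	 /* PA 31:12 -> 31:12 */
		       (u32)((pa >> 24) & GENMASK_ULL(11, 8)) |	 /* PA 35:32 -> 11:8  */
		       (u32)((pa >> 32) & GENMASK_ULL(7, 4)) |	 /* PA 39:36 -> 7:4   */
		       flags | BIT(0);				 /* bit 0: valid      */
	}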
306 * rk3288 iova (IOMMU Virtual Address) format
308 * +-----------+-----------+-------------+
310 * +-----------+-----------+-------------+
311 * 31:22 - DTE index - index of DTE in DT
312 * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
313 * 11: 0 - Page offset - offset into page @ PTE.page_address
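A minimal sketch of splitting an iova according to this format; the names are illustrative, though the driver has equivalent index/offset helpers:

	#define RK_IOVA_DTE_MASK	0xffc00000	/* bits 31:22 */
	#define RK_IOVA_DTE_SHIFT	22
	#define RK_IOVA_PTE_MASK	0x003ff000	/* bits 21:12 */
	#define RK_IOVA_PTE_SHIFT	12
	#define RK_IOVA_PAGE_MASK	0x00000fff	/* bits 11:0  */

	static inline u32 example_iova_dte_index(dma_addr_t iova)
	{
		return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
	}

	static inline u32 example_iova_pte_index(dma_addr_t iova)
	{
		return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
	}

	static inline u32 example_iova_page_offset(dma_addr_t iova)
	{
		return (u32)(iova & RK_IOVA_PAGE_MASK);
	}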
337 static u32 rk_iommu_read(void __iomem *base, u32 offset) in rk_iommu_read() argument
339 return readl(base + offset); in rk_iommu_read()
342 static void rk_iommu_write(void __iomem *base, u32 offset, u32 value) in rk_iommu_write() argument
344 writel(value, base + offset); in rk_iommu_write()
347 static void rk_iommu_command(struct rk_iommu *iommu, u32 command) in rk_iommu_command() argument
351 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_command()
352 writel(command, iommu->bases[i] + RK_MMU_COMMAND); in rk_iommu_command()
355 static void rk_iommu_base_command(void __iomem *base, u32 command) in rk_iommu_base_command() argument
357 writel(command, base + RK_MMU_COMMAND); in rk_iommu_base_command()
359 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, in rk_iommu_zap_lines() argument
368 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_zap_lines()
372 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
376 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) in rk_iommu_is_stall_active() argument
381 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_stall_active()
382 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_stall_active()
388 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) in rk_iommu_is_paging_enabled() argument
393 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_paging_enabled()
394 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & in rk_iommu_is_paging_enabled()
400 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu) in rk_iommu_is_reset_done() argument
405 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_is_reset_done()
406 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0; in rk_iommu_is_reset_done()
411 static int rk_iommu_enable_stall(struct rk_iommu *iommu) in rk_iommu_enable_stall() argument
416 if (rk_iommu_is_stall_active(iommu)) in rk_iommu_enable_stall()
420 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_stall()
423 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); in rk_iommu_enable_stall()
425 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_enable_stall()
429 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_stall()
430 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", in rk_iommu_enable_stall()
431 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_stall()
436 static int rk_iommu_disable_stall(struct rk_iommu *iommu) in rk_iommu_disable_stall() argument
441 if (!rk_iommu_is_stall_active(iommu)) in rk_iommu_disable_stall()
444 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); in rk_iommu_disable_stall()
446 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, in rk_iommu_disable_stall()
450 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_stall()
451 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", in rk_iommu_disable_stall()
452 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_stall()
457 static int rk_iommu_enable_paging(struct rk_iommu *iommu) in rk_iommu_enable_paging() argument
462 if (rk_iommu_is_paging_enabled(iommu)) in rk_iommu_enable_paging()
465 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); in rk_iommu_enable_paging()
467 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_enable_paging()
471 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_enable_paging()
472 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", in rk_iommu_enable_paging()
473 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_enable_paging()
478 static int rk_iommu_disable_paging(struct rk_iommu *iommu) in rk_iommu_disable_paging() argument
483 if (!rk_iommu_is_paging_enabled(iommu)) in rk_iommu_disable_paging()
486 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); in rk_iommu_disable_paging()
488 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, in rk_iommu_disable_paging()
492 for (i = 0; i < iommu->num_mmu; i++) in rk_iommu_disable_paging()
493 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", in rk_iommu_disable_paging()
494 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS)); in rk_iommu_disable_paging()
499 static int rk_iommu_force_reset(struct rk_iommu *iommu) in rk_iommu_force_reset() argument
505 if (iommu->reset_disabled) in rk_iommu_force_reset()
512 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_force_reset()
513 dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY); in rk_iommu_force_reset()
514 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); in rk_iommu_force_reset()
516 if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) { in rk_iommu_force_reset()
517 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); in rk_iommu_force_reset()
518 return -EFAULT; in rk_iommu_force_reset()
522 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); in rk_iommu_force_reset()
524 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, in rk_iommu_force_reset()
528 dev_err(iommu->dev, "FORCE_RESET command timed out\n"); in rk_iommu_force_reset()
535 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova) in log_iova() argument
537 void __iomem *base = iommu->bases[index]; in log_iova() local
553 mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR); in log_iova()
554 mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr); in log_iova()
563 pte_addr_phys = rk_ops->pt_address(dte) + (pte_index * 4); in log_iova()
570 page_addr_phys = rk_ops->pt_address(pte) + page_offset; in log_iova()
574 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", in log_iova()
576 …dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa … in log_iova()
584 struct rk_iommu *iommu = dev_id; in rk_iommu_irq() local
591 err = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_irq()
595 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) in rk_iommu_irq()
598 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_irq()
599 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS); in rk_iommu_irq()
604 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR); in rk_iommu_irq()
609 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS); in rk_iommu_irq()
613 dev_err(iommu->dev, "Page fault at %pad of type %s\n", in rk_iommu_irq()
617 log_iova(iommu, i, iova); in rk_iommu_irq()
624 if (iommu->domain != &rk_identity_domain) in rk_iommu_irq()
625 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_iommu_irq()
628 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); in rk_iommu_irq()
630 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_iommu_irq()
631 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE); in rk_iommu_irq()
635 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); in rk_iommu_irq()
638 dev_err(iommu->dev, "unexpected int_status: %#08x\n", in rk_iommu_irq()
641 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status); in rk_iommu_irq()
644 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_irq()
647 pm_runtime_put(iommu->dev); in rk_iommu_irq()
660 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
662 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_iova_to_phys()
666 pt_phys = rk_ops->pt_address(dte); in rk_iommu_iova_to_phys()
672 phys = rk_ops->pt_address(pte) + rk_iova_page_offset(iova); in rk_iommu_iova_to_phys()
674 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
686 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
687 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_zap_iova()
688 struct rk_iommu *iommu; in rk_iommu_zap_iova() local
691 iommu = list_entry(pos, struct rk_iommu, node); in rk_iommu_zap_iova()
694 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_zap_iova()
698 WARN_ON(clk_bulk_enable(iommu->num_clocks, in rk_iommu_zap_iova()
699 iommu->clocks)); in rk_iommu_zap_iova()
700 rk_iommu_zap_lines(iommu, iova, size); in rk_iommu_zap_iova()
701 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_zap_iova()
702 pm_runtime_put(iommu->dev); in rk_iommu_zap_iova()
705 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
713 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, in rk_iommu_zap_iova_first_last()
725 assert_spin_locked(&rk_domain->dt_lock); in rk_dte_get_page_table()
728 dte_addr = &rk_domain->dt[dte_index]; in rk_dte_get_page_table()
733 page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags); in rk_dte_get_page_table()
735 return ERR_PTR(-ENOMEM); in rk_dte_get_page_table()
741 return ERR_PTR(-ENOMEM); in rk_dte_get_page_table()
744 dte = rk_ops->mk_dtentries(pt_dma); in rk_dte_get_page_table()
748 rk_domain->dt_dma + dte_index * sizeof(u32), 1); in rk_dte_get_page_table()
750 pt_phys = rk_ops->pt_address(dte); in rk_dte_get_page_table()
761 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_unmap_iova()
784 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_map_iova()
792 pte_addr[pte_count] = rk_ops->mk_ptentries(paddr, prot); in rk_iommu_map_iova()
814 page_phys = rk_ops->pt_address(pte_addr[pte_count]); in rk_iommu_map_iova()
818 return -EADDRINUSE; in rk_iommu_map_iova()
832 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_map()
836 * (1024 4-KiB pages = 4 MiB). in rk_iommu_map()
843 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
847 dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_map()
851 pte_dma = rk_ops->pt_address(dte_index) + pte_index * sizeof(u32); in rk_iommu_map()
855 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
873 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
877 * (1024 4-KiB pages = 4 MiB). in rk_iommu_unmap()
882 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_unmap()
885 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
889 pt_phys = rk_ops->pt_address(dte); in rk_iommu_unmap()
894 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
906 return data ? data->iommu : NULL; in rk_iommu_from_dev()
909 /* Must be called with iommu powered on and attached */
910 static void rk_iommu_disable(struct rk_iommu *iommu) in rk_iommu_disable() argument
915 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)); in rk_iommu_disable()
916 rk_iommu_enable_stall(iommu); in rk_iommu_disable()
917 rk_iommu_disable_paging(iommu); in rk_iommu_disable()
918 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_disable()
919 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0); in rk_iommu_disable()
920 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0); in rk_iommu_disable()
922 rk_iommu_disable_stall(iommu); in rk_iommu_disable()
923 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_disable()
926 /* Must be called with iommu powered on and attached */
927 static int rk_iommu_enable(struct rk_iommu *iommu) in rk_iommu_enable() argument
929 struct iommu_domain *domain = iommu->domain; in rk_iommu_enable()
933 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
937 ret = rk_iommu_enable_stall(iommu); in rk_iommu_enable()
941 ret = rk_iommu_force_reset(iommu); in rk_iommu_enable()
945 for (i = 0; i < iommu->num_mmu; i++) { in rk_iommu_enable()
946 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, in rk_iommu_enable()
947 rk_ops->mk_dtentries(rk_domain->dt_dma)); in rk_iommu_enable()
948 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); in rk_iommu_enable()
949 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); in rk_iommu_enable()
952 ret = rk_iommu_enable_paging(iommu); in rk_iommu_enable()
955 rk_iommu_disable_stall(iommu); in rk_iommu_enable()
957 clk_bulk_disable(iommu->num_clocks, iommu->clocks); in rk_iommu_enable()
964 struct rk_iommu *iommu; in rk_iommu_identity_attach() local
970 iommu = rk_iommu_from_dev(dev); in rk_iommu_identity_attach()
971 if (!iommu) in rk_iommu_identity_attach()
972 return -ENODEV; in rk_iommu_identity_attach()
974 rk_domain = to_rk_domain(iommu->domain); in rk_iommu_identity_attach()
976 dev_dbg(dev, "Detaching from iommu domain\n"); in rk_iommu_identity_attach()
978 if (iommu->domain == identity_domain) in rk_iommu_identity_attach()
981 iommu->domain = identity_domain; in rk_iommu_identity_attach()
983 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_identity_attach()
984 list_del_init(&iommu->node); in rk_iommu_identity_attach()
985 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_identity_attach()
987 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_identity_attach()
990 rk_iommu_disable(iommu); in rk_iommu_identity_attach()
991 pm_runtime_put(iommu->dev); in rk_iommu_identity_attach()
1009 struct rk_iommu *iommu; in rk_iommu_attach_device() local
1016 * Such a device does not belong to an iommu group. in rk_iommu_attach_device()
1018 iommu = rk_iommu_from_dev(dev); in rk_iommu_attach_device()
1019 if (!iommu) in rk_iommu_attach_device()
1022 dev_dbg(dev, "Attaching to iommu domain\n"); in rk_iommu_attach_device()
1024 /* iommu already attached */ in rk_iommu_attach_device()
1025 if (iommu->domain == domain) in rk_iommu_attach_device()
1032 iommu->domain = domain; in rk_iommu_attach_device()
1034 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1035 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1036 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1038 ret = pm_runtime_get_if_in_use(iommu->dev); in rk_iommu_attach_device()
1042 ret = rk_iommu_enable(iommu); in rk_iommu_attach_device()
1046 pm_runtime_put(iommu->dev); in rk_iommu_attach_device()
1064 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. in rk_iommu_domain_alloc_paging()
1067 rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags); in rk_iommu_domain_alloc_paging()
1068 if (!rk_domain->dt) in rk_iommu_domain_alloc_paging()
1071 rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt, in rk_iommu_domain_alloc_paging()
1073 if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) { in rk_iommu_domain_alloc_paging()
1078 spin_lock_init(&rk_domain->iommus_lock); in rk_iommu_domain_alloc_paging()
1079 spin_lock_init(&rk_domain->dt_lock); in rk_iommu_domain_alloc_paging()
1080 INIT_LIST_HEAD(&rk_domain->iommus); in rk_iommu_domain_alloc_paging()
1082 rk_domain->domain.geometry.aperture_start = 0; in rk_iommu_domain_alloc_paging()
1083 rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); in rk_iommu_domain_alloc_paging()
1084 rk_domain->domain.geometry.force_aperture = true; in rk_iommu_domain_alloc_paging()
1086 return &rk_domain->domain; in rk_iommu_domain_alloc_paging()
1089 iommu_free_page(rk_domain->dt); in rk_iommu_domain_alloc_paging()
1101 WARN_ON(!list_empty(&rk_domain->iommus)); in rk_iommu_domain_free()
1104 u32 dte = rk_domain->dt[i]; in rk_iommu_domain_free()
1106 phys_addr_t pt_phys = rk_ops->pt_address(dte); in rk_iommu_domain_free()
1114 dma_unmap_single(dma_dev, rk_domain->dt_dma, in rk_iommu_domain_free()
1116 iommu_free_page(rk_domain->dt); in rk_iommu_domain_free()
1124 struct rk_iommu *iommu; in rk_iommu_probe_device() local
1128 return ERR_PTR(-ENODEV); in rk_iommu_probe_device()
1130 iommu = rk_iommu_from_dev(dev); in rk_iommu_probe_device()
1132 data->link = device_link_add(dev, iommu->dev, in rk_iommu_probe_device()
1135 return &iommu->iommu; in rk_iommu_probe_device()
1142 device_link_del(data->link); in rk_iommu_release_device()
1153 return -ENOMEM; in rk_iommu_of_xlate()
1155 iommu_dev = of_find_device_by_node(args->np); in rk_iommu_of_xlate()
1157 data->iommu = platform_get_drvdata(iommu_dev); in rk_iommu_of_xlate()
1158 data->iommu->domain = &rk_identity_domain; in rk_iommu_of_xlate()
1185 struct device *dev = &pdev->dev; in rk_iommu_probe()
1186 struct rk_iommu *iommu; in rk_iommu_probe() local
1189 int num_res = pdev->num_resources; in rk_iommu_probe()
1192 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); in rk_iommu_probe()
1193 if (!iommu) in rk_iommu_probe()
1194 return -ENOMEM; in rk_iommu_probe()
1196 platform_set_drvdata(pdev, iommu); in rk_iommu_probe()
1197 iommu->dev = dev; in rk_iommu_probe()
1198 iommu->num_mmu = 0; in rk_iommu_probe()
1209 return -EINVAL; in rk_iommu_probe()
1211 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), in rk_iommu_probe()
1213 if (!iommu->bases) in rk_iommu_probe()
1214 return -ENOMEM; in rk_iommu_probe()
1220 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res); in rk_iommu_probe()
1221 if (IS_ERR(iommu->bases[i])) in rk_iommu_probe()
1223 iommu->num_mmu++; in rk_iommu_probe()
1225 if (iommu->num_mmu == 0) in rk_iommu_probe()
1226 return PTR_ERR(iommu->bases[0]); in rk_iommu_probe()
1228 iommu->num_irq = platform_irq_count(pdev); in rk_iommu_probe()
1229 if (iommu->num_irq < 0) in rk_iommu_probe()
1230 return iommu->num_irq; in rk_iommu_probe()
1232 iommu->reset_disabled = device_property_read_bool(dev, in rk_iommu_probe()
1233 "rockchip,disable-mmu-reset"); in rk_iommu_probe()
1235 iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks); in rk_iommu_probe()
1236 iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks, in rk_iommu_probe()
1237 sizeof(*iommu->clocks), GFP_KERNEL); in rk_iommu_probe()
1238 if (!iommu->clocks) in rk_iommu_probe()
1239 return -ENOMEM; in rk_iommu_probe()
1241 for (i = 0; i < iommu->num_clocks; ++i) in rk_iommu_probe()
1242 iommu->clocks[i].id = rk_iommu_clocks[i]; in rk_iommu_probe()
1245 * iommu clocks should be present for all new devices and devicetrees in rk_iommu_probe()
1249 err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1250 if (err == -ENOENT) in rk_iommu_probe()
1251 iommu->num_clocks = 0; in rk_iommu_probe()
1255 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1259 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); in rk_iommu_probe()
1263 err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev); in rk_iommu_probe()
1268 * Use the first registered IOMMU device for domain to use with DMA in rk_iommu_probe()
1270 * IOMMU device. in rk_iommu_probe()
1273 dma_dev = &pdev->dev; in rk_iommu_probe()
1277 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_probe()
1285 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, in rk_iommu_probe()
1286 IRQF_SHARED, dev_name(dev), iommu); in rk_iommu_probe()
1291 dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); in rk_iommu_probe()
1297 iommu_device_sysfs_remove(&iommu->iommu); in rk_iommu_probe()
1299 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks); in rk_iommu_probe()
1305 struct rk_iommu *iommu = platform_get_drvdata(pdev); in rk_iommu_shutdown() local
1308 for (i = 0; i < iommu->num_irq; i++) { in rk_iommu_shutdown()
1311 devm_free_irq(iommu->dev, irq, iommu); in rk_iommu_shutdown()
1314 pm_runtime_force_suspend(&pdev->dev); in rk_iommu_shutdown()
1319 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_suspend() local
1321 if (iommu->domain == &rk_identity_domain) in rk_iommu_suspend()
1324 rk_iommu_disable(iommu); in rk_iommu_suspend()
1330 struct rk_iommu *iommu = dev_get_drvdata(dev); in rk_iommu_resume() local
1332 if (iommu->domain == &rk_identity_domain) in rk_iommu_resume()
1335 return rk_iommu_enable(iommu); in rk_iommu_resume()
1361 { .compatible = "rockchip,iommu",
1364 { .compatible = "rockchip,rk3568-iommu",