// SPDX-License-Identifier: GPL-2.0-only
...
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
/* in qcom_smmu_tlb_sync_debug() */
dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");
...
cfg = qsmmu->data->cfg;
...
ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS], ...
        dev_err(smmu->dev, ...
ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK], ...
        dev_err(smmu->dev, ...
ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR], ...
        dev_err(smmu->dev, ...
dev_err(smmu->dev, ...
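These matched lines share one pattern: when a TLB sync times out, each diagnostic TBU register sits behind the secure world, so it is read through an SCM firmware call rather than a plain MMIO read. A minimal sketch of that pattern, assuming hypothetical `base` and `offset` values rather than the driver's real register map:

#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>

/*
 * Hedged sketch: read one secure-world debug register via an SCM call
 * and report the result. "base" and "offset" are placeholders, not the
 * driver's real register map.
 */
static void sketch_read_secure_reg(struct device *dev, phys_addr_t base,
                                   unsigned int offset)
{
        unsigned int val;
        int ret;

        /* Secure MMIO cannot be ioremap'd here; firmware reads it for us. */
        ret = qcom_scm_io_readl(base + offset, &val);
        if (ret)
                dev_err(dev, "secure register read failed: %d\n", ret);
        else
                dev_err(dev, "secure register %#x = %#x\n", offset, val);
}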
/* in qcom_find_tbu() */
start = tbu->sid_range[0];
end = start + tbu->sid_range[1];
...
if (qsmmu->smmu.dev->of_node == tbu->smmu_np && ...
...
dev_err(qsmmu->smmu.dev, "Unable to find TBU for sid 0x%x\n", sid);
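A hedged sketch of the lookup these lines implement: a stream ID maps to the TBU whose window [sid_range[0], sid_range[0] + sid_range[1]) contains it on the matching SMMU node. The `struct qcom_tbu_sketch` type and `sketch_tbu_list` head below are illustrative stand-ins, not the driver's own definitions:

#include <linux/list.h>
#include <linux/of.h>

struct qcom_tbu_sketch {
        struct list_head list;
        struct device_node *smmu_np;
        u32 sid_range[2];       /* { first SID, number of SIDs } */
};

static LIST_HEAD(sketch_tbu_list);

/*
 * Hedged sketch: a stream ID belongs to a TBU when it falls inside the
 * TBU's SID window and the TBU hangs off the same SMMU node.
 */
static struct qcom_tbu_sketch *sketch_find_tbu(struct device_node *smmu_np,
                                               u32 sid)
{
        struct qcom_tbu_sketch *tbu;

        list_for_each_entry(tbu, &sketch_tbu_list, list) {
                u32 start = tbu->sid_range[0];
                u32 end = start + tbu->sid_range[1];

                if (tbu->smmu_np == smmu_np && start <= sid && sid < end)
                        return tbu;
        }
        return NULL;
}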
/* in qcom_tbu_halt() */
struct arm_smmu_device *smmu = smmu_domain->smmu;
int ret = 0, idx = smmu_domain->cfg.cbndx;
...
guard(spinlock_irqsave)(&tbu->halt_lock);
if (tbu->halt_count) {
        tbu->halt_count++;
        ...
}
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
...
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
...
/* ... itself) have completed. Disable iommu faults and terminate ... */
...
if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG, status, ...
        dev_err(tbu->dev, "Timeout while trying to halt TBU!\n");
        ret = -ETIMEDOUT;
        ...
        val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
        ...
        writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
...
tbu->halt_count = 1;
/* in qcom_tbu_resume() */
guard(spinlock_irqsave)(&tbu->halt_lock);
if (!tbu->halt_count) {
        WARN(1, "%s: halt_count is 0", dev_name(tbu->dev));
        ...
}
if (tbu->halt_count > 1) {
        tbu->halt_count--;
        ...
}
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
...
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
...
tbu->halt_count = 0;
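Halt and resume pair through a reference count taken under `halt_lock`: only the 0 -> 1 halt and the 1 -> 0 resume touch the hardware, so nested callers stack safely. A minimal sketch of that pattern with hypothetical names, leaving out the register sequence above:

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_halt_lock);
static int sketch_halt_count;

/* Hedged sketch: only the first halt and the last resume reach hardware. */
static void sketch_halt(void)
{
        guard(spinlock_irqsave)(&sketch_halt_lock);

        if (sketch_halt_count++)
                return;         /* already halted, just take a reference */

        /* first caller: program the halt request here */
}

static void sketch_resume(void)
{
        guard(spinlock_irqsave)(&sketch_halt_lock);

        if (WARN_ON(!sketch_halt_count))
                return;         /* unbalanced resume */

        if (--sketch_halt_count)
                return;         /* other holders still need the halt */

        /* last caller: clear the halt request here */
}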
/* in qcom_tbu_trigger_atos() */
/* Set address and stream-id */
val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
...
writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
...
writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);

/* Write-back read and write-allocate */
...
/* Non-secure access */
...
writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
...
val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
...
val = readl_relaxed(tbu->base + DEBUG_PAR_REG);
...
val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
...
dev_err(tbu->dev, "ATOS generated a fault interrupt! PAR = %llx, SID=0x%x\n", ...
...
dev_err_ratelimited(tbu->dev, "ATOS translation timed out!\n");
...
writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
...
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
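The ATOS flow above programs the IOVA and stream ID, sets write-back read/write-allocate and non-secure transaction attributes, fires the trigger, polls for a result or fault, and finally clears the debug registers so the TBU returns to normal operation. A compressed sketch of the trigger-and-poll step, with hypothetical register offsets and a hypothetical PAR valid bit:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical offsets and bits, for illustration only. */
#define SKETCH_TXN_TRIGG        0x130
#define SKETCH_PAR              0x28
#define SKETCH_PAR_VALID        BIT(0)

/*
 * Hedged sketch: fire a debug translation and poll PAR for a result.
 * Real hardware needs the SID/IOVA and attribute setup shown above
 * before the trigger, and runs with the TBU halted.
 */
static int sketch_trigger_atos(void __iomem *base, u64 *par)
{
        u64 val;
        int ret;

        writeq_relaxed(1, base + SKETCH_TXN_TRIGG);

        ret = readq_poll_timeout_atomic(base + SKETCH_PAR, val,
                                        val & SKETCH_PAR_VALID, 10, 1000);
        if (!ret)
                *par = val;

        /* Disarm the trigger on both success and timeout. */
        writeq_relaxed(0, base + SKETCH_TXN_TRIGG);
        return ret;
}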
/* in qcom_iova_to_phys() */
struct arm_smmu_device *smmu = smmu_domain->smmu;
...
int idx = smmu_domain->cfg.cbndx;
...
ret = icc_set_bw(tbu->path, 0, UINT_MAX);
...
ret = clk_prepare_enable(tbu->clk);
...
readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
...
clk_disable_unprepare(tbu->clk);
...
icc_set_bw(tbu->path, 0, 0);
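Before touching the TBU, the function votes for interconnect bandwidth and enables the TBU clock, and it drops both on the way out. A hedged sketch of that power/bandwidth bracket, using a hypothetical wrapper rather than the driver's actual control flow:

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/limits.h>

struct sketch_tbu_res {
        struct icc_path *path;
        struct clk *clk;
};

/* Hedged sketch: power up the TBU path, run @fn, then drop the votes. */
static int sketch_with_tbu_powered(struct sketch_tbu_res *res,
                                   int (*fn)(void *), void *arg)
{
        int ret;

        /* Vote maximum peak bandwidth so the debug access cannot stall. */
        ret = icc_set_bw(res->path, 0, UINT_MAX);
        if (ret)
                return ret;

        ret = clk_prepare_enable(res->clk);
        if (ret)
                goto out_bw;

        ret = fn(arg);

        clk_disable_unprepare(res->clk);
out_bw:
        icc_set_bw(res->path, 0, 0);
        return ret;
}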
/* in qcom_smmu_iova_to_phys_hard() */
struct arm_smmu_device *smmu = smmu_domain->smmu;
int idx = smmu_domain->cfg.cbndx;
/* in qcom_smmu_verify_fault() */
struct io_pgtable *iop = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
struct arm_smmu_device *smmu = smmu_domain->smmu;
...
dev_err(smmu->dev, ...
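io_pgtable_ops_to_pgtable() recovers the `struct io_pgtable` that embeds the ops pointer, giving the fault path access to the page-table configuration. A minimal sketch of that step, assuming an ARM LPAE stage-1 table as this driver uses:

#include <linux/io-pgtable.h>

/*
 * Hedged sketch: walk from the ops pointer back to the embedding
 * io_pgtable to read the stage-1 TTBR programmed for this context.
 */
static u64 sketch_pgtable_ttbr(struct io_pgtable_ops *ops)
{
        struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);

        return iop->cfg.arm_lpae_s1_cfg.ttbr;
}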
/* in qcom_smmu_context_fault() */
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
struct arm_smmu_device *smmu = smmu_domain->smmu;
...
int idx = smmu_domain->cfg.cbndx;
...
ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, ...
...
if (ret == -ENOSYS)
...
phys_soft = ops->iova_to_phys(ops, cfi.iova);
...
tmp = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, ...
...
if (!tmp || tmp == -EBUSY) {
...
dev_err(smmu->dev,
        "soft iova-to-phys=%pa\n", &phys_soft);
...
dev_err(smmu->dev, ...
        dev_name(smmu->dev));
...
dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n", ...
...
dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
...
/*
 * If the client returns -EBUSY, do not clear FSR and do not RESUME
 * if stalled. This is required to keep the IOMMU client stalled on
 * ...
 * 3) Client terminates the stalled transaction and resumes the IOMMU
 * ...
 */
if (tmp != -EBUSY) {
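The fault handler offers the fault to a client via report_iommu_fault(): -ENOSYS means no handler is registered, while 0 or -EBUSY means the client claimed it, with -EBUSY additionally asking for the transaction to stay stalled (no FSR clear, no RESUME) until the client deals with it. A hedged sketch of that decision, with a hypothetical helper standing in for the FSR/RESUME register writes:

#include <linux/errno.h>
#include <linux/iommu.h>

/* Hypothetical stand-in for the driver's FSR/RESUME register writes. */
static void sketch_clear_and_resume(void)
{
}

/* Hedged sketch of the client-notification policy described above. */
static void sketch_handle_fault(struct iommu_domain *domain,
                                unsigned long iova, int flags)
{
        int ret;

        ret = report_iommu_fault(domain, NULL, iova, flags);

        /*
         * -EBUSY: leave FSR set and the transaction stalled so the
         * client can inspect it and resume or terminate it later.
         */
        if (ret == -EBUSY)
                return;

        /* Anything else: clear the fault and unblock the device. */
        sketch_clear_and_resume();
}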
/* in qcom_tbu_probe() */
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
...
        return -ENOMEM;

tbu->dev = dev;
INIT_LIST_HEAD(&tbu->list);
spin_lock_init(&tbu->halt_lock);

if (of_parse_phandle_with_args(np, "qcom,stream-id-range", "#iommu-cells", 0, &args)) {
        dev_err(dev, "Cannot parse the 'qcom,stream-id-range' DT property\n");
        return -EINVAL;
}

tbu->smmu_np = args.np;
tbu->sid_range[0] = args.args[0];
tbu->sid_range[1] = args.args[1];
...
tbu->base = devm_of_iomap(dev, np, 0, NULL);
if (IS_ERR(tbu->base))
        return PTR_ERR(tbu->base);

tbu->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(tbu->clk))
        return PTR_ERR(tbu->clk);

tbu->path = devm_of_icc_get(dev, NULL);
if (IS_ERR(tbu->path))
        return PTR_ERR(tbu->path);
...
list_add_tail(&tbu->list, &tbu_list);
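Probe binds the TBU to its SMMU through the `qcom,stream-id-range` phandle property, whose two argument cells give the base stream ID and the window length. A hedged sketch of parsing just that property; only the of_parse_phandle_with_args() call mirrors the code above:

#include <linux/errno.h>
#include <linux/of.h>

/*
 * Hedged sketch: pull the SMMU node and SID window out of a TBU node's
 * "qcom,stream-id-range" = <&smmu BASE LENGTH> property. Real code
 * should also check args.args_count and of_node_put() the reference
 * when it is done with the node.
 */
static int sketch_parse_sid_range(struct device_node *np,
                                  struct device_node **smmu_np,
                                  u32 *base, u32 *len)
{
        struct of_phandle_args args;

        if (of_parse_phandle_with_args(np, "qcom,stream-id-range",
                                       "#iommu-cells", 0, &args))
                return -EINVAL;

        *smmu_np = args.np;
        *base = args.args[0];
        *len = args.args[1];
        return 0;
}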