Lines Matching +full:ats-supported

1 // SPDX-License-Identifier: GPL-2.0
19 #include <linux/io-pgtable.h>
27 #include <linux/pci-ats.h>
33 #include "arm-smmu-v3.h"
34 #include "../../dma-iommu.h"
39 "Disable MSI-based polling for CMD_SYNC completion.");
82 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
83 { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
116 if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
118 smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
119 dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
125 /* Low-level queue manipulation functions */
130 prod = Q_IDX(q, q->prod); in queue_has_space()
131 cons = Q_IDX(q, q->cons); in queue_has_space()
133 if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) in queue_has_space()
134 space = (1 << q->max_n_shift) - (prod - cons); in queue_has_space()
136 space = cons - prod; in queue_has_space()
143 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_full()
144 Q_WRP(q, q->prod) != Q_WRP(q, q->cons); in queue_full()
149 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && in queue_empty()
150 Q_WRP(q, q->prod) == Q_WRP(q, q->cons); in queue_empty()
155 return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && in queue_consumed()
156 (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || in queue_consumed()
157 ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && in queue_consumed()
158 (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); in queue_consumed()
168 writel_relaxed(q->llq.cons, q->cons_reg); in queue_sync_cons_out()
173 u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; in queue_inc_cons()
174 q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); in queue_inc_cons()
179 struct arm_smmu_ll_queue *llq = &q->llq; in queue_sync_cons_ovf()
181 if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons))) in queue_sync_cons_ovf()
184 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in queue_sync_cons_ovf()
185 Q_IDX(llq, llq->cons); in queue_sync_cons_ovf()
199 prod = readl(q->prod_reg); in queue_sync_prod_in()
201 if (Q_OVF(prod) != Q_OVF(q->llq.prod)) in queue_sync_prod_in()
202 ret = -EOVERFLOW; in queue_sync_prod_in()
204 q->llq.prod = prod; in queue_sync_prod_in()
210 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; in queue_inc_prod_n()
211 return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); in queue_inc_prod_n()
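The helpers above encode a queue pointer as an index in the low max_n_shift bits with a wrap bit directly above it, so "full" and "empty" (same index on both sides) are told apart purely by whether the wrap bits agree. A minimal userspace sketch of that encoding, assuming an 8-entry queue and using hypothetical q_idx()/q_wrp()/q_inc() helpers rather than the kernel's Q_IDX/Q_WRP/Q_OVF macros:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_N_SHIFT 3                      /* 8-entry queue for the example */
#define IDX_MASK    ((1u << MAX_N_SHIFT) - 1)
#define WRP_BIT     (1u << MAX_N_SHIFT)    /* wrap bit sits just above the index */

static uint32_t q_idx(uint32_t p) { return p & IDX_MASK; }
static uint32_t q_wrp(uint32_t p) { return p & WRP_BIT; }

/* Same index, same wrap -> empty; same index, different wrap -> full. */
static bool q_empty(uint32_t prod, uint32_t cons)
{
	return q_idx(prod) == q_idx(cons) && q_wrp(prod) == q_wrp(cons);
}

static bool q_full(uint32_t prod, uint32_t cons)
{
	return q_idx(prod) == q_idx(cons) && q_wrp(prod) != q_wrp(cons);
}

/* Advancing past the last slot carries into (and toggles) the wrap bit. */
static uint32_t q_inc(uint32_t p, uint32_t n)
{
	uint32_t next = (q_wrp(p) | q_idx(p)) + n;

	return q_wrp(next) | q_idx(next);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;

	assert(q_empty(prod, cons));
	prod = q_inc(prod, 1 << MAX_N_SHIFT);   /* produce 8 entries */
	assert(q_full(prod, cons));
	cons = q_inc(cons, 1 << MAX_N_SHIFT);   /* consume them all */
	assert(q_empty(prod, cons));
	return 0;
}

The carry into the wrap bit is what queue_inc_cons() and queue_inc_prod_n() above rely on; the driver additionally preserves an overflow bit (Q_OVF) that this sketch drops.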
217 qp->delay = 1; in queue_poll_init()
218 qp->spin_cnt = 0; in queue_poll_init()
219 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); in queue_poll_init()
220 qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); in queue_poll_init()
225 if (ktime_compare(ktime_get(), qp->timeout) > 0) in queue_poll()
226 return -ETIMEDOUT; in queue_poll()
228 if (qp->wfe) { in queue_poll()
230 } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { in queue_poll()
233 udelay(qp->delay); in queue_poll()
234 qp->delay *= 2; in queue_poll()
235 qp->spin_cnt = 0; in queue_poll()
259 if (queue_empty(&q->llq)) in queue_remove_raw()
260 return -EAGAIN; in queue_remove_raw()
262 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); in queue_remove_raw()
263 queue_inc_cons(&q->llq); in queue_remove_raw()
268 /* High-level queue accessors */
272 cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); in arm_smmu_cmdq_build_cmd()
274 switch (ent->opcode) { in arm_smmu_cmdq_build_cmd()
279 cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid); in arm_smmu_cmdq_build_cmd()
282 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SSID, ent->cfgi.ssid); in arm_smmu_cmdq_build_cmd()
285 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
286 cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); in arm_smmu_cmdq_build_cmd()
289 cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); in arm_smmu_cmdq_build_cmd()
296 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
299 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
300 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
301 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
302 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
303 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
304 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
305 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; in arm_smmu_cmdq_build_cmd()
308 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); in arm_smmu_cmdq_build_cmd()
309 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); in arm_smmu_cmdq_build_cmd()
310 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
311 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); in arm_smmu_cmdq_build_cmd()
312 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); in arm_smmu_cmdq_build_cmd()
313 cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); in arm_smmu_cmdq_build_cmd()
314 cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; in arm_smmu_cmdq_build_cmd()
317 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
321 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); in arm_smmu_cmdq_build_cmd()
324 cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); in arm_smmu_cmdq_build_cmd()
327 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
328 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global); in arm_smmu_cmdq_build_cmd()
329 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid); in arm_smmu_cmdq_build_cmd()
330 cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid); in arm_smmu_cmdq_build_cmd()
331 cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size); in arm_smmu_cmdq_build_cmd()
332 cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK; in arm_smmu_cmdq_build_cmd()
335 cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); in arm_smmu_cmdq_build_cmd()
336 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid); in arm_smmu_cmdq_build_cmd()
337 cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid); in arm_smmu_cmdq_build_cmd()
338 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid); in arm_smmu_cmdq_build_cmd()
339 switch (ent->pri.resp) { in arm_smmu_cmdq_build_cmd()
345 return -EINVAL; in arm_smmu_cmdq_build_cmd()
347 cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); in arm_smmu_cmdq_build_cmd()
350 cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_SID, ent->resume.sid); in arm_smmu_cmdq_build_cmd()
351 cmd[0] |= FIELD_PREP(CMDQ_RESUME_0_RESP, ent->resume.resp); in arm_smmu_cmdq_build_cmd()
352 cmd[1] |= FIELD_PREP(CMDQ_RESUME_1_STAG, ent->resume.stag); in arm_smmu_cmdq_build_cmd()
355 if (ent->sync.msiaddr) { in arm_smmu_cmdq_build_cmd()
357 cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; in arm_smmu_cmdq_build_cmd()
365 return -ENOENT; in arm_smmu_cmdq_build_cmd()
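arm_smmu_cmdq_build_cmd() above assembles each command as two 64-bit words by OR-ing fields in with FIELD_PREP() against GENMASK_ULL() masks from the header. A standalone sketch of the same packing style, with portable stand-ins for those macros; the CMD_* layout below is illustrative (loosely modelled on CMDQ_0_OP and CMDQ_CFGI_0_SID) rather than copied from arm-smmu-v3.h:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP() helpers. */
#define GENMASK64(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP64(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Hypothetical field layout: opcode in bits [7:0] of dword 0, StreamID in
 * bits [63:32], leaf flag in bit 0 of dword 1. */
#define CMD_0_OP	GENMASK64(7, 0)
#define CMD_0_SID	GENMASK64(63, 32)
#define CMD_1_LEAF	GENMASK64(0, 0)

#define OP_CFGI_STE	0x03

static void build_cfgi_ste(uint64_t cmd[2], uint32_t sid, int leaf)
{
	cmd[0] = FIELD_PREP64(CMD_0_OP, OP_CFGI_STE) |
		 FIELD_PREP64(CMD_0_SID, sid);
	cmd[1] = FIELD_PREP64(CMD_1_LEAF, leaf);
}

int main(void)
{
	uint64_t cmd[2];

	build_cfgi_ste(cmd, 0x42, 1);
	printf("cmd[0]=0x%016llx cmd[1]=0x%016llx\n",
	       (unsigned long long)cmd[0], (unsigned long long)cmd[1]);
	return 0;
}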
376 if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq) in arm_smmu_get_cmdq()
377 cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent); in arm_smmu_get_cmdq()
379 return cmdq ?: &smmu->cmdq; in arm_smmu_get_cmdq()
385 if (cmdq == &smmu->cmdq) in arm_smmu_cmdq_needs_busy_polling()
388 return smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV; in arm_smmu_cmdq_needs_busy_polling()
394 struct arm_smmu_queue *q = &cmdq->q; in arm_smmu_cmdq_build_sync_cmd()
403 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) { in arm_smmu_cmdq_build_sync_cmd()
404 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * in arm_smmu_cmdq_build_sync_cmd()
405 q->ent_dwords * 8; in arm_smmu_cmdq_build_sync_cmd()
422 struct arm_smmu_queue *q = &cmdq->q; in __arm_smmu_cmdq_skip_err()
426 u32 cons = readl_relaxed(q->cons_reg); in __arm_smmu_cmdq_skip_err()
432 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons, in __arm_smmu_cmdq_skip_err()
437 dev_err(smmu->dev, "retrying command fetch\n"); in __arm_smmu_cmdq_skip_err()
458 queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); in __arm_smmu_cmdq_skip_err()
459 dev_err(smmu->dev, "skipping command in error state:\n"); in __arm_smmu_cmdq_skip_err()
461 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); in __arm_smmu_cmdq_skip_err()
468 queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); in __arm_smmu_cmdq_skip_err()
473 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq); in arm_smmu_cmdq_skip_err()
480 * - The only LOCK routines are exclusive_trylock() and shared_lock().
484 * - The UNLOCK routines are supplemented with shared_tryunlock(), which
498 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
502 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
503 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
508 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
513 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
524 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
532 atomic_set_release(&cmdq->lock, 0); \
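The comment block above describes the cmdq lock: one atomic counter where values >= 0 count shared holders and INT_MIN marks the exclusive owner, so shared_lock() usually gets by with a single increment and exclusive_unlock() simply stores 0, wiping out any optimistic increments made while it was held. A hedged userspace sketch of the same idea in C11 atomics (sequentially consistent for simplicity, unlike the relaxed/acquire/release mix the driver uses; all names are illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

struct shared_excl_lock {
	atomic_int val;		/* >= 0: N shared holders, INT_MIN: exclusive */
};

static void shared_lock(struct shared_excl_lock *l)
{
	int v;

	/*
	 * Optimistic increment: old value >= 0 means no exclusive holder.
	 * Increments made while the lock is held exclusively stay negative
	 * and are discarded when the exclusive holder stores 0.
	 */
	if (atomic_fetch_add(&l->val, 1) >= 0)
		return;

	do {
		while ((v = atomic_load(&l->val)) < 0)
			;	/* wait for the exclusive holder to go away */
	} while (!atomic_compare_exchange_weak(&l->val, &v, v + 1));
}

static void shared_unlock(struct shared_excl_lock *l)
{
	atomic_fetch_sub(&l->val, 1);
}

static bool exclusive_trylock(struct shared_excl_lock *l)
{
	int expected = 0;

	/* Only succeeds when nobody holds the lock in either mode. */
	return atomic_compare_exchange_strong(&l->val, &expected, INT_MIN);
}

static void exclusive_unlock(struct shared_excl_lock *l)
{
	atomic_store(&l->val, 0);	/* also clears stray shared increments */
}

int main(void)
{
	struct shared_excl_lock lock = { .val = 0 };

	shared_lock(&lock);
	shared_unlock(&lock);
	if (exclusive_trylock(&lock))
		exclusive_unlock(&lock);
	return 0;
}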
541 * you like mixed-size concurrency, dependency ordering and relaxed atomics,
581 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
596 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
601 mask = GENMASK(limit - 1, sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
605 * that a zero-initialised queue is invalid and, after marking in __arm_smmu_cmdq_poll_set_valid_map()
618 llq.prod = queue_inc_prod_n(&llq, limit - sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
636 /* Wait for the command queue to become non-full */
650 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
652 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
658 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
678 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
688 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); in __arm_smmu_cmdq_poll_until_msi()
693 * Wait until the SMMU cons index passes llq->prod.
701 u32 prod = llq->prod; in __arm_smmu_cmdq_poll_until_consumed()
705 llq->val = READ_ONCE(cmdq->q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
720 * cmdq->q.llq.cons. Roughly speaking: in __arm_smmu_cmdq_poll_until_consumed()
740 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
750 if (smmu->options & ARM_SMMU_OPT_MSIPOLL && in arm_smmu_cmdq_poll_until_sync()
762 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
770 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_write_entries()
778 * - There is a dma_wmb() before publishing any commands to the queue.
782 * - On completion of a CMD_SYNC, there is a control dependency.
786 * - Command insertion is totally ordered, so if two CPUs each race to
801 llq.max_n_shift = cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_issue_cmdlist()
805 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
812 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); in arm_smmu_cmdq_issue_cmdlist()
820 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
838 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); in arm_smmu_cmdq_issue_cmdlist()
856 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
860 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
874 writel_relaxed(prod, cmdq->q.prod_reg); in arm_smmu_cmdq_issue_cmdlist()
881 atomic_set_release(&cmdq->owner_prod, prod); in arm_smmu_cmdq_issue_cmdlist()
889 dev_err_ratelimited(smmu->dev, in arm_smmu_cmdq_issue_cmdlist()
892 readl_relaxed(cmdq->q.prod_reg), in arm_smmu_cmdq_issue_cmdlist()
893 readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_issue_cmdlist()
898 * reader, in which case we can safely update cmdq->q.llq.cons in arm_smmu_cmdq_issue_cmdlist()
901 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
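The long comment above (a dma_wmb() before publishing, a control dependency on CMD_SYNC completion, totally ordered insertion) boils down to this: each CPU reserves its slots with a single cmpxchg on the combined prod/cons word, the first CPU in a batch becomes the owner via a flag folded into prod, and only the owner later publishes prod to the hardware after the write barrier. A greatly simplified C11 sketch of just the reservation step; wrap handling, the valid bitmap, queue-full waiting and the MMIO write are all omitted, and every name is illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OWNER_FLAG	(1u << 31)

static _Atomic uint64_t llq_val;	/* prod in the high word, cons in the low word */

static uint64_t pack(uint32_t prod, uint32_t cons)
{
	return ((uint64_t)prod << 32) | cons;
}

/*
 * Reserve @n contiguous slots. Returns the first reserved index and sets
 * *owner if this caller became the batch owner (the previous prod did not
 * carry the OWNER flag yet).
 */
static uint32_t reserve_slots(uint32_t n, bool *owner)
{
	uint64_t old = atomic_load(&llq_val);
	uint32_t prod, cons, new_prod;

	do {
		prod = old >> 32;
		cons = (uint32_t)old;
		new_prod = ((prod & ~OWNER_FLAG) + n) | OWNER_FLAG;
	} while (!atomic_compare_exchange_weak(&llq_val, &old,
					       pack(new_prod, cons)));

	*owner = !(prod & OWNER_FLAG);
	return prod & ~OWNER_FLAG;
}

int main(void)
{
	bool owner;
	uint32_t idx = reserve_slots(4, &owner);

	printf("reserved 4 slots at %u, owner=%d\n", idx, owner);
	return 0;
}

In the driver the owner then waits for the previous owner (the owner_prod handover visible above), writes its commands, issues the barrier and stores the new prod register value on behalf of everyone in its batch.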
917 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in __arm_smmu_cmdq_issue_cmd()
918 ent->opcode); in __arm_smmu_cmdq_issue_cmd()
919 return -EINVAL; in __arm_smmu_cmdq_issue_cmd()
942 cmds->num = 0; in arm_smmu_cmdq_batch_init()
943 cmds->cmdq = arm_smmu_get_cmdq(smmu, ent); in arm_smmu_cmdq_batch_init()
950 bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd); in arm_smmu_cmdq_batch_add()
951 bool force_sync = (cmds->num == CMDQ_BATCH_ENTRIES - 1) && in arm_smmu_cmdq_batch_add()
952 (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC); in arm_smmu_cmdq_batch_add()
956 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_add()
957 cmds->num, true); in arm_smmu_cmdq_batch_add()
961 if (cmds->num == CMDQ_BATCH_ENTRIES) { in arm_smmu_cmdq_batch_add()
962 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_add()
963 cmds->num, false); in arm_smmu_cmdq_batch_add()
967 index = cmds->num * CMDQ_ENT_DWORDS; in arm_smmu_cmdq_batch_add()
968 if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) { in arm_smmu_cmdq_batch_add()
969 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", in arm_smmu_cmdq_batch_add()
970 cmd->opcode); in arm_smmu_cmdq_batch_add()
974 cmds->num++; in arm_smmu_cmdq_batch_add()
980 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds, in arm_smmu_cmdq_batch_submit()
981 cmds->num, true); in arm_smmu_cmdq_batch_submit()
989 int sid = master->streams[0].id; in arm_smmu_page_response()
991 if (WARN_ON(!master->stall_enabled)) in arm_smmu_page_response()
996 cmd.resume.stag = resp->grpid; in arm_smmu_page_response()
997 switch (resp->code) { in arm_smmu_page_response()
1009 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); in arm_smmu_page_response()
1022 .opcode = smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_asid()
1100 writer->ops->get_used(entry, cur_used); in arm_smmu_entry_qword_diff()
1101 writer->ops->get_used(target, target_used); in arm_smmu_entry_qword_diff()
1131 for (i = start; len != 0; len--, i++) { in entry_set()
1139 writer->ops->sync(writer); in entry_set()
1152 * determine which of three updates are required - disruptive, hitless or no
1156 * - Disrupting the entry (V=0)
1157 - Fill now unused qwords, except qword 0 which contains V
1158 * - Make qword 0 have the final value and valid (V=1) with a single 64
1186 unsigned int critical_qword_index = ffs(used_qword_diff) - 1; in arm_smmu_write_entry()
1206 entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1); in arm_smmu_write_entry()
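The comment above arm_smmu_write_entry() sketches the update policy: compare the current and target entries only in the bits the hardware can actually observe (reported by the get_used callbacks), and if at most one such qword changes the entry can be rewritten hitlessly; otherwise it must first be torn down via V=0. A self-contained approximation of that classification, where demo_get_used() is a stand-in for the driver's ops->get_used and the V-bit layout is invented for the example:

#include <stdint.h>
#include <stdio.h>

#define NUM_QWORDS	4
#define QWORD0_V	(1ULL << 0)	/* pretend "valid" bit for the demo */

enum update_kind { UPDATE_NONE, UPDATE_HITLESS, UPDATE_DISRUPTIVE };

typedef void (*get_used_fn)(const uint64_t *entry, uint64_t *used);

/* Demo policy: an invalid entry only has its V bit live; a valid one is
 * fully live in qwords 0 and 1. */
static void demo_get_used(const uint64_t *entry, uint64_t *used)
{
	used[0] = QWORD0_V;
	used[1] = used[2] = used[3] = 0;
	if (entry[0] & QWORD0_V) {
		used[0] = ~0ULL;
		used[1] = ~0ULL;
	}
}

static enum update_kind classify_update(const uint64_t *cur,
					const uint64_t *target,
					get_used_fn get_used)
{
	uint64_t cur_used[NUM_QWORDS], target_used[NUM_QWORDS];
	unsigned int i, changed = 0;

	get_used(cur, cur_used);
	get_used(target, target_used);

	for (i = 0; i < NUM_QWORDS; i++) {
		/* Value reachable by flipping only bits the HW ignores today */
		uint64_t unused_update = (cur[i] & cur_used[i]) |
					 (target[i] & ~cur_used[i]);

		/* Still leaves live bits wrong? Then this qword needs a
		 * visible change. */
		if ((unused_update & target_used[i]) !=
		    (target[i] & target_used[i]))
			changed++;
	}

	if (!changed)
		return UPDATE_NONE;		/* nothing the HW can observe */
	if (changed == 1)
		return UPDATE_HITLESS;		/* one 64-bit store suffices */
	return UPDATE_DISRUPTIVE;		/* tear down via V=0 first */
}

int main(void)
{
	uint64_t invalid[NUM_QWORDS] = { 0 };
	uint64_t valid_a[NUM_QWORDS] = { QWORD0_V | 0x10, 0xaaaa, 0, 0 };
	uint64_t valid_b[NUM_QWORDS] = { QWORD0_V | 0x20, 0xbbbb, 0, 0 };

	printf("invalid -> valid_a: %d\n",
	       classify_update(invalid, valid_a, demo_get_used));
	printf("valid_a -> valid_b: %d\n",
	       classify_update(valid_a, valid_b, demo_get_used));
	return 0;
}

classify_update() reports HITLESS for installing into an invalid entry (only qword 0 is live beforehand) and DISRUPTIVE for swapping between two live translations, matching the cases the comment lists.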
1225 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_sync_cd()
1235 for (i = 0; i < master->num_streams; i++) { in arm_smmu_sync_cd()
1236 cmd.cfgi.sid = master->streams[i].id; in arm_smmu_sync_cd()
1249 WRITE_ONCE(dst->l2ptr, cpu_to_le64(val)); in arm_smmu_write_cd_l1_desc()
1254 return le64_to_cpu(src->l2ptr) & CTXDESC_L1_DESC_L2PTR_MASK; in arm_smmu_cd_l1_get_desc()
1261 struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; in arm_smmu_get_cd_ptr()
1266 if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR) in arm_smmu_get_cd_ptr()
1267 return &cd_table->linear.table[ssid]; in arm_smmu_get_cd_ptr()
1269 l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)]; in arm_smmu_get_cd_ptr()
1272 return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)]; in arm_smmu_get_cd_ptr()
1278 struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; in arm_smmu_alloc_cd_ptr()
1279 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_alloc_cd_ptr()
1282 iommu_group_mutex_assert(master->dev); in arm_smmu_alloc_cd_ptr()
1289 if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) { in arm_smmu_alloc_cd_ptr()
1291 struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx]; in arm_smmu_alloc_cd_ptr()
1296 *l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr), in arm_smmu_alloc_cd_ptr()
1301 arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx], in arm_smmu_alloc_cd_ptr()
1342 arm_smmu_sync_cd(writer->master, cd_writer->ssid, true); in arm_smmu_cd_writer_sync_entry()
1354 bool target_valid = target->data[0] & cpu_to_le64(CTXDESC_CD_0_V); in arm_smmu_write_cd_entry()
1355 bool cur_valid = cdptr->data[0] & cpu_to_le64(CTXDESC_CD_0_V); in arm_smmu_write_cd_entry()
1366 master->cd_table.used_ssids--; in arm_smmu_write_cd_entry()
1368 master->cd_table.used_ssids++; in arm_smmu_write_cd_entry()
1371 arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data); in arm_smmu_write_cd_entry()
1378 struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; in arm_smmu_make_s1_cd()
1380 &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; in arm_smmu_make_s1_cd()
1381 typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = in arm_smmu_make_s1_cd()
1382 &pgtbl_cfg->arm_lpae_s1_cfg.tcr; in arm_smmu_make_s1_cd()
1386 target->data[0] = cpu_to_le64( in arm_smmu_make_s1_cd()
1387 FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | in arm_smmu_make_s1_cd()
1388 FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | in arm_smmu_make_s1_cd()
1389 FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | in arm_smmu_make_s1_cd()
1390 FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | in arm_smmu_make_s1_cd()
1391 FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | in arm_smmu_make_s1_cd()
1397 FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | in arm_smmu_make_s1_cd()
1399 (master->stall_enabled ? CTXDESC_CD_0_S : 0) | in arm_smmu_make_s1_cd()
1403 FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) in arm_smmu_make_s1_cd()
1407 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_HD) in arm_smmu_make_s1_cd()
1408 target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA | in arm_smmu_make_s1_cd()
1411 target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr & in arm_smmu_make_s1_cd()
1413 target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair); in arm_smmu_make_s1_cd()
1422 if (!arm_smmu_cdtab_allocated(&master->cd_table)) in arm_smmu_clear_cd()
1435 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_alloc_cd_tables()
1436 struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; in arm_smmu_alloc_cd_tables()
1438 cd_table->s1cdmax = master->ssid_bits; in arm_smmu_alloc_cd_tables()
1439 max_contexts = 1 << cd_table->s1cdmax; in arm_smmu_alloc_cd_tables()
1441 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) || in arm_smmu_alloc_cd_tables()
1443 cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR; in arm_smmu_alloc_cd_tables()
1444 cd_table->linear.num_ents = max_contexts; in arm_smmu_alloc_cd_tables()
1447 cd_table->linear.table = dma_alloc_coherent(smmu->dev, l1size, in arm_smmu_alloc_cd_tables()
1448 &cd_table->cdtab_dma, in arm_smmu_alloc_cd_tables()
1450 if (!cd_table->linear.table) in arm_smmu_alloc_cd_tables()
1451 return -ENOMEM; in arm_smmu_alloc_cd_tables()
1453 cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2; in arm_smmu_alloc_cd_tables()
1454 cd_table->l2.num_l1_ents = in arm_smmu_alloc_cd_tables()
1457 cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents, in arm_smmu_alloc_cd_tables()
1458 sizeof(*cd_table->l2.l2ptrs), in arm_smmu_alloc_cd_tables()
1460 if (!cd_table->l2.l2ptrs) in arm_smmu_alloc_cd_tables()
1461 return -ENOMEM; in arm_smmu_alloc_cd_tables()
1463 l1size = cd_table->l2.num_l1_ents * sizeof(struct arm_smmu_cdtab_l1); in arm_smmu_alloc_cd_tables()
1464 cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size, in arm_smmu_alloc_cd_tables()
1465 &cd_table->cdtab_dma, in arm_smmu_alloc_cd_tables()
1467 if (!cd_table->l2.l1tab) { in arm_smmu_alloc_cd_tables()
1468 ret = -ENOMEM; in arm_smmu_alloc_cd_tables()
1475 kfree(cd_table->l2.l2ptrs); in arm_smmu_alloc_cd_tables()
1476 cd_table->l2.l2ptrs = NULL; in arm_smmu_alloc_cd_tables()
1483 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_free_cd_tables()
1484 struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; in arm_smmu_free_cd_tables()
1486 if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) { in arm_smmu_free_cd_tables()
1487 for (i = 0; i < cd_table->l2.num_l1_ents; i++) { in arm_smmu_free_cd_tables()
1488 if (!cd_table->l2.l2ptrs[i]) in arm_smmu_free_cd_tables()
1491 dma_free_coherent(smmu->dev, in arm_smmu_free_cd_tables()
1492 sizeof(*cd_table->l2.l2ptrs[i]), in arm_smmu_free_cd_tables()
1493 cd_table->l2.l2ptrs[i], in arm_smmu_free_cd_tables()
1494 arm_smmu_cd_l1_get_desc(&cd_table->l2.l1tab[i])); in arm_smmu_free_cd_tables()
1496 kfree(cd_table->l2.l2ptrs); in arm_smmu_free_cd_tables()
1498 dma_free_coherent(smmu->dev, in arm_smmu_free_cd_tables()
1499 cd_table->l2.num_l1_ents * in arm_smmu_free_cd_tables()
1501 cd_table->l2.l1tab, cd_table->cdtab_dma); in arm_smmu_free_cd_tables()
1503 dma_free_coherent(smmu->dev, in arm_smmu_free_cd_tables()
1504 cd_table->linear.num_ents * in arm_smmu_free_cd_tables()
1506 cd_table->linear.table, cd_table->cdtab_dma); in arm_smmu_free_cd_tables()
1520 WRITE_ONCE(dst->l2ptr, cpu_to_le64(val)); in arm_smmu_write_strtab_l1_desc()
1535 .sid = ste_writer->sid, in arm_smmu_ste_writer_sync_entry()
1540 arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd); in arm_smmu_ste_writer_sync_entry()
1552 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_write_ste()
1561 arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data); in arm_smmu_write_ste()
1564 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) { in arm_smmu_write_ste()
1578 target->data[0] = cpu_to_le64( in arm_smmu_make_abort_ste()
1589 target->data[0] = cpu_to_le64( in arm_smmu_make_bypass_ste()
1593 if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) in arm_smmu_make_bypass_ste()
1594 target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, in arm_smmu_make_bypass_ste()
1604 struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; in arm_smmu_make_cdtable_ste()
1605 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_make_cdtable_ste()
1608 target->data[0] = cpu_to_le64( in arm_smmu_make_cdtable_ste()
1611 FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) | in arm_smmu_make_cdtable_ste()
1612 (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | in arm_smmu_make_cdtable_ste()
1613 FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); in arm_smmu_make_cdtable_ste()
1615 target->data[1] = cpu_to_le64( in arm_smmu_make_cdtable_ste()
1620 ((smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_make_cdtable_ste()
1621 !master->stall_enabled) ? in arm_smmu_make_cdtable_ste()
1627 if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) && in arm_smmu_make_cdtable_ste()
1629 target->data[1] |= cpu_to_le64(FIELD_PREP( in arm_smmu_make_cdtable_ste()
1632 if (smmu->features & ARM_SMMU_FEAT_E2H) { in arm_smmu_make_cdtable_ste()
1636 * properly matched. This means either S/NS-EL2-E2H (hypervisor) in arm_smmu_make_cdtable_ste()
1637 * or NS-EL1 (guest). Since an SVA domain can be installed in a in arm_smmu_make_cdtable_ste()
1641 target->data[1] |= cpu_to_le64( in arm_smmu_make_cdtable_ste()
1644 target->data[1] |= cpu_to_le64( in arm_smmu_make_cdtable_ste()
1648 * VMID 0 is reserved for stage-2 bypass EL1 STEs, see in arm_smmu_make_cdtable_ste()
1651 target->data[2] = in arm_smmu_make_cdtable_ste()
1662 struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; in arm_smmu_make_s2_domain_ste()
1664 &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; in arm_smmu_make_s2_domain_ste()
1665 typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr = in arm_smmu_make_s2_domain_ste()
1666 &pgtbl_cfg->arm_lpae_s2_cfg.vtcr; in arm_smmu_make_s2_domain_ste()
1668 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_make_s2_domain_ste()
1671 target->data[0] = cpu_to_le64( in arm_smmu_make_s2_domain_ste()
1675 target->data[1] = cpu_to_le64( in arm_smmu_make_s2_domain_ste()
1679 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_S2FWB) in arm_smmu_make_s2_domain_ste()
1680 target->data[1] |= cpu_to_le64(STRTAB_STE_1_S2FWB); in arm_smmu_make_s2_domain_ste()
1681 if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) in arm_smmu_make_s2_domain_ste()
1682 target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, in arm_smmu_make_s2_domain_ste()
1685 vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) | in arm_smmu_make_s2_domain_ste()
1686 FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) | in arm_smmu_make_s2_domain_ste()
1687 FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) | in arm_smmu_make_s2_domain_ste()
1688 FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) | in arm_smmu_make_s2_domain_ste()
1689 FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) | in arm_smmu_make_s2_domain_ste()
1690 FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) | in arm_smmu_make_s2_domain_ste()
1691 FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps); in arm_smmu_make_s2_domain_ste()
1692 target->data[2] = cpu_to_le64( in arm_smmu_make_s2_domain_ste()
1693 FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | in arm_smmu_make_s2_domain_ste()
1700 (master->stall_enabled ? STRTAB_STE_2_S2S : 0) | in arm_smmu_make_s2_domain_ste()
1703 target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr & in arm_smmu_make_s2_domain_ste()
1726 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_l2_strtab()
1729 l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)]; in arm_smmu_init_l2_strtab()
1733 *l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table), in arm_smmu_init_l2_strtab()
1736 dev_err(smmu->dev, in arm_smmu_init_l2_strtab()
1739 return -ENOMEM; in arm_smmu_init_l2_strtab()
1742 arm_smmu_init_initial_stes((*l2table)->stes, in arm_smmu_init_l2_strtab()
1743 ARRAY_SIZE((*l2table)->stes)); in arm_smmu_init_l2_strtab()
1744 arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)], in arm_smmu_init_l2_strtab()
1755 if (*sid_lhs < stream_rhs->id) in arm_smmu_streams_cmp_key()
1756 return -1; in arm_smmu_streams_cmp_key()
1757 if (*sid_lhs > stream_rhs->id) in arm_smmu_streams_cmp_key()
1766 &rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs); in arm_smmu_streams_cmp_node()
1774 lockdep_assert_held(&smmu->streams_mutex); in arm_smmu_find_master()
1776 node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key); in arm_smmu_find_master()
1779 return rb_entry(node, struct arm_smmu_stream, node)->master; in arm_smmu_find_master()
1788 event->id = FIELD_GET(EVTQ_0_ID, raw[0]); in arm_smmu_decode_event()
1789 event->sid = FIELD_GET(EVTQ_0_SID, raw[0]); in arm_smmu_decode_event()
1790 event->ssv = FIELD_GET(EVTQ_0_SSV, raw[0]); in arm_smmu_decode_event()
1791 event->ssid = event->ssv ? FIELD_GET(EVTQ_0_SSID, raw[0]) : IOMMU_NO_PASID; in arm_smmu_decode_event()
1792 event->privileged = FIELD_GET(EVTQ_1_PnU, raw[1]); in arm_smmu_decode_event()
1793 event->instruction = FIELD_GET(EVTQ_1_InD, raw[1]); in arm_smmu_decode_event()
1794 event->s2 = FIELD_GET(EVTQ_1_S2, raw[1]); in arm_smmu_decode_event()
1795 event->read = FIELD_GET(EVTQ_1_RnW, raw[1]); in arm_smmu_decode_event()
1796 event->stag = FIELD_GET(EVTQ_1_STAG, raw[1]); in arm_smmu_decode_event()
1797 event->stall = FIELD_GET(EVTQ_1_STALL, raw[1]); in arm_smmu_decode_event()
1798 event->class = FIELD_GET(EVTQ_1_CLASS, raw[1]); in arm_smmu_decode_event()
1799 event->iova = FIELD_GET(EVTQ_2_ADDR, raw[2]); in arm_smmu_decode_event()
1800 event->ipa = raw[3] & EVTQ_3_IPA; in arm_smmu_decode_event()
1801 event->fetch_addr = raw[3] & EVTQ_3_FETCH_ADDR; in arm_smmu_decode_event()
1802 event->ttrnw = FIELD_GET(EVTQ_1_TT_READ, raw[1]); in arm_smmu_decode_event()
1803 event->class_tt = false; in arm_smmu_decode_event()
1804 event->dev = NULL; in arm_smmu_decode_event()
1806 if (event->id == EVT_ID_PERMISSION_FAULT) in arm_smmu_decode_event()
1807 event->class_tt = (event->class == EVTQ_1_CLASS_TT); in arm_smmu_decode_event()
1809 mutex_lock(&smmu->streams_mutex); in arm_smmu_decode_event()
1810 master = arm_smmu_find_master(smmu, event->sid); in arm_smmu_decode_event()
1812 event->dev = get_device(master->dev); in arm_smmu_decode_event()
1813 mutex_unlock(&smmu->streams_mutex); in arm_smmu_decode_event()
1825 switch (event->id) { in arm_smmu_handle_event()
1832 return -EOPNOTSUPP; in arm_smmu_handle_event()
1835 if (!event->stall) in arm_smmu_handle_event()
1836 return -EOPNOTSUPP; in arm_smmu_handle_event()
1838 if (event->read) in arm_smmu_handle_event()
1843 if (event->instruction) in arm_smmu_handle_event()
1846 if (event->privileged) in arm_smmu_handle_event()
1849 flt->type = IOMMU_FAULT_PAGE_REQ; in arm_smmu_handle_event()
1850 flt->prm = (struct iommu_fault_page_request) { in arm_smmu_handle_event()
1852 .grpid = event->stag, in arm_smmu_handle_event()
1854 .addr = event->iova, in arm_smmu_handle_event()
1857 if (event->ssv) { in arm_smmu_handle_event()
1858 flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; in arm_smmu_handle_event()
1859 flt->prm.pasid = event->ssid; in arm_smmu_handle_event()
1862 mutex_lock(&smmu->streams_mutex); in arm_smmu_handle_event()
1863 master = arm_smmu_find_master(smmu, event->sid); in arm_smmu_handle_event()
1865 ret = -EINVAL; in arm_smmu_handle_event()
1869 ret = iommu_report_device_fault(master->dev, &fault_evt); in arm_smmu_handle_event()
1871 mutex_unlock(&smmu->streams_mutex); in arm_smmu_handle_event()
1880 dev_err(smmu->dev, "event 0x%02x received:\n", event->id); in arm_smmu_dump_raw_event()
1883 dev_err(smmu->dev, "\t0x%016llx\n", raw[i]); in arm_smmu_dump_raw_event()
1886 #define ARM_SMMU_EVT_KNOWN(e) ((e)->id < ARRAY_SIZE(event_str) && event_str[(e)->id])
1887 #define ARM_SMMU_LOG_EVT_STR(e) ARM_SMMU_EVT_KNOWN(e) ? event_str[(e)->id] : "UNKNOWN"
1888 #define ARM_SMMU_LOG_CLIENT(e) (e)->dev ? dev_name((e)->dev) : "(unassigned sid)"
1899 switch (evt->id) { in arm_smmu_dump_event()
1904 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x iova: %#llx ipa: %#llx", in arm_smmu_dump_event()
1906 evt->sid, evt->ssid, evt->iova, evt->ipa); in arm_smmu_dump_event()
1908 dev_err(smmu->dev, "%s %s %s %s \"%s\"%s%s stag: %#x", in arm_smmu_dump_event()
1909 evt->privileged ? "priv" : "unpriv", in arm_smmu_dump_event()
1910 evt->instruction ? "inst" : "data", in arm_smmu_dump_event()
1911 str_read_write(evt->read), in arm_smmu_dump_event()
1912 evt->s2 ? "s2" : "s1", event_class_str[evt->class], in arm_smmu_dump_event()
1913 evt->class_tt ? (evt->ttrnw ? " ttd_read" : " ttd_write") : "", in arm_smmu_dump_event()
1914 evt->stall ? " stall" : "", evt->stag); in arm_smmu_dump_event()
1921 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x fetch_addr: %#llx", in arm_smmu_dump_event()
1923 evt->sid, evt->ssid, evt->fetch_addr); in arm_smmu_dump_event()
1928 dev_err(smmu->dev, "event: %s client: %s sid: %#x ssid: %#x", in arm_smmu_dump_event()
1930 evt->sid, evt->ssid); in arm_smmu_dump_event()
1939 struct arm_smmu_queue *q = &smmu->evtq.q; in arm_smmu_evtq_thread()
1940 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_evtq_thread()
1958 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_evtq_thread()
1959 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); in arm_smmu_evtq_thread()
1979 dev_info(smmu->dev, "unexpected PRI request received:\n"); in arm_smmu_handle_ppr()
1980 dev_info(smmu->dev, in arm_smmu_handle_ppr()
2008 struct arm_smmu_queue *q = &smmu->priq.q; in arm_smmu_priq_thread()
2009 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_priq_thread()
2016 if (queue_sync_prod_in(q) == -EOVERFLOW) in arm_smmu_priq_thread()
2017 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); in arm_smmu_priq_thread()
2032 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR); in arm_smmu_gerror_handler()
2033 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
2039 dev_warn(smmu->dev, in arm_smmu_gerror_handler()
2044 dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); in arm_smmu_gerror_handler()
2049 dev_warn(smmu->dev, "GERROR MSI write aborted\n"); in arm_smmu_gerror_handler()
2052 dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); in arm_smmu_gerror_handler()
2055 dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); in arm_smmu_gerror_handler()
2058 dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); in arm_smmu_gerror_handler()
2061 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
2064 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); in arm_smmu_gerror_handler()
2069 writel(gerror, smmu->base + ARM_SMMU_GERRORN); in arm_smmu_gerror_handler()
2078 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_combined_irq_thread()
2096 /* ATC invalidates are always on 4096-bytes pages */ in arm_smmu_atc_inv_to_cmd()
2101 * ATS and PASID: in arm_smmu_atc_inv_to_cmd()
2108 * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID in arm_smmu_atc_inv_to_cmd()
2111 * This has the unpleasant side-effect of invalidating all PASID-tagged in arm_smmu_atc_inv_to_cmd()
2121 cmd->atc.size = ATC_INV_SIZE_ALL; in arm_smmu_atc_inv_to_cmd()
2126 page_end = (iova + size - 1) >> inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
2129 * In an ATS Invalidate Request, the address must be aligned on the in arm_smmu_atc_inv_to_cmd()
2131 * thus have to choose between grossly over-invalidating the region, or in arm_smmu_atc_inv_to_cmd()
2149 span_mask = (1ULL << log2_span) - 1; in arm_smmu_atc_inv_to_cmd()
2153 cmd->atc.addr = page_start << inval_grain_shift; in arm_smmu_atc_inv_to_cmd()
2154 cmd->atc.size = log2_span; in arm_smmu_atc_inv_to_cmd()
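As the comment explains, an ATC invalidation must describe a naturally aligned power-of-two range of 4KiB pages, so the driver picks the smallest such span covering [iova, iova + size) even when that grossly over-invalidates. A small standalone sketch of that computation (fls64_compat() is a portable stand-in for fls_long()):

#include <stdint.h>
#include <stdio.h>

#define ATC_GRAIN_SHIFT	12	/* ATC invalidations operate on 4KiB pages */

/* Index of the highest set bit, counting from 1, or 0 for a zero argument. */
static unsigned int fls64_compat(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

/*
 * XOR-ing the first and last page numbers gives the highest bit in which
 * they differ; any aligned span of 2^log2_span pages containing both must
 * keep every bit above that position, so mask the low bits off page_start.
 */
static void atc_inv_span(uint64_t iova, uint64_t size,
			 uint64_t *addr, unsigned int *log2_span)
{
	uint64_t page_start = iova >> ATC_GRAIN_SHIFT;
	uint64_t page_end = (iova + size - 1) >> ATC_GRAIN_SHIFT;
	uint64_t span_mask;

	*log2_span = fls64_compat(page_start ^ page_end);
	span_mask = (1ULL << *log2_span) - 1;
	*addr = (page_start & ~span_mask) << ATC_GRAIN_SHIFT;
}

int main(void)
{
	uint64_t addr;
	unsigned int log2_span;

	/* 8KiB straddling a 2MiB boundary forces a large span. */
	atc_inv_span(0x1ff000, 0x2000, &addr, &log2_span);
	printf("addr=0x%llx span=2^%u pages\n",
	       (unsigned long long)addr, log2_span);
	return 0;
}

For this example the result is a 2^10-page (4MiB) span starting at address 0, which is exactly the over-invalidation trade-off the comment describes.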
2166 arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd); in arm_smmu_atc_inv_master()
2167 for (i = 0; i < master->num_streams; i++) { in arm_smmu_atc_inv_master()
2168 cmd.atc.sid = master->streams[i].id; in arm_smmu_atc_inv_master()
2169 arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd); in arm_smmu_atc_inv_master()
2172 return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); in arm_smmu_atc_inv_master()
2186 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
2200 * ATS was enabled at the PCI device before completion of the TLBI. in arm_smmu_atc_inv_domain()
2203 if (!atomic_read(&smmu_domain->nr_ats_masters)) in arm_smmu_atc_inv_domain()
2206 arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
2208 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
2209 list_for_each_entry(master_domain, &smmu_domain->devices, in arm_smmu_atc_inv_domain()
2211 struct arm_smmu_master *master = master_domain->master; in arm_smmu_atc_inv_domain()
2213 if (!master->ats_enabled) in arm_smmu_atc_inv_domain()
2216 if (master_domain->nested_ats_flush) { in arm_smmu_atc_inv_domain()
2223 arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, in arm_smmu_atc_inv_domain()
2227 for (i = 0; i < master->num_streams; i++) { in arm_smmu_atc_inv_domain()
2228 cmd.atc.sid = master->streams[i].id; in arm_smmu_atc_inv_domain()
2229 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
2232 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
2234 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
2241 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context()
2245 * NOTE: when io-pgtable is in non-strict mode, we may get here with in arm_smmu_tlb_inv_context()
2251 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_context()
2252 arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid); in arm_smmu_tlb_inv_context()
2255 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_context()
2266 struct arm_smmu_device *smmu = smmu_domain->smmu; in __arm_smmu_tlb_inv_range()
2274 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
2276 tg = __ffs(smmu_domain->domain.pgsize_bitmap); in __arm_smmu_tlb_inv_range()
2281 cmd->tlbi.tg = (tg - 10) / 2; in __arm_smmu_tlb_inv_range()
2284 * Determine what level the granule is at. For non-leaf, both in __arm_smmu_tlb_inv_range()
2285 * io-pgtable and SVA pass a nominal last-level granule because in __arm_smmu_tlb_inv_range()
2291 if (cmd->tlbi.leaf) in __arm_smmu_tlb_inv_range()
2292 cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in __arm_smmu_tlb_inv_range()
2300 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { in __arm_smmu_tlb_inv_range()
2312 cmd->tlbi.scale = scale; in __arm_smmu_tlb_inv_range()
2316 cmd->tlbi.num = num - 1; in __arm_smmu_tlb_inv_range()
2322 num_pages -= num << scale; in __arm_smmu_tlb_inv_range()
2325 cmd->tlbi.addr = iova; in __arm_smmu_tlb_inv_range()
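When ARM_SMMU_FEAT_RANGE_INV is present, the code above carves the remaining page count into chunks of num * 2^scale leaf pages per TLBI: scale comes from the lowest set bit of the count, num from the next five bits (the NUM field), while tg encodes the leaf page size as (tg - 10) / 2 and, for leaf invalidations, ttl the level of the granule. A standalone sketch of just the scale/num carving, assuming size is a whole number of 2^tg pages; the names and printf are illustrative:

#include <stdint.h>
#include <stdio.h>

#define RANGE_NUM_MAX	0x1f	/* NUM is a 5-bit field in the TLBI command */

static void emit_range_tlbis(uint64_t iova, uint64_t size, unsigned int tg)
{
	uint64_t num_pages = size >> tg;	/* size must be a multiple of 2^tg */
	uint64_t end = iova + size;

	while (iova < end) {
		/* Power-of-two multiple of pages in the next chunk */
		unsigned int scale = __builtin_ctzll(num_pages);
		/* How many chunks of 2^scale pages fit in five bits */
		uint64_t num = (num_pages >> scale) & RANGE_NUM_MAX;
		uint64_t inv_range = num << (scale + tg);

		printf("TLBI range: addr=0x%llx scale=%u num=%llu (%llu pages)\n",
		       (unsigned long long)iova, scale,
		       (unsigned long long)(num - 1),
		       (unsigned long long)(num << scale));

		num_pages -= num << scale;	/* clear the bits just covered */
		iova += inv_range;
	}
}

int main(void)
{
	/* 545 4KiB pages: one single-page command, then 17 chunks of 32 pages. */
	emit_range_tlbis(0x40000000, 545ULL << 12, 12);
	return 0;
}

For 545 4KiB pages this emits two commands, one covering a single page and one covering the remaining 544 pages.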
2342 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_range_domain()
2343 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_domain()
2345 cmd.tlbi.asid = smmu_domain->cd.asid; in arm_smmu_tlb_inv_range_domain()
2348 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_range_domain()
2352 if (smmu_domain->nest_parent) { in arm_smmu_tlb_inv_range_domain()
2358 arm_smmu_cmdq_issue_cmd_with_sync(smmu_domain->smmu, &cmd); in arm_smmu_tlb_inv_range_domain()
2362 * Unfortunately, this can't be leaf-only since we may have in arm_smmu_tlb_inv_range_domain()
2373 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_asid()
2389 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_tlb_inv_page_nosync()
2410 return (smmu->features & features) == features; in arm_smmu_dbm_capable()
2421 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_capable()
2428 return arm_smmu_dbm_capable(master->smmu); in arm_smmu_capable()
2441 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_enforce_cache_coherency()
2442 list_for_each_entry(master_domain, &smmu_domain->devices, in arm_smmu_enforce_cache_coherency()
2444 if (!arm_smmu_master_canwbs(master_domain->master)) { in arm_smmu_enforce_cache_coherency()
2449 smmu_domain->enforce_cache_coherency = ret; in arm_smmu_enforce_cache_coherency()
2450 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_enforce_cache_coherency()
2460 return ERR_PTR(-ENOMEM); in arm_smmu_domain_alloc()
2462 INIT_LIST_HEAD(&smmu_domain->devices); in arm_smmu_domain_alloc()
2463 spin_lock_init(&smmu_domain->devices_lock); in arm_smmu_domain_alloc()
2471 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free_paging()
2473 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_domain_free_paging()
2476 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_domain_free_paging()
2479 xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); in arm_smmu_domain_free_paging()
2482 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_free_paging()
2483 if (cfg->vmid) in arm_smmu_domain_free_paging()
2484 ida_free(&smmu->vmid_map, cfg->vmid); in arm_smmu_domain_free_paging()
2495 struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; in arm_smmu_domain_finalise_s1()
2500 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_domain_finalise_s1()
2501 cd->asid = (u16)asid; in arm_smmu_domain_finalise_s1()
2510 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_finalise_s2()
2512 /* Reserve VMID 0 for stage-2 bypass STEs */ in arm_smmu_domain_finalise_s2()
2513 vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1, in arm_smmu_domain_finalise_s2()
2518 cfg->vmid = (u16)vmid; in arm_smmu_domain_finalise_s2()
2534 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_domain_finalise()
2535 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, in arm_smmu_domain_finalise()
2537 .iommu_dev = smmu->dev, in arm_smmu_domain_finalise()
2540 switch (smmu_domain->stage) { in arm_smmu_domain_finalise()
2542 unsigned long ias = (smmu->features & in arm_smmu_domain_finalise()
2546 pgtbl_cfg.oas = smmu->ias; in arm_smmu_domain_finalise()
2555 return -EOPNOTSUPP; in arm_smmu_domain_finalise()
2556 pgtbl_cfg.ias = smmu->ias; in arm_smmu_domain_finalise()
2557 pgtbl_cfg.oas = smmu->oas; in arm_smmu_domain_finalise()
2560 if ((smmu->features & ARM_SMMU_FEAT_S2FWB) && in arm_smmu_domain_finalise()
2565 return -EINVAL; in arm_smmu_domain_finalise()
2570 return -ENOMEM; in arm_smmu_domain_finalise()
2572 smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_domain_finalise()
2573 smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; in arm_smmu_domain_finalise()
2574 smmu_domain->domain.geometry.force_aperture = true; in arm_smmu_domain_finalise()
2575 if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_domain_finalise()
2576 smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops; in arm_smmu_domain_finalise()
2584 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_domain_finalise()
2585 smmu_domain->smmu = smmu; in arm_smmu_domain_finalise()
2592 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_get_step_for_sid()
2594 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_get_step_for_sid()
2595 /* Two-level walk */ in arm_smmu_get_step_for_sid()
2596 return &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)] in arm_smmu_get_step_for_sid()
2597 ->stes[arm_smmu_strtab_l2_idx(sid)]; in arm_smmu_get_step_for_sid()
2600 return &cfg->linear.table[sid]; in arm_smmu_get_step_for_sid()
2608 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_install_ste_for_dev()
2610 master->cd_table.in_ste = in arm_smmu_install_ste_for_dev()
2611 FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(target->data[0])) == in arm_smmu_install_ste_for_dev()
2613 master->ste_ats_enabled = in arm_smmu_install_ste_for_dev()
2614 FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(target->data[1])) == in arm_smmu_install_ste_for_dev()
2617 for (i = 0; i < master->num_streams; ++i) { in arm_smmu_install_ste_for_dev()
2618 u32 sid = master->streams[i].id; in arm_smmu_install_ste_for_dev()
2624 if (master->streams[j].id == sid) in arm_smmu_install_ste_for_dev()
2635 struct device *dev = master->dev; in arm_smmu_ats_supported()
2636 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_ats_supported()
2639 if (!(smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_ats_supported()
2642 if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS)) in arm_smmu_ats_supported()
2652 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_enable_ats()
2654 /* Smallest Translation Unit: log2 of the smallest supported granule */ in arm_smmu_enable_ats()
2655 stu = __ffs(smmu->pgsize_bitmap); in arm_smmu_enable_ats()
2656 pdev = to_pci_dev(master->dev); in arm_smmu_enable_ats()
2663 dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); in arm_smmu_enable_ats()
2673 if (!dev_is_pci(master->dev)) in arm_smmu_enable_pasid()
2674 return -ENODEV; in arm_smmu_enable_pasid()
2676 pdev = to_pci_dev(master->dev); in arm_smmu_enable_pasid()
2688 dev_err(&pdev->dev, "Failed to enable PASID\n"); in arm_smmu_enable_pasid()
2692 master->ssid_bits = min_t(u8, ilog2(num_pasids), in arm_smmu_enable_pasid()
2693 master->smmu->ssid_bits); in arm_smmu_enable_pasid()
2701 if (!dev_is_pci(master->dev)) in arm_smmu_disable_pasid()
2704 pdev = to_pci_dev(master->dev); in arm_smmu_disable_pasid()
2706 if (!pdev->pasid_enabled) in arm_smmu_disable_pasid()
2709 master->ssid_bits = 0; in arm_smmu_disable_pasid()
2720 lockdep_assert_held(&smmu_domain->devices_lock); in arm_smmu_find_master_domain()
2722 list_for_each_entry(master_domain, &smmu_domain->devices, in arm_smmu_find_master_domain()
2724 if (master_domain->master == master && in arm_smmu_find_master_domain()
2725 master_domain->ssid == ssid && in arm_smmu_find_master_domain()
2726 master_domain->nested_ats_flush == nested_ats_flush) in arm_smmu_find_master_domain()
2733 * If the domain uses the smmu_domain->devices list return the arm_smmu_domain
2743 if ((domain->type & __IOMMU_DOMAIN_PAGING) || in to_smmu_domain_devices()
2744 domain->type == IOMMU_DOMAIN_SVA) in to_smmu_domain_devices()
2746 if (domain->type == IOMMU_DOMAIN_NESTED) in to_smmu_domain_devices()
2747 return to_smmu_nested_domain(domain)->vsmmu->s2_parent; in to_smmu_domain_devices()
2763 if (domain->type == IOMMU_DOMAIN_NESTED) in arm_smmu_remove_master_domain()
2764 nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats; in arm_smmu_remove_master_domain()
2766 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_remove_master_domain()
2770 list_del(&master_domain->devices_elm); in arm_smmu_remove_master_domain()
2772 if (master->ats_enabled) in arm_smmu_remove_master_domain()
2773 atomic_dec(&smmu_domain->nr_ats_masters); in arm_smmu_remove_master_domain()
2775 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_remove_master_domain()
2788 * If the device supports ATS then this determines if EATS should be enabled
2791 * The change of the EATS in the STE and the PCI ATS config space is managed by
2792 * this sequence to be in the right order so that if PCI ATS is enabled then
2795 * new_domain can be a non-paging domain. In this case ATS will not be enabled,
2801 struct arm_smmu_master *master = state->master; in arm_smmu_attach_prepare()
2814 if (smmu_domain || state->cd_needs_ats) { in arm_smmu_attach_prepare()
2816 * The SMMU does not support enabling ATS with bypass/abort. in arm_smmu_attach_prepare()
2817 * When the STE is in bypass (STE.Config[2:0] == 0b100), ATS in arm_smmu_attach_prepare()
2819 * as though ATS is disabled for the stream (STE.EATS == 0b00), in arm_smmu_attach_prepare()
2824 * then ATS will work in S1DSS bypass. See "13.6.4 Full ATS in arm_smmu_attach_prepare()
2827 * Disable ATS if we are going to create a normal 0b100 bypass in arm_smmu_attach_prepare()
2830 state->ats_enabled = !state->disable_ats && in arm_smmu_attach_prepare()
2837 return -ENOMEM; in arm_smmu_attach_prepare()
2838 master_domain->master = master; in arm_smmu_attach_prepare()
2839 master_domain->ssid = state->ssid; in arm_smmu_attach_prepare()
2840 if (new_domain->type == IOMMU_DOMAIN_NESTED) in arm_smmu_attach_prepare()
2841 master_domain->nested_ats_flush = in arm_smmu_attach_prepare()
2842 to_smmu_nested_domain(new_domain)->enable_ats; in arm_smmu_attach_prepare()
2847 * HW. This ensures that both domains will send ATS in arm_smmu_attach_prepare()
2851 * using ATS, but arm_smmu_share_asid() also uses this to change in arm_smmu_attach_prepare()
2852 * the ASID of a domain, unrelated to ATS. in arm_smmu_attach_prepare()
2854 * Notice if we are re-attaching the same domain then the list in arm_smmu_attach_prepare()
2858 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_attach_prepare()
2859 if (smmu_domain->enforce_cache_coherency && in arm_smmu_attach_prepare()
2861 spin_unlock_irqrestore(&smmu_domain->devices_lock, in arm_smmu_attach_prepare()
2864 return -EINVAL; in arm_smmu_attach_prepare()
2867 if (state->ats_enabled) in arm_smmu_attach_prepare()
2868 atomic_inc(&smmu_domain->nr_ats_masters); in arm_smmu_attach_prepare()
2869 list_add(&master_domain->devices_elm, &smmu_domain->devices); in arm_smmu_attach_prepare()
2870 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_attach_prepare()
2873 if (!state->ats_enabled && master->ats_enabled) { in arm_smmu_attach_prepare()
2874 pci_disable_ats(to_pci_dev(master->dev)); in arm_smmu_attach_prepare()
2877 * ATS should complete before the STE is configured to generate in arm_smmu_attach_prepare()
2888 * smmu_domain->devices list.
2892 struct arm_smmu_master *master = state->master; in arm_smmu_attach_commit()
2896 if (state->ats_enabled && !master->ats_enabled) { in arm_smmu_attach_commit()
2898 } else if (state->ats_enabled && master->ats_enabled) { in arm_smmu_attach_commit()
2904 arm_smmu_atc_inv_master(master, state->ssid); in arm_smmu_attach_commit()
2905 } else if (!state->ats_enabled && master->ats_enabled) { in arm_smmu_attach_commit()
2906 /* ATS is being switched off, invalidate the entire ATC */ in arm_smmu_attach_commit()
2909 master->ats_enabled = state->ats_enabled; in arm_smmu_attach_commit()
2911 arm_smmu_remove_master_domain(master, state->old_domain, state->ssid); in arm_smmu_attach_commit()
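The comments through arm_smmu_attach_prepare()/arm_smmu_attach_commit() describe an ordering contract more than a data structure: if ATS is going away it is disabled at the PCI device before the STE changes, the STE (EATS and Config) is rewritten in the middle, and only afterwards is ATS enabled or the ATC invalidated for whatever the old domain may have cached. A sketch of that sequencing only, with printf stand-ins for the real pci_disable_ats()/pci_enable_ats()/STE/ATC-invalidate steps:

#include <stdbool.h>
#include <stdio.h>

static void pci_ats_disable(void) { puts("disable PCI ATS (stop new translated traffic)"); }
static void install_ste(void)     { puts("write the new STE (EATS/Config updated)"); }
static void pci_ats_enable(void)  { puts("enable PCI ATS (ATC starts out empty)"); }
static void atc_invalidate(void)  { puts("invalidate the ATC for this stream"); }

static void attach_switch_domain(bool old_ats, bool new_ats)
{
	/* prepare: quiesce ATS before the STE stops accepting it */
	if (old_ats && !new_ats)
		pci_ats_disable();

	/* the STE itself is always rewritten in the middle */
	install_ste();

	/* commit: bring ATS up, or flush what the old domain cached */
	if (new_ats && !old_ats)
		pci_ats_enable();
	else if (new_ats && old_ats)
		atc_invalidate();	/* translation changed under ATS */
	else if (!new_ats && old_ats)
		atc_invalidate();	/* ATS switched off, drop the whole ATC */
}

int main(void)
{
	puts("-- switching an ATS-enabled master to a new domain --");
	attach_switch_domain(true, true);
	puts("-- detaching to a plain bypass STE --");
	attach_switch_domain(true, false);
	return 0;
}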
2929 return -ENOENT; in arm_smmu_attach_dev()
2932 smmu = master->smmu; in arm_smmu_attach_dev()
2934 if (smmu_domain->smmu != smmu) in arm_smmu_attach_dev()
2937 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_attach_dev()
2940 return -ENOMEM; in arm_smmu_attach_dev()
2941 } else if (arm_smmu_ssids_in_use(&master->cd_table)) in arm_smmu_attach_dev()
2942 return -EBUSY; in arm_smmu_attach_dev()
2947 * This allows the STE and the smmu_domain->devices list to in arm_smmu_attach_dev()
2958 switch (smmu_domain->stage) { in arm_smmu_attach_dev()
2989 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_s1_set_dev_pasid()
2992 if (smmu_domain->smmu != smmu) in arm_smmu_s1_set_dev_pasid()
2993 return -EINVAL; in arm_smmu_s1_set_dev_pasid()
2995 if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) in arm_smmu_s1_set_dev_pasid()
2996 return -EINVAL; in arm_smmu_s1_set_dev_pasid()
3014 if (master->cd_table.in_ste && master->ste_ats_enabled == ats_enabled) in arm_smmu_update_ste()
3017 if (sid_domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_update_ste()
3020 WARN_ON(sid_domain->type != IOMMU_DOMAIN_BLOCKED); in arm_smmu_update_ste()
3036 struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev); in arm_smmu_set_pasid()
3047 if (smmu_domain->smmu != master->smmu) in arm_smmu_set_pasid()
3048 return -EINVAL; in arm_smmu_set_pasid()
3050 if (!master->cd_table.in_ste && in arm_smmu_set_pasid()
3051 sid_domain->type != IOMMU_DOMAIN_IDENTITY && in arm_smmu_set_pasid()
3052 sid_domain->type != IOMMU_DOMAIN_BLOCKED) in arm_smmu_set_pasid()
3053 return -EINVAL; in arm_smmu_set_pasid()
3057 return -ENOMEM; in arm_smmu_set_pasid()
3060 ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain); in arm_smmu_set_pasid()
3068 cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID); in arm_smmu_set_pasid()
3069 cd->data[0] |= cpu_to_le64( in arm_smmu_set_pasid()
3070 FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid)); in arm_smmu_set_pasid()
3091 if (master->ats_enabled) in arm_smmu_blocking_set_dev_pasid()
3093 arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid); in arm_smmu_blocking_set_dev_pasid()
3098 * to a non-cd_table one. in arm_smmu_blocking_set_dev_pasid()
3100 if (!arm_smmu_ssids_in_use(&master->cd_table)) { in arm_smmu_blocking_set_dev_pasid()
3102 iommu_get_domain_for_dev(master->dev); in arm_smmu_blocking_set_dev_pasid()
3104 if (sid_domain->type == IOMMU_DOMAIN_IDENTITY || in arm_smmu_blocking_set_dev_pasid()
3105 sid_domain->type == IOMMU_DOMAIN_BLOCKED) in arm_smmu_blocking_set_dev_pasid()
3106 sid_domain->ops->attach_dev(sid_domain, dev); in arm_smmu_blocking_set_dev_pasid()
3133 if (arm_smmu_ssids_in_use(&master->cd_table)) { in arm_smmu_attach_dev_ste()
3135 * If a CD table has to be present then we need to run with ATS in arm_smmu_attach_dev_ste()
3136 * on because we have to assume a PASID is using ATS. For in arm_smmu_attach_dev_ste()
3138 * follows the explanation in "13.6.4 Full ATS skipping stage 1" in arm_smmu_attach_dev_ste()
3139 * and allows for ATS on the RID to work. in arm_smmu_attach_dev_ste()
3153 * arm_smmu_domain->devices to avoid races updating the same context in arm_smmu_attach_dev_ste()
3165 arm_smmu_make_bypass_ste(master->smmu, &ste); in arm_smmu_attach_dev_identity()
3205 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_domain_alloc_paging_flags()
3213 return ERR_PTR(-EOPNOTSUPP); in arm_smmu_domain_alloc_paging_flags()
3215 return ERR_PTR(-EOPNOTSUPP); in arm_smmu_domain_alloc_paging_flags()
3224 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_domain_alloc_paging_flags()
3225 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_alloc_paging_flags()
3227 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_alloc_paging_flags()
3230 if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) { in arm_smmu_domain_alloc_paging_flags()
3231 ret = -EOPNOTSUPP; in arm_smmu_domain_alloc_paging_flags()
3234 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_alloc_paging_flags()
3235 smmu_domain->nest_parent = true; in arm_smmu_domain_alloc_paging_flags()
3240 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) { in arm_smmu_domain_alloc_paging_flags()
3241 ret = -EOPNOTSUPP; in arm_smmu_domain_alloc_paging_flags()
3244 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_alloc_paging_flags()
3247 ret = -EOPNOTSUPP; in arm_smmu_domain_alloc_paging_flags()
3251 smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; in arm_smmu_domain_alloc_paging_flags()
3252 smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; in arm_smmu_domain_alloc_paging_flags()
3256 return &smmu_domain->domain; in arm_smmu_domain_alloc_paging_flags()
3267 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map_pages()
3270 return -ENODEV; in arm_smmu_map_pages()
3272 return ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); in arm_smmu_map_pages()
3280 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_unmap_pages()
3285 return ops->unmap_pages(ops, iova, pgsize, pgcount, gather); in arm_smmu_unmap_pages()
3292 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
3301 if (!gather->pgsize) in arm_smmu_iotlb_sync()
3304 arm_smmu_tlb_inv_range_domain(gather->start, in arm_smmu_iotlb_sync()
3305 gather->end - gather->start + 1, in arm_smmu_iotlb_sync()
3306 gather->pgsize, true, smmu_domain); in arm_smmu_iotlb_sync()
3312 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_iova_to_phys()
3317 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
3333 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_sid_in_range()
3334 return arm_smmu_strtab_l1_idx(sid) < smmu->strtab_cfg.l2.num_l1_ents; in arm_smmu_sid_in_range()
3335 return sid < smmu->strtab_cfg.linear.num_ents; in arm_smmu_sid_in_range()
3342 return -ERANGE; in arm_smmu_init_sid_strtab()
3345 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_sid_strtab()
3356 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); in arm_smmu_insert_master()
3358 master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams), in arm_smmu_insert_master()
3360 if (!master->streams) in arm_smmu_insert_master()
3361 return -ENOMEM; in arm_smmu_insert_master()
3362 master->num_streams = fwspec->num_ids; in arm_smmu_insert_master()
3364 mutex_lock(&smmu->streams_mutex); in arm_smmu_insert_master()
3365 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_insert_master()
3366 struct arm_smmu_stream *new_stream = &master->streams[i]; in arm_smmu_insert_master()
3367 u32 sid = fwspec->ids[i]; in arm_smmu_insert_master()
3369 new_stream->id = sid; in arm_smmu_insert_master()
3370 new_stream->master = master; in arm_smmu_insert_master()
3377 if (rb_find_add(&new_stream->node, &smmu->streams, in arm_smmu_insert_master()
3379 dev_warn(master->dev, "stream %u already in tree\n", in arm_smmu_insert_master()
3381 ret = -EINVAL; in arm_smmu_insert_master()
3387 for (i--; i >= 0; i--) in arm_smmu_insert_master()
3388 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_insert_master()
3389 kfree(master->streams); in arm_smmu_insert_master()
3391 mutex_unlock(&smmu->streams_mutex); in arm_smmu_insert_master()
3399 struct arm_smmu_device *smmu = master->smmu; in arm_smmu_remove_master()
3400 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev); in arm_smmu_remove_master()
3402 if (!smmu || !master->streams) in arm_smmu_remove_master()
3405 mutex_lock(&smmu->streams_mutex); in arm_smmu_remove_master()
3406 for (i = 0; i < fwspec->num_ids; i++) in arm_smmu_remove_master()
3407 rb_erase(&master->streams[i].node, &smmu->streams); in arm_smmu_remove_master()
3408 mutex_unlock(&smmu->streams_mutex); in arm_smmu_remove_master()
3410 kfree(master->streams); in arm_smmu_remove_master()
3421 return ERR_PTR(-EBUSY); in arm_smmu_probe_device()
3423 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
3425 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
3429 return ERR_PTR(-ENOMEM); in arm_smmu_probe_device()
3431 master->dev = dev; in arm_smmu_probe_device()
3432 master->smmu = smmu; in arm_smmu_probe_device()
3439 device_property_read_u32(dev, "pasid-num-bits", &master->ssid_bits); in arm_smmu_probe_device()
3440 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits); in arm_smmu_probe_device()
3443 * Note that PASID must be enabled before, and disabled after ATS: in arm_smmu_probe_device()
3444 * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register in arm_smmu_probe_device()
3452 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) in arm_smmu_probe_device()
3453 master->ssid_bits = min_t(u8, master->ssid_bits, in arm_smmu_probe_device()
3456 if ((smmu->features & ARM_SMMU_FEAT_STALLS && in arm_smmu_probe_device()
3457 device_property_read_bool(dev, "dma-can-stall")) || in arm_smmu_probe_device()
3458 smmu->features & ARM_SMMU_FEAT_STALL_FORCE) in arm_smmu_probe_device()
3459 master->stall_enabled = true; in arm_smmu_probe_device()
3462 unsigned int stu = __ffs(smmu->pgsize_bitmap); in arm_smmu_probe_device()
3467 return &smmu->iommu; in arm_smmu_probe_device()
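/*
 * Editorial sketch: two probe-time details worth spelling out.  The usable
 * PASID width is the minimum of what the device advertises ("pasid-num-bits")
 * and what the SMMU supports, clamped further when only linear CD tables are
 * available.  The ATS Smallest Translation Unit is derived from the smallest
 * supported page size: __ffs() of the page-size bitmap yields its log2 (12
 * for 4KiB), which is the page shift used when configuring PCI ATS.  The
 * helper below is a userspace stand-in for __ffs(), for illustration only.
 */
#include <stdint.h>

static unsigned int sketch_ffs_shift(unsigned long bitmap)
{
	unsigned int shift = 0;

	while (!(bitmap & 1ul)) {	/* assumes bitmap != 0 */
		bitmap >>= 1;
		shift++;
	}
	return shift;
}
/* e.g. sketch_ffs_shift(0x1000) == 12: a 4KiB minimum page size. */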
3479 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); in arm_smmu_release_device()
3482 if (dev->iommu->require_direct) in arm_smmu_release_device()
3489 if (arm_smmu_cdtab_allocated(&master->cd_table)) in arm_smmu_release_device()
3500 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_read_and_clear_dirty()
3502 return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); in arm_smmu_read_and_clear_dirty()
3521 * aliases, since the necessary ID-to-device lookup becomes rather in arm_smmu_device_group()
3522 * impractical given a potential sparse 32-bit stream ID space. in arm_smmu_device_group()
3535 return iommu_fwspec_add_ids(dev, args->args, 1); in arm_smmu_of_xlate()
3549 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
3560 return -ENODEV; in arm_smmu_dev_enable_feature()
3565 return -EINVAL; in arm_smmu_dev_enable_feature()
3566 if (master->iopf_enabled) in arm_smmu_dev_enable_feature()
3567 return -EBUSY; in arm_smmu_dev_enable_feature()
3568 master->iopf_enabled = true; in arm_smmu_dev_enable_feature()
3572 return -EINVAL; in arm_smmu_dev_enable_feature()
3574 return -EBUSY; in arm_smmu_dev_enable_feature()
3577 return -EINVAL; in arm_smmu_dev_enable_feature()
3587 return -EINVAL; in arm_smmu_dev_disable_feature()
3591 if (!master->iopf_enabled) in arm_smmu_dev_disable_feature()
3592 return -EINVAL; in arm_smmu_dev_disable_feature()
3593 if (master->sva_enabled) in arm_smmu_dev_disable_feature()
3594 return -EBUSY; in arm_smmu_dev_disable_feature()
3595 master->iopf_enabled = false; in arm_smmu_dev_disable_feature()
3599 return -EINVAL; in arm_smmu_dev_disable_feature()
3602 return -EINVAL; in arm_smmu_dev_disable_feature()
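/*
 * Note on the enable/disable fragments above: the return codes encode a
 * simple state machine.  Enabling a feature that is already enabled fails
 * with -EBUSY, disabling one that was never enabled fails with -EINVAL, and
 * IOPF cannot be torn down while SVA is still enabled on the master (the
 * -EBUSY check against master->sva_enabled).
 */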
3611 #define IS_HISI_PTT_DEVICE(pdev) ((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
3612 (pdev)->device == 0xa12e)
3644 .pgsize_bitmap = -1UL, /* Restricted during device attach */
3673 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; in arm_smmu_init_one_queue()
3674 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, in arm_smmu_init_one_queue()
3676 if (q->base || qsz < PAGE_SIZE) in arm_smmu_init_one_queue()
3679 q->llq.max_n_shift--; in arm_smmu_init_one_queue()
3682 if (!q->base) { in arm_smmu_init_one_queue()
3683 dev_err(smmu->dev, in arm_smmu_init_one_queue()
3686 return -ENOMEM; in arm_smmu_init_one_queue()
3689 if (!WARN_ON(q->base_dma & (qsz - 1))) { in arm_smmu_init_one_queue()
3690 dev_info(smmu->dev, "allocated %u entries for %s\n", in arm_smmu_init_one_queue()
3691 1 << q->llq.max_n_shift, name); in arm_smmu_init_one_queue()
3694 q->prod_reg = page + prod_off; in arm_smmu_init_one_queue()
3695 q->cons_reg = page + cons_off; in arm_smmu_init_one_queue()
3696 q->ent_dwords = dwords; in arm_smmu_init_one_queue()
3698 q->q_base = Q_BASE_RWA; in arm_smmu_init_one_queue()
3699 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; in arm_smmu_init_one_queue()
3700 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); in arm_smmu_init_one_queue()
3702 q->llq.prod = q->llq.cons = 0; in arm_smmu_init_one_queue()
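/*
 * Editorial sketch (userspace stand-in, illustrative names only): the
 * allocation above retries with half the entry count whenever a contiguous
 * coherent buffer cannot be obtained, giving up once the queue would fit in
 * a single page anyway.  The final max_n_shift is what gets encoded into
 * Q_BASE_LOG2SIZE, so hardware and software agree on the shrunken size.
 */
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096ul

static void *sketch_alloc_queue(unsigned int *max_n_shift, unsigned int dwords)
{
	void *base;
	unsigned long qsz;

	do {
		qsz = ((1ul << *max_n_shift) * dwords) << 3;	/* bytes */
		base = malloc(qsz);	/* stand-in for dmam_alloc_coherent() */
		if (base || qsz < SKETCH_PAGE_SIZE)
			break;
		(*max_n_shift)--;	/* halve the entry count and retry */
	} while (1);

	return base;
}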
3709 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
3711 atomic_set(&cmdq->owner_prod, 0); in arm_smmu_cmdq_init()
3712 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
3714 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, in arm_smmu_cmdq_init()
3716 if (!cmdq->valid_map) in arm_smmu_cmdq_init()
3717 return -ENOMEM; in arm_smmu_cmdq_init()
3727 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base, in arm_smmu_init_queues()
3733 ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq); in arm_smmu_init_queues()
3738 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1, in arm_smmu_init_queues()
3744 if ((smmu->features & ARM_SMMU_FEAT_SVA) && in arm_smmu_init_queues()
3745 (smmu->features & ARM_SMMU_FEAT_STALLS)) { in arm_smmu_init_queues()
3746 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev)); in arm_smmu_init_queues()
3747 if (!smmu->evtq.iopf) in arm_smmu_init_queues()
3748 return -ENOMEM; in arm_smmu_init_queues()
3752 if (!(smmu->features & ARM_SMMU_FEAT_PRI)) in arm_smmu_init_queues()
3755 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1, in arm_smmu_init_queues()
3763 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_2lvl()
3765 arm_smmu_strtab_l1_idx((1ULL << smmu->sid_bits) - 1); in arm_smmu_init_strtab_2lvl()
3768 cfg->l2.num_l1_ents = min(last_sid_idx + 1, STRTAB_MAX_L1_ENTRIES); in arm_smmu_init_strtab_2lvl()
3769 if (cfg->l2.num_l1_ents <= last_sid_idx) in arm_smmu_init_strtab_2lvl()
3770 dev_warn(smmu->dev, in arm_smmu_init_strtab_2lvl()
3771 "2-level strtab only covers %u/%u bits of SID\n", in arm_smmu_init_strtab_2lvl()
3772 ilog2(cfg->l2.num_l1_ents * STRTAB_NUM_L2_STES), in arm_smmu_init_strtab_2lvl()
3773 smmu->sid_bits); in arm_smmu_init_strtab_2lvl()
3775 l1size = cfg->l2.num_l1_ents * sizeof(struct arm_smmu_strtab_l1); in arm_smmu_init_strtab_2lvl()
3776 cfg->l2.l1tab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->l2.l1_dma, in arm_smmu_init_strtab_2lvl()
3778 if (!cfg->l2.l1tab) { in arm_smmu_init_strtab_2lvl()
3779 dev_err(smmu->dev, in arm_smmu_init_strtab_2lvl()
3782 return -ENOMEM; in arm_smmu_init_strtab_2lvl()
3785 cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, cfg->l2.num_l1_ents, in arm_smmu_init_strtab_2lvl()
3786 sizeof(*cfg->l2.l2ptrs), GFP_KERNEL); in arm_smmu_init_strtab_2lvl()
3787 if (!cfg->l2.l2ptrs) in arm_smmu_init_strtab_2lvl()
3788 return -ENOMEM; in arm_smmu_init_strtab_2lvl()
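/*
 * Editorial sketch: sizing of the level-1 table above.  Each L1 descriptor
 * covers 2^STRTAB_SPLIT STEs, so the L1 index of the highest possible SID
 * fixes the descriptor count, capped at STRTAB_MAX_L1_ENTRIES; hitting the
 * cap is what produces the "only covers %u/%u bits of SID" warning.  The
 * split of 8 and cap of 1 << 17 restate the header's values as assumptions.
 */
#include <stdint.h>

static uint32_t sketch_strtab_num_l1_ents(unsigned int sid_bits)
{
	const uint32_t split = 8, max_l1_ents = 1u << 17;	/* assumed */
	uint32_t last_idx = (uint32_t)(((1ull << sid_bits) - 1) >> split);

	return (last_idx + 1 < max_l1_ents) ? last_idx + 1 : max_l1_ents;
}
/* e.g. 16 SID bits -> 256 L1 descriptors; 32 SID bits -> capped at 1 << 17. */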
3796 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_init_strtab_linear()
3798 size = (1 << smmu->sid_bits) * sizeof(struct arm_smmu_ste); in arm_smmu_init_strtab_linear()
3799 cfg->linear.table = dmam_alloc_coherent(smmu->dev, size, in arm_smmu_init_strtab_linear()
3800 &cfg->linear.ste_dma, in arm_smmu_init_strtab_linear()
3802 if (!cfg->linear.table) { in arm_smmu_init_strtab_linear()
3803 dev_err(smmu->dev, in arm_smmu_init_strtab_linear()
3806 return -ENOMEM; in arm_smmu_init_strtab_linear()
3808 cfg->linear.num_ents = 1 << smmu->sid_bits; in arm_smmu_init_strtab_linear()
3810 arm_smmu_init_initial_stes(cfg->linear.table, cfg->linear.num_ents); in arm_smmu_init_strtab_linear()
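/*
 * Note on the linear path above: the table is sized for the full SID space,
 * one STE per stream ID.  With 64-byte STEs (8 double-words each, per the
 * header) a 16-bit SID space already needs 4MiB of coherent memory, which is
 * why the 2-level format is used whenever the hardware supports it and the
 * SID space is wider than the split.
 */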
3818 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) in arm_smmu_init_strtab()
3825 ida_init(&smmu->vmid_map); in arm_smmu_init_strtab()
3834 mutex_init(&smmu->streams_mutex); in arm_smmu_init_structures()
3835 smmu->streams = RB_ROOT; in arm_smmu_init_structures()
3845 if (smmu->impl_ops && smmu->impl_ops->init_structures) in arm_smmu_init_structures()
3846 return smmu->impl_ops->init_structures(smmu); in arm_smmu_init_structures()
3856 writel_relaxed(val, smmu->base + reg_off); in arm_smmu_write_reg_sync()
3857 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val, in arm_smmu_write_reg_sync()
3865 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA; in arm_smmu_update_gbpa()
3879 dev_err(smmu->dev, "GBPA not responding to update\n"); in arm_smmu_update_gbpa()
3895 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index]; in arm_smmu_write_msi_msg()
3897 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; in arm_smmu_write_msi_msg()
3900 writeq_relaxed(doorbell, smmu->base + cfg[0]); in arm_smmu_write_msi_msg()
3901 writel_relaxed(msg->data, smmu->base + cfg[1]); in arm_smmu_write_msi_msg()
3902 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); in arm_smmu_write_msi_msg()
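/*
 * Editorial sketch: the doorbell address is reassembled from the two 32-bit
 * halves of the MSI message and written as a single 64-bit register, with
 * the payload and the nGnRE memory attributes in the two registers that
 * follow (cfg[0..2] above).
 */
#include <stdint.h>

static uint64_t sketch_msi_doorbell(uint32_t address_hi, uint32_t address_lo)
{
	return ((uint64_t)address_hi << 32) | address_lo;
}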
3908 struct device *dev = smmu->dev; in arm_smmu_setup_msis()
3911 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); in arm_smmu_setup_msis()
3912 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); in arm_smmu_setup_msis()
3914 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_msis()
3915 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); in arm_smmu_setup_msis()
3917 nvec--; in arm_smmu_setup_msis()
3919 if (!(smmu->features & ARM_SMMU_FEAT_MSI)) in arm_smmu_setup_msis()
3922 if (!dev->msi.domain) { in arm_smmu_setup_msis()
3923 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n"); in arm_smmu_setup_msis()
3930 dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); in arm_smmu_setup_msis()
3934 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX); in arm_smmu_setup_msis()
3935 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); in arm_smmu_setup_msis()
3936 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX); in arm_smmu_setup_msis()
3949 irq = smmu->evtq.q.irq; in arm_smmu_setup_unique_irqs()
3951 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3954 "arm-smmu-v3-evtq", smmu); in arm_smmu_setup_unique_irqs()
3956 dev_warn(smmu->dev, "failed to enable evtq irq\n"); in arm_smmu_setup_unique_irqs()
3958 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3961 irq = smmu->gerr_irq; in arm_smmu_setup_unique_irqs()
3963 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler, in arm_smmu_setup_unique_irqs()
3964 0, "arm-smmu-v3-gerror", smmu); in arm_smmu_setup_unique_irqs()
3966 dev_warn(smmu->dev, "failed to enable gerror irq\n"); in arm_smmu_setup_unique_irqs()
3968 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n"); in arm_smmu_setup_unique_irqs()
3971 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_setup_unique_irqs()
3972 irq = smmu->priq.q.irq; in arm_smmu_setup_unique_irqs()
3974 ret = devm_request_threaded_irq(smmu->dev, irq, NULL, in arm_smmu_setup_unique_irqs()
3977 "arm-smmu-v3-priq", in arm_smmu_setup_unique_irqs()
3980 dev_warn(smmu->dev, in arm_smmu_setup_unique_irqs()
3983 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); in arm_smmu_setup_unique_irqs()
3997 dev_err(smmu->dev, "failed to disable irqs\n"); in arm_smmu_setup_irqs()
4001 irq = smmu->combined_irq; in arm_smmu_setup_irqs()
4007 ret = devm_request_threaded_irq(smmu->dev, irq, in arm_smmu_setup_irqs()
4011 "arm-smmu-v3-combined-irq", smmu); in arm_smmu_setup_irqs()
4013 dev_warn(smmu->dev, "failed to enable combined irq\n"); in arm_smmu_setup_irqs()
4017 if (smmu->features & ARM_SMMU_FEAT_PRI) in arm_smmu_setup_irqs()
4024 dev_warn(smmu->dev, "failed to enable irqs\n"); in arm_smmu_setup_irqs()
4035 dev_err(smmu->dev, "failed to clear cr0\n"); in arm_smmu_device_disable()
4042 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; in arm_smmu_write_strtab()
4046 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) { in arm_smmu_write_strtab()
4050 ilog2(cfg->l2.num_l1_ents) + STRTAB_SPLIT) | in arm_smmu_write_strtab()
4052 dma = cfg->l2.l1_dma; in arm_smmu_write_strtab()
4056 FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits); in arm_smmu_write_strtab()
4057 dma = cfg->linear.ste_dma; in arm_smmu_write_strtab()
4060 smmu->base + ARM_SMMU_STRTAB_BASE); in arm_smmu_write_strtab()
4061 writel_relaxed(reg, smmu->base + ARM_SMMU_STRTAB_BASE_CFG); in arm_smmu_write_strtab()
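/*
 * Note on STRTAB_BASE_CFG above: LOG2SIZE always describes the span of
 * stream IDs the table covers.  For the 2-level format that is
 * ilog2(num_l1_ents) + STRTAB_SPLIT (e.g. 256 L1 entries with a split of 8
 * give LOG2SIZE = 16, i.e. 2^16 SIDs); for the linear format it is simply
 * smmu->sid_bits.  The base register takes the table's DMA address, masked
 * to the architected address field, plus a read-allocate hint.
 */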
4071 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0); in arm_smmu_device_reset()
4073 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n"); in arm_smmu_device_reset()
4088 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1); in arm_smmu_device_reset()
4093 if (smmu->features & ARM_SMMU_FEAT_E2H) in arm_smmu_device_reset()
4096 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2); in arm_smmu_device_reset()
4102 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); in arm_smmu_device_reset()
4103 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
4104 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
4110 dev_err(smmu->dev, "failed to enable command queue\n"); in arm_smmu_device_reset()
4119 if (smmu->features & ARM_SMMU_FEAT_HYP) { in arm_smmu_device_reset()
4128 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); in arm_smmu_device_reset()
4129 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD); in arm_smmu_device_reset()
4130 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS); in arm_smmu_device_reset()
4136 dev_err(smmu->dev, "failed to enable event queue\n"); in arm_smmu_device_reset()
4141 if (smmu->features & ARM_SMMU_FEAT_PRI) { in arm_smmu_device_reset()
4142 writeq_relaxed(smmu->priq.q.q_base, in arm_smmu_device_reset()
4143 smmu->base + ARM_SMMU_PRIQ_BASE); in arm_smmu_device_reset()
4144 writel_relaxed(smmu->priq.q.llq.prod, in arm_smmu_device_reset()
4145 smmu->page1 + ARM_SMMU_PRIQ_PROD); in arm_smmu_device_reset()
4146 writel_relaxed(smmu->priq.q.llq.cons, in arm_smmu_device_reset()
4147 smmu->page1 + ARM_SMMU_PRIQ_CONS); in arm_smmu_device_reset()
4153 dev_err(smmu->dev, "failed to enable PRI queue\n"); in arm_smmu_device_reset()
4158 if (smmu->features & ARM_SMMU_FEAT_ATS) { in arm_smmu_device_reset()
4163 dev_err(smmu->dev, "failed to enable ATS check\n"); in arm_smmu_device_reset()
4170 dev_err(smmu->dev, "failed to setup irqs\n"); in arm_smmu_device_reset()
4182 dev_err(smmu->dev, "failed to enable SMMU interface\n"); in arm_smmu_device_reset()
4186 if (smmu->impl_ops && smmu->impl_ops->device_reset) { in arm_smmu_device_reset()
4187 ret = smmu->impl_ops->device_reset(smmu); in arm_smmu_device_reset()
4189 dev_err(smmu->dev, "failed to reset impl\n"); in arm_smmu_device_reset()
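/*
 * Note on the reset fragments above: bring-up follows the architected order.
 * The SMMU is first disabled, CR1/CR2 and the stream table base are
 * programmed, then each queue is written (base, prod, cons) before its
 * enable bit is set in CR0 and acknowledged via CR0ACK; the PRI queue and
 * the ATS check are only touched when the corresponding features were
 * probed.  Any implementation-specific reset hook runs last, after IRQ setup
 * and the final enable (or bypass) write.
 */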
4206 reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR); in arm_smmu_device_iidr_probe()
4218 smmu->features &= ~ARM_SMMU_FEAT_SEV; in arm_smmu_device_iidr_probe()
4221 smmu->features &= ~ARM_SMMU_FEAT_NESTING; in arm_smmu_device_iidr_probe()
4225 smmu->features &= ~ARM_SMMU_FEAT_BTM; in arm_smmu_device_iidr_probe()
4226 smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC; in arm_smmu_device_iidr_probe()
4228 smmu->features &= ~ARM_SMMU_FEAT_NESTING; in arm_smmu_device_iidr_probe()
4237 u32 fw_features = smmu->features & (ARM_SMMU_FEAT_HA | ARM_SMMU_FEAT_HD); in arm_smmu_get_httu()
4248 if (smmu->dev->of_node) in arm_smmu_get_httu()
4249 smmu->features |= hw_features; in arm_smmu_get_httu()
4252 dev_warn(smmu->dev, in arm_smmu_get_httu()
4260 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_hw_probe()
4263 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0); in arm_smmu_device_hw_probe()
4265 /* 2-level structures */ in arm_smmu_device_hw_probe()
4267 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
4270 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB; in arm_smmu_device_hw_probe()
4279 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
4283 smmu->features |= ARM_SMMU_FEAT_TT_BE; in arm_smmu_device_hw_probe()
4287 smmu->features |= ARM_SMMU_FEAT_TT_LE; in arm_smmu_device_hw_probe()
4291 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n"); in arm_smmu_device_hw_probe()
4292 return -ENXIO; in arm_smmu_device_hw_probe()
4297 smmu->features |= ARM_SMMU_FEAT_PRI; in arm_smmu_device_hw_probe()
4300 smmu->features |= ARM_SMMU_FEAT_ATS; in arm_smmu_device_hw_probe()
4303 smmu->features |= ARM_SMMU_FEAT_SEV; in arm_smmu_device_hw_probe()
4306 smmu->features |= ARM_SMMU_FEAT_MSI; in arm_smmu_device_hw_probe()
4308 smmu->options |= ARM_SMMU_OPT_MSIPOLL; in arm_smmu_device_hw_probe()
4312 smmu->features |= ARM_SMMU_FEAT_HYP; in arm_smmu_device_hw_probe()
4314 smmu->features |= ARM_SMMU_FEAT_E2H; in arm_smmu_device_hw_probe()
4324 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n", in arm_smmu_device_hw_probe()
4329 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE; in arm_smmu_device_hw_probe()
4332 smmu->features |= ARM_SMMU_FEAT_STALLS; in arm_smmu_device_hw_probe()
4336 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_hw_probe()
4339 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_hw_probe()
4342 dev_err(smmu->dev, "no translation support!\n"); in arm_smmu_device_hw_probe()
4343 return -ENXIO; in arm_smmu_device_hw_probe()
4349 smmu->ias = 40; in arm_smmu_device_hw_probe()
4354 dev_err(smmu->dev, "AArch64 table format not supported!\n"); in arm_smmu_device_hw_probe()
4355 return -ENXIO; in arm_smmu_device_hw_probe()
4359 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8; in arm_smmu_device_hw_probe()
4360 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8; in arm_smmu_device_hw_probe()
4363 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1); in arm_smmu_device_hw_probe()
4365 dev_err(smmu->dev, "embedded implementation not supported\n"); in arm_smmu_device_hw_probe()
4366 return -ENXIO; in arm_smmu_device_hw_probe()
4370 smmu->features |= ARM_SMMU_FEAT_ATTR_TYPES_OVR; in arm_smmu_device_hw_probe()
4373 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4375 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
4380 * restrictions on the base pointer for a unit-length queue. in arm_smmu_device_hw_probe()
4382 dev_err(smmu->dev, "command queue size <= %d entries not supported\n", in arm_smmu_device_hw_probe()
4384 return -ENXIO; in arm_smmu_device_hw_probe()
4387 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4389 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4393 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); in arm_smmu_device_hw_probe()
4394 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg); in arm_smmu_device_hw_probe()
4395 smmu->iommu.max_pasids = 1UL << smmu->ssid_bits; in arm_smmu_device_hw_probe()
4401 if (smmu->sid_bits <= STRTAB_SPLIT) in arm_smmu_device_hw_probe()
4402 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; in arm_smmu_device_hw_probe()
4405 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); in arm_smmu_device_hw_probe()
4407 smmu->features |= ARM_SMMU_FEAT_RANGE_INV; in arm_smmu_device_hw_probe()
4410 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); in arm_smmu_device_hw_probe()
4413 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg); in arm_smmu_device_hw_probe()
4417 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_hw_probe()
4419 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_hw_probe()
4421 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_hw_probe()
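/*
 * Note on the IDR5 decode above: each supported translation granule brings
 * its VMSA block sizes along with the base page size, which is why 64K adds
 * 512M, 16K adds 32M, and 4K adds both 2M and 1G to the page-size bitmap.
 * Further below, the first probed SMMU seeds arm_smmu_ops.pgsize_bitmap and
 * any later instances OR their bitmaps in.
 */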
4425 smmu->features |= ARM_SMMU_FEAT_VAX; in arm_smmu_device_hw_probe()
4430 smmu->oas = 32; in arm_smmu_device_hw_probe()
4433 smmu->oas = 36; in arm_smmu_device_hw_probe()
4436 smmu->oas = 40; in arm_smmu_device_hw_probe()
4439 smmu->oas = 42; in arm_smmu_device_hw_probe()
4442 smmu->oas = 44; in arm_smmu_device_hw_probe()
4445 smmu->oas = 52; in arm_smmu_device_hw_probe()
4446 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */ in arm_smmu_device_hw_probe()
4449 dev_info(smmu->dev, in arm_smmu_device_hw_probe()
4450 "unknown output address size. Truncating to 48-bit\n"); in arm_smmu_device_hw_probe()
4453 smmu->oas = 48; in arm_smmu_device_hw_probe()
4456 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_hw_probe()
4457 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
4459 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_hw_probe()
4462 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas))) in arm_smmu_device_hw_probe()
4463 dev_warn(smmu->dev, in arm_smmu_device_hw_probe()
4466 smmu->ias = max(smmu->ias, smmu->oas); in arm_smmu_device_hw_probe()
4468 if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) && in arm_smmu_device_hw_probe()
4469 (smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_device_hw_probe()
4470 smmu->features |= ARM_SMMU_FEAT_NESTING; in arm_smmu_device_hw_probe()
4475 smmu->features |= ARM_SMMU_FEAT_SVA; in arm_smmu_device_hw_probe()
4477 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", in arm_smmu_device_hw_probe()
4478 smmu->ias, smmu->oas, smmu->features); in arm_smmu_device_hw_probe()
4487 const char *uid = kasprintf(GFP_KERNEL, "%u", node->identifier); in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4491 adev = acpi_dev_get_first_match_dev("NVDA200C", uid, -1); in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4494 smmu->impl_dev = &adev->dev; in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4495 smmu->options |= ARM_SMMU_OPT_TEGRA241_CMDQV; in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4496 dev_info(smmu->dev, "found companion CMDQV device: %s\n", in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4497 dev_name(smmu->impl_dev)); in acpi_smmu_dsdt_probe_tegra241_cmdqv()
4512 (struct acpi_iort_smmu_v3 *)node->node_data; in acpi_smmu_iort_probe_model()
4514 switch (iort_smmu->model) { in acpi_smmu_iort_probe_model()
4516 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; in acpi_smmu_iort_probe_model()
4519 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; in acpi_smmu_iort_probe_model()
4530 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); in acpi_smmu_iort_probe_model()
4538 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
4544 iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_device_acpi_probe()
4546 if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) in arm_smmu_device_acpi_probe()
4547 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_acpi_probe()
4549 switch (FIELD_GET(ACPI_IORT_SMMU_V3_HTTU_OVERRIDE, iort_smmu->flags)) { in arm_smmu_device_acpi_probe()
4551 smmu->features |= ARM_SMMU_FEAT_HD; in arm_smmu_device_acpi_probe()
4554 smmu->features |= ARM_SMMU_FEAT_HA; in arm_smmu_device_acpi_probe()
4563 return -ENODEV; in arm_smmu_device_acpi_probe()
4570 struct device *dev = &pdev->dev; in arm_smmu_device_dt_probe()
4572 int ret = -EINVAL; in arm_smmu_device_dt_probe()
4574 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells)) in arm_smmu_device_dt_probe()
4575 dev_err(dev, "missing #iommu-cells property\n"); in arm_smmu_device_dt_probe()
4577 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); in arm_smmu_device_dt_probe()
4583 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
4584 smmu->features |= ARM_SMMU_FEAT_COHERENCY; in arm_smmu_device_dt_probe()
4591 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY) in arm_smmu_resource_size()
4611 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
4618 for (i = 0; i < rmr->num_sids; i++) { in arm_smmu_rmr_install_bypass_ste()
4619 ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
4621 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n", in arm_smmu_rmr_install_bypass_ste()
4622 rmr->sids[i]); in arm_smmu_rmr_install_bypass_ste()
4631 arm_smmu_get_step_for_sid(smmu, rmr->sids[i])); in arm_smmu_rmr_install_bypass_ste()
4635 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_ste()
4642 if (smmu->impl_ops && smmu->impl_ops->device_remove) in arm_smmu_impl_remove()
4643 smmu->impl_ops->device_remove(smmu); in arm_smmu_impl_remove()
4653 struct arm_smmu_device *new_smmu = ERR_PTR(-ENODEV); in arm_smmu_impl_probe()
4656 if (smmu->impl_dev && (smmu->options & ARM_SMMU_OPT_TEGRA241_CMDQV)) in arm_smmu_impl_probe()
4659 if (new_smmu == ERR_PTR(-ENODEV)) in arm_smmu_impl_probe()
4664 ret = devm_add_action_or_reset(new_smmu->dev, arm_smmu_impl_remove, in arm_smmu_impl_probe()
4677 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
4681 return -ENOMEM; in arm_smmu_device_probe()
4682 smmu->dev = dev; in arm_smmu_device_probe()
4684 if (dev->of_node) { in arm_smmu_device_probe()
4699 return -EINVAL; in arm_smmu_device_probe()
4702 return -EINVAL; in arm_smmu_device_probe()
4704 ioaddr = res->start; in arm_smmu_device_probe()
4710 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ); in arm_smmu_device_probe()
4711 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
4712 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
4715 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K, in arm_smmu_device_probe()
4717 if (IS_ERR(smmu->page1)) in arm_smmu_device_probe()
4718 return PTR_ERR(smmu->page1); in arm_smmu_device_probe()
4720 smmu->page1 = smmu->base; in arm_smmu_device_probe()
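/*
 * Note on the page1 handling above: on implementations flagged with
 * ARM_SMMU_OPT_PAGE0_REGS_ONLY the 64K "page 1" register frame is not
 * usable, so page1 simply aliases the base mapping and the MMIO resource is
 * sized accordingly in arm_smmu_resource_size() further up.
 */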
4727 smmu->combined_irq = irq; in arm_smmu_device_probe()
4731 smmu->evtq.q.irq = irq; in arm_smmu_device_probe()
4735 smmu->priq.q.irq = irq; in arm_smmu_device_probe()
4739 smmu->gerr_irq = irq; in arm_smmu_device_probe()
4746 /* Initialise in-memory data structures */ in arm_smmu_device_probe()
4763 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, in arm_smmu_device_probe()
4768 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); in arm_smmu_device_probe()
4777 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
4781 iopf_queue_free(smmu->evtq.iopf); in arm_smmu_device_probe()
4789 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
4790 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
4792 iopf_queue_free(smmu->evtq.iopf); in arm_smmu_device_remove()
4793 ida_destroy(&smmu->vmid_map); in arm_smmu_device_remove()
4804 { .compatible = "arm,smmu-v3", },
4817 .name = "arm-smmu-v3",
4830 MODULE_ALIAS("platform:arm-smmu-v3");