Lines matching +full:ats +full:- +full:supported (excerpts grouped by containing function; "..." marks elided source lines)

// SPDX-License-Identifier: GPL-2.0

#include "arm-smmu-v3.h"
In arm_smmu_hw_info():
        ...
        if (!info)
                return ERR_PTR(-ENOMEM);

        base_idr = master->smmu->base + ARM_SMMU_IDR0;
        for (i = 0; i <= 5; i++)
                info->idr[i] = readl_relaxed(base_idr + i);
        info->iidr = readl_relaxed(master->smmu->base + ARM_SMMU_IIDR);
        info->aidr = readl_relaxed(master->smmu->base + ARM_SMMU_AIDR);
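Note: arm_smmu_hw_info() appears to exist purely to snapshot IDR0-IDR5, IIDR and AIDR into the info structure that iommufd returns to userspace, so a VMM can derive the ID registers of the virtual SMMU it presents to a guest.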
In arm_smmu_make_nested_cd_table_ste():
        arm_smmu_make_s2_domain_ste(
                target, master, nested_domain->vsmmu->s2_parent, ats_enabled);
        target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
                                      FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_NESTED));
        target->data[0] |= nested_domain->ste[0] &
                           ~cpu_to_le64(STRTAB_STE_0_CFG);
        target->data[1] |= nested_domain->ste[1];
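Note: the nested CD-table STE starts from the parent S2 STE (arm_smmu_make_s2_domain_ste) and then merges in the guest-provided vSTE words; the Config field itself is masked out of the guest's word 0 and replaced with the nested S1+S2 encoding.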
In arm_smmu_make_nested_domain_ste():
        /* ... Using the vSTE, userspace can request:
         * - Non-valid STE
         * - Abort STE
         * - Bypass STE (install the S2, no CD table)
         * - CD table STE (install the S2 and the userspace CD table)
         */
        unsigned int cfg =
                FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

        /* Userspace can request a non-valid STE through the nesting interface... */
        if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
                cfg = STRTAB_STE_0_CFG_ABORT;
        ...
        /* Bypass vSTE: install only the S2, no CD table */
        arm_smmu_make_s2_domain_ste(target, master,
                                    nested_domain->vsmmu->s2_parent, ats_enabled);
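As a reading aid for the Config decode above, here is a minimal stand-alone C sketch of the same selection logic. The STE_0_* and CFG_* constants are local illustrative copies, only assumed to match the driver's STRTAB_STE_0_V and STRTAB_STE_0_CFG_* definitions in arm-smmu-v3.h, so treat the exact values as assumptions rather than a reference.

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the STE word-0 layout; assumed to match the
 * driver's STRTAB_STE_0_V / STRTAB_STE_0_CFG_* definitions. */
#define STE_0_V         (1ULL << 0)
#define STE_0_CFG_SHIFT 1
#define STE_0_CFG_MASK  (0x7ULL << STE_0_CFG_SHIFT)
#define CFG_ABORT       0
#define CFG_BYPASS      4
#define CFG_S1_TRANS    5

/* Decode a guest vSTE word 0 the way the nesting path above does:
 * a non-valid vSTE is folded into the abort configuration. */
static const char *decode_vste0(uint64_t vste0)
{
        unsigned int cfg = (vste0 & STE_0_CFG_MASK) >> STE_0_CFG_SHIFT;

        if (!(vste0 & STE_0_V))
                cfg = CFG_ABORT;

        switch (cfg) {
        case CFG_S1_TRANS:
                return "S2 + guest CD table";
        case CFG_BYPASS:
                return "S2 only (stage-1 bypass)";
        case CFG_ABORT:
        default:
                return "abort";
        }
}

int main(void)
{
        printf("%s\n", decode_vste0(STE_0_V | (CFG_BYPASS << STE_0_CFG_SHIFT)));
        printf("%s\n", decode_vste0(0));        /* non-valid vSTE */
        return 0;
}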
In arm_smmu_attach_dev_nested():
        if (nested_domain->vsmmu->smmu != master->smmu)
                return -EINVAL;
        if (arm_smmu_ssids_in_use(&master->cd_table))
                return -EBUSY;
        ...
        /*
         * The VM has to control the actual ATS state at the PCI device because
         * we forward the invalidations directly from the VM. If the VM doesn't
         * think ATS is on it will not generate ATC flushes and the ATC will
         * become incoherent. Since we can't access the actual virtual PCI ATS
         * config bit here, base this off the EATS value in the vSTE instead.
         */
        state.disable_ats = !nested_domain->enable_ats;
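Note: disable_ats follows the guest's choice rather than the host's. If the vSTE did not enable ATS, ATS stays off at the physical device, because a guest that believes ATS is off will never issue the ATC invalidations needed to keep the ATC coherent.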
In arm_smmu_validate_vste():
        if (!(arg->ste[0] & cpu_to_le64(STRTAB_STE_0_V))) {
                memset(arg->ste, 0, sizeof(arg->ste));
                return 0;
        }
        ...
        if ((arg->ste[0] & ~STRTAB_STE_0_NESTING_ALLOWED) ||
            (arg->ste[1] & ~STRTAB_STE_1_NESTING_ALLOWED))
                return -EIO;

        cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(arg->ste[0]));
        if (cfg != STRTAB_STE_0_CFG_ABORT && cfg != STRTAB_STE_0_CFG_BYPASS &&
            cfg != STRTAB_STE_0_CFG_S1_TRANS)
                return -EIO;

        /* Only Full ATS or ATS UR is supported */
        eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
        arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS);
        if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS)
                return -EIO;
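Note: only the full-ATS and ATS-aborted (UR) EATS encodings pass validation, and the field is stripped from the saved vSTE since it is recomputed when the physical STE is built at attach time; -EIO appears to be reserved for malformed vSTE data.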
In arm_vsmmu_alloc_domain_nested():
        ...
                return ERR_PTR(-EOPNOTSUPP);
        ...
                return ERR_PTR(-ENOMEM);

        nested_domain->domain.type = IOMMU_DOMAIN_NESTED;
        nested_domain->domain.ops = &arm_smmu_nested_ops;
        nested_domain->enable_ats = enable_ats;
        nested_domain->vsmmu = vsmmu;
        nested_domain->ste[0] = arg.ste[0];
        nested_domain->ste[1] = arg.ste[1] & ~cpu_to_le64(STRTAB_STE_1_EATS);

        return &nested_domain->domain;
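For the userspace side of this interface, a hypothetical VMM-side helper is sketched below that fills the two little-endian vSTE words consumed by this allocation path, here for a stage-1-bypass configuration. The constants and the helper name are invented for the sketch and only assumed to match the driver's STE layout.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* Illustrative STE word-0 encodings, assumed to match the driver's
 * STRTAB_STE_0_V and STRTAB_STE_0_CFG_BYPASS definitions. */
#define VSTE_V          (1ULL << 0)
#define VSTE_CFG_SHIFT  1
#define VSTE_CFG_BYPASS 4ULL

/* Hypothetical helper: build a valid, stage-1-bypass vSTE. Word 1 is left
 * clear; as seen in arm_smmu_validate_vste() above, the kernel strips and
 * later recomputes the EATS field anyway. */
static void make_bypass_vste(uint64_t vste[2])
{
        vste[0] = htole64(VSTE_V | (VSTE_CFG_BYPASS << VSTE_CFG_SHIFT));
        vste[1] = 0;
}

int main(void)
{
        uint64_t vste[2];

        make_bypass_vste(vste);
        printf("vste[0]=%#llx vste[1]=%#llx\n",
               (unsigned long long)vste[0], (unsigned long long)vste[1]);
        return 0;
}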
In arm_vsmmu_vsid_to_sid():
        xa_lock(&vsmmu->core.vdevs);
        dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
        ...
                ret = -EIO;
        ...
                *sid = master->streams[0].id;
        ...
        xa_unlock(&vsmmu->core.vdevs);
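Note: the vSID comes straight from a guest command, so it is looked up in the viommu's vdevs xarray, under its lock, to find the device userspace bound to that virtual StreamID; the physical StreamID is then taken from the master's single stream (streams[0]).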
In arm_vsmmu_convert_user_cmd():
        /* Commands arrive as le64 from userspace; convert to CPU endian */
        cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
        cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);

        switch (cmd->cmd[0] & CMDQ_0_OP) {
        ...
                /* Rewrite as a TLBI_NH_ALL scoped to this vSMMU's VMID */
                cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
                              FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
                cmd->cmd[1] = 0;
        ...
                /* Other TLBI ops: force the VMID owned by this vSMMU */
                cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
                cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
        ...
                /* SID-carrying ops: translate the guest vSID to the physical SID */
                u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);

                if (arm_vsmmu_vsid_to_sid(vsmmu, vsid, &sid))
                        return -EIO;
                cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
                cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
        ...
                /* Any other opcode is rejected */
                return -EIO;
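Note: this rewriting is what keeps guests isolated. Whatever VMID or StreamID the guest put into the command is discarded and replaced with the VMID owned by this vSMMU, or with the translated physical SID, so a guest can only invalidate state belonging to its own VM.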
In arm_vsmmu_cache_invalidate():
        struct arm_smmu_device *smmu = vsmmu->smmu;
        ...
        cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL);
        if (!cmds)
                return -ENOMEM;
        ...
        end = cmds + array->entry_num;
        ...
                /* Keep accumulating until the batch boundary or the last entry */
                if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
                        continue;
                ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
                                                  cur - last, true);
                ...
                        cur--;
        ...
        array->entry_num = cur - cmds;
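The batching loop above is easier to see in isolation; the skeleton below mirrors its structure under stated assumptions: convert() and issue() are stand-ins for arm_vsmmu_convert_user_cmd() and arm_smmu_cmdq_issue_cmdlist(), and BATCH stands in for CMDQ_BATCH_ENTRIES. The point of interest is how the consumed-command count is reported back, the way the driver writes array->entry_num.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH 64        /* stand-in for CMDQ_BATCH_ENTRIES */

/* Stand-ins that always succeed; the real driver converts the command and
 * pushes the batch onto the SMMU command queue. */
static int convert(uint64_t cmd[2]) { (void)cmd; return 0; }
static int issue(uint64_t (*cmds)[2], size_t n) { (void)cmds; (void)n; return 0; }

/* Consume *num two-word commands; on return *num holds how many commands
 * were consumed, mirroring how the driver writes back array->entry_num. */
static int invalidate(uint64_t (*cmds)[2], size_t *num)
{
        uint64_t (*cur)[2] = cmds;
        uint64_t (*last)[2] = cmds;
        uint64_t (*end)[2] = cmds + *num;
        int ret = 0;

        while (cur != end) {
                ret = convert(*cur);
                if (ret)
                        break;
                cur++;
                /* Keep accumulating until the batch boundary or the last entry */
                if (cur != end && (cur - last) != BATCH - 1)
                        continue;
                ret = issue(last, cur - last);
                if (ret) {
                        cur--;  /* step back so the failed work is not counted */
                        break;
                }
                last = cur;
        }
        *num = cur - cmds;
        return ret;
}

int main(void)
{
        uint64_t cmds[4][2] = {{0}};
        size_t num = 4;
        int ret = invalidate(cmds, &num);

        printf("ret=%d consumed=%zu\n", ret, num);
        return 0;
}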
In arm_vsmmu_alloc():
        if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
                return ERR_PTR(-EOPNOTSUPP);

        if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
                return ERR_PTR(-EOPNOTSUPP);

        if (s2_parent->smmu != master->smmu)
                return ERR_PTR(-EINVAL);
        ...
        if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * ... canwbs means the device is fully coherent and no cache
         * maintenance is ever required, even for PCI No-Snoop. S2FWB means
         * the S1 can't make things non-coherent using the memattr, but
         * No-Snoop behavior is not VM controlled.
         */
        if (!arm_smmu_master_canwbs(master) &&
            !(smmu->features & ARM_SMMU_FEAT_S2FWB))
                return ERR_PTR(-EOPNOTSUPP);
        ...
        vsmmu->smmu = smmu;
        vsmmu->s2_parent = s2_parent;
        vsmmu->vmid = s2_parent->s2_cfg.vmid;

        return &vsmmu->core;
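Note: taken together, the checks above mean a vSMMU can only be created when the SMMU advertises nesting, the S2 parent domain lives on the same SMMU instance as the device, the CMDQ_FORCE_SYNC quirk is not in effect, and either the master is fully coherent (canwbs) or the SMMU supports S2FWB; the vSMMU then inherits the parent S2 domain's VMID.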