Lines Matching +full:smmu +full:- +full:v3

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021-2024 NVIDIA CORPORATION & AFFILIATES. */
8 #include <linux/dma-mapping.h>
15 #include "arm-smmu-v3.h"
61 /* -- PAGE0 -- */
78 /* -- PAGE1 -- */
86 /* VINTF logical-VCMDQ pages */
96 ((_cmdqv)->base + TEGRA241_CMDQV_##_regname)
98 ((_vintf)->base + TEGRA241_VINTF_##_regname)
100 ((_vcmdq)->page0 + TEGRA241_VCMDQ_##_regname)
102 ((_vcmdq)->page1 + TEGRA241_VCMDQ_##_regname)
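The accessor macros above (REG_CMDQV(), REG_VINTF(), REG_VCMDQ_PAGE0()/REG_VCMDQ_PAGE1()) simply add a fixed register offset to the block's mapped base, so MMIO accesses elsewhere in the file read as readl(REG_VINTF(vintf, CONFIG)) or writeq_relaxed(..., REG_VCMDQ_PAGE1(vcmdq, BASE)). A minimal, self-contained sketch of the same base-plus-offset, token-pasting pattern; the block name and offsets below are invented for illustration, not the Tegra241 layout:

#include <stdint.h>
#include <stdio.h>

/* Invented register block: CONFIG at byte offset 0x0, STATUS at 0x4.
 * (The driver adds byte offsets to a void __iomem * base; this userspace
 * analogue indexes a uint32_t array instead.) */
#define MYBLK_CONFIG 0x0
#define MYBLK_STATUS 0x4

struct myblk {
	volatile uint32_t *base;
};

/* Token-pasting accessor, mirroring REG_VINTF(_vintf, _regname). */
#define REG_MYBLK(_blk, _regname) \
	((_blk)->base + MYBLK_##_regname / sizeof(uint32_t))

int main(void)
{
	uint32_t fake_mmio[2] = { 0x1, 0xdead };
	struct myblk blk = { .base = fake_mmio };

	printf("CONFIG=0x%x STATUS=0x%x\n",
	       (unsigned)*REG_MYBLK(&blk, CONFIG),
	       (unsigned)*REG_MYBLK(&blk, STATUS));
	return 0;
}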
108 "This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
116 * struct tegra241_vcmdq - Virtual Command Queue
141 * struct tegra241_vintf - Virtual Interface
144 * @hyp_own: Owned by hypervisor (in-kernel)
162 * struct tegra241_cmdqv - CMDQ-V for SMMUv3
163 * @smmu: SMMUv3 device
174 struct arm_smmu_device smmu; member
206 dev_err(cmdqv->dev, "%sfailed to %sable, STATUS=0x%08X\n", in tegra241_cmdqv_write_config()
225 snprintf(header, 16, "VINTF%u: ", vintf->idx); in vintf_write_config()
226 return tegra241_cmdqv_write_config(vintf->cmdqv, in vintf_write_config()
229 regval, header, &vintf->enabled); in vintf_write_config()
236 if (WARN_ON(!vcmdq->vintf)) in lvcmdq_error_header()
239 vcmdq->vintf->idx, vcmdq->idx, vcmdq->lidx); in lvcmdq_error_header()
247 return tegra241_cmdqv_write_config(vcmdq->cmdqv, in vcmdq_write_config()
250 regval, h, &vcmdq->enabled); in vcmdq_write_config()
264 struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; in tegra241_vintf0_handle_error()
267 __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq); in tegra241_vintf0_handle_error()
281 /* Use readl_relaxed() as register addresses are not 64-bit aligned */ in tegra241_cmdqv_isr()
292 dev_warn(cmdqv->dev, "unexpected error reported. %s\n", err_str); in tegra241_cmdqv_isr()
296 tegra241_vintf0_handle_error(cmdqv->vintfs[0]); in tegra241_cmdqv_isr()
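The ISR above uses readl_relaxed() and stitches 32-bit halves together because, per the comment at line 281, the 64-bit register pairs are not 64-bit aligned. A standalone sketch of that low/high combination; the fake register contents are stand-ins, not the CMDQV error-map layout:

#include <stdint.h>
#include <stdio.h>

/* Combine two adjacent 32-bit MMIO words into one 64-bit value, the way a
 * driver does when a single 64-bit load would be misaligned. */
static uint64_t read_pair(const volatile uint32_t *lo)
{
	return (uint64_t)lo[0] | ((uint64_t)lo[1] << 32);
}

int main(void)
{
	uint32_t fake_regs[2] = { 0xdeadbeef, 0x00000001 };

	printf("status=0x%llx\n", (unsigned long long)read_pair(fake_regs));
	return 0;
}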
307 switch (ent->opcode) { in tegra241_guest_vcmdq_supports_cmd()
318 tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu, in tegra241_cmdqv_get_cmdq() argument
322 container_of(smmu, struct tegra241_cmdqv, smmu); in tegra241_cmdqv_get_cmdq()
323 struct tegra241_vintf *vintf = cmdqv->vintfs[0]; in tegra241_cmdqv_get_cmdq()
330 /* Use SMMU CMDQ if VINTF0 is uninitialized */ in tegra241_cmdqv_get_cmdq()
331 if (!READ_ONCE(vintf->enabled)) in tegra241_cmdqv_get_cmdq()
341 lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf; in tegra241_cmdqv_get_cmdq()
342 vcmdq = vintf->lvcmdqs[lidx]; in tegra241_cmdqv_get_cmdq()
343 if (!vcmdq || !READ_ONCE(vcmdq->enabled)) in tegra241_cmdqv_get_cmdq()
346 	/* An unsupported CMD falls back to the smmu->cmdq pathway */ in tegra241_cmdqv_get_cmdq()
347 if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent)) in tegra241_cmdqv_get_cmdq()
349 return &vcmdq->cmdq; in tegra241_cmdqv_get_cmdq()
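tegra241_cmdqv_get_cmdq() recovers the wrapping tegra241_cmdqv from the arm_smmu_device pointer with container_of(); this works because struct arm_smmu_device is embedded as the first member (the static_assert on offsetof() == 0 further down guards the devm_krealloc() trick in the probe path). A self-contained sketch of the embedded-first-member/container_of pattern, with made-up type names:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* container_of(), simplified from the kernel definition. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core {			/* stands in for struct arm_smmu_device */
	int id;
};

struct wrapper {		/* stands in for struct tegra241_cmdqv */
	struct core core;	/* must stay the first member */
	int extra;
};

int main(void)
{
	struct wrapper w = { .core = { .id = 7 }, .extra = 42 };
	struct core *c = &w.core;
	struct wrapper *back = container_of(c, struct wrapper, core);

	static_assert(offsetof(struct wrapper, core) == 0,
		      "core must be the first member");
	printf("extra=%d\n", back->extra);	/* prints 42 */
	return 0;
}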
360 dev_err(vcmdq->cmdqv->dev, in tegra241_vcmdq_hw_deinit()
374 dev_warn(vcmdq->cmdqv->dev, in tegra241_vcmdq_hw_deinit()
379 dev_dbg(vcmdq->cmdqv->dev, "%sdeinited\n", h); in tegra241_vcmdq_hw_deinit()
391 writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE)); in tegra241_vcmdq_hw_init()
395 dev_err(vcmdq->cmdqv->dev, in tegra241_vcmdq_hw_init()
403 dev_dbg(vcmdq->cmdqv->dev, "%sinited\n", h); in tegra241_vcmdq_hw_init()
411 for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) in tegra241_vintf_hw_deinit()
412 if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) in tegra241_vintf_hw_deinit()
413 tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]); in tegra241_vintf_hw_deinit()
442 vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG))); in tegra241_vintf_hw_init()
444 for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) { in tegra241_vintf_hw_init()
445 if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) { in tegra241_vintf_hw_init()
446 ret = tegra241_vcmdq_hw_init(vintf->lvcmdqs[lidx]); in tegra241_vintf_hw_init()
457 static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu) in tegra241_cmdqv_hw_reset() argument
460 container_of(smmu, struct tegra241_cmdqv, smmu); in tegra241_cmdqv_hw_reset()
475 for (idx = 0, qidx = 0; idx < cmdqv->num_vintfs; idx++) { in tegra241_cmdqv_hw_reset()
476 for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) { in tegra241_cmdqv_hw_reset()
485 return tegra241_vintf_hw_init(cmdqv->vintfs[0], true); in tegra241_cmdqv_hw_reset()
492 struct arm_smmu_device *smmu = &vcmdq->cmdqv->smmu; in tegra241_vcmdq_alloc_smmu_cmdq() local
493 struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq; in tegra241_vcmdq_alloc_smmu_cmdq()
494 struct arm_smmu_queue *q = &cmdq->q; in tegra241_vcmdq_alloc_smmu_cmdq()
499 snprintf(name, 16, "vcmdq%u", vcmdq->idx); in tegra241_vcmdq_alloc_smmu_cmdq()
501 /* Cap queue size to SMMU's IDR1.CMDQS and ensure natural alignment */ in tegra241_vcmdq_alloc_smmu_cmdq()
502 regval = readl_relaxed(smmu->base + ARM_SMMU_IDR1); in tegra241_vcmdq_alloc_smmu_cmdq()
503 q->llq.max_n_shift = in tegra241_vcmdq_alloc_smmu_cmdq()
507 ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0, in tegra241_vcmdq_alloc_smmu_cmdq()
514 q->q_base = q->base_dma & VCMDQ_ADDR; in tegra241_vcmdq_alloc_smmu_cmdq()
515 q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift); in tegra241_vcmdq_alloc_smmu_cmdq()
517 if (!vcmdq->vintf->hyp_own) in tegra241_vcmdq_alloc_smmu_cmdq()
518 cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd; in tegra241_vcmdq_alloc_smmu_cmdq()
520 return arm_smmu_cmdq_init(smmu, cmdq); in tegra241_vcmdq_alloc_smmu_cmdq()
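tegra241_vcmdq_alloc_smmu_cmdq() packs the queue's DMA base address and log2 size into the single q_base value with the VCMDQ_ADDR and VCMDQ_LOG2SIZE masks via FIELD_PREP(). A standalone sketch of that composition; the bit positions below are assumptions for illustration, not the real VCMDQ register layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bits [63:5] = queue base address, bits [4:0] = log2(entries). */
#define Q_BASE_ADDR_MASK	(~0ULL << 5)
#define Q_LOG2SIZE_MASK		0x1fULL

static uint64_t pack_q_base(uint64_t base_dma, unsigned int log2size)
{
	uint64_t v = base_dma & Q_BASE_ADDR_MASK;	/* drop unaligned low bits */

	v |= (uint64_t)log2size & Q_LOG2SIZE_MASK;	/* FIELD_PREP-style insert */
	return v;
}

int main(void)
{
	uint64_t q_base = pack_q_base(0x80001000ULL, 8);

	printf("q_base=0x%llx log2size=%llu\n",
	       (unsigned long long)q_base,
	       (unsigned long long)(q_base & Q_LOG2SIZE_MASK));
	return 0;
}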
527 vintf->lvcmdqs[lidx] = NULL; in tegra241_vintf_deinit_lvcmdq()
533 struct tegra241_cmdqv *cmdqv = vintf->cmdqv; in tegra241_vintf_init_lvcmdq()
534 u16 idx = vintf->idx; in tegra241_vintf_init_lvcmdq()
536 vcmdq->idx = idx * cmdqv->num_lvcmdqs_per_vintf + lidx; in tegra241_vintf_init_lvcmdq()
537 vcmdq->lidx = lidx; in tegra241_vintf_init_lvcmdq()
538 vcmdq->cmdqv = cmdqv; in tegra241_vintf_init_lvcmdq()
539 vcmdq->vintf = vintf; in tegra241_vintf_init_lvcmdq()
540 vcmdq->page0 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE0(idx, lidx); in tegra241_vintf_init_lvcmdq()
541 vcmdq->page1 = cmdqv->base + TEGRA241_VINTFi_LVCMDQ_PAGE1(idx, lidx); in tegra241_vintf_init_lvcmdq()
543 vintf->lvcmdqs[lidx] = vcmdq; in tegra241_vintf_init_lvcmdq()
549 struct tegra241_vcmdq *vcmdq = vintf->lvcmdqs[lidx]; in tegra241_vintf_free_lvcmdq()
556 dev_dbg(vintf->cmdqv->dev, in tegra241_vintf_free_lvcmdq()
564 struct tegra241_cmdqv *cmdqv = vintf->cmdqv; in tegra241_vintf_alloc_lvcmdq()
571 return ERR_PTR(-ENOMEM); in tegra241_vintf_alloc_lvcmdq()
582 dev_dbg(cmdqv->dev, in tegra241_vintf_alloc_lvcmdq()
597 kfree(cmdqv->vintfs[idx]->lvcmdqs); in tegra241_cmdqv_deinit_vintf()
598 ida_free(&cmdqv->vintf_ids, idx); in tegra241_cmdqv_deinit_vintf()
599 cmdqv->vintfs[idx] = NULL; in tegra241_cmdqv_deinit_vintf()
609 ret = ida_alloc_max(&cmdqv->vintf_ids, max_idx, GFP_KERNEL); in tegra241_cmdqv_init_vintf()
614 vintf->idx = idx; in tegra241_cmdqv_init_vintf()
615 vintf->cmdqv = cmdqv; in tegra241_cmdqv_init_vintf()
616 vintf->base = cmdqv->base + TEGRA241_VINTF(idx); in tegra241_cmdqv_init_vintf()
618 vintf->lvcmdqs = kcalloc(cmdqv->num_lvcmdqs_per_vintf, in tegra241_cmdqv_init_vintf()
619 sizeof(*vintf->lvcmdqs), GFP_KERNEL); in tegra241_cmdqv_init_vintf()
620 if (!vintf->lvcmdqs) { in tegra241_cmdqv_init_vintf()
621 ida_free(&cmdqv->vintf_ids, idx); in tegra241_cmdqv_init_vintf()
622 return -ENOMEM; in tegra241_cmdqv_init_vintf()
625 cmdqv->vintfs[idx] = vintf; in tegra241_cmdqv_init_vintf()
633 tegra241_vcmdq_hw_deinit(vintf->lvcmdqs[lidx]); in tegra241_vintf_remove_lvcmdq()
639 struct tegra241_vintf *vintf = cmdqv->vintfs[idx]; in tegra241_cmdqv_remove_vintf()
643 for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) in tegra241_cmdqv_remove_vintf()
644 if (vintf->lvcmdqs[lidx]) in tegra241_cmdqv_remove_vintf()
650 dev_dbg(cmdqv->dev, "VINTF%u: deallocated\n", vintf->idx); in tegra241_cmdqv_remove_vintf()
655 static void tegra241_cmdqv_remove(struct arm_smmu_device *smmu) in tegra241_cmdqv_remove() argument
658 container_of(smmu, struct tegra241_cmdqv, smmu); in tegra241_cmdqv_remove()
662 for (idx = 0; idx < cmdqv->num_vintfs; idx++) { in tegra241_cmdqv_remove()
663 if (cmdqv->vintfs[idx]) { in tegra241_cmdqv_remove()
671 ida_destroy(&cmdqv->vintf_ids); in tegra241_cmdqv_remove()
673 if (cmdqv->irq > 0) in tegra241_cmdqv_remove()
674 free_irq(cmdqv->irq, cmdqv); in tegra241_cmdqv_remove()
675 iounmap(cmdqv->base); in tegra241_cmdqv_remove()
676 kfree(cmdqv->vintfs); in tegra241_cmdqv_remove()
677 put_device(cmdqv->dev); /* smmu->impl_dev */ in tegra241_cmdqv_remove()
734 *res = *rentry->res; in tegra241_cmdqv_find_acpi_resource()
751 static int tegra241_cmdqv_init_structures(struct arm_smmu_device *smmu) in tegra241_cmdqv_init_structures() argument
754 container_of(smmu, struct tegra241_cmdqv, smmu); in tegra241_cmdqv_init_structures()
761 return -ENOMEM; in tegra241_cmdqv_init_structures()
763 /* Init VINTF0 for in-kernel use */ in tegra241_cmdqv_init_structures()
766 dev_err(cmdqv->dev, "failed to init vintf0: %d\n", ret); in tegra241_cmdqv_init_structures()
771 for (lidx = 0; lidx < cmdqv->num_lvcmdqs_per_vintf; lidx++) { in tegra241_cmdqv_init_structures()
780 smmu->impl_ops = &tegra241_cmdqv_impl_ops; in tegra241_cmdqv_init_structures()
789 __tegra241_cmdqv_probe(struct arm_smmu_device *smmu, struct resource *res, in __tegra241_cmdqv_probe() argument
802 static_assert(offsetof(struct tegra241_cmdqv, smmu) == 0); in __tegra241_cmdqv_probe()
804 base = ioremap(res->start, resource_size(res)); in __tegra241_cmdqv_probe()
806 dev_err(smmu->dev, "failed to ioremap\n"); in __tegra241_cmdqv_probe()
812 dev_info(smmu->dev, "Detected disable_cmdqv=true\n"); in __tegra241_cmdqv_probe()
817 cmdqv = devm_krealloc(smmu->dev, smmu, sizeof(*cmdqv), GFP_KERNEL); in __tegra241_cmdqv_probe()
820 new_smmu = &cmdqv->smmu; in __tegra241_cmdqv_probe()
822 cmdqv->irq = irq; in __tegra241_cmdqv_probe()
823 cmdqv->base = base; in __tegra241_cmdqv_probe()
824 cmdqv->dev = smmu->impl_dev; in __tegra241_cmdqv_probe()
826 if (cmdqv->irq > 0) { in __tegra241_cmdqv_probe()
827 ret = request_irq(irq, tegra241_cmdqv_isr, 0, "tegra241-cmdqv", in __tegra241_cmdqv_probe()
830 dev_err(cmdqv->dev, "failed to request irq (%d): %d\n", in __tegra241_cmdqv_probe()
831 cmdqv->irq, ret); in __tegra241_cmdqv_probe()
837 cmdqv->num_vintfs = 1 << FIELD_GET(CMDQV_NUM_VINTF_LOG2, regval); in __tegra241_cmdqv_probe()
838 cmdqv->num_vcmdqs = 1 << FIELD_GET(CMDQV_NUM_VCMDQ_LOG2, regval); in __tegra241_cmdqv_probe()
839 cmdqv->num_lvcmdqs_per_vintf = cmdqv->num_vcmdqs / cmdqv->num_vintfs; in __tegra241_cmdqv_probe()
841 cmdqv->vintfs = in __tegra241_cmdqv_probe()
842 kcalloc(cmdqv->num_vintfs, sizeof(*cmdqv->vintfs), GFP_KERNEL); in __tegra241_cmdqv_probe()
843 if (!cmdqv->vintfs) in __tegra241_cmdqv_probe()
846 ida_init(&cmdqv->vintf_ids); in __tegra241_cmdqv_probe()
857 /* Provide init-level ops only, until tegra241_cmdqv_init_structures */ in __tegra241_cmdqv_probe()
858 new_smmu->impl_ops = &init_ops; in __tegra241_cmdqv_probe()
863 if (cmdqv->irq > 0) in __tegra241_cmdqv_probe()
864 free_irq(cmdqv->irq, cmdqv); in __tegra241_cmdqv_probe()
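In the probe path above, CMDQV_NUM_VINTF_LOG2 and CMDQV_NUM_VCMDQ_LOG2 are log2-encoded fields read from the PARAM register, so the VINTF and VCMDQ counts come out as powers of two and the per-VINTF LVCMDQ count is their quotient. A small sketch of that decode; the field positions and the example register value are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed PARAM layout: bits [3:0] = log2(num VINTFs), bits [7:4] = log2(num VCMDQs). */
static unsigned int field_get(uint32_t reg, unsigned int shift, uint32_t mask)
{
	return (reg >> shift) & mask;
}

int main(void)
{
	uint32_t param = 0x64;	/* example: log2(VINTF) = 4, log2(VCMDQ) = 6 */
	unsigned int num_vintfs = 1u << field_get(param, 0, 0xf);
	unsigned int num_vcmdqs = 1u << field_get(param, 4, 0xf);

	printf("vintfs=%u vcmdqs=%u lvcmdqs_per_vintf=%u\n",
	       num_vintfs, num_vcmdqs, num_vcmdqs / num_vintfs);
	return 0;
}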
870 struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu) in tegra241_cmdqv_probe() argument
876 if (!smmu->dev->of_node) in tegra241_cmdqv_probe()
877 res = tegra241_cmdqv_find_acpi_resource(smmu->impl_dev, &irq); in tegra241_cmdqv_probe()
881 new_smmu = __tegra241_cmdqv_probe(smmu, res, irq); in tegra241_cmdqv_probe()
888 dev_info(smmu->impl_dev, "Falling back to standard SMMU CMDQ\n"); in tegra241_cmdqv_probe()
889 smmu->options &= ~ARM_SMMU_OPT_TEGRA241_CMDQV; in tegra241_cmdqv_probe()
890 put_device(smmu->impl_dev); in tegra241_cmdqv_probe()
891 return ERR_PTR(-ENODEV); in tegra241_cmdqv_probe()