
Searched full:mmu (Results 1 – 25 of 1347) sorted by relevance


/linux-6.14.4/drivers/staging/media/ipu3/
ipu3-mmu.c
    21  #include "ipu3-mmu.h"
    73  * @mmu: MMU to perform the invalidate operation on
    78  static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
    80  writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
    83  static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
    84  void (*func)(struct imgu_mmu *mmu))
    86  if (!pm_runtime_get_if_in_use(mmu->dev))
    89  func(mmu);
    90  pm_runtime_put(mmu->dev);
    95  * @mmu: MMU to set the CIO gate bit in.
[all …]
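
The call_if_imgu_is_powered() lines above show a common runtime-PM guard: touch the MMU registers only while the device is already powered, and drop the reference afterwards. Below is a minimal, hypothetical userspace sketch of that pattern; fake_dev, get_if_in_use() and put() are stand-ins for the real device and for pm_runtime_get_if_in_use()/pm_runtime_put().

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_dev { bool powered; };

    /* Stand-in for pm_runtime_get_if_in_use(): succeed only if already powered. */
    static bool get_if_in_use(struct fake_dev *dev) { return dev->powered; }
    /* Stand-in for pm_runtime_put(): drop the reference taken above. */
    static void put(struct fake_dev *dev) { (void)dev; }

    static void call_if_powered(struct fake_dev *dev,
                                void (*func)(struct fake_dev *))
    {
        if (!get_if_in_use(dev))
            return;         /* device is off; a register write would be lost */
        func(dev);
        put(dev);
    }

    static void invalidate_tlb(struct fake_dev *dev)
    {
        (void)dev;
        puts("TLB invalidate register written");
    }

    int main(void)
    {
        struct fake_dev dev = { .powered = true };
        call_if_powered(&dev, invalidate_tlb);
        return 0;
    }
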
/linux-6.14.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
    42  nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
    51  list_add(&ptp->head, &mmu->ptp.list);
    56  nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
    65  nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
    74  ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
    82  ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
    93  list_add(&ptp->head, &mmu->ptp.list);
   120  nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
   124  list_for_each_entry(ptc, &mmu->ptc.list, head) {
   134  list_add(&ptc->head, &mmu->ptc.list);
[all …]
Kbuild
     2  nvkm-y += nvkm/subdev/mmu/base.o
     3  nvkm-y += nvkm/subdev/mmu/nv04.o
     4  nvkm-y += nvkm/subdev/mmu/nv41.o
     5  nvkm-y += nvkm/subdev/mmu/nv44.o
     6  nvkm-y += nvkm/subdev/mmu/nv50.o
     7  nvkm-y += nvkm/subdev/mmu/g84.o
     8  nvkm-y += nvkm/subdev/mmu/mcp77.o
     9  nvkm-y += nvkm/subdev/mmu/gf100.o
    10  nvkm-y += nvkm/subdev/mmu/gk104.o
    11  nvkm-y += nvkm/subdev/mmu/gk20a.o
[all …]
ummu.c
    35  struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
    37  if (mmu->func->mem.user.oclass) {
    39  oclass->base = mmu->func->mem.user;
    45  if (mmu->func->vmm.user.oclass) {
    47  oclass->base = mmu->func->vmm.user;
    59  struct nvkm_mmu *mmu = ummu->mmu;
    67  if ((index = args->v0.index) >= mmu->heap_nr)
    69  args->v0.size = mmu->heap[index].size;
    79  struct nvkm_mmu *mmu = ummu->mmu;
    87  if ((index = args->v0.index) >= mmu->type_nr)
[all …]
/linux-6.14.4/drivers/gpu/drm/nouveau/nvif/
mmu.c
    22  #include <nvif/mmu.h>
    28  nvif_mmu_dtor(struct nvif_mmu *mmu)
    30  if (!nvif_object_constructed(&mmu->object))
    33  kfree(mmu->kind);
    34  kfree(mmu->type);
    35  kfree(mmu->heap);
    36  nvif_object_dtor(&mmu->object);
    41  struct nvif_mmu *mmu)
    53  mmu->heap = NULL;
    54  mmu->type = NULL;
[all …]
/linux-6.14.4/drivers/staging/media/atomisp/pci/mmu/
isp_mmu.c
    10  * ISP MMU management wrap code
    30  #include "mmu/isp_mmu.h"
    40  * that are only 32-bit capable (e.g. the ISP MMU).
    46  static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
    68  static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
    71  return mmu->driver->pte_to_phys(mmu, pte);
    74  static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
    77  unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
    79  return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
    86  static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
[all …]
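
isp_pgaddr_to_pte_valid() above converts a physical address to a PTE through a driver callback and then ORs in the driver's valid mask. The sketch below models that split between core code and driver callbacks; the callback signatures are simplified, and the PTE layout (page frame number in the low bits, valid flag in bit 31) is an assumption for illustration, not the real ISP format.

    #include <stdint.h>
    #include <stdio.h>

    #define VALID_BIT (1u << 31)  /* assumed flag; the real mask is driver-defined */

    typedef uint32_t pte_t32;

    struct mmu_driver {
        uint32_t pte_valid_mask;
        pte_t32  (*phys_to_pte)(uint64_t phys);
        uint64_t (*pte_to_phys)(pte_t32 pte);
    };

    /* Example driver: the PTE stores the page frame number of a 4 KiB page. */
    static pte_t32 pfn_phys_to_pte(uint64_t phys) { return (pte_t32)(phys >> 12); }
    static uint64_t pfn_pte_to_phys(pte_t32 pte)
    {
        return (uint64_t)(pte & ~VALID_BIT) << 12; /* strip flag, restore address */
    }

    /* Core-side helper, mirroring isp_pgaddr_to_pte_valid(). */
    static pte_t32 pgaddr_to_pte_valid(const struct mmu_driver *drv, uint64_t phys)
    {
        return drv->phys_to_pte(phys) | drv->pte_valid_mask;
    }

    int main(void)
    {
        const struct mmu_driver drv = {
            .pte_valid_mask = VALID_BIT,
            .phys_to_pte    = pfn_phys_to_pte,
            .pte_to_phys    = pfn_pte_to_phys,
        };
        pte_t32 pte = pgaddr_to_pte_valid(&drv, 0x12345000ull);
        printf("pte=0x%08x phys=0x%llx\n",
               pte, (unsigned long long)drv.pte_to_phys(pte));
        return 0;
    }
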
/linux-6.14.4/drivers/iommu/
ipmmu-vmsa.c
    71  struct ipmmu_vmsa_device *mmu;
    99  /* MMU "context" registers */
   149  static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
   151  return mmu->root == mmu;
   156  struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
   159  if (ipmmu_is_root(mmu))
   160  *rootp = mmu;
   177  static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
   179  return ioread32(mmu->base + offset);
   182  static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
[all …]
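
ipmmu_read()/ipmmu_write() above are the usual MMIO accessor pair: a single place applies the device base address to a register offset, so the address math lives in exactly one spot. A compilable sketch with the hardware replaced by an array; the names are hypothetical, and a real driver would use ioread32()/iowrite32() on an ioremap()ed base.

    #include <stdint.h>

    struct fake_mmu { uint32_t regs[64]; };  /* stands in for the mapped MMIO window */

    static uint32_t mmu_read(struct fake_mmu *mmu, unsigned int offset)
    {
        return mmu->regs[offset / 4];        /* models ioread32(mmu->base + offset) */
    }

    static void mmu_write(struct fake_mmu *mmu, unsigned int offset, uint32_t data)
    {
        mmu->regs[offset / 4] = data;        /* models iowrite32(data, mmu->base + offset) */
    }

    int main(void)
    {
        struct fake_mmu m = { { 0 } };
        mmu_write(&m, 0x10, 0xdeadbeef);
        return mmu_read(&m, 0x10) == 0xdeadbeef ? 0 : 1;
    }
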
/linux-6.14.4/arch/x86/kernel/
paravirt.c
   195  /* Mmu ops. */
   196  .mmu.flush_tlb_user = native_flush_tlb_local,
   197  .mmu.flush_tlb_kernel = native_flush_tlb_global,
   198  .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
   199  .mmu.flush_tlb_multi = native_flush_tlb_multi,
   200  .mmu.tlb_remove_table = native_tlb_remove_table,
   202  .mmu.exit_mmap = paravirt_nop,
   203  .mmu.notify_page_enc_status_changed = paravirt_nop,
   206  .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
   207  .mmu.write_cr2 = pv_native_write_cr2,
[all …]
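
The .mmu.* entries above populate a table of function pointers: callers never invoke native_flush_tlb_local() directly but dispatch through the table, so a hypervisor can substitute its own handlers at boot. A minimal sketch of that indirection, with hypothetical names:

    #include <stdio.h>

    struct mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
    };

    static void native_flush_user(void)   { puts("native user TLB flush"); }
    static void native_flush_kernel(void) { puts("native kernel TLB flush"); }

    /* Default table; a paravirt backend would overwrite these pointers. */
    static struct mmu_ops mmu = {
        .flush_tlb_user   = native_flush_user,
        .flush_tlb_kernel = native_flush_kernel,
    };

    int main(void)
    {
        mmu.flush_tlb_user();      /* dispatches through the table */
        mmu.flush_tlb_kernel();
        return 0;
    }
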
/linux-6.14.4/drivers/media/pci/intel/ipu6/
ipu6-mmu.c
    29  #include "ipu6-mmu.h"
    54  static void tlb_invalidate(struct ipu6_mmu *mmu)
    59  spin_lock_irqsave(&mmu->ready_lock, flags);
    60  if (!mmu->ready) {
    61  spin_unlock_irqrestore(&mmu->ready_lock, flags);
    65  for (i = 0; i < mmu->nr_mmus; i++) {
    74  if (mmu->mmu_hw[i].insert_read_before_invalidate)
    75  readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
    77  writel(0xffffffff, mmu->mmu_hw[i].base +
    87  spin_unlock_irqrestore(&mmu->ready_lock, flags);
[all …]
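
tlb_invalidate() above checks a lock-protected ready flag before touching hardware, so an invalidate requested while the MMU is down or mid-initialization becomes a harmless no-op. A compilable sketch of that guard, with a pthread mutex standing in for the kernel spinlock and hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_mmu {
        pthread_mutex_t ready_lock;
        bool ready;
    };

    static void tlb_invalidate(struct fake_mmu *mmu)
    {
        pthread_mutex_lock(&mmu->ready_lock);
        if (!mmu->ready) {
            pthread_mutex_unlock(&mmu->ready_lock);
            return;                 /* hardware not up: nothing to invalidate */
        }
        puts("write 0xffffffff to the TLB invalidate register");
        pthread_mutex_unlock(&mmu->ready_lock);
    }

    int main(void)
    {
        struct fake_mmu mmu = {
            .ready_lock = PTHREAD_MUTEX_INITIALIZER,
            .ready      = true,
        };
        tlb_invalidate(&mmu);
        return 0;
    }
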
ipu6-dma.c
    19  #include "ipu6-mmu.h"
    29  static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
    33  list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
   120  struct ipu6_mmu *mmu = sys->mmu;
   122  info = get_vm_info(mmu, dma_handle);
   159  struct ipu6_mmu *mmu = sys->mmu;
   174  iova = alloc_iova(&mmu->dmap->iovad, count,
   196  ret = ipu6_mmu_map(mmu->dmap->mmu_info,
   218  list_add(&info->list, &mmu->vma_list);
   225  pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
[all …]
/linux-6.14.4/drivers/gpu/drm/panfrost/
panfrost_mmu.c
    34  /* Wait for the MMU status to indicate there is no active command, in
    52  /* write AS_COMMAND when MMU is ready to accept another command */
   105  /* Run the MMU operation */
   113  struct panfrost_mmu *mmu,
   119  ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
   124  static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
   126  int as_nr = mmu->as;
   127  struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
   158  u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
   164  as = mmu->as;
[all …]
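
wait_ready()/write_cmd() above implement the classic poll-then-issue sequence: spin on a status register until the hardware reports no active command, then write the next command. A sketch with plain variables standing in for the AS_STATUS/AS_COMMAND registers; the bit layout and names are assumptions.

    #include <stdint.h>

    #define AS_STATUS_ACTIVE (1u << 0)   /* assumed "command in flight" bit */

    static uint32_t as_status;           /* stands in for the AS_STATUS register */
    static uint32_t as_command;          /* stands in for the AS_COMMAND register */

    static int wait_ready(int timeout)
    {
        while (as_status & AS_STATUS_ACTIVE) {
            if (--timeout < 0)
                return -1;               /* give up instead of spinning forever */
        }
        return 0;
    }

    static int write_cmd(uint32_t cmd)
    {
        if (wait_ready(1000))
            return -1;
        as_command = cmd;                /* MMU is idle: safe to issue command */
        return 0;
    }

    int main(void)
    {
        as_status = 0;                   /* hardware idle */
        return write_cmd(0x1) ? 1 : 0;
    }
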
/linux-6.14.4/drivers/accel/ivpu/
ivpu_mmu.c
   255  return "MMU bypass is disallowed for this StreamID";
   313  ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
   317  ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
   321  ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
   332  ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
   337  struct ivpu_mmu_info *mmu = vdev->mmu;
   338  struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
   345  ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);
   352  struct ivpu_mmu_info *mmu = vdev->mmu;
   353  struct ivpu_mmu_strtab *strtab = &mmu->strtab;
[all …]
/linux-6.14.4/drivers/staging/media/atomisp/include/mmu/
isp_mmu.h
    10  * ISP MMU driver for classic two-level page tables
    77  unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
    89  void (*tlb_flush_range)(struct isp_mmu *mmu,
    91  void (*tlb_flush_all)(struct isp_mmu *mmu);
    92  unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
    94  phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
   109  #define ISP_PTE_VALID_MASK(mmu) \
   110  ((mmu)->driver->pte_valid_mask)
   112  #define ISP_PTE_VALID(mmu, pte) \
   113  ((pte) & ISP_PTE_VALID_MASK(mmu))
[all …]
/linux-6.14.4/arch/arm64/kvm/hyp/nvhe/
tlb.c
    14  struct kvm_s2_mmu *mmu;
    19  static void enter_vmid_context(struct kvm_s2_mmu *mmu,
    23  struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
    29  cxt->mmu = NULL;
    62  if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
    65  cxt->mmu = vcpu->arch.hw_mmu;
    68  if (mmu == host_s2_mmu)
    71  cxt->mmu = host_s2_mmu;
    81  * We're guaranteed that the host S1 MMU is enabled, so
    83  * TLB fill. For guests, we ensure that the S1 MMU is
[all …]
/linux-6.14.4/arch/m68k/
Kconfig.cpu
     6  default M68KCLASSIC if MMU
     7  default COLDFIRE if !MMU
    24  select M68020 if MMU && !(M68030 || M68040 || M68060)
    34  select HAVE_PAGE_SIZE_8KB if !MMU
    38  depends on MMU
    52  def_bool M68KCLASSIC && !MMU
    67  a paging MMU.
    71  depends on MMU
    77  68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
    80  if M68KCLASSIC && MMU
[all …]
Kconfig
     8  select ARCH_HAS_CPU_FINALIZE_INIT if MMU
    36  select MMU_GATHER_NO_RANGE if MMU
    39  select NO_DMA if !MMU && !COLDFIRE
    42  select UACCESS_MEMCPY if !MMU
    81  config MMU
    82  bool "MMU-based Paged Memory Management Support"
    85  Select if you want MMU-based virtualised addressing space
    89  def_bool MMU && M68KCLASSIC
    93  def_bool MMU && COLDFIRE
    97  def_bool MMU && SUN3
[all …]
/linux-6.14.4/arch/arm/mm/
Kconfig
    11  depends on !MMU
    30  select CPU_COPY_V4WT if MMU
    34  select CPU_TLB_V4WT if MMU
    37  MMU built around an ARM7TDMI core.
    45  depends on !MMU
    63  depends on !MMU
    82  select CPU_COPY_V4WB if MMU
    86  select CPU_TLB_V4WBI if MMU
   101  select CPU_COPY_V4WB if MMU
   105  select CPU_TLB_V4WBI if MMU
[all …]
/linux-6.14.4/drivers/gpu/drm/msm/
msm_mmu.h
    13  void (*detach)(struct msm_mmu *mmu);
    14  int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
    16  int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
    17  void (*destroy)(struct msm_mmu *mmu);
    18  void (*resume_translation)(struct msm_mmu *mmu);
    35  static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
    38  mmu->dev = dev;
    39  mmu->funcs = funcs;
    40  mmu->type = type;
    46  static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
[all …]
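
msm_mmu_init() above is a constructor for a base object: it records the device, the backend's function table, and a type tag, after which callers dispatch through mmu->funcs. A self-contained sketch of the same vtable pattern, with hypothetical names and a dummy backend:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct mmu;

    struct mmu_funcs {
        int  (*map)(struct mmu *mmu, uint64_t iova, size_t len);
        void (*destroy)(struct mmu *mmu);
    };

    struct mmu {
        void *dev;
        const struct mmu_funcs *funcs;
        int type;
    };

    static void mmu_init(struct mmu *mmu, void *dev,
                         const struct mmu_funcs *funcs, int type)
    {
        mmu->dev = dev;
        mmu->funcs = funcs;
        mmu->type = type;
    }

    /* One concrete backend; a driver would supply iommu- or gpu-specific ops. */
    static int dummy_map(struct mmu *mmu, uint64_t iova, size_t len)
    {
        (void)mmu;
        printf("map iova=0x%llx len=%zu\n", (unsigned long long)iova, len);
        return 0;
    }

    static void dummy_destroy(struct mmu *mmu) { (void)mmu; }

    static const struct mmu_funcs dummy_funcs = {
        .map     = dummy_map,
        .destroy = dummy_destroy,
    };

    int main(void)
    {
        struct mmu mmu;
        mmu_init(&mmu, NULL, &dummy_funcs, 0);
        mmu.funcs->map(&mmu, 0x1000, 4096);  /* dispatch through the table */
        mmu.funcs->destroy(&mmu);
        return 0;
    }
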
msm_iommu.c
    30  static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
    32  return container_of(mmu, struct msm_iommu_pagetable, base);
    91  static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
    94  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
   115  static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
   118  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
   145  msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
   154  static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
   156  struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
   172  int msm_iommu_pagetable_params(struct msm_mmu *mmu,
[all …]
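
to_pagetable() above recovers the wrapping msm_iommu_pagetable from a pointer to its embedded msm_mmu base via container_of(). The sketch below defines the macro standalone (it mirrors the kernel's definition); the structures around it are hypothetical.

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the enclosing structure from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mmu_base { int type; };

    struct pagetable {
        struct mmu_base base;   /* embedded base object */
        int asid;
    };

    int main(void)
    {
        struct pagetable pt = { .base = { .type = 1 }, .asid = 7 };
        struct mmu_base *b = &pt.base;           /* callers see only the base */
        struct pagetable *back = container_of(b, struct pagetable, base);
        printf("asid=%d\n", back->asid);         /* prints asid=7 */
        return 0;
    }
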
/linux-6.14.4/arch/arc/mm/
tlb.c
    16  #include <asm/mmu.h>
    87  * If Not already present get a free slot from MMU.
    99  * Commit the Entry to MMU
   131  * Un-conditionally (without lookup) erase the entire MMU contents
   136  struct cpuinfo_arc_mmu *mmu = &mmuinfo;
   139  int num_tlb = mmu->sets * mmu->ways;
   182  * Only for fork( ) do we need to move parent to a new MMU ctxt,
   245  /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
   274  * Delete TLB entry in MMU for a given page (??? address)
   403  * -it ASID for TLB entry is fetched from MMU ASID reg (valid for curr)
[all …]
/linux-6.14.4/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/
mmu_public.h
    14  /*! Set the page table base index of MMU[ID]
    16  \param ID[in] MMU identifier
    19  \return none, MMU[ID].page_table_base_index = base_index
    25  /*! Get the page table base index of MMU[ID]
    27  \param ID[in] MMU identifier
    30  \return MMU[ID].page_table_base_index
    35  /*! Invalidate the page table cache of MMU[ID]
    37  \param ID[in] MMU identifier
    50  /*! Write to a control register of MMU[ID]
    52  \param ID[in] MMU identifier
[all …]
/linux-6.14.4/arch/arm64/kvm/hyp/vhe/
tlb.c
    14  struct kvm_s2_mmu *mmu;
    20  static void enter_vmid_context(struct kvm_s2_mmu *mmu,
    28  if (vcpu && mmu != vcpu->arch.hw_mmu)
    29  cxt->mmu = vcpu->arch.hw_mmu;
    31  cxt->mmu = NULL;
    41  * allocate IPA->PA walks, so we enable the S1 MMU...
    63  __load_stage2(mmu, mmu->arch);
    79  /* ... and the stage-2 MMU context that we switched away from */
    80  if (cxt->mmu)
    81  __load_stage2(cxt->mmu, cxt->mmu->arch);
[all …]
/linux-6.14.4/arch/x86/include/asm/
paravirt.h
    75  PVOP_VCALL0(mmu.flush_tlb_user);
    80  PVOP_VCALL0(mmu.flush_tlb_kernel);
    85  PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
    91  PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
    96  PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
   101  PVOP_VCALL1(mmu.exit_mmap, mm);
   107  PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
   158  return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
   164  PVOP_VCALL1(mmu.write_cr2, x);
   169  return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
[all …]
/linux-6.14.4/Documentation/devicetree/bindings/iommu/
samsung,sysmmu.yaml
     7  title: Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
    17  System MMU is an IOMMU and supports identical translation table format to
    19  permissions, shareability and security protection. In addition, System MMU has
    25  master), but one System MMU can handle transactions from only one peripheral
    26  device. The relation between a System MMU and the peripheral device needs to be
    31  * MFC has one System MMU on its left and right bus.
    32  * FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU
    34  * M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and
    35  the other System MMU on the write channel.
    37  For information on assigning System MMU controller to its peripheral devices,
[all …]
/linux-6.14.4/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/
branch.json
    18  …still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off",
    21  … still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off"
    24  … the address. This event still counts when branch prediction is disabled due to the MMU being off",
    27  …r the address. This event still counts when branch prediction is disabled due to the MMU being off"
    30  … the address. This event still counts when branch prediction is disabled due to the MMU being off",
    33  …d the address. This event still counts when branch prediction is disabled due to the MMU being off"
    36  …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
    39  …ion. This event still counts when branch prediction is disabled due to the MMU being off. Conditio…
    42  …he condition. This event still counts when branch prediction is disabled due to the MMU being off",
    45  …the condition. This event still counts when branch prediction is disabled due to the MMU being off"
[all …]
