
Searched full:asid (Results 1 – 25 of 213) sorted by relevance


/linux-6.14.4/arch/arm64/mm/
context.c
38 #define ctxid2asid(asid) ((asid) & ~ASID_MASK) argument
39 #define asid2ctxid(asid, genid) ((asid) | (genid)) argument
44 u32 asid; in get_cpu_asid_bits() local
50 pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", in get_cpu_asid_bits()
54 asid = 8; in get_cpu_asid_bits()
57 asid = 16; in get_cpu_asid_bits()
60 return asid; in get_cpu_asid_bits()
66 u32 asid = get_cpu_asid_bits(); in verify_cpu_asid_bits() local
68 if (asid < asid_bits) { in verify_cpu_asid_bits()
70 * We cannot decrease the ASID size at runtime, so panic if we support in verify_cpu_asid_bits()
[all …]
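
The two macros at the top of context.c carry the whole scheme: a 64-bit context id keeps the hardware ASID in its low asid_bits and a rollover generation in the bits above, so a context id minted in an older generation never compares equal to a current one. A minimal sketch of that packing, assuming a fixed 16-bit ASID (get_cpu_asid_bits() above probes 8 vs 16 at boot):

    #include <stdint.h>

    #define ASID_BITS 16                             /* assumed fixed; the kernel probes 8 or 16 */
    #define ASID_MASK (~((1ULL << ASID_BITS) - 1))   /* generation bits sit above the ASID */

    /* Low bits only: the value that ends up in the TTBR ASID field. */
    static inline uint64_t ctxid2asid(uint64_t ctxid)
    {
        return ctxid & ~ASID_MASK;
    }

    /* Recombine an ASID with a generation counter; genid is assumed to be
     * a multiple of (1 << ASID_BITS), as the allocator maintains. */
    static inline uint64_t asid2ctxid(uint64_t asid, uint64_t genid)
    {
        return asid | genid;
    }
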
/linux-6.14.4/arch/csky/mm/
asid.c
3 * Generic ASID allocator.
14 #include <asm/asid.h>
21 #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift) argument
27 u64 asid; in flush_context() local
29 /* Update the list of reserved ASIDs and the ASID bitmap. */ in flush_context()
33 asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); in flush_context()
38 * ASID, as this is the only trace we have of in flush_context()
41 if (asid == 0) in flush_context()
42 asid = reserved_asid(info, i); in flush_context()
43 __set_bit(asid2idx(info, asid), info->map); in flush_context()
[all …]
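
The flush_context() shown here is the rollover path of the generic allocator: it atomically steals each CPU's active ASID, falls back to the CPU's reserved copy when the exchange returns 0 (the CPU was mid-switch, so the reserved slot is the only trace left), and re-marks each such ASID in the bitmap so the new generation cannot reallocate an ASID that may still sit in some TLB. A single-threaded sketch, with plain arrays standing in for the kernel's per-cpu atomics and struct asid_info:

    #include <stdint.h>
    #include <string.h>

    #define NR_CPUS  4
    #define NR_ASIDS 256

    static uint64_t active_asid[NR_CPUS];   /* 0 = a context switch is in flight */
    static uint64_t reserved_asid[NR_CPUS];
    static uint8_t  asid_map[NR_ASIDS];     /* kernel: a real bitmap */

    static void flush_context(void)
    {
        memset(asid_map, 0, sizeof(asid_map));
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            /* kernel: asid = atomic64_xchg_relaxed(&active_asid(info, cpu), 0) */
            uint64_t asid = active_asid[cpu];
            active_asid[cpu] = 0;
            if (asid == 0)                  /* raced: reserved slot is the only trace */
                asid = reserved_asid[cpu];
            asid_map[asid % NR_ASIDS] = 1;  /* kernel: __set_bit(asid2idx(info, asid), map) */
            reserved_asid[cpu] = asid;
        }
    }
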
/linux-6.14.4/arch/arm/mm/
context.c
27 * | process ID | ASID |
32 * The ASID is used to tag entries in the CPU caches and TLBs.
56 u64 context_id, asid; in a15_erratum_get_cpumask() local
65 * running the same ASID as the one being invalidated. in a15_erratum_get_cpumask()
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
68 if (asid == 0) in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
70 if (context_id == asid) in a15_erratum_get_cpumask()
79 * With LPAE, the ASID and page tables are updated atomically, so there is
80 * no need for a reserved set of tables (the active ASID tracking prevents
[all …]
/linux-6.14.4/arch/arc/include/asm/
mmu_context.h
10 * -Major rewrite of Core ASID allocation routine get_new_mmu_context
23 /* ARC ASID Management
25 * MMU tags TLBs with an 8-bit ASID, avoiding need to flush the TLB on
28 * ASID is managed per cpu, so task threads across CPUs can have different
29 * ASID. Global ASID management is needed if hardware supports TLB shootdown
32 * Each task is assigned unique ASID, with a simple round-robin allocator
36 * A new allocation cycle, post rollover, could potentially reassign an ASID
37 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
38 * The 32-bit @asid_cpu (and mm->asid) has 8 bits of MMU PID and the remaining 24 bits
49 #define asid_mm(mm, cpu) mm->context.asid[cpu]
[all …]
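
Per the comment block, ARC packs everything into one 32-bit word per CPU: the low 8 bits are the hardware PID programmed into the MMU, the upper 24 count allocation cycles, and a task's cached ASID stays valid only while the cycle bits match. A rough sketch of the round-robin refresh under those assumptions (treating PID 0 as reserved is this sketch's assumption, not something the excerpt states):

    #include <stdint.h>

    #define MMU_PID_BITS 8
    #define MMU_PID_MASK ((1U << MMU_PID_BITS) - 1)

    static uint32_t asid_cpu = 1U << MMU_PID_BITS;   /* per-CPU in the kernel */

    /* Return the 8-bit PID for @mm_asid, refreshing it when it belongs
     * to an older allocation cycle (the upper 24 bits differ). */
    static uint32_t get_mmu_pid(uint32_t *mm_asid)
    {
        if (((*mm_asid ^ asid_cpu) & ~MMU_PID_MASK) == 0)
            return *mm_asid & MMU_PID_MASK;          /* same cycle: still valid */

        if ((++asid_cpu & MMU_PID_MASK) == 0) {
            /* PID space wrapped: a new cycle begins, and the kernel
             * flushes the TLB here before any PID gets reused. */
            asid_cpu++;                              /* skip PID 0 (assumed reserved) */
        }
        *mm_asid = asid_cpu;
        return asid_cpu & MMU_PID_MASK;
    }
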
/linux-6.14.4/arch/xtensa/include/asm/
mmu_context.h
38 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
72 unsigned long asid = cpu_asid_cache(cpu); in get_new_mmu_context() local
73 if ((++asid & ASID_MASK) == 0) { in get_new_mmu_context()
75 * Start new asid cycle; continue counting with next in get_new_mmu_context()
79 asid += ASID_USER_FIRST; in get_new_mmu_context()
81 cpu_asid_cache(cpu) = asid; in get_new_mmu_context()
82 mm->context.asid[cpu] = asid; in get_new_mmu_context()
89 * Check if our ASID is of an older version and thus invalid. in get_mmu_context()
93 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context() local
95 if (asid == NO_CONTEXT || in get_mmu_context()
[all …]
/linux-6.14.4/arch/riscv/mm/
Dcontext.c68 /* Update the list of reserved ASIDs and the ASID bitmap. */ in __flush_context()
87 /* Mark ASID #0 as used because it is used at boot-time */ in __flush_context()
98 unsigned long asid, ver = atomic_long_read(&current_version); in __new_context() local
122 * Allocate a free ASID. If we can't find one then increment in __new_context()
125 asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx); in __new_context()
126 if (asid != num_asids) in __new_context()
136 asid = find_next_zero_bit(context_asid_map, num_asids, 1); in __new_context()
139 __set_bit(asid, context_asid_map); in __new_context()
140 cur_idx = asid; in __new_context()
141 return asid | ver; in __new_context()
[all …]
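
Stripped of locking and TLB maintenance, __new_context() is a bitmap allocator with a version counter: scan forward from the last allocation, and once the map is full, bump the version, clear the map (ASID 0 stays reserved because boot used it), and rescan from index 1. A compact sketch:

    #include <stdint.h>
    #include <string.h>

    #define NUM_ASIDS 256UL

    static uint8_t asid_map[NUM_ASIDS];     /* kernel: bitmap + find_next_zero_bit() */
    static unsigned long cur_idx = 1;
    static unsigned long current_version = NUM_ASIDS;   /* version lives above the ASID bits */

    static unsigned long find_zero(unsigned long from)
    {
        while (from < NUM_ASIDS && asid_map[from])
            from++;
        return from;                        /* NUM_ASIDS means "none free" */
    }

    static unsigned long new_context(void)
    {
        unsigned long asid = find_zero(cur_idx);

        if (asid == NUM_ASIDS) {
            current_version += NUM_ASIDS;   /* new generation */
            memset(asid_map, 0, sizeof(asid_map));   /* kernel: __flush_context() */
            asid_map[0] = 1;                /* ASID 0 reserved since boot */
            asid = find_zero(1);
        }
        asid_map[asid] = 1;
        cur_idx = asid;
        return asid | current_version;      /* low bits ASID, high bits version */
    }
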
tlbflush.c
19 unsigned long asid) in local_flush_tlb_range_threshold_asid() argument
25 local_flush_tlb_all_asid(asid); in local_flush_tlb_range_threshold_asid()
30 local_flush_tlb_page_asid(start, asid); in local_flush_tlb_range_threshold_asid()
36 unsigned long size, unsigned long stride, unsigned long asid) in local_flush_tlb_range_asid() argument
39 local_flush_tlb_page_asid(start, asid); in local_flush_tlb_range_asid()
41 local_flush_tlb_all_asid(asid); in local_flush_tlb_range_asid()
43 local_flush_tlb_range_threshold_asid(start, size, stride, asid); in local_flush_tlb_range_asid()
68 unsigned long asid; member
78 local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); in __ipi_flush_tlb_range_asid()
81 static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid, in __flush_tlb_range() argument
[all …]
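
local_flush_tlb_range_asid() chooses between three strategies: a single-page flush when the range fits in one stride, a full-ASID flush for the "flush everything" sentinel, and otherwise the threshold helper, which flushes page by page until the page count makes one whole-ASID flush cheaper. A sketch of that decision; the flush primitives here are stand-ins for the kernel's sfence.vma wrappers, and the threshold value is assumed:

    #include <stdio.h>

    #define FLUSH_ALL_SENTINEL ((unsigned long)-1)
    #define FLUSH_THRESHOLD    64UL   /* assumed; the kernel tunes its own limit */

    static void flush_page(unsigned long addr, unsigned long asid)
    {
        printf("sfence.vma %#lx, asid %lu\n", addr, asid);
    }

    static void flush_all(unsigned long asid)
    {
        printf("sfence.vma x0, asid %lu\n", asid);
    }

    static void flush_range_asid(unsigned long start, unsigned long size,
                                 unsigned long stride, unsigned long asid)
    {
        if (size <= stride) {
            flush_page(start, asid);                  /* a single entry */
        } else if (size == FLUSH_ALL_SENTINEL) {
            flush_all(asid);
        } else if (size / stride <= FLUSH_THRESHOLD) {
            for (unsigned long a = start; a < start + size; a += stride)
                flush_page(a, asid);                  /* cheap enough per page */
        } else {
            flush_all(asid);                          /* too many pages: drop the ASID */
        }
    }
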
/linux-6.14.4/arch/loongarch/include/asm/
mmu_context.h
22 * as a software asid extension.
34 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
54 u64 asid = asid_cache(cpu); in get_new_mmu_context() local
56 if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) in get_new_mmu_context()
57 *need_flush = true; /* start new asid cycle */ in get_new_mmu_context()
59 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_new_mmu_context()
77 static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl) in atomic_update_pgd_asid() argument
82 : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl) in atomic_update_pgd_asid()
94 /* Check if our ASID is of an older version and thus invalid */ in switch_mm_irqs_off()
104 local_flush_tlb_user(); /* Flush TLB after updating the ASID */ in switch_mm_irqs_off()
[all …]
/linux-6.14.4/arch/sh/mm/
tlbflush_32.c
21 unsigned long asid; in local_flush_tlb_page() local
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
30 set_asid(asid); in local_flush_tlb_page()
32 local_flush_tlb_one(asid, page); in local_flush_tlb_page()
56 unsigned long asid; in local_flush_tlb_range() local
59 asid = cpu_asid(cpu, mm); in local_flush_tlb_range()
65 set_asid(asid); in local_flush_tlb_range()
68 local_flush_tlb_one(asid, start); in local_flush_tlb_range()
89 unsigned long asid; in local_flush_tlb_kernel_range() local
92 asid = cpu_asid(cpu, &init_mm); in local_flush_tlb_kernel_range()
[all …]
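
The SH flush path uses a borrow-the-ASID pattern: to flush pages of an mm other than the current one, it saves the live ASID, programs the target mm's ASID with set_asid(), issues the per-page invalidates, and restores the saved value, all with interrupts off. A sketch of the pattern with stubbed accessors (the stubs model, not reproduce, the SH register helpers):

    #define PAGE_SIZE 4096UL

    static unsigned long mmu_asid;   /* models the MMU's current-ASID register */

    static unsigned long get_asid(void) { return mmu_asid; }
    static void set_asid(unsigned long asid) { mmu_asid = asid; }
    static void flush_one(unsigned long asid, unsigned long page)
    {
        (void)asid; (void)page;      /* hardware invalidate of (asid, page) */
    }

    static void flush_range_for_mm(unsigned long mm_asid,
                                   unsigned long start, unsigned long end)
    {
        unsigned long saved = get_asid();    /* interrupts disabled in the kernel */

        if (saved != mm_asid)
            set_asid(mm_asid);               /* flushes now target mm's entries */
        for (unsigned long page = start; page < end; page += PAGE_SIZE)
            flush_one(mm_asid, page);
        if (saved != mm_asid)
            set_asid(saved);                 /* back to the running context */
    }
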
/linux-6.14.4/arch/csky/include/asm/
asid.h
22 /* Number of ASIDs allocated per context (shift value) */
37 * Check the ASID is still valid for the context. If not generate a new ASID.
39 * @pasid: Pointer to the current ASID batch
46 u64 asid, old_active_asid; in asid_check_context() local
48 asid = atomic64_read(pasid); in asid_check_context()
52 * If our active_asid is non-zero and the ASID matches the current in asid_check_context()
60 * - We get a valid ASID back from the cmpxchg, which means the in asid_check_context()
66 !((asid ^ atomic64_read(&info->generation)) >> info->bits) && in asid_check_context()
68 old_active_asid, asid)) in asid_check_context()
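
asid_check_context() is the lock-free fast path the comment describes: with a non-zero active ASID (no rollover in flight) and matching generation bits, a cmpxchg re-publishes the mm's ASID as this CPU's active one and no lock is taken; a mismatch or a lost race falls through to the slow path. A single-threaded sketch of the two checks, with an assumed 8-bit ASID width and a plain store modelling the uncontended cmpxchg:

    #include <stdbool.h>
    #include <stdint.h>

    #define ASID_BITS 8                   /* assumed; the generation sits above */

    static uint64_t generation = 1ULL << ASID_BITS;
    static uint64_t active_asid;          /* per-CPU atomic64 in the kernel */

    /* True when the fast path wins and no rollover handling is needed. */
    static bool asid_fast_path(uint64_t asid)
    {
        uint64_t old_active = active_asid;

        /* (1) old_active != 0: no rollover is zeroing active ASIDs right now;
         * (2) the generation bits of @asid match the current generation;
         * (3) kernel: atomic64_cmpxchg_relaxed(&active, old_active, asid)
         *     must also succeed. */
        if (old_active && !((asid ^ generation) >> ASID_BITS)) {
            active_asid = asid;
            return true;
        }
        return false;                     /* slow path: lock, maybe a new ASID */
    }
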
/linux-6.14.4/arch/sh/include/asm/
mmu_context_32.h
6 static inline void set_asid(unsigned long asid) in set_asid() argument
8 __raw_writel(asid, MMU_PTEAEX); in set_asid()
16 static inline void set_asid(unsigned long asid) in set_asid() argument
25 : "r" (asid), "m" (__m(MMU_PTEH)), in set_asid()
31 unsigned long asid; in get_asid() local
34 : "=r" (asid) in get_asid()
36 asid &= MMU_CONTEXT_ASID_MASK; in get_asid()
37 return asid; in get_asid()
mmu_context.h
6 * ASID handling idea taken from MIPS implementation.
22 * (b) ASID (Address Space IDentifier)
33 /* Impossible ASID value, to differentiate from NO_CONTEXT. */
57 unsigned long asid = asid_cache(cpu); in get_mmu_context() local
60 if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context()
65 if (!(++asid & MMU_CONTEXT_ASID_MASK)) { in get_mmu_context()
67 * We have exhausted the ASIDs of this version. in get_mmu_context()
76 if (!asid) in get_mmu_context()
77 asid = MMU_CONTEXT_FIRST_VERSION; in get_mmu_context()
80 cpu_context(cpu, mm) = asid_cache(cpu) = asid; in get_mmu_context()
[all …]
/linux-6.14.4/drivers/misc/sgi-gru/
grumain.c
52 /*--------- ASID Management -------------------------------------------
58 * asid in use ("x"s below). Set "limit" to this value.
66 * Each time MAX_ASID is reached, increment the asid generation. Since
69 * a context, the asid generation of the GTS asid is rechecked. If it
70 * doesn't match the current generation, a new asid will be assigned.
75 * All asid manipulation & context loading/unloading is protected by the
79 /* Hit the asid limit. Start over */
89 static int gru_reset_asid_limit(struct gru_state *gru, int asid) in gru_reset_asid_limit() argument
93 gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); in gru_reset_asid_limit()
96 if (asid >= limit) in gru_reset_asid_limit()
[all …]
grutlbpurge.c
89 * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
122 * - asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
132 * asid is non-zero.
136 * - if the ctxtmap is zero, no context is active. Set the ASID to
150 int grupagesize, pagesize, pageshift, gid, asid; in gru_flush_tlb_range() local
167 asid = asids->mt_asid; in gru_flush_tlb_range()
168 if (asids->mt_ctxbitmap && asid) { in gru_flush_tlb_range()
170 asid = GRUASID(asid, start); in gru_flush_tlb_range()
172 " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n", in gru_flush_tlb_range()
173 gid, asid, start, grupagesize, num, asids->mt_ctxbitmap); in gru_flush_tlb_range()
[all …]
/linux-6.14.4/drivers/accel/habanalabs/common/
asid.c
20 /* ASID 0 is reserved for the kernel driver and device CPU */ in hl_asid_init()
50 void hl_asid_free(struct hl_device *hdev, unsigned long asid) in hl_asid_free() argument
52 if (asid == HL_KERNEL_ASID_ID || asid >= hdev->asic_prop.max_asid) { in hl_asid_free()
53 dev_crit(hdev->dev, "Invalid ASID %lu", asid); in hl_asid_free()
57 clear_bit(asid, hdev->asid_bitmap); in hl_asid_free()
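
hl_asid_free() rejects ASID 0 (reserved for the kernel driver and device CPU) and anything at or past max_asid before clearing the bitmap bit. The allocate/free pair it belongs to reduces to a few lines; a sketch with a byte-per-ASID map standing in for the kernel bitmap and an assumed MAX_ASID:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ASID       1024   /* assumed; the driver reads it from asic_prop */
    #define KERNEL_ASID_ID 0      /* reserved for the driver and device CPU */

    static uint8_t asid_bitmap[MAX_ASID];

    static long asid_alloc(void)
    {
        for (long asid = KERNEL_ASID_ID + 1; asid < MAX_ASID; asid++) {
            if (!asid_bitmap[asid]) {
                asid_bitmap[asid] = 1;     /* kernel: set_bit() under a mutex */
                return asid;
            }
        }
        return -1;                         /* space exhausted */
    }

    static void asid_free(unsigned long asid)
    {
        if (asid == KERNEL_ASID_ID || asid >= MAX_ASID) {
            fprintf(stderr, "Invalid ASID %lu\n", asid);   /* kernel: dev_crit() */
            return;
        }
        asid_bitmap[asid] = 0;             /* kernel: clear_bit() */
    }
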
/linux-6.14.4/arch/x86/mm/
tlb.c
64 * to what is traditionally called ASID on the RISC processors.
66 * We don't use the traditional ASID implementation, where each process/mm gets
67 * its own ASID and flush/restart when we run out of ASID space.
76 * ASID - [0, TLB_NR_DYN_ASIDS-1]
81 * ASID+1, because PCID 0 is special.
85 * PCID values, but we can still do with a single ASID denomination
110 * Given @asid, compute kPCID
112 static inline u16 kern_pcid(u16 asid) in kern_pcid() argument
114 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); in kern_pcid()
118 * Make sure that the dynamic ASID space does not conflict with the in kern_pcid()
[all …]
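
The x86 comments mark the key departure from the RISC-style schemes above: instead of a global allocator, each CPU rotates a handful of dynamic ASIDs, and kern_pcid() maps such an ASID to the hardware PCID as asid + 1, because PCID 0 is what CR3 carries when PCID is disabled. A sketch of the mapping, with the dynamic-ASID count assumed:

    #include <assert.h>
    #include <stdint.h>

    #define TLB_NR_DYN_ASIDS 6    /* assumed; software ASIDs 0..5 rotate per CPU */

    /* kPCID for a dynamic ASID, shifted up by one so that PCID 0 stays
     * special (the CR3 value when PCID is off). */
    static inline uint16_t kern_pcid(uint16_t asid)
    {
        assert(asid < TLB_NR_DYN_ASIDS);   /* kernel: VM_WARN_ON_ONCE */
        return asid + 1;
    }

With PTI, the matching user PCID additionally sets a high bit on top of this value, so kernel and user translations of the same mm never share a TLB tag.
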
/linux-6.14.4/arch/riscv/kvm/
tlb.c
79 unsigned long asid, in kvm_riscv_local_hfence_vvma_asid_gva() argument
87 kvm_riscv_local_hfence_vvma_asid_all(vmid, asid); in kvm_riscv_local_hfence_vvma_asid_gva()
97 : : "r" (pos), "r" (asid) : "memory"); in kvm_riscv_local_hfence_vvma_asid_gva()
102 : : "r" (pos), "r" (asid) : "memory"); in kvm_riscv_local_hfence_vvma_asid_gva()
109 unsigned long asid) in kvm_riscv_local_hfence_vvma_asid_all() argument
115 asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory"); in kvm_riscv_local_hfence_vvma_asid_all()
282 nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid, in kvm_riscv_hfence_process()
285 kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr, in kvm_riscv_hfence_process()
292 nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid); in kvm_riscv_hfence_process()
294 kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid); in kvm_riscv_hfence_process()
[all …]
/linux-6.14.4/drivers/vhost/
vdpa.c
71 u64 last, u32 asid);
80 static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid) in asid_to_as() argument
82 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in asid_to_as()
86 if (as->id == asid) in asid_to_as()
92 static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid) in asid_to_iotlb() argument
94 struct vhost_vdpa_as *as = asid_to_as(v, asid); in asid_to_iotlb()
102 static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid) in vhost_vdpa_alloc_as() argument
104 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS]; in vhost_vdpa_alloc_as()
107 if (asid_to_as(v, asid)) in vhost_vdpa_alloc_as()
110 if (asid >= v->vdpa->nas) in vhost_vdpa_alloc_as()
[all …]
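
vhost-vdpa keeps its address spaces in a small hash table keyed by ASID: asid % VHOST_VDPA_IOTLB_BUCKETS picks the bucket, a list walk compares as->id, and the allocator rejects duplicates and out-of-range IDs before linking a new entry. A sketch of the lookup/create pair, with a singly linked list in place of the kernel's hlist and an assumed bucket count:

    #include <stdint.h>
    #include <stdlib.h>

    #define IOTLB_BUCKETS 16U   /* assumed bucket count */

    struct vdpa_as {
        uint32_t id;
        struct vdpa_as *next;
    };

    static struct vdpa_as *buckets[IOTLB_BUCKETS];

    static struct vdpa_as *asid_to_as(uint32_t asid)
    {
        for (struct vdpa_as *as = buckets[asid % IOTLB_BUCKETS]; as; as = as->next)
            if (as->id == asid)
                return as;
        return NULL;
    }

    static struct vdpa_as *alloc_as(uint32_t asid, uint32_t nas)
    {
        struct vdpa_as *as;

        if (asid_to_as(asid) || asid >= nas)   /* duplicate or out of range */
            return NULL;
        as = calloc(1, sizeof(*as));
        if (!as)
            return NULL;
        as->id = asid;
        as->next = buckets[asid % IOTLB_BUCKETS];
        buckets[asid % IOTLB_BUCKETS] = as;
        return as;
    }
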
/linux-6.14.4/tools/perf/pmu-events/arch/riscv/
riscv-sbi-firmware.json
75 "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
78 "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
81 "PublicDescription": "Received SFENCE.VMA with ASID request from other HART event",
84 "BriefDescription": "Received SFENCE.VMA with ASID request from other HART event"
123 "PublicDescription": "Sent HFENCE.VVMA with ASID request to other HART event",
126 "BriefDescription": "Sent HFENCE.VVMA with ASID request to other HART event"
129 "PublicDescription": "Received HFENCE.VVMA with ASID request from other HART event",
132 "BriefDescription": "Received HFENCE.VVMA with ASID request from other HART event"
/linux-6.14.4/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3-sva.c
55 u16 asid) in arm_smmu_make_sva_cd() argument
77 FIELD_PREP(CTXDESC_CD_0_ASID, asid)); in arm_smmu_make_sva_cd()
160 arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); in arm_smmu_mm_arch_invalidate_secondary_tlbs()
162 arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid, in arm_smmu_mm_arch_invalidate_secondary_tlbs()
190 smmu_domain->cd.asid); in arm_smmu_mm_release()
196 arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); in arm_smmu_mm_release()
364 arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid); in arm_smmu_sva_set_dev_pasid()
376 * Ensure the ASID is empty in the iommu cache before allowing reuse. in arm_smmu_sva_domain_free()
378 arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); in arm_smmu_sva_domain_free()
382 * still be called/running at this point. We allow the ASID to be in arm_smmu_sva_domain_free()
[all …]
/linux-6.14.4/arch/arm64/include/asm/
tlbflush.h
58 #define __TLBI_VADDR(addr, asid) \ argument
62 __ta |= (unsigned long)(asid) << 48; \
132 * | ASID | TG | SCALE | NUM | TTL | BADDR |
152 #define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \ argument
161 __ta |= FIELD_PREP(TLBIR_ASID_MASK, asid); \
208 * The 'mm' argument identifies the ASID to invalidate.
275 unsigned long asid; in flush_tlb_mm() local
278 asid = __TLBI_VADDR(0, ASID(mm)); in flush_tlb_mm()
279 __tlbi(aside1is, asid); in flush_tlb_mm()
280 __tlbi_user(aside1is, asid); in flush_tlb_mm()
[all …]
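
__TLBI_VADDR() builds the 64-bit operand register for a TLBI instruction: the virtual page number in the low 44 bits and the 16-bit ASID in bits 63:48. That layout is why flush_tlb_mm() can pass address 0 together with ASID(mm) and invalidate a whole address space via the by-ASID variant. A sketch of the packing:

    #include <stdint.h>

    /* Operand layout per the macro above: VA >> 12 in bits [43:0],
     * ASID in bits [63:48]. */
    static inline uint64_t tlbi_vaddr(uint64_t addr, uint16_t asid)
    {
        uint64_t ta = (addr >> 12) & ((1ULL << 44) - 1);   /* virtual page number */

        ta |= (uint64_t)asid << 48;
        return ta;
    }

    /* flush_tlb_mm()-style use: only the ASID field matters for the
     * "by ASID, inner shareable" (aside1is) invalidate:
     *     uint64_t op = tlbi_vaddr(0, mm_asid);
     */
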
/linux-6.14.4/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
54 struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
62 u8 asid; /* asid and ctx bank # are 1:1 */ member
82 static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid) in to_ctx() argument
87 return qcom_iommu->ctxs[asid]; in to_ctx()
141 iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); in qcom_iommu_tlb_inv_context()
161 iova |= ctx->asid; in qcom_iommu_tlb_inv_range_nosync()
207 fsr, iova, fsynr, ctx->asid); in qcom_iommu_fault()
258 ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); in qcom_iommu_init_domain()
282 FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid)); in qcom_iommu_init_domain()
554 unsigned asid = args->args[0]; in qcom_iommu_of_xlate() local
[all …]
/linux-6.14.4/drivers/vdpa/vdpa_sim/
vdpa_sim.c
603 unsigned int asid) in vdpasim_set_group_asid() argument
612 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_group_asid()
615 iommu = &vdpasim->iommu[asid]; in vdpasim_set_group_asid()
629 static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid, in vdpasim_set_map() argument
638 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_map()
643 iommu = &vdpasim->iommu[asid]; in vdpasim_set_map()
645 vdpasim->iommu_pt[asid] = false; in vdpasim_set_map()
663 static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid) in vdpasim_reset_map() argument
667 if (asid >= vdpasim->dev_attr.nas) in vdpasim_reset_map()
671 if (vdpasim->iommu_pt[asid]) in vdpasim_reset_map()
[all …]
/linux-6.14.4/drivers/gpu/drm/xe/
xe_trace_bo.h
90 __field(u32, asid)
99 __entry->asid = xe_vma_vm(vma)->usm.asid;
105 TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
106 __get_str(dev), __entry->vma, __entry->asid, __entry->start,
187 __field(u32, asid)
193 __entry->asid = vm->usm.asid;
196 TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
197 __entry->vm, __entry->asid)
/linux-6.14.4/arch/riscv/include/asm/
tlbflush.h
23 static inline void local_flush_tlb_all_asid(unsigned long asid) in local_flush_tlb_all_asid() argument
25 if (asid != FLUSH_TLB_NO_ASID) in local_flush_tlb_all_asid()
26 ALT_SFENCE_VMA_ASID(asid); in local_flush_tlb_all_asid()
38 unsigned long asid) in local_flush_tlb_page_asid() argument
40 if (asid != FLUSH_TLB_NO_ASID) in local_flush_tlb_page_asid()
41 ALT_SFENCE_VMA_ADDR_ASID(addr, asid); in local_flush_tlb_page_asid()
