drivers/misc/cxl/fault.c: CXL fault handling (excerpts)

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/sched/mm.h>
#include <linux/mm.h>

/* Does this segment table entry already map the given SLB entry? */
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry, hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M segment */
                hash = (slb->esid >> SID_SHIFT) & mask;

        /* Each hash group holds eight 16-byte SSTEs */
        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, cast out the next entry in round-robin order */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}

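/*
 * A standalone sketch (not from the driver) of the group-index arithmetic
 * above, assuming 256M segments and a hypothetical 8K segment table.  Each
 * group holds 8 SSTEs of 16 bytes (128 bytes), which is why the group count
 * is sst_size >> 7.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const unsigned int sid_shift = 28;        /* 256M segment = 2^28 bytes */
        uint64_t sst_size = 8192;                 /* hypothetical SST size */
        uint64_t mask = (sst_size >> 7) - 1;      /* number of groups - 1 */
        uint64_t ea = 0x7fff12345678ULL;          /* hypothetical faulting EA */
        uint64_t hash = (ea >> sid_shift) & mask; /* group index */
        uint64_t offset = hash * 8 * 16;          /* byte offset of the group */

        printf("groups=%llu group=%llu byte offset=%llu\n",
               (unsigned long long)(mask + 1),
               (unsigned long long)hash,
               (unsigned long long)offset);
        return 0;
}
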
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                 sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb)))
                cxl_load_segment(ctx, &slb);

        return rc;
}

static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* order the segment table write before the MMIO ack */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}

int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
        vm_fault_t flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        /*
         * Add the fault-handling CPU to the task's mm cpumask so that we
         * can do a safe lockless page table walk when inserting the hash
         * page table entry.  This function is called with a valid mm for
         * user space addresses, so the if (mm) check is sufficient here.
         */
        if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                /* Make the cpumask update visible before walking the tables */
                smp_mb();
        }

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt)))
                return result;

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since
                 * current->trap is not a 0x400 or 0x300, so just call
                 * hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;
                if (!mm && (get_region_id(dar) != USER_REGION_ID))
                        access |= _PAGE_PRIVILEGED;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        return 0;
}

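/*
 * A standalone sketch (not from the kernel) of how the hash-fault path above
 * assembles its access mask: read access always, write access for store
 * faults, and privileged access for kernel addresses when no mm is supplied.
 * The flag values below are invented for illustration only.
 */
#include <stdio.h>

#define DEMO_PAGE_READ       0x1UL
#define DEMO_PAGE_WRITE      0x2UL
#define DEMO_PAGE_PRIVILEGED 0x4UL

static unsigned long demo_access(int store_fault, int have_mm, int user_addr)
{
        unsigned long access = DEMO_PAGE_READ;

        if (store_fault)
                access |= DEMO_PAGE_WRITE;
        if (!have_mm && !user_addr)
                access |= DEMO_PAGE_PRIVILEGED;
        return access;
}

int main(void)
{
        printf("store to a user mapping: %#lx\n", demo_access(1, 1, 1));
        printf("kernel read without mm:  %#lx\n", demo_access(0, 0, 0));
        return 0;
}
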
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm,
                                  u64 dsisr, u64 dar)
{
        if (cxl_handle_mm_fault(mm, dsisr, dar)) {
                cxl_ack_ae(ctx);
        } else {
                pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }
}

/*
 * Return the mm_struct corresponding to the context ctx.  If mm_users has
 * already dropped to 0, the context may be in the process of being closed,
 * so treat the mm as unavailable.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!mmget_not_zero(ctx->mm))
                return NULL;

        return ctx->mm;
}

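/*
 * A sketch (not from the driver) of the usage pattern get_mem_context()
 * implies: every successful call must be balanced by mmput(), exactly as
 * cxl_handle_fault() and cxl_prefault() below do.  The body is a placeholder.
 */
static void example_use_mem_context(struct cxl_context *ctx)
{
        struct mm_struct *mm = get_mem_context(ctx);

        if (mm == NULL)
                return;         /* context is gone or being torn down */

        /* ... use mm while holding the mm_users reference ... */

        mmput(mm);              /* drop the reference taken by mmget_not_zero() */
}
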
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /*
                         * Most likely explanation is harmless: a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case.
                         */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                }

                pr_devel("Handling page fault for pe=%d pid=%i\n",
                         ctx->pe, pid_nr(ctx->pid));
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);

        if (mm)
                mmput(mm);
}

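/*
 * A simplified sketch (not from this file) of how the bottom half above is
 * driven: the PSL interrupt handler stashes DSISR/DAR in the context and
 * defers the fault to a workqueue, which ends up in cxl_handle_fault().
 * The helper name is illustrative; see the cxl interrupt code for the real
 * path.
 */
static void example_schedule_fault_work(struct cxl_context *ctx,
                                        u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);        /* runs cxl_handle_fault() */
}
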
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;         /* 1T segment */
        else
                ea |= (1ULL << 28) - 1;         /* 256M segment */

        return ea + 1;
}

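/*
 * A standalone sketch (not from the driver) of the boundary arithmetic in
 * next_segment(): ORing in (segment size - 1) and adding 1 rounds an address
 * up to the start of the following 256M or 1T segment.  The addresses are
 * arbitrary examples.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t demo_next_segment(uint64_t ea, int one_tb)
{
        ea |= (one_tb ? (1ULL << 40) : (1ULL << 28)) - 1;
        return ea + 1;
}

int main(void)
{
        printf("256M: %#llx -> %#llx\n", 0x12345678ULL,
               (unsigned long long)demo_next_segment(0x12345678ULL, 0));
        printf("1T:   %#llx -> %#llx\n", 0x12345678ULL,
               (unsigned long long)demo_next_segment(0x12345678ULL, 1));
        return 0;
}
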
static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
        u64 ea, last_esid = 0;
        struct vm_area_struct *vma;
        struct copro_slb slb;
        VMA_ITERATOR(vmi, mm, 0);
        int rc;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                     ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;
                        if (last_esid == slb.esid)
                                continue;
                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        mmap_read_unlock(mm);
}

void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        struct mm_struct *mm = get_mem_context(ctx);

        if (mm == NULL) {
                pr_devel("cxl_prefault unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_fault_segment(ctx, mm, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx, mm);
                break;
        default:
                break;
        }

        mmput(mm);
}

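/*
 * For context, a sketch (not copied from the driver headers) of the prefault
 * modes switched on above: no prefaulting, prefault only the segment named by
 * the work element descriptor, or walk and prefault every VMA of the task.
 */
enum demo_prefault_modes {
        DEMO_PREFAULT_NONE,     /* take segment faults on demand */
        DEMO_PREFAULT_WED,      /* prefault the WED's segment only */
        DEMO_PREFAULT_ALL,      /* prefault every segment of every VMA */
};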