// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMU context allocation for 64-bit kernels.
 */
#include <linux/mm.h>
/* realloc_context_ids() */
	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * the array, so that we can test if they're non-zero to decide if we
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			if (id < 0)

			ctx->extended_id[i] = id;

	return ctx->id;

	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
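/*
 * Illustrative sketch, not part of this file: the allocate-then-unwind
 * pattern used by realloc_context_ids() above, reduced to a standalone
 * helper. The ida and the ids[] array are hypothetical stand-ins; in the
 * real file extended_id[0] doubles as ctx->id and the ids come from a
 * range-limited context-id allocator rather than a bare ida_alloc().
 */
#include <linux/idr.h>

static DEFINE_IDA(example_ida);		/* hypothetical IDA for the sketch */

static int example_alloc_id_batch(int *ids, int n)
{
	int i, id;

	for (i = 0; i < n; i++) {
		/*
		 * The real code only (re)allocates slot 0 unconditionally and
		 * skips unused slots; every slot is allocated here to keep the
		 * sketch short.
		 */
		id = ida_alloc(&example_ida, GFP_KERNEL);
		if (id < 0)
			goto error;
		ids[i] = id;
	}

	return ids[0];

error:
	/* Roll back only the ids taken in this call, newest first. */
	for (i--; i >= 0; i--)
		ida_free(&example_ida, ids[i]);
	return id;
}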
static int hash__init_new_context(struct mm_struct *mm)

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));

		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
		kfree(mm->context.hash_context->spt);
		kfree(mm->context.hash_context);

	pkey_mm_init(mm);
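/*
 * Sketch only, with hypothetical function and label names: the same cleanup
 * as in hash__init_new_context() above, rewritten with goto labels, the more
 * common kernel idiom when several allocations may need rolling back. The
 * exec-vs-fork initialisation shown above is omitted to keep it short; this
 * is not the file's actual code.
 */
static int hash_init_new_context_sketch(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kzalloc(sizeof(struct hash_mm_context), GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * On fork, give the child its own subpage-protection table if the
	 * parent has one (mirrors the branch above).
	 */
	if (mm->context.id != 0 && current->mm->context.hash_context->spt) {
		mm->context.hash_context->spt =
			kmalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!mm->context.hash_context->spt) {
			index = -ENOMEM;
			goto err_free_ctx;
		}
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0)
		goto err_free_spt;

	pkey_mm_init(mm);
	return index;

err_free_spt:
	kfree(mm->context.hash_context->spt);	/* kfree(NULL) is a no-op */
err_free_ctx:
	kfree(mm->context.hash_context);
	return index;
}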
static inline int hash__init_new_context(struct mm_struct *mm)
	return 0;
static int radix__init_new_context(struct mm_struct *mm)

	max_id = (1 << mmu_pid_bits) - 1;
	if (index < 0)

	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	mm->context.hash_context = NULL;
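/*
 * Context and sketch, not part of the file: on radix each mm gets a PID
 * bounded by mmu_pid_bits, and its process-table entry packs the radix tree
 * size (rts_field), the physical address of the mm's pgd and the root index
 * size, as the process_tb[] assignment above shows. The helper below only
 * illustrates the bounded allocation; it assumes the id comes from the same
 * mmu_context_ida used elsewhere in this file, starts at 1 so that id 0
 * stays reserved, and the function name is hypothetical.
 */
static int example_alloc_radix_pid(unsigned int pid_bits)
{
	unsigned int max_id = (1u << pid_bits) - 1;

	return ida_alloc_range(&mmu_context_ida, 1, max_id, GFP_KERNEL);
}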
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)

		index = radix__init_new_context(mm);
		index = hash__init_new_context(mm);

	if (index < 0)

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;

	mm_iommu_init(mm);
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
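/*
 * Background note, not part of the file: init_new_context() is the hook the
 * generic mm code invokes while a fresh mm_struct is being set up
 * (fork/exec). It picks the radix or hash path, records the allocated id,
 * and leaves the pte/pmd fragment caches empty; destroy_context() further
 * down is its counterpart on teardown.
 */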
/* destroy_contexts() */
	ida_free(&mmu_context_ida, ctx->id);

	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];

	kfree(ctx->hash_context);
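/*
 * Note, not part of the file: teardown mirrors setup. ctx->id goes back to
 * mmu_context_ida, the loop walks ctx->extended_id[] so each extra id can be
 * released as well, and the hash_context allocated in
 * hash__init_new_context() is freed here.
 */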
/* pmd_frag_destroy() */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {

static void destroy_pagetable_cache(struct mm_struct *mm)

	frag = mm->context.pte_frag;

	frag = mm->context.pmd_frag;
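/*
 * Note, not part of the file (details hedged): pmd pages are handed out in
 * fragments, PMD_FRAG_NR of them per page, with pt_frag_refcount tracking
 * how many are still referenced. When the per-mm cache is torn down here,
 * the references held for the not-yet-used fragments of the cached page are
 * dropped in one go; atomic_sub_and_test() returning true means no fragment
 * of that page is in use any more, so the page can be freed.
 */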
void destroy_context(struct mm_struct *mm)

	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));

		process_tb[mm->context.id].prtb0 = 0;

	subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;

void arch_exit_mmap(struct mm_struct *mm)

	destroy_pagetable_cache(mm);

	/*
	 * and 0 is invalid. So this will do.
	 *
	 * entry. See the "fullmm" comments in tlb-radix.c.
	 */
		process_tb[mm->context.id].prtb0 = 0;
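/*
 * Note, not part of the file: zeroing prtb0 (both in destroy_context() and
 * here in arch_exit_mmap()) is what keeps the hardware table walker away
 * from page tables that are about to be freed; per the comment fragments
 * above, a value of 0 is invalid, and any translations already cached are
 * dealt with by the "fullmm" flush discussed in tlb-radix.c.
 */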
/* radix__switch_mmu_context() */
	mtspr(SPRN_PID, next->context.id);
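/*
 * Note, not part of the file: on radix a context switch simply programs the
 * mm's context id into the PID SPR, which selects the process_tb[] entry
 * filled in by radix__init_new_context() above.
 */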
/*
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 */