Lines Matching +full:mm +full:- +full:0
1 /* SPDX-License-Identifier: GPL-2.0 */
32 * Xen requires page-aligned LDTs with special permissions. This is
45 * of an older, still-in-use LDT.
47 * slot will be -1 if this LDT doesn't have an alias mapping.
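These comment fragments come from the description of the per-mm LDT bookkeeping. As a rough sketch of what they document (field names as declared near these lines in the kernel header; treat the exact layout as an assumption), the structure looks like:

struct ldt_struct {
	struct desc_struct *entries;	/* the LDT descriptors themselves */
	unsigned int nr_entries;	/* number of populated entries */
	int slot;			/* PTI user-mapping alias slot; -1 means no alias mapping */
};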
55 static inline void init_new_context_ldt(struct mm_struct *mm)
57 mm->context.ldt = NULL;
58 init_rwsem(&mm->context.ldt_usr_sem);
60 int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
61 void destroy_context_ldt(struct mm_struct *mm);
62 void ldt_arch_exit_mmap(struct mm_struct *mm);
64 static inline void init_new_context_ldt(struct mm_struct *mm) { }
66 struct mm_struct *mm)
68 return 0;
70 static inline void destroy_context_ldt(struct mm_struct *mm) { }
71 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
75 extern void load_mm_ldt(struct mm_struct *mm);
78 static inline void load_mm_ldt(struct mm_struct *mm)
89 static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
97 return READ_ONCE(mm->context.lam_cr3_mask);
100 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
102 mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
103 mm->context.untag_mask = oldmm->context.untag_mask;
107 static inline unsigned long mm_untag_mask(struct mm_struct *mm)
109 return mm->context.untag_mask;
112 static inline void mm_reset_untag_mask(struct mm_struct *mm)
114 mm->context.untag_mask = -1UL;
118 static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
120 return !mm_lam_cr3_mask(mm) ||
121 test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
125 static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
127 return 0;
130 static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
134 static inline void mm_reset_untag_mask(struct mm_struct *mm)
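The untag mask above (reset to all ones when Linear Address Masking is off) is what address untagging consumes. A hypothetical helper, example_untag_addr(), sketches how LAM tag bits might be stripped from a user pointer; it is not the kernel's untagged_addr() implementation, though the sign-extension trick that leaves kernel addresses intact follows the same general idea:

/* Illustrative only: sign-extend so kernel (negative) addresses keep
 * their top bits, then clear the LAM tag bits of user addresses. */
static inline unsigned long example_untag_addr(struct mm_struct *mm,
					       unsigned long addr)
{
	long sign = (long)addr >> 63;

	return addr & (mm_untag_mask(mm) | sign);
}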
140 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
143 * Init a new mm. Used on mm copies, like at fork()
144 * and on mm's that are brand-new, like at execve().
148 struct mm_struct *mm)
150 mutex_init(&mm->context.lock);
152 mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
153 atomic64_set(&mm->context.tlb_gen, 0);
154 mm->context.next_trim_cpumask = jiffies + HZ;
158 /* pkey 0 is the default and allocated implicitly */
159 mm->context.pkey_allocation_map = 0x1;
160 /* -1 means unallocated or invalid */
161 mm->context.execute_only_pkey = -1;
164 mm_reset_untag_mask(mm);
165 init_new_context_ldt(mm);
166 return 0;
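The pkey defaults set above mean "only pkey 0 is allocated, and no execute-only pkey has been chosen yet". A simplified, hypothetical test of that bitmap is sketched below; the real helper, mm_pkey_is_allocated() in asm/pkeys.h, additionally range-checks the pkey and handles the execute-only case:

/* Hypothetical, simplified check: bit N of pkey_allocation_map set
 * means pkey N is in use; with the 0x1 default only pkey 0 qualifies. */
static inline bool example_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	return mm->context.pkey_allocation_map & (1U << pkey);
}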
170 static inline void destroy_context(struct mm_struct *mm)
172 destroy_context_ldt(mm);
186 } while (0);
189 #define deactivate_mm(tsk, mm) \
191 loadsegment(gs, 0); \
192 } while (0)
194 #define deactivate_mm(tsk, mm) \
197 load_gs_index(0); \
198 loadsegment(fs, 0); \
199 } while (0)
203 struct mm_struct *mm)
209 /* Duplicate the oldmm pkey state in mm: */
210 mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
211 mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
215 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
217 arch_dup_pkeys(oldmm, mm);
218 paravirt_enter_mmap(mm);
219 dup_lam(oldmm, mm);
220 return ldt_dup_context(oldmm, mm);
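arch_dup_mmap() above bundles the per-architecture fork work (pkey state, paravirt notification, LAM masks, LDT clone). As a hedged sketch of the relationship to the generic side (the actual call site, dup_mmap() in kernel/fork.c, is simplified here and the helper name is hypothetical):

/* Sketch only: on fork, after the generic code has copied the VMAs it
 * hands both mms to the architecture; a non-zero return fails the fork. */
static int sketch_copy_mm_arch_state(struct mm_struct *oldmm,
				     struct mm_struct *mm)
{
	return arch_dup_mmap(oldmm, mm);
}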
223 static inline void arch_exit_mmap(struct mm_struct *mm)
225 paravirt_arch_exit_mmap(mm);
226 ldt_arch_exit_mmap(mm);
230 static inline bool is_64bit_mm(struct mm_struct *mm)
233 !test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
236 static inline bool is_64bit_mm(struct mm_struct *mm)
249 * mm, or if we are in a kernel thread.
265 #include <asm-generic/mmu_context.h>