Lines matching the search query full:mm — each match below gives the source line number, the matching code snippet, and the enclosing function together with how mm is used there (argument or local).
11 #include <linux/mm.h>
37 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
44 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
48 asce = asce ? : mm->context.asce; in ptep_ipte_local()
57 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
64 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
68 asce = asce ? : mm->context.asce; in ptep_ipte_global()
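The ptep_ipte_local()/ptep_ipte_global() matches above show the ASCE selection idiom: the gmap ASCE is read once (READ_ONCE() in the kernel) and, when it is zero, the invalidation falls back to the mm's own ASCE via the GNU "?:" operator. Below is a minimal user-space sketch of that fallback; the struct, field and function names are stand-ins for illustration, not the kernel types:

    #include <stdio.h>

    struct mm_context_model {
        unsigned long gmap_asce;   /* 0 while no guest mapping is attached */
        unsigned long asce;        /* the process's own address-space control element */
    };

    static unsigned long pick_asce(const struct mm_context_model *ctx)
    {
        unsigned long asce = ctx->gmap_asce;   /* kernel: READ_ONCE(mm->context.gmap_asce) */

        /* GNU "?:" keeps gmap_asce when non-zero, else the regular asce */
        return asce ? : ctx->asce;
    }

    int main(void)
    {
        struct mm_context_model no_gmap   = { .gmap_asce = 0,      .asce = 0x1000 };
        struct mm_context_model with_gmap = { .gmap_asce = 0x2000, .asce = 0x1000 };

        printf("no gmap:   %#lx\n", pick_asce(&no_gmap));    /* 0x1000 */
        printf("with gmap: %#lx\n", pick_asce(&with_gmap));  /* 0x2000 */
        return 0;
    }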
77 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
86 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
88 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_flush_direct()
89 ptep_ipte_local(mm, addr, ptep, nodat); in ptep_flush_direct()
91 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_direct()
92 atomic_dec(&mm->context.flush_count); in ptep_flush_direct()
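ptep_flush_direct() brackets the invalidation with context.flush_count and uses the cheaper CPU-local IPTE only when mm_cpumask(mm) names just the current CPU (the real check also depends on machine facilities not visible in these matches); otherwise the IPTE is broadcast. A simplified, runnable model of that decision, with a plain bitmask and C11 atomics standing in for cpumask_t and the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    struct mm_model {
        unsigned long cpumask;      /* bit n set: the mm is in use on CPU n */
        atomic_int flush_count;     /* flushes currently in flight          */
    };

    static void ipte_local(void)  { puts("IPTE, local scope"); }
    static void ipte_global(void) { puts("IPTE, broadcast to all CPUs"); }

    static void flush_direct(struct mm_model *mm, int this_cpu)
    {
        atomic_fetch_add(&mm->flush_count, 1);
        /* only this CPU uses the mm: a CPU-local invalidation is enough */
        if (mm->cpumask == (1UL << this_cpu))
            ipte_local();
        else
            ipte_global();
        atomic_fetch_sub(&mm->flush_count, 1);
    }

    int main(void)
    {
        struct mm_model mm = { .cpumask = 1UL << 2 };

        atomic_init(&mm.flush_count, 0);
        flush_direct(&mm, 2);        /* local: only CPU 2 is attached  */
        mm.cpumask |= 1UL << 5;
        flush_direct(&mm, 2);        /* global: CPU 5 is attached too  */
        return 0;
    }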
96 static inline pte_t ptep_flush_lazy(struct mm_struct *mm, in ptep_flush_lazy() argument
105 atomic_inc(&mm->context.flush_count); in ptep_flush_lazy()
106 if (cpumask_equal(&mm->context.cpu_attach_mask, in ptep_flush_lazy()
109 mm->context.flush_mm = 1; in ptep_flush_lazy()
111 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_flush_lazy()
112 atomic_dec(&mm->context.flush_count); in ptep_flush_lazy()
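ptep_flush_lazy() is the deferred variant: when context.cpu_attach_mask says only the current CPU has the mm attached, the hardware flush is postponed by recording context.flush_mm = 1 so a later full flush of the mm settles the debt (the kernel additionally marks the entry invalid in software at that point, which the matches above do not show); otherwise the global IPTE runs immediately. A sketch of that bookkeeping, again with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_model {
        unsigned long cpu_attach_mask;  /* CPUs the mm is currently attached to */
        bool flush_mm;                  /* a TLB flush is still owed            */
    };

    static void ipte_global(void) { puts("global IPTE now"); }

    static void flush_lazy(struct mm_model *mm, int this_cpu)
    {
        if (mm->cpu_attach_mask == (1UL << this_cpu)) {
            /* no other CPU can hold a stale translation: just remember the debt */
            mm->flush_mm = true;
            puts("flush deferred (flush_mm = 1)");
        } else {
            ipte_global();
        }
    }

    int main(void)
    {
        struct mm_model mm = { .cpu_attach_mask = 1UL << 0, .flush_mm = false };

        flush_lazy(&mm, 0);              /* deferred               */
        mm.cpu_attach_mask |= 1UL << 1;
        flush_lazy(&mm, 0);              /* immediate global flush */
        return 0;
    }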
155 struct mm_struct *mm) in pgste_update_all() argument
160 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
176 struct mm_struct *mm) in pgste_set_key() argument
182 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
220 static inline pgste_t pgste_pte_notify(struct mm_struct *mm, in pgste_pte_notify() argument
230 ptep_notify(mm, addr, ptep, bits); in pgste_pte_notify()
236 static inline pgste_t ptep_xchg_start(struct mm_struct *mm, in ptep_xchg_start() argument
241 if (mm_has_pgste(mm)) { in ptep_xchg_start()
243 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_xchg_start()
248 static inline pte_t ptep_xchg_commit(struct mm_struct *mm, in ptep_xchg_commit() argument
252 if (mm_has_pgste(mm)) { in ptep_xchg_commit()
254 pgste_set_key(ptep, pgste, new, mm); in ptep_xchg_commit()
256 pgste = pgste_update_all(old, pgste, mm); in ptep_xchg_commit()
269 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, in ptep_xchg_direct() argument
277 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_direct()
279 old = ptep_flush_direct(mm, addr, ptep, nodat); in ptep_xchg_direct()
280 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_direct()
290 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep, in ptep_reset_dat_prot() argument
294 atomic_inc(&mm->context.flush_count); in ptep_reset_dat_prot()
295 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in ptep_reset_dat_prot()
306 atomic_dec(&mm->context.flush_count); in ptep_reset_dat_prot()
311 pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, in ptep_xchg_lazy() argument
319 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_xchg_lazy()
321 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_xchg_lazy()
322 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new); in ptep_xchg_lazy()
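ptep_xchg_direct() and ptep_xchg_lazy() above share a three-phase shape: ptep_xchg_start() takes the PGSTE and raises notifier bits when the mm has PGSTEs, the old entry is flushed (directly or lazily), and ptep_xchg_commit() does the storage-key and PGSTE bookkeeping around installing the new PTE. The sketch below models that shape with a pthread mutex standing in for the PGSTE lock and plain integers for the PTE/PGSTE values; it is not the s390 implementation:

    #include <pthread.h>
    #include <stdio.h>

    struct pte_slot {
        unsigned long pte;          /* current PTE value (model)          */
        unsigned long pgste;        /* guest-storage/notify state (model) */
        pthread_mutex_t pgste_lock; /* stands in for the PGSTE lock       */
    };

    static unsigned long xchg_start(struct pte_slot *s)
    {
        pthread_mutex_lock(&s->pgste_lock);
        /* real code: pgste_pte_notify() tells gmap users the PTE is changing */
        return s->pgste;
    }

    static unsigned long flush_old(struct pte_slot *s)
    {
        unsigned long old = s->pte;

        s->pte = 0;                 /* stands in for the IPTE flush */
        return old;
    }

    static void xchg_commit(struct pte_slot *s, unsigned long pgste, unsigned long new_pte)
    {
        /* real code: pgste_set_key()/pgste_update_all() around the store */
        s->pte = new_pte;
        s->pgste = pgste;
        pthread_mutex_unlock(&s->pgste_lock);
    }

    static unsigned long pte_xchg(struct pte_slot *s, unsigned long new_pte)
    {
        unsigned long pgste = xchg_start(s);
        unsigned long old = flush_old(s);

        xchg_commit(s, pgste, new_pte);
        return old;
    }

    int main(void)
    {
        struct pte_slot s = {
            .pte = 0x1000, .pgste = 0,
            .pgste_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        printf("old pte = %#lx\n", pte_xchg(&s, 0x2000));  /* 0x1000 */
        printf("new pte = %#lx\n", s.pte);                 /* 0x2000 */
        return 0;
    }

The direct and lazy variants differ only in which flush runs between start and commit; the value the flush returns is the old PTE that commit hands back to the caller.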
334 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_start() local
337 pgste = ptep_xchg_start(mm, addr, ptep); in ptep_modify_prot_start()
339 old = ptep_flush_lazy(mm, addr, ptep, nodat); in ptep_modify_prot_start()
340 if (mm_has_pgste(mm)) { in ptep_modify_prot_start()
341 pgste = pgste_update_all(old, pgste, mm); in ptep_modify_prot_start()
351 struct mm_struct *mm = vma->vm_mm; in ptep_modify_prot_commit() local
353 if (mm_has_pgste(mm)) { in ptep_modify_prot_commit()
355 pgste_set_key(ptep, pgste, pte, mm); in ptep_modify_prot_commit()
364 static inline void pmdp_idte_local(struct mm_struct *mm, in pmdp_idte_local() argument
369 mm->context.asce, IDTE_LOCAL); in pmdp_idte_local()
372 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_local()
373 gmap_pmdp_idte_local(mm, addr); in pmdp_idte_local()
376 static inline void pmdp_idte_global(struct mm_struct *mm, in pmdp_idte_global() argument
381 mm->context.asce, IDTE_GLOBAL); in pmdp_idte_global()
382 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
383 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
386 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
387 gmap_pmdp_idte_global(mm, addr); in pmdp_idte_global()
390 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) in pmdp_idte_global()
391 gmap_pmdp_csp(mm, addr); in pmdp_idte_global()
395 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm, in pmdp_flush_direct() argument
403 atomic_inc(&mm->context.flush_count); in pmdp_flush_direct()
405 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pmdp_flush_direct()
406 pmdp_idte_local(mm, addr, pmdp); in pmdp_flush_direct()
408 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_direct()
409 atomic_dec(&mm->context.flush_count); in pmdp_flush_direct()
413 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm, in pmdp_flush_lazy() argument
421 atomic_inc(&mm->context.flush_count); in pmdp_flush_lazy()
422 if (cpumask_equal(&mm->context.cpu_attach_mask, in pmdp_flush_lazy()
425 mm->context.flush_mm = 1; in pmdp_flush_lazy()
426 if (mm_has_pgste(mm)) in pmdp_flush_lazy()
427 gmap_pmdp_invalidate(mm, addr); in pmdp_flush_lazy()
429 pmdp_idte_global(mm, addr, pmdp); in pmdp_flush_lazy()
431 atomic_dec(&mm->context.flush_count); in pmdp_flush_lazy()
436 static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) in pmd_lookup() argument
444 vma = vma_lookup(mm, addr); in pmd_lookup()
448 pgd = pgd_offset(mm, addr); in pmd_lookup()
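pmd_lookup() first checks that a VMA covers the address (vma_lookup()) and then descends from pgd_offset() through the intermediate levels to the PMD, bailing out when a level is not populated. The model below walks a small three-level software table with 9-bit indices per level; it only illustrates that descend-or-fail structure, not the s390 region/segment-table format:

    #include <stdio.h>
    #include <stdlib.h>

    #define LEVELS     3
    #define INDEX_BITS 9
    #define ENTRIES    (1UL << INDEX_BITS)

    struct table { void *slot[ENTRIES]; };

    static unsigned long index_at(unsigned long addr, int level)
    {
        /* level 0 is the top table; deeper levels use lower address bits */
        return (addr >> (INDEX_BITS * (LEVELS - 1 - level))) & (ENTRIES - 1);
    }

    /* returns the leaf slot for addr, or NULL when an intermediate table is missing */
    static void **lookup(struct table *top, unsigned long addr)
    {
        struct table *t = top;

        for (int level = 0; level < LEVELS - 1; level++) {
            void *next = t->slot[index_at(addr, level)];

            if (!next)
                return NULL;    /* pmd_lookup() reports this case as an error */
            t = next;
        }
        return &t->slot[index_at(addr, LEVELS - 1)];
    }

    int main(void)
    {
        struct table *top = calloc(1, sizeof(*top));
        struct table *mid = calloc(1, sizeof(*mid));
        struct table *leaf = calloc(1, sizeof(*leaf));
        unsigned long addr = 0x12345;

        top->slot[index_at(addr, 0)] = mid;
        mid->slot[index_at(addr, 1)] = leaf;

        printf("slot %sfound\n", lookup(top, addr) ? "" : "not ");
        printf("slot %sfound\n", lookup(top, addr + ENTRIES * ENTRIES) ? "" : "not ");

        free(leaf);
        free(mid);
        free(top);
        return 0;
    }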
469 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_direct() argument
475 old = pmdp_flush_direct(mm, addr, pmdp); in pmdp_xchg_direct()
482 pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr, in pmdp_xchg_lazy() argument
488 old = pmdp_flush_lazy(mm, addr, pmdp); in pmdp_xchg_lazy()
495 static inline void pudp_idte_local(struct mm_struct *mm, in pudp_idte_local() argument
500 mm->context.asce, IDTE_LOCAL); in pudp_idte_local()
505 static inline void pudp_idte_global(struct mm_struct *mm, in pudp_idte_global() argument
510 mm->context.asce, IDTE_GLOBAL); in pudp_idte_global()
521 static inline pud_t pudp_flush_direct(struct mm_struct *mm, in pudp_flush_direct() argument
529 atomic_inc(&mm->context.flush_count); in pudp_flush_direct()
531 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) in pudp_flush_direct()
532 pudp_idte_local(mm, addr, pudp); in pudp_flush_direct()
534 pudp_idte_global(mm, addr, pudp); in pudp_flush_direct()
535 atomic_dec(&mm->context.flush_count); in pudp_flush_direct()
539 pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr, in pudp_xchg_direct() argument
545 old = pudp_flush_direct(mm, addr, pudp); in pudp_xchg_direct()
553 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, in pgtable_trans_huge_deposit() argument
558 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_deposit()
561 if (!pmd_huge_pte(mm, pmdp)) in pgtable_trans_huge_deposit()
564 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); in pgtable_trans_huge_deposit()
565 pmd_huge_pte(mm, pmdp) = pgtable; in pgtable_trans_huge_deposit()
568 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) in pgtable_trans_huge_withdraw() argument
574 assert_spin_locked(pmd_lockptr(mm, pmdp)); in pgtable_trans_huge_withdraw()
577 pgtable = pmd_huge_pte(mm, pmdp); in pgtable_trans_huge_withdraw()
580 pmd_huge_pte(mm, pmdp) = NULL; in pgtable_trans_huge_withdraw()
582 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; in pgtable_trans_huge_withdraw()
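pgtable_trans_huge_deposit()/_withdraw() keep pre-allocated page tables attached to the PMD, under the PMD lock (assert_spin_locked() in the matches above), so that a later split of the huge PMD has a page table ready: deposit adds one, withdraw takes one back. A minimal model of that bookkeeping, assuming a plain singly linked stack instead of the kernel's list_head threaded through the page-table page:

    #include <assert.h>
    #include <stdio.h>

    struct deposited_pt {
        struct deposited_pt *next;
        int id;                         /* stand-in for the page-table page */
    };

    struct pmd_model {
        struct deposited_pt *huge_pte;  /* like pmd_huge_pte(mm, pmdp)    */
        int locked;                     /* stands in for the PMD spinlock */
    };

    static void deposit(struct pmd_model *pmd, struct deposited_pt *pt)
    {
        assert(pmd->locked);            /* assert_spin_locked() in the kernel */
        pt->next = pmd->huge_pte;
        pmd->huge_pte = pt;
    }

    static struct deposited_pt *withdraw(struct pmd_model *pmd)
    {
        struct deposited_pt *pt;

        assert(pmd->locked);
        pt = pmd->huge_pte;
        if (pt)
            pmd->huge_pte = pt->next;
        return pt;
    }

    int main(void)
    {
        struct pmd_model pmd = { .huge_pte = NULL, .locked = 1 };
        struct deposited_pt a = { .id = 1 }, b = { .id = 2 };

        deposit(&pmd, &a);
        deposit(&pmd, &b);
        printf("withdraw -> %d\n", withdraw(&pmd)->id);   /* 2 */
        printf("withdraw -> %d\n", withdraw(&pmd)->id);   /* 1 */
        return 0;
    }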
594 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr, in ptep_set_pte_at() argument
603 pgste_set_key(ptep, pgste, entry, mm); in ptep_set_pte_at()
609 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_notify() argument
622 * @mm: pointer to the process mm_struct
631 int ptep_force_prot(struct mm_struct *mm, unsigned long addr, in ptep_force_prot() argument
651 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
652 pgste = pgste_update_all(entry, pgste, mm); in ptep_force_prot()
656 ptep_flush_direct(mm, addr, ptep, nodat); in ptep_force_prot()
666 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr, in ptep_shadow_pte() argument
693 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep) in ptep_unshadow_pte() argument
701 ptep_flush_direct(mm, saddr, ptep, nodat); in ptep_unshadow_pte()
707 static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) in ptep_zap_swap_entry() argument
710 dec_mm_counter(mm, MM_SWAPENTS); in ptep_zap_swap_entry()
714 dec_mm_counter(mm, mm_counter(folio)); in ptep_zap_swap_entry()
719 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, in ptep_zap_unused() argument
734 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); in ptep_zap_unused()
735 pte_clear(mm, addr, ptep); in ptep_zap_unused()
743 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_zap_key() argument
763 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr, in ptep_test_and_clear_uc() argument
776 pgste = pgste_pte_notify(mm, addr, ptep, pgste); in ptep_test_and_clear_uc()
778 ptep_ipte_global(mm, addr, ptep, nodat); in ptep_test_and_clear_uc()
790 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in set_guest_storage_key() argument
803 switch (pmd_lookup(mm, addr, &pmdp)) { in set_guest_storage_key()
812 ptl = pmd_lock(mm, pmdp); in set_guest_storage_key()
831 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in set_guest_storage_key()
870 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr, in cond_set_guest_storage_key() argument
879 rc = get_guest_storage_key(current->mm, addr, &tmp); in cond_set_guest_storage_key()
891 rc = set_guest_storage_key(current->mm, addr, key, nq); in cond_set_guest_storage_key()
901 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr) in reset_guest_reference_bit() argument
914 switch (pmd_lookup(mm, addr, &pmdp)) { in reset_guest_reference_bit()
923 ptl = pmd_lock(mm, pmdp); in reset_guest_reference_bit()
938 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in reset_guest_reference_bit()
963 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr, in get_guest_storage_key() argument
978 switch (pmd_lookup(mm, addr, &pmdp)) { in get_guest_storage_key()
987 ptl = pmd_lock(mm, pmdp); in get_guest_storage_key()
1002 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); in get_guest_storage_key()
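set_guest_storage_key(), reset_guest_reference_bit() and get_guest_storage_key() above all follow one dispatch shape: pmd_lookup() locates the PMD, a large (huge) PMD is handled under pmd_lock(), and otherwise the code drops to the PTE level with pte_offset_map_lock(). A sketch of that dispatch, with pthread mutexes and trivial callbacks standing in for the kernel locks and the storage-key work:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct pmd_model {
        bool large;                  /* the PMD maps a whole segment directly */
        pthread_mutex_t pmd_lock;    /* stands in for pmd_lock()              */
        pthread_mutex_t pte_lock;    /* stands in for the PTE page-table lock */
    };

    static int key_op_on_segment(void) { puts("huge PMD: operate on the segment"); return 0; }
    static int key_op_on_pte(void)     { puts("normal PMD: operate on the PTE/PGSTE"); return 0; }

    static int guest_key_op(struct pmd_model *pmd)
    {
        int rc;

        if (pmd->large) {
            pthread_mutex_lock(&pmd->pmd_lock);
            rc = key_op_on_segment();
            pthread_mutex_unlock(&pmd->pmd_lock);
            return rc;
        }

        pthread_mutex_lock(&pmd->pte_lock);
        rc = key_op_on_pte();
        pthread_mutex_unlock(&pmd->pte_lock);
        return rc;
    }

    int main(void)
    {
        struct pmd_model huge = {
            .large = true,
            .pmd_lock = PTHREAD_MUTEX_INITIALIZER,
            .pte_lock = PTHREAD_MUTEX_INITIALIZER,
        };
        struct pmd_model normal = {
            .large = false,
            .pmd_lock = PTHREAD_MUTEX_INITIALIZER,
            .pte_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        guest_key_op(&huge);
        guest_key_op(&normal);
        return 0;
    }

As the cond_set_guest_storage_key() matches suggest, that helper builds on the same routines, fetching the current key with get_guest_storage_key() before deciding whether to call set_guest_storage_key() at all.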
1020 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1030 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, in pgste_perform_essa() argument
1044 vma = vma_lookup(mm, hva); in pgste_perform_essa()
1047 ptep = get_locked_pte(mm, hva, &ptl); in pgste_perform_essa()
1128 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1136 int set_pgste_bits(struct mm_struct *mm, unsigned long hva, in set_pgste_bits() argument
1144 vma = vma_lookup(mm, hva); in set_pgste_bits()
1147 ptep = get_locked_pte(mm, hva, &ptl); in set_pgste_bits()
1163 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1169 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) in get_pgste() argument
1175 vma = vma_lookup(mm, hva); in get_pgste()
1178 ptep = get_locked_pte(mm, hva, &ptl); in get_pgste()
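pgste_perform_essa(), set_pgste_bits() and get_pgste() close the listing with a common access pattern: vma_lookup() validates the host virtual address, get_locked_pte() maps and locks the PTE, the PGSTE that sits next to it is read or updated, and the lock is dropped again. A compact model of that get-locked/operate/unlock discipline; the array, mutex and helper names below are invented for the sketch:

    #include <pthread.h>
    #include <stdio.h>

    struct pte_entry {
        unsigned long pte;
        unsigned long pgste;     /* the PGSTE lives next to its PTE         */
        pthread_mutex_t lock;    /* stands in for the page-table lock (ptl) */
    };

    /* stands in for get_locked_pte(): locate the entry and take its lock */
    static struct pte_entry *get_locked_entry(struct pte_entry *table, unsigned long index)
    {
        struct pte_entry *e = &table[index];

        pthread_mutex_lock(&e->lock);
        return e;
    }

    static int set_pgste_bits_model(struct pte_entry *table, unsigned long index,
                                    unsigned long bits, unsigned long value)
    {
        struct pte_entry *e = get_locked_entry(table, index);

        e->pgste = (e->pgste & ~bits) | (value & bits);
        pthread_mutex_unlock(&e->lock);
        return 0;
    }

    int main(void)
    {
        struct pte_entry table[4] = { 0 };

        for (int i = 0; i < 4; i++)
            pthread_mutex_init(&table[i].lock, NULL);

        set_pgste_bits_model(table, 1, 0xff00, 0x4200);
        printf("pgste[1] = %#lx\n", table[1].pgste);   /* 0x4200 */
        return 0;
    }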