Lines matching full:tlb in include/asm-generic/tlb.h

2 /* include/asm-generic/tlb.h
4 * Generic TLB shootdown code
35 * correct and efficient ordering of freeing pages and TLB invalidations.
40 * 2) TLB invalidate page
53 * Finish in particular will issue a (final) TLB invalidate and free
94 * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
97 * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
114 * flush the entire TLB irrespective of the range. For instance
133 * returns the smallest TLB entry size unmapped in this range.
146 * This might be useful if your architecture has size specific TLB
165 * Useful if your architecture doesn't use IPIs for remote TLB invalidates
194 * This is useful if your architecture already flushes TLB entries in the
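The comment fragments above describe the mmu_gather life cycle: start a gather, clear entries while queueing the pages, invalidate the TLB, then free. A minimal sketch of that call pattern follows, assuming the current tlb_gather_mmu()/tlb_finish_mmu() API; zap_range() is a hypothetical stand-in for unmap_page_range() and friends.

#include <asm/tlb.h>

static void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);		/* start gathering for this mm */
	tlb_start_vma(&tlb, vma);		/* record per-VMA attributes */
	zap_range(&tlb, vma, start, end);	/* clear PTEs, queue pages (hypothetical) */
	tlb_end_vma(&tlb, vma);			/* may flush at the VMA boundary */
	tlb_finish_mmu(&tlb);			/* final TLB invalidate, then free pages */
}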
220 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
224 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
229 static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
234 tlb_remove_page(tlb, page);
285 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
287 bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
295 #define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
296 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
309 #define tlb_delay_rmap(tlb) (false)
310 static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
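tlb_delay_rmap() and tlb_flush_rmaps() exist so the reverse-map teardown can be postponed until the stale TLB entries are actually gone. A hedged sketch of that ordering, only loosely following the zap_pte_range() pattern (locking, force_flush bookkeeping and folio details omitted):

static void example_delayed_rmap(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 struct page *page)
{
	bool delay_rmap = tlb_delay_rmap(tlb);	/* mark the gather as carrying delayed rmaps */

	if (__tlb_remove_page(tlb, page, delay_rmap)) {
		/* batch full: invalidate first, only then drop the rmaps */
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_rmaps(tlb, vma);
	}
}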
334 * requires a complete flush of the tlb
376 void tlb_flush_mmu(struct mmu_gather *tlb);
378 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
382 tlb->start = min(tlb->start, address);
383 tlb->end = max(tlb->end, address + range_size);
386 static inline void __tlb_reset_range(struct mmu_gather *tlb)
388 if (tlb->fullmm) {
389 tlb->start = tlb->end = ~0;
391 tlb->start = TASK_SIZE;
392 tlb->end = 0;
394 tlb->freed_tables = 0;
395 tlb->cleared_ptes = 0;
396 tlb->cleared_pmds = 0;
397 tlb->cleared_puds = 0;
398 tlb->cleared_p4ds = 0;
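__tlb_adjust_range() only ever widens the tracked range, and __tlb_reset_range() re-arms it as an empty (start > end) range. An illustration-only helper (hypothetical, assuming the non-fullmm case and 4K pages) showing how the range accumulates:

static inline void example_track_two_pages(struct mmu_gather *tlb)
{
	__tlb_reset_range(tlb);				/* start = TASK_SIZE, end = 0 */
	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);	/* start = 0x1000, end = 0x2000 */
	__tlb_adjust_range(tlb, 0x5000, PAGE_SIZE);	/* start = 0x1000, end = 0x6000 */
}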
420 static inline void tlb_flush(struct mmu_gather *tlb)
422 if (tlb->end)
423 flush_tlb_mm(tlb->mm);
434 static inline void tlb_flush(struct mmu_gather *tlb)
436 if (tlb->fullmm || tlb->need_flush_all) {
437 flush_tlb_mm(tlb->mm);
438 } else if (tlb->end) {
440 .vm_mm = tlb->mm,
441 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
442 (tlb->vma_huge ? VM_HUGETLB : 0),
445 flush_tlb_range(&vma, tlb->start, tlb->end);
453 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
459 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
466 tlb->vma_huge = is_vm_hugetlb_page(vma);
467 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
468 tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
471 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
477 if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
478 tlb->cleared_puds || tlb->cleared_p4ds))
481 tlb_flush(tlb);
482 __tlb_reset_range(tlb);
485 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
488 if (__tlb_remove_page_size(tlb, page, false, page_size))
489 tlb_flush_mmu(tlb);
492 static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
495 return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
502 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
504 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
507 static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
509 tlb_remove_table(tlb, pt);
513 static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
515 tlb_remove_page(tlb, ptdesc_page(pt));
518 static inline void tlb_change_page_size(struct mmu_gather *tlb,
522 if (tlb->page_size && tlb->page_size != page_size) {
523 if (!tlb->fullmm && !tlb->need_flush_all)
524 tlb_flush_mmu(tlb);
527 tlb->page_size = page_size;
531 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
533 if (tlb->cleared_ptes)
535 if (tlb->cleared_pmds)
537 if (tlb->cleared_puds)
539 if (tlb->cleared_p4ds)
545 static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
547 return 1UL << tlb_get_unmap_shift(tlb);
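tlb_get_unmap_shift()/tlb_get_unmap_size() report the smallest granule that was unmapped, which an architecture-specific flush can use as its invalidation stride. A hedged sketch; my_arch_invalidate_page() is a hypothetical per-page invalidation primitive, not a real kernel API:

static inline void example_range_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	for (addr = tlb->start; addr < tlb->end; addr += stride)
		my_arch_invalidate_page(tlb->mm, addr);	/* hypothetical primitive */
}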
551 * In the case of tlb vma handling, we can optimise these away in the
555 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
557 if (tlb->fullmm)
560 tlb_update_vma_flags(tlb, vma);
566 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
568 if (tlb->fullmm)
577 if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
579 * Do a TLB flush and reset the range at VMA boundaries; this avoids
582 tlb_flush_mmu_tlbonly(tlb);
587 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
590 static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
593 __tlb_adjust_range(tlb, address, size);
594 tlb->cleared_ptes = 1;
597 static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
600 __tlb_adjust_range(tlb, address, size);
601 tlb->cleared_pmds = 1;
604 static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
607 __tlb_adjust_range(tlb, address, size);
608 tlb->cleared_puds = 1;
611 static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
614 __tlb_adjust_range(tlb, address, size);
615 tlb->cleared_p4ds = 1;
619 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
625 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
628 * so we can later optimise away the tlb invalidate. This helps when
631 #define tlb_remove_tlb_entry(tlb, ptep, address) \
633 tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
634 __tlb_remove_tlb_entry(tlb, ptep, address); \
639 * later tlb invalidation.
644 static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
647 tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
649 __tlb_remove_tlb_entry(tlb, ptep, address);
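Putting the pieces together: the usual "clear, remember, queue" pattern seen in zap_pte_range()-style code. This is a simplified sketch; rmap, accounting, dirty/young propagation and locking are omitted.

static void example_zap_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    pte_t *pte, unsigned long addr)
{
	pte_t ptent = ptep_get_and_clear(vma->vm_mm, addr, pte);
	struct page *page = vm_normal_page(vma, addr, ptent);

	tlb_remove_tlb_entry(tlb, pte, addr);	/* track range and level for the flush */
	if (page)
		tlb_remove_page(tlb, page);	/* queue the page; freed after the flush */
}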
657 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
661 tlb_flush_p4d_range(tlb, address, _sz); \
663 tlb_flush_pud_range(tlb, address, _sz); \
665 tlb_flush_pmd_range(tlb, address, _sz); \
667 tlb_flush_pte_range(tlb, address, _sz); \
668 __tlb_remove_tlb_entry(tlb, ptep, address); \
672 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
676 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
679 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
681 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
682 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
686 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
690 #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
693 #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
695 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
696 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
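The pmd/pud variants do the same bookkeeping at huge-page granularity. A hedged sketch modelled loosely on zap_huge_pmd() (locking, migration entries and mTHP details omitted):

static void example_zap_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    pmd_t *pmd, unsigned long addr)
{
	pmd_t orig;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	/* flush first if the batch held 4K pages */
	orig = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);	/* record an HPAGE_PMD_SIZE-sized range */
	if (pmd_present(orig))
		tlb_remove_page_size(tlb, pmd_page(orig), HPAGE_PMD_SIZE);
}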
706 * explicit flushing for that, likely *separate* from a regular TLB entry
718 #define pte_free_tlb(tlb, ptep, address) \
720 tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
721 tlb->freed_tables = 1; \
722 __pte_free_tlb(tlb, ptep, address); \
727 #define pmd_free_tlb(tlb, pmdp, address) \
729 tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
730 tlb->freed_tables = 1; \
731 __pmd_free_tlb(tlb, pmdp, address); \
736 #define pud_free_tlb(tlb, pudp, address) \
738 tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
739 tlb->freed_tables = 1; \
740 __pud_free_tlb(tlb, pudp, address); \
745 #define p4d_free_tlb(tlb, pudp, address) \
747 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
748 tlb->freed_tables = 1; \
749 __p4d_free_tlb(tlb, pudp, address); \
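The *_free_tlb() hooks are how page-table pages themselves are handed to the gather, so they are only freed once the TLB (and any paging-structure caches) have been invalidated; setting freed_tables = 1 forces that invalidation even on architectures that cache intermediate levels. A sketch closely following free_pte_range() in mm/memory.c:

static void example_free_pte_table(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t table = pmd_pgtable(*pmd);

	pmd_clear(pmd);			/* detach the PTE table from the PMD */
	pte_free_tlb(tlb, table, addr);	/* queue it; freed after the TLB flush */
	mm_dec_nr_ptes(tlb->mm);	/* keep the page-table accounting in sync */
}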