Lines matching full:tlb in mm/mmu_gather.c
14 #include <asm/tlb.h>
18 static bool tlb_next_batch(struct mmu_gather *tlb) in tlb_next_batch() argument
23 if (tlb->delayed_rmap && tlb->active != &tlb->local) in tlb_next_batch()
26 batch = tlb->active; in tlb_next_batch()
28 tlb->active = batch->next; in tlb_next_batch()
32 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
39 tlb->batch_count++; in tlb_next_batch()
44 tlb->active->next = batch; in tlb_next_batch()
45 tlb->active = batch; in tlb_next_batch()
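The matches above show only the lines that mention tlb. For context, a reconstruction of the whole allocator, from memory of mm/mmu_gather.c (so helper names, field layout and GFP flags may differ between kernel versions), looks roughly like this:

/* Sketch of tlb_next_batch(): grow the mmu_gather's chain of page batches. */
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Pages with delayed rmap must stay in the local batch (line 23). */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	/* Reuse an already-allocated batch further down the chain. */
	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	/* Cap the chain so a huge munmap() cannot pin unbounded memory. */
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	/* Opportunistic allocation: failure just forces an earlier flush. */
	batch = (struct mmu_gather_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;
	return true;
}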
73 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
74 * @tlb: the current mmu_gather
82 void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_flush_rmaps() argument
84 if (!tlb->delayed_rmap) in tlb_flush_rmaps()
87 tlb_flush_rmap_batch(&tlb->local, vma); in tlb_flush_rmaps()
88 if (tlb->active != &tlb->local) in tlb_flush_rmaps()
89 tlb_flush_rmap_batch(tlb->active, vma); in tlb_flush_rmaps()
90 tlb->delayed_rmap = 0; in tlb_flush_rmaps()
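A hedged sketch of the ordering tlb_flush_rmaps() exists for, loosely modelled on zap_pte_range()-style callers in mm/memory.c; example_zap_one_page and its exact call sequence are illustrative only, not the real caller:

static void example_zap_one_page(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 pte_t *pte, struct page *page,
				 unsigned long addr)
{
	/* Clear the PTE and record the address range that needs flushing. */
	ptep_get_and_clear(vma->vm_mm, addr, pte);
	tlb_remove_tlb_entry(tlb, pte, addr);

	/*
	 * Queue the page with delay_rmap=true: the rmap is not removed here,
	 * because other CPUs may still have stale TLB entries for the page.
	 */
	if (__tlb_remove_page_size(tlb, page, true, PAGE_SIZE)) {
		/*
		 * Batches are full. Invalidate the TLB first, then do the
		 * delayed rmap removals while the page-table lock is still
		 * held; a real caller would then tlb_flush_mmu() to free the
		 * queued pages.
		 */
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_rmaps(tlb, vma);
	}
}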
144 static void tlb_batch_pages_flush(struct mmu_gather *tlb) in tlb_batch_pages_flush() argument
148 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) in tlb_batch_pages_flush()
150 tlb->active = &tlb->local; in tlb_batch_pages_flush()
153 static void tlb_batch_list_free(struct mmu_gather *tlb) in tlb_batch_list_free() argument
157 for (batch = tlb->local.next; batch; batch = next) { in tlb_batch_list_free()
161 tlb->local.next = NULL; in tlb_batch_list_free()
164 static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb, in __tlb_remove_folio_pages_size() argument
171 VM_BUG_ON(!tlb->end); in __tlb_remove_folio_pages_size()
174 VM_WARN_ON(tlb->page_size != page_size); in __tlb_remove_folio_pages_size()
179 batch = tlb->active; in __tlb_remove_folio_pages_size()
196 if (!tlb_next_batch(tlb)) in __tlb_remove_folio_pages_size()
198 batch = tlb->active; in __tlb_remove_folio_pages_size()
205 bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, in __tlb_remove_folio_pages() argument
208 return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap, in __tlb_remove_folio_pages()
212 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, in __tlb_remove_page_size() argument
215 return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size); in __tlb_remove_page_size()
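These helpers return true when the gather is full and the caller must flush. The convenience wrappers in include/asm-generic/tlb.h, reproduced here roughly and from memory, turn that into an automatic flush:

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, false, page_size))
		tlb_flush_mmu(tlb);	/* gather full: flush and free now */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}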
246 * Since the disabling of IRQs delays the completion of the TLB flush, we can never observe an already freed page.
300 * Used when we want tlb_remove_table() to imply TLB invalidates.
302 static inline void tlb_table_invalidate(struct mmu_gather *tlb) in tlb_table_invalidate() argument
310 tlb_flush_mmu_tlbonly(tlb); in tlb_table_invalidate()
343 static void tlb_table_flush(struct mmu_gather *tlb) in tlb_table_flush() argument
345 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush()
348 tlb_table_invalidate(tlb); in tlb_table_flush()
354 void tlb_remove_table(struct mmu_gather *tlb, void *table) in tlb_remove_table() argument
356 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table()
361 tlb_table_invalidate(tlb); in tlb_remove_table()
370 tlb_table_flush(tlb); in tlb_remove_table()
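Only the tlb-bearing lines of tlb_remove_table() match. Reconstructed from memory (so only a sketch that may differ from the version being browsed), the full function batches opaque table pointers and defers the actual free until after a TLB invalidate plus an RCU-sched grace period:

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/*
			 * No batch page: invalidate and free this one table
			 * synchronously, after making sure no lockless walker
			 * (e.g. GUP-fast) can still be traversing it.
			 */
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);	/* invalidate, then free via RCU */
}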
373 static inline void tlb_table_init(struct mmu_gather *tlb) in tlb_table_init() argument
375 tlb->batch = NULL; in tlb_table_init()
380 static inline void tlb_table_flush(struct mmu_gather *tlb) { } in tlb_table_flush() argument
381 static inline void tlb_table_init(struct mmu_gather *tlb) { } in tlb_table_init() argument
385 static void tlb_flush_mmu_free(struct mmu_gather *tlb) in tlb_flush_mmu_free() argument
387 tlb_table_flush(tlb); in tlb_flush_mmu_free()
389 tlb_batch_pages_flush(tlb); in tlb_flush_mmu_free()
393 void tlb_flush_mmu(struct mmu_gather *tlb) in tlb_flush_mmu() argument
395 tlb_flush_mmu_tlbonly(tlb); in tlb_flush_mmu()
396 tlb_flush_mmu_free(tlb); in tlb_flush_mmu()
399 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, in __tlb_gather_mmu() argument
402 tlb->mm = mm; in __tlb_gather_mmu()
403 tlb->fullmm = fullmm; in __tlb_gather_mmu()
406 tlb->need_flush_all = 0; in __tlb_gather_mmu()
407 tlb->local.next = NULL; in __tlb_gather_mmu()
408 tlb->local.nr = 0; in __tlb_gather_mmu()
409 tlb->local.max = ARRAY_SIZE(tlb->__pages); in __tlb_gather_mmu()
410 tlb->active = &tlb->local; in __tlb_gather_mmu()
411 tlb->batch_count = 0; in __tlb_gather_mmu()
413 tlb->delayed_rmap = 0; in __tlb_gather_mmu()
415 tlb_table_init(tlb); in __tlb_gather_mmu()
417 tlb->page_size = 0; in __tlb_gather_mmu()
420 __tlb_reset_range(tlb); in __tlb_gather_mmu()
421 inc_tlb_flush_pending(tlb->mm); in __tlb_gather_mmu()
426 * @tlb: the mmu_gather structure to initialize
432 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm) in tlb_gather_mmu() argument
434 __tlb_gather_mmu(tlb, mm, false); in tlb_gather_mmu()
439 * @tlb: the mmu_gather structure to initialize
448 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm) in tlb_gather_mmu_fullmm() argument
450 __tlb_gather_mmu(tlb, mm, true); in tlb_gather_mmu_fullmm()
455 * @tlb: the mmu_gather structure to finish
460 void tlb_finish_mmu(struct mmu_gather *tlb) in tlb_finish_mmu() argument
464 * under a non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB in tlb_finish_mmu()
466 * and can end up with stale TLB entries, so the TLB is flushed forcefully in tlb_finish_mmu()
471 * may result in stale TLB entries on some architectures, in tlb_finish_mmu()
472 * e.g. aarch64, which can specify which TLB level to flush. in tlb_finish_mmu()
474 if (mm_tlb_flush_nested(tlb->mm)) { in tlb_finish_mmu()
483 tlb->fullmm = 1; in tlb_finish_mmu()
484 __tlb_reset_range(tlb); in tlb_finish_mmu()
485 tlb->freed_tables = 1; in tlb_finish_mmu()
488 tlb_flush_mmu(tlb); in tlb_finish_mmu()
491 tlb_batch_list_free(tlb); in tlb_finish_mmu()
493 dec_tlb_flush_pending(tlb->mm); in tlb_finish_mmu()
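Taken together, the listing sketches the public lifecycle. A minimal caller, assuming only the APIs shown above (the page-table walk itself is elided, and example_unmap_range is an illustrative name), would look like:

void example_unmap_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	struct mmu_gather tlb;

	/*
	 * Start gathering; tlb_gather_mmu_fullmm() is used instead only when
	 * the whole address space is going away (e.g. exit_mmap()).
	 */
	tlb_gather_mmu(&tlb, mm);

	/*
	 * Page-table walk elided: clear PTEs, record them with
	 * tlb_remove_tlb_entry(), queue pages with tlb_remove_page(),
	 * and hand freed page-table pages to tlb_remove_table().
	 */

	/* Final TLB flush, delayed rmaps/table frees, and batch teardown. */
	tlb_finish_mmu(&tlb);
}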