Lines matching "i", "tlb", and "size" in the kernel's mm/mmu_gather.c

#include <asm/tlb.h>
static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        /* Don't start a new batch while delayed rmaps are still pending */
        if (tlb->delayed_rmap && tlb->active != &tlb->local)
                return false;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }
        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;
        ...
        tlb->batch_count++;
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}
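For context, the batch list that tlb_next_batch() walks and extends is built from mmu_gather_batch nodes. The sketch below reflects the declaration in include/asm-generic/tlb.h as I recall it, not the matched lines themselves, so field order and exact types may differ between kernel versions:

struct mmu_gather_batch {
        struct mmu_gather_batch *next;          /* singly linked list of batches */
        unsigned int            nr;             /* encoded_pages[] entries in use */
        unsigned int            max;            /* capacity of encoded_pages[] */
        struct encoded_page     *encoded_pages[];
};

The first batch is embedded in struct mmu_gather itself as tlb->local; additional batches are single pages allocated on demand, and MAX_GATHER_BATCH_COUNT bounds how many may be chained before the caller is forced to flush.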
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch,
                struct vm_area_struct *vma)
{
        struct encoded_page **pages = batch->encoded_pages;

        for (int i = 0; i < batch->nr; i++) {
                struct encoded_page *enc = pages[i];
                ...
                nr_pages = encoded_nr_pages(pages[++i]);
                ...
        }
}
/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: The memory area from which the pages are being removed.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->delayed_rmap)
                return;

        tlb_flush_rmap_batch(&tlb->local, vma);
        if (tlb->active != &tlb->local)
                tlb_flush_rmap_batch(tlb->active, vma);
        tlb->delayed_rmap = 0;
}
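Because the rmap removals above are deferred, callers must keep the ordering "flush the TLB, then tlb_flush_rmaps()" so the mapping stays visible to rmap walkers until no CPU can hold a stale translation. A hypothetical caller sketch (zap_example() and its arguments are made up for illustration; the return value of __tlb_remove_page_size(), which signals a full batch, is ignored for brevity):

static void zap_example(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        struct page *page)
{
        /* queue the page; its rmap removal is deferred */
        __tlb_remove_page_size(tlb, page, /* delay_rmap = */ true, PAGE_SIZE);

        /* first make the stale translations unreachable ... */
        tlb_flush_mmu_tlbonly(tlb);
        /* ... then perform the deferred rmap removals */
        tlb_flush_rmaps(tlb, vma);
}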
static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
{
        struct encoded_page **pages = batch->encoded_pages;
        unsigned int nr, nr_pages;

        while (batch->nr) {
                /* common case: cap the burst by number of array entries ... */
                nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);
                if (unlikely(encoded_page_flags(pages[nr - 1]) &
                             ENCODED_PAGE_BIT_NR_PAGES_NEXT))
                        ...
                /*
                 * ... otherwise: with page poisoning and init_on_free, the
                 * time it takes to free memory grows proportionally with the
                 * actual memory size. Therefore, limit based on the actual
                 * memory size and not the number of involved folios.
                 */
                for (nr = 0, nr_pages = 0;
                     nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
                     nr++)
                        ...
                ...
                batch->nr -= nr;
                ...
        }
}
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
                __tlb_batch_free_encoded_pages(batch);
        tlb->active = &tlb->local;
}
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch, *next;

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}
static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb,
                struct page *page, unsigned int nr_pages, bool delay_rmap,
                int page_size)
{
        int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);
        ...
        VM_WARN_ON(tlb->page_size != page_size);
        VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));

        batch = tlb->active;
        if (likely(nr_pages == 1)) {
                batch->encoded_pages[batch->nr++] = encode_page(page, flags);
        } else {
                flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
                batch->encoded_pages[batch->nr++] = encode_page(page, flags);
                batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
        }
        /* keep room for a trailing "page + nr_pages" pair (two entries) */
        if (batch->nr >= batch->max - 1) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

        return false;
}
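The encoded_pages[] entries used above pack flag bits into the low bits of the page pointer, and a multi-page run stores its length in the following array slot (ENCODED_PAGE_BIT_NR_PAGES_NEXT). A rough sketch of the encoding helpers, which live in include/linux/mm_types.h rather than in the matched file; the names and bit values below are given from memory and may not match every kernel version:

/* Low pointer bits available for flags (values shown for illustration). */
#define ENCODED_PAGE_BITS               3ul
#define ENCODED_PAGE_BIT_DELAY_RMAP     1ul     /* rmap removal is deferred     */
#define ENCODED_PAGE_BIT_NR_PAGES_NEXT  2ul     /* next slot holds a page count */

static inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
{
        return (struct encoded_page *)(flags | (unsigned long)page);
}

static inline unsigned long encoded_page_flags(struct encoded_page *page)
{
        return ENCODED_PAGE_BITS & (unsigned long)page;
}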
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
                unsigned int nr_pages, bool delay_rmap)
{
        return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap,
                                             PAGE_SIZE);
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                bool delay_rmap, int page_size)
{
        return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
}
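A true return from these helpers means the gather buffers are full and the caller has to flush before queueing more pages. The inline wrappers in include/asm-generic/tlb.h express that contract roughly as follows (sketched from memory, not part of the matched lines):

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        if (__tlb_remove_page_size(tlb, page, false, page_size))
                tlb_flush_mmu(tlb);     /* batch full: flush TLB, free pages */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb_remove_page_size(tlb, page, PAGE_SIZE);
}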
static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
        int i;

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);
        ...
}
/*
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories; the chosen means is disabling IRQs over the walk. Since the
 * disabling of IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 * ...
 */

void tlb_remove_table_sync_one(void)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling.
         */
        ...
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
        call_rcu(&batch->rcu, tlb_remove_table_rcu);
}
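The synchronization that tlb_remove_table_sync_one() provides boils down to broadcasting a no-op IPI: once every other CPU has taken the interrupt, no CPU can still be inside an IRQs-disabled lockless walk that started before the tables were unlinked. A minimal sketch of that pattern, under the assumption that this is how the elided body works (wait_for_lockless_walkers() is a made-up wrapper name):

/* Runs on every other CPU; the only point is that the IPI was delivered. */
static void tlb_remove_table_smp_sync(void *arg)
{
        /* nothing to do: taking the interrupt is the synchronization */
}

static void wait_for_lockless_walkers(void)
{
        /*
         * smp_call_function(func, info, wait=1) runs func on all other CPUs
         * and waits for completion; a CPU can only respond after it has
         * re-enabled IRQs, i.e. after leaving any gup_fast()-style walk.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}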
/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
        if (tlb_needs_table_invalidate()) {
                /*
                 * Invalidate page-table caches used by hardware walkers. Then
                 * we still need to RCU-sched wait while freeing the pages
                 * because software walkers can still be in-flight.
                 */
                tlb_flush_mmu_tlbonly(tlb);
        }
}

static void __tlb_remove_table_one(void *table)
{
        struct ptdesc *ptdesc = table;

        call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
}
static void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                tlb_remove_table_free(*batch);
                *batch = NULL;
        }
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                /* Try to allocate a batch page; on failure, free the table
                 * immediately (after invalidating) instead of batching it. */
                ...
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}
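For reference, the table batch filled here is a single page laid out as an RCU head plus an array of table pointers. Roughly, per include/asm-generic/tlb.h (reproduced from memory; the exact definition may vary by kernel version):

struct mmu_table_batch {
        struct rcu_head         rcu;            /* used for the call_rcu() free path */
        unsigned int            nr;             /* tables[] entries in use */
        void                    *tables[];
};

/* As many table pointers as fit in the remainder of one page. */
#define MAX_TABLE_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))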
static inline void tlb_table_init(struct mmu_gather *tlb)
{
        tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        tlb_table_flush(tlb);
        tlb_batch_pages_flush(tlb);
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}
static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                             bool fullmm)
{
        tlb->mm = mm;
        tlb->fullmm = fullmm;

        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr = 0;
        tlb->local.max = ARRAY_SIZE(tlb->__pages);
        tlb->active = &tlb->local;
        tlb->batch_count = 0;

        tlb->delayed_rmap = 0;

        tlb_table_init(tlb);
        tlb->page_size = 0;

        __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
}
/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
        __tlb_gather_mmu(tlb, mm, false);
}
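As the comment says, the structure normally lives on the caller's stack and brackets one batch of unmapping work. A minimal, hypothetical usage sketch (teardown_example() is made up; the middle step stands in for whatever zap/unmap helper a real caller uses):

static void teardown_example(struct mm_struct *mm)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm);       /* begin gathering for this mm */

        /*
         * ... unmap PTEs here, feeding freed pages and page tables into the
         * gather via the __tlb_remove_*() / tlb_remove_table() helpers ...
         */

        tlb_finish_mmu(&tlb);           /* flush the TLB, then free everything */
}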
/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
        __tlb_gather_mmu(tlb, mm, true);
}
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
        /*
         * If parallel threads are doing PTE changes on the same range under
         * a non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
         * flush by batching, one thread may end up seeing inconsistent PTEs
         * and stale TLB entries. So flush the TLB forcefully if we detect
         * parallel PTE-batching threads.
         *
         * Some syscalls, e.g. munmap(), may also free page tables, which
         * needs a forced flush of everything in the given range. Otherwise
         * we may be left with stale TLB entries on some architectures,
         * e.g. aarch64, which can specify which level of the page-table
         * hierarchy to flush.
         */
        if (mm_tlb_flush_nested(tlb->mm)) {
                /*
                 * aarch64 yields better performance with fullmm by avoiding
                 * multiple CPUs spamming TLBI messages at the same time.
                 *
                 * On x86, non-fullmm doesn't yield a significant difference
                 * against fullmm.
                 */
                tlb->fullmm = 1;
                __tlb_reset_range(tlb);
                tlb->freed_tables = 1;
        }

        tlb_flush_mmu(tlb);

        tlb_batch_list_free(tlb);
        dec_tlb_flush_pending(tlb->mm);
}