Lines Matching +full:t +full:- +full:head +full:- +full:semi

23 	if (tlb->delayed_rmap && tlb->active != &tlb->local)  in tlb_next_batch()
26 batch = tlb->active; in tlb_next_batch()
27 if (batch->next) { in tlb_next_batch()
28 tlb->active = batch->next; in tlb_next_batch()
32 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) in tlb_next_batch()
39 tlb->batch_count++; in tlb_next_batch()
40 batch->next = NULL; in tlb_next_batch()
41 batch->nr = 0; in tlb_next_batch()
42 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
44 tlb->active->next = batch; in tlb_next_batch()
45 tlb->active = batch; in tlb_next_batch()
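
These matches appear to come from tlb_next_batch() in the kernel's mm/mmu_gather.c. As a reading aid, here is a sketch that reassembles the matched fragments into the full control flow; the return statements and the __get_free_page(GFP_NOWAIT | __GFP_NOWARN) allocation are filled in from surrounding context rather than from the matches above, so verify them against your tree.

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching while delayed rmaps are pending (the match at line 23). */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	/* Reuse an already-allocated batch further down the chain, if any. */
	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	/* Cap how many batches one gather may allocate. */
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	/* Assumed allocation path: one page per batch, no warning on failure. */
	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	/* Link the new batch behind the current one and make it active. */
	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}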
53 struct encoded_page **pages = batch->encoded_pages; in tlb_flush_rmap_batch()
55 for (int i = 0; i < batch->nr; i++) { in tlb_flush_rmap_batch()
73 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
84 if (!tlb->delayed_rmap) in tlb_flush_rmaps()
87 tlb_flush_rmap_batch(&tlb->local, vma); in tlb_flush_rmaps()
88 if (tlb->active != &tlb->local) in tlb_flush_rmaps()
89 tlb_flush_rmap_batch(tlb->active, vma); in tlb_flush_rmaps()
90 tlb->delayed_rmap = 0; in tlb_flush_rmaps()
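
tlb_flush_rmaps() only walks the embedded local batch and the active one; that is enough because the line-23 check above stops tlb_next_batch() from opening further batches while delayed rmaps are pending. Below is a sketch of the per-batch walk that the matches at lines 53 and 55 belong to; the encoded-page flag names and the folio_remove_rmap_ptes() call are drawn from context and should be treated as assumptions.

static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch,
				 struct vm_area_struct *vma)
{
	struct encoded_page **pages = batch->encoded_pages;

	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = pages[i];

		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) {
			struct page *page = encoded_page_ptr(enc);
			unsigned int nr_pages = 1;

			/* A multi-page run stores its length in the next slot. */
			if (unlikely(encoded_page_flags(enc) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr_pages = encoded_nr_pages(pages[++i]);

			/* Drop the rmap only now, after the TLB has been flushed. */
			folio_remove_rmap_ptes(page_folio(page), page,
					       nr_pages, vma);
		}
	}
}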
103 struct encoded_page **pages = batch->encoded_pages; in __tlb_batch_free_encoded_pages()
106 while (batch->nr) { in __tlb_batch_free_encoded_pages()
108 nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr); in __tlb_batch_free_encoded_pages()
111 * Make sure we cover page + nr_pages, and don't leave in __tlb_batch_free_encoded_pages()
114 if (unlikely(encoded_page_flags(pages[nr - 1]) & in __tlb_batch_free_encoded_pages()
126 nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE; in __tlb_batch_free_encoded_pages()
138 batch->nr -= nr; in __tlb_batch_free_encoded_pages()
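
The loop at lines 106-138 releases the gathered pages in bounded chunks so a huge batch does not hog the CPU. The subtlety at line 114 is that a multi-page run occupies two consecutive slots (page entry plus count), and a chunk boundary must never separate them. A condensed sketch of the non-poisoning branch follows; free_pages_and_swap_cache() and the constant name are assumptions taken from context.

	unsigned int nr;

	while (batch->nr) {
		nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);

		/*
		 * If the last entry we would take is a tagged "page + run"
		 * head, pull in one more slot so its nr_pages count comes
		 * along with it.
		 */
		if (unlikely(encoded_page_flags(pages[nr - 1]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			nr++;

		free_pages_and_swap_cache(pages, nr);	/* assumed free helper */
		pages += nr;
		batch->nr -= nr;

		cond_resched();
	}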
148 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) in tlb_batch_pages_flush()
150 tlb->active = &tlb->local; in tlb_batch_pages_flush()
157 for (batch = tlb->local.next; batch; batch = next) { in tlb_batch_list_free()
158 next = batch->next; in tlb_batch_list_free()
161 tlb->local.next = NULL; in tlb_batch_list_free()
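
Teardown is two passes: lines 148-150 (tlb_batch_pages_flush) free the gathered pages from every batch and rewind tlb->active to the embedded local batch, then lines 157-161 (tlb_batch_list_free) release the extra batch structures themselves. A sketch of that second pass, assuming each non-local batch is a single page as allocated in tlb_next_batch():

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	/* tlb->local is embedded in struct mmu_gather, so it is skipped. */
	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);	/* assumed: page-sized batches */
	}
	tlb->local.next = NULL;
}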
171 VM_BUG_ON(!tlb->end); in __tlb_remove_folio_pages_size()
174 VM_WARN_ON(tlb->page_size != page_size); in __tlb_remove_folio_pages_size()
176 VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); in __tlb_remove_folio_pages_size()
179 batch = tlb->active; in __tlb_remove_folio_pages_size()
185 batch->encoded_pages[batch->nr++] = encode_page(page, flags); in __tlb_remove_folio_pages_size()
188 batch->encoded_pages[batch->nr++] = encode_page(page, flags); in __tlb_remove_folio_pages_size()
189 batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages); in __tlb_remove_folio_pages_size()
195 if (batch->nr >= batch->max - 1) { in __tlb_remove_folio_pages_size()
198 batch = tlb->active; in __tlb_remove_folio_pages_size()
200 VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page); in __tlb_remove_folio_pages_size()
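
Lines 185-189 show the slot encoding used by __tlb_remove_folio_pages_size(): a single page consumes one slot, while a multi-page run within one folio consumes two, the page pointer tagged with ENCODED_PAGE_BIT_NR_PAGES_NEXT followed by an encoded count. That is why line 195 keeps one slot in reserve before switching batches. A trimmed sketch of that middle section; the flag setup and the return values are reconstructed from context.

	int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
	struct mmu_gather_batch *batch = tlb->active;

	if (likely(nr_pages == 1)) {
		/* Common case: one slot per page. */
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
	} else {
		/* A run: tag the page entry, then store the count in the next slot. */
		flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
		batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
	}

	/* Keep room for another two-slot entry; otherwise open a new batch. */
	if (batch->nr >= batch->max - 1) {
		if (!tlb_next_batch(tlb))
			return true;	/* no batch available: caller must flush now */
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

	return false;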
226 for (i = 0; i < batch->nr; i++) in __tlb_remove_table_free()
227 __tlb_remove_table(batch->tables[i]); in __tlb_remove_table_free()
235 * Semi RCU freeing of the page directories.
239 * gup_fast() and other software pagetable walkers do a lockless page-table
271 * This isn't an RCU grace period and hence the page-tables cannot be in tlb_remove_table_sync_one()
272 * assumed to be actually RCU-freed. in tlb_remove_table_sync_one()
274 * It is however sufficient for software page-table walkers that rely on in tlb_remove_table_sync_one()
280 static void tlb_remove_table_rcu(struct rcu_head *head) in tlb_remove_table_rcu() argument
282 __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu)); in tlb_remove_table_rcu()
287 call_rcu(&batch->rcu, tlb_remove_table_rcu); in tlb_remove_table_free()
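
The block starting at line 235 explains why page directories are freed "semi RCU": besides the hardware TLB invalidation, the free must wait out lockless software walkers such as gup_fast(). Lines 280-287 implement that deferral for a full table batch; here is a sketch reassembling them, with the struct mmu_table_batch layout (an embedded rcu_head) assumed from context.

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	/* The rcu_head is embedded in the batch; recover the batch and free it. */
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	/*
	 * Defer the actual __tlb_remove_table() calls until after a grace
	 * period, so lockless page-table walkers that started before the
	 * entries were cleared have finished.
	 */
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}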
306 * Invalidate page-table caches used by hardware walkers. Then in tlb_table_invalidate()
307 * we still need to RCU-sched wait while freeing the pages in tlb_table_invalidate()
308 * because software walkers can still be in-flight. in tlb_table_invalidate()
315 static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) in __tlb_remove_table_one_rcu() argument
319 ptdesc = container_of(head, struct ptdesc, pt_rcu_head); in __tlb_remove_table_one_rcu()
328 call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu); in __tlb_remove_table_one()
345 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush()
356 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table()
365 (*batch)->nr = 0; in tlb_remove_table()
368 (*batch)->tables[(*batch)->nr++] = table; in tlb_remove_table()
369 if ((*batch)->nr == MAX_TABLE_BATCH) in tlb_remove_table()
375 tlb->batch = NULL; in tlb_table_init()
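
tlb_remove_table() (lines 356-369) queues freed page-table pages into an mmu_table_batch and kicks tlb_table_flush() once MAX_TABLE_BATCH entries have accumulated. A sketch with the first-use allocation and the out-of-memory fallback filled in from context; tlb_remove_table_one() is assumed to be the single-table path built on the RCU helpers shown at lines 315-328.

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		/* Assumed allocation: one page holds the batch array. */
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/* No memory: invalidate caches and free this one table now. */
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}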
402 tlb->mm = mm; in __tlb_gather_mmu()
403 tlb->fullmm = fullmm; in __tlb_gather_mmu()
406 tlb->need_flush_all = 0; in __tlb_gather_mmu()
407 tlb->local.next = NULL; in __tlb_gather_mmu()
408 tlb->local.nr = 0; in __tlb_gather_mmu()
409 tlb->local.max = ARRAY_SIZE(tlb->__pages); in __tlb_gather_mmu()
410 tlb->active = &tlb->local; in __tlb_gather_mmu()
411 tlb->batch_count = 0; in __tlb_gather_mmu()
413 tlb->delayed_rmap = 0; in __tlb_gather_mmu()
417 tlb->page_size = 0; in __tlb_gather_mmu()
421 inc_tlb_flush_pending(tlb->mm); in __tlb_gather_mmu()
425 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
429 * Called to initialize an (on-stack) mmu_gather structure for page-table
430 * tear-down from @mm.
438 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
445 * Called to initialize an (on-stack) mmu_gather structure for page-table
446 * tear-down from @mm.
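
The kernel-doc at lines 425-446 describes the lifecycle: set up an on-stack mmu_gather, queue pages while tearing mappings down, then finish. A hedged caller sketch, not taken from any real call site (zap_range_sketch() and its single-page body are purely illustrative); tlb_remove_page() is the asm-generic helper that flushes on its own when a batch fills.

static void zap_range_sketch(struct mm_struct *mm, struct page *page)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* initialise the on-stack gather for @mm */

	/*
	 * A real caller walks the page tables here, clears PTEs, and queues
	 * every page whose mapping went away instead of freeing it directly.
	 */
	tlb_remove_page(&tlb, page);	/* may trigger tlb_flush_mmu() internally */

	/* Freed page-table pages would go through tlb_remove_table(&tlb, ...). */

	tlb_finish_mmu(&tlb);		/* final TLB flush, then free what was gathered */
}

For a whole-address-space teardown (exit/exec), the second kernel-doc block above points at tlb_gather_mmu_fullmm() instead.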
454 * tlb_finish_mmu - finish an mmu_gather structure
464 * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB in tlb_finish_mmu()
474 if (mm_tlb_flush_nested(tlb->mm)) { in tlb_finish_mmu()
480 * On x86 non-fullmm doesn't yield significant difference in tlb_finish_mmu()
483 tlb->fullmm = 1; in tlb_finish_mmu()
485 tlb->freed_tables = 1; in tlb_finish_mmu()
493 dec_tlb_flush_pending(tlb->mm); in tlb_finish_mmu()
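
Lines 474-493 handle the case where several threads batch PTE changes on the same range under a shared lock: if another flush is still pending, tlb_finish_mmu() gives up on precise ranges and forces a full-mm flush. A sketch of that tail; __tlb_reset_range(), tlb_flush_mmu() and the final bookkeeping are filled in from context.

void tlb_finish_mmu(struct mmu_gather *tlb)
{
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * Another thread may have deferred a flush for this range
		 * under a non-exclusive lock; be paranoid and flush the
		 * whole mm rather than risk stale TLB entries.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);		/* flush the TLB and free the gathered pages */

	tlb_batch_list_free(tlb);	/* assumed: only when gathering is compiled in */
	dec_tlb_flush_pending(tlb->mm);
}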