Lines matching +full:mm +full:-0 in arch/sparc64/mm/tsb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* arch/sparc64/mm/tsb.c
25 return vaddr & (nentries - 1); in tsb_hash()
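The TSB (Translation Storage Buffer) is a direct-mapped, power-of-two-sized table, so tsb_hash() is just a shift and a mask: drop the bits below hash_shift, then AND with nentries - 1. A minimal user-space sketch of that indexing, with illustrative constants (8K base pages, as on sparc64) and a demo name rather than the kernel's:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 13UL	/* illustrative: sparc64 8K base pages */

static unsigned long demo_tsb_hash(unsigned long vaddr,
				   unsigned long hash_shift,
				   unsigned long nentries)
{
	vaddr >>= hash_shift;		/* discard the in-page offset bits */
	return vaddr & (nentries - 1);	/* nentries is a power of two      */
}

int main(void)
{
	unsigned long nentries = 512;	/* e.g. an 8K TSB of 16-byte entries */

	/* Adjacent pages land in adjacent slots ... */
	printf("%lu\n", demo_tsb_hash(0x2000UL, DEMO_PAGE_SHIFT, nentries));	/* 1 */
	/* ... and addresses nentries pages apart collide on the same slot. */
	printf("%lu\n", demo_tsb_hash(0x2000UL + (nentries << DEMO_PAGE_SHIFT),
				      DEMO_PAGE_SHIFT, nentries));		/* 1 */
	return 0;
}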
37 for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) { in flush_tsb_kernel_range_scan()
41 match |= (ent->tag << 22); in flush_tsb_kernel_range_scan()
43 ent->tag = (1UL << TSB_TAG_INVALID_BIT); in flush_tsb_kernel_range_scan()
56 if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) in flush_tsb_kernel_range()
64 if (tag_compare(ent->tag, v)) in flush_tsb_kernel_range()
65 ent->tag = (1UL << TSB_TAG_INVALID_BIT); in flush_tsb_kernel_range()
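flush_tsb_kernel_range() chooses between two strategies: once the range spans at least 2 * KERNEL_TSB_NENTRIES pages, one linear scan of the whole table (flush_tsb_kernel_range_scan(), which reconstructs each entry's address from its index and tag) beats hashing every page; smaller ranges hash each page and invalidate only on a tag match. A simplified user-space sketch of that cutoff, with toy constants, a scan path that simply wipes the table, and a tag layout (vaddr >> 22) inferred from the << 22 in the scan above:

#include <stdio.h>

#define DEMO_PAGE_SHIFT		13UL
#define DEMO_PAGE_SIZE		(1UL << DEMO_PAGE_SHIFT)
#define DEMO_TSB_NENTRIES	512UL
#define DEMO_INVALID_BIT	46	/* stand-in for TSB_TAG_INVALID_BIT; assumes 64-bit long */

struct demo_tsb {
	unsigned long tag;
	unsigned long pte;
};

static struct demo_tsb demo_swapper_tsb[DEMO_TSB_NENTRIES];

static void demo_flush_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	if ((end - start) >> DEMO_PAGE_SHIFT >= 2 * DEMO_TSB_NENTRIES) {
		/* Big range: one pass over the whole table wins.  (The
		 * kernel's scan only invalidates entries inside the range;
		 * this sketch wipes everything for brevity.)
		 */
		for (v = 0; v < DEMO_TSB_NENTRIES; v++)
			demo_swapper_tsb[v].tag = 1UL << DEMO_INVALID_BIT;
		return;
	}
	/* Small range: hash each page, invalidate on a tag match. */
	for (v = start; v < end; v += DEMO_PAGE_SIZE) {
		unsigned long hash = (v >> DEMO_PAGE_SHIFT) &
				     (DEMO_TSB_NENTRIES - 1);
		struct demo_tsb *ent = &demo_swapper_tsb[hash];

		if (ent->tag == (v >> 22))	/* toy tag_compare() */
			ent->tag = 1UL << DEMO_INVALID_BIT;
	}
}

int main(void)
{
	/* 0x400000 hashes to slot 0 and carries tag 1 in this toy layout. */
	demo_swapper_tsb[0].tag = 0x400000UL >> 22;
	demo_flush_range(0x400000UL, 0x402000UL);
	printf("tag after flush: %#lx\n", demo_swapper_tsb[0].tag);
	return 0;
}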
75 v &= ~0x1UL; in __flush_tsb_one_entry()
88 for (i = 0; i < tb->tlb_nr; i++) in __flush_tsb_one()
89 __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); in __flush_tsb_one()
101 hpage_entries = 1 << (hugepage_shift - hash_shift); in __flush_huge_tsb_one_entry()
102 for (i = 0; i < hpage_entries; i++) in __flush_huge_tsb_one_entry()
113 for (i = 0; i < tb->tlb_nr; i++) in __flush_huge_tsb_one()
114 __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, in __flush_huge_tsb_one()
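A huge page covers more address space than a single hash bucket, so __flush_huge_tsb_one_entry() invalidates 1 << (hugepage_shift - hash_shift) slots; in the kernel the flushed address is stepped by one bucket (1 << hash_shift) per iteration. With sparc64's 8M huge pages (HPAGE_SHIFT 23) hashed at REAL_HPAGE_SHIFT 22, that is two slots per page. A toy walk over those buckets, with the shifts as illustrative stand-ins:

#include <stdio.h>

int main(void)
{
	unsigned long hugepage_shift = 23;	/* 8M huge page (HPAGE_SHIFT) */
	unsigned long hash_shift = 22;		/* REAL_HPAGE_SHIFT bucket size */
	unsigned long v = 0x3000000UL;		/* huge-page-aligned address */
	unsigned long hpage_entries = 1UL << (hugepage_shift - hash_shift);
	unsigned long i;

	/* One invalidation per bucket the huge page spans. */
	for (i = 0; i < hpage_entries; i++)
		printf("flush bucket for vaddr %#lx\n", v + (i << hash_shift));
	return 0;
}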
121 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
126 if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
131 if (tb->hugepage_shift == PAGE_SHIFT) in flush_tsb_user()
136 tb->hugepage_shift); in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
146 tb->hugepage_shift); in flush_tsb_user()
149 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
152 void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, in flush_tsb_user_page() argument
157 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page()
160 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user_page()
161 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user_page()
174 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user_page()
175 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user_page()
176 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user_page()
183 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user_page()
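Both user flush paths, flush_tsb_user() and flush_tsb_user_page(), take mm->context.lock with interrupts disabled and then pick a TSB block by mapping size: below REAL_HPAGE_SHIFT the base TSB (hashed at PAGE_SHIFT) handles the flush, at or above it the huge TSB (hashed at REAL_HPAGE_SHIFT) is used, but only if it has actually been allocated. A hedged sketch of just that selection, with demo names and shift values standing in for the kernel's:

#include <stdio.h>

#define DEMO_PAGE_SHIFT		13
#define DEMO_REAL_HPAGE_SHIFT	22

enum demo_tsb_idx { DEMO_TSB_BASE, DEMO_TSB_HUGE, DEMO_TSB_NONE };

static enum demo_tsb_idx demo_pick_tsb(unsigned int hugepage_shift,
				       int have_huge_tsb)
{
	if (hugepage_shift < DEMO_REAL_HPAGE_SHIFT)
		return DEMO_TSB_BASE;	/* normal (and sub-4M) page sizes  */
	if (have_huge_tsb)
		return DEMO_TSB_HUGE;	/* huge mappings, huge TSB exists  */
	return DEMO_TSB_NONE;		/* no huge TSB allocated: nothing  */
}

int main(void)
{
	printf("%d\n", demo_pick_tsb(DEMO_PAGE_SHIFT, 1));		/* base */
	printf("%d\n", demo_pick_tsb(DEMO_REAL_HPAGE_SHIFT, 1));	/* huge */
	printf("%d\n", demo_pick_tsb(DEMO_REAL_HPAGE_SHIFT, 0));	/* none */
	return 0;
}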
194 static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes) in setup_tsb_params() argument
199 mm->context.tsb_block[tsb_idx].tsb_nentries = in setup_tsb_params()
216 tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); in setup_tsb_params()
217 BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); in setup_tsb_params()
223 case 8192 << 0: in setup_tsb_params()
224 tsb_reg = 0x0UL; in setup_tsb_params()
232 tsb_reg = 0x1UL; in setup_tsb_params()
237 tsb_reg = 0x2UL; in setup_tsb_params()
242 tsb_reg = 0x3UL; in setup_tsb_params()
247 tsb_reg = 0x4UL; in setup_tsb_params()
252 tsb_reg = 0x5UL; in setup_tsb_params()
257 tsb_reg = 0x6UL; in setup_tsb_params()
262 tsb_reg = 0x7UL; in setup_tsb_params()
268 current->comm, current->pid, tsb_bytes); in setup_tsb_params()
277 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; in setup_tsb_params()
278 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; in setup_tsb_params()
279 mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; in setup_tsb_params()
282 tsb_reg |= (tsb_paddr & (page_sz - 1UL)); in setup_tsb_params()
283 tte |= (tsb_paddr & ~(page_sz - 1UL)); in setup_tsb_params()
285 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; in setup_tsb_params()
286 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; in setup_tsb_params()
287 mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; in setup_tsb_params()
292 struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; in setup_tsb_params()
296 hp->pgsz_idx = HV_PGSZ_IDX_BASE; in setup_tsb_params()
300 hp->pgsz_idx = HV_PGSZ_IDX_HUGE; in setup_tsb_params()
306 hp->assoc = 1; in setup_tsb_params()
307 hp->num_ttes = tsb_bytes / 16; in setup_tsb_params()
308 hp->ctx_idx = 0; in setup_tsb_params()
311 hp->pgsz_mask = HV_PGSZ_MASK_BASE; in setup_tsb_params()
315 hp->pgsz_mask = HV_PGSZ_MASK_HUGE; in setup_tsb_params()
321 hp->tsb_base = tsb_paddr; in setup_tsb_params()
322 hp->resv = 0; in setup_tsb_params()
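The switch above maps the eight legal TSB sizes, 8192 << 0 through 8192 << 7 (8K up to 1M), onto the low three bits of tsb_reg; that same field is how tsb_destroy_one() below recovers the kmem cache index with tsb_reg_val & 0x7UL. A hedged sketch of the encoding without the switch (demo name, no kernel API):

#include <stdio.h>

static int demo_tsb_size_field(unsigned long tsb_bytes)
{
	int n = 0;

	/* Valid sizes are exactly 8192 << 0 .. 8192 << 7. */
	while ((8192UL << n) != tsb_bytes) {
		if (++n > 7)
			return -1;	/* impossible size: the kernel BUG()s */
	}
	return n;
}

int main(void)
{
	printf("%d\n", demo_tsb_size_field(8192));		/* 0 */
	printf("%d\n", demo_tsb_size_field(1024 * 1024));	/* 7 */
	return 0;
}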
347 0, in pgtable_cache_init()
354 for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) { in pgtable_cache_init()
360 0, NULL); in pgtable_cache_init()
368 int sysctl_tsb_ratio = -2;
374 if (sysctl_tsb_ratio < 0) in tsb_size_to_rss_limit()
375 return num_ents - (num_ents >> -sysctl_tsb_ratio); in tsb_size_to_rss_limit()
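With the default sysctl_tsb_ratio of -2, the limit works out to num_ents - num_ents/4: the TSB counts as full once RSS passes 75% of its capacity. Presumably a non-negative ratio relaxes this to num_ents + (num_ents >> ratio); that branch is not among the matched lines, so it is an assumption in the sketch below. The arithmetic, with 16-byte TSB entries:

#include <stdio.h>

static int demo_tsb_ratio = -2;

static unsigned long demo_rss_limit(unsigned long tsb_bytes)
{
	unsigned long num_ents = tsb_bytes / 16;	/* 16-byte entries */

	if (demo_tsb_ratio < 0)
		return num_ents - (num_ents >> -demo_tsb_ratio);
	/* Assumed positive branch: allow RSS to exceed capacity. */
	return num_ents + (num_ents >> demo_tsb_ratio);
}

int main(void)
{
	/* 8K table = 512 entries; limit = 512 - 512/4 = 384 pages. */
	printf("%lu\n", demo_rss_limit(8192));
	return 0;
}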
383 * When we reach the maximum TSB size supported, we stick ~0UL into
396 void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) in tsb_grow() argument
408 new_cache_index = 0; in tsb_grow()
417 new_rss_limit = ~0UL; in tsb_grow()
427 /* Not being able to fork due to a high-order TSB in tsb_grow()
429 * down to a 0-order allocation and force no TSB in tsb_grow()
432 if (mm->context.tsb_block[tsb_index].tsb == NULL && in tsb_grow()
433 new_cache_index > 0) { in tsb_grow()
434 new_cache_index = 0; in tsb_grow()
436 new_rss_limit = ~0UL; in tsb_grow()
443 if (mm->context.tsb_block[tsb_index].tsb != NULL) in tsb_grow()
444 mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; in tsb_grow()
455 * We have to hold mm->context.lock while committing to the in tsb_grow()
473 spin_lock_irqsave(&mm->context.lock, flags); in tsb_grow()
475 old_tsb = mm->context.tsb_block[tsb_index].tsb; in tsb_grow()
477 (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); in tsb_grow()
478 old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * in tsb_grow()
487 (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { in tsb_grow()
488 spin_unlock_irqrestore(&mm->context.lock, flags); in tsb_grow()
494 mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; in tsb_grow()
514 mm->context.tsb_block[tsb_index].tsb = new_tsb; in tsb_grow()
515 setup_tsb_params(mm, tsb_index, new_size); in tsb_grow()
517 spin_unlock_irqrestore(&mm->context.lock, flags); in tsb_grow()
524 tsb_context_switch(mm); in tsb_grow()
528 smp_tsb_sync(mm); in tsb_grow()
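tsb_grow() walks the power-of-two sizes up from 8K until the RSS limit for a size exceeds the current RSS, pinning the limit to ~0UL once the maximum (1M) is reached so the table is never grown again. On allocation failure it retries one size down, and a brand-new address space falls back to an order-0 allocation before giving up entirely. The new table is committed under mm->context.lock, after which tsb_context_switch() loads it on this CPU and smp_tsb_sync() makes the other CPUs reload it. A sketch of the size-selection loop, reusing the default 75%-of-capacity rule from tsb_size_to_rss_limit() (demo names, 16-byte entries assumed):

#include <stdio.h>

static unsigned long demo_rss_limit(unsigned long tsb_bytes)
{
	unsigned long num_ents = tsb_bytes / 16;

	return num_ents - (num_ents >> 2);	/* default ratio of -2 */
}

int main(void)
{
	unsigned long rss = 100000;		/* resident pages */
	unsigned long max_tsb = 1024 * 1024;	/* 1M size ceiling */
	unsigned long new_size, new_limit = 0;
	int cache_index = 0;

	for (new_size = 8192; new_size < max_tsb; new_size <<= 1) {
		new_limit = demo_rss_limit(new_size);
		if (new_limit > rss)
			break;
		cache_index++;
	}
	if (new_size == max_tsb)
		new_limit = ~0UL;		/* sentinel: never grow again */

	printf("size=%lu cache_index=%d limit=%lu\n",
	       new_size, cache_index, new_limit);
	return 0;
}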
536 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
538 unsigned long mm_rss = get_mm_rss(mm); in init_new_context()
545 spin_lock_init(&mm->context.lock); in init_new_context()
547 mm->context.sparc64_ctx_val = 0UL; in init_new_context()
549 mm->context.tag_store = NULL; in init_new_context()
550 spin_lock_init(&mm->context.tag_lock); in init_new_context()
554 * will re-increment the counters as the parent PTEs are in init_new_context()
557 saved_hugetlb_pte_count = mm->context.hugetlb_pte_count; in init_new_context()
558 saved_thp_pte_count = mm->context.thp_pte_count; in init_new_context()
559 mm->context.hugetlb_pte_count = 0; in init_new_context()
560 mm->context.thp_pte_count = 0; in init_new_context()
562 mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE); in init_new_context()
569 for (i = 0; i < MM_NUM_TSBS; i++) in init_new_context()
570 mm->context.tsb_block[i].tsb = NULL; in init_new_context()
575 tsb_grow(mm, MM_TSB_BASE, mm_rss); in init_new_context()
579 tsb_grow(mm, MM_TSB_HUGE, in init_new_context()
584 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) in init_new_context()
585 return -ENOMEM; in init_new_context()
587 return 0; in init_new_context()
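At fork time the child's hugetlb and THP counters are zeroed before the parent's page tables are copied, because copy_page_range() will re-increment them as each PTE is duplicated; the THP-backed pages are also subtracted from the RSS used to size the base TSB, since those mappings are tracked by the huge TSB instead. A hedged arithmetic sketch of that adjustment (sparc64's 8M HPAGE_SIZE over 8K PAGE_SIZE gives 1024 small pages per huge PMD; the counts are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long mm_rss = 50000;		/* total resident pages      */
	unsigned long thp_pte_count = 16;	/* huge PMDs seen so far     */
	unsigned long pages_per_pmd = (8UL << 20) / (8UL << 10);  /* 1024 */

	/* THP pages belong to the huge TSB, so remove them from the
	 * RSS figure that sizes the base TSB: 50000 - 16384 = 33616.
	 */
	mm_rss -= thp_pte_count * pages_per_pmd;
	printf("base-TSB sizing rss = %lu\n", mm_rss);
	return 0;
}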
594 if (!tp->tsb) in tsb_destroy_one()
596 cache_index = tp->tsb_reg_val & 0x7UL; in tsb_destroy_one()
597 kmem_cache_free(tsb_caches[cache_index], tp->tsb); in tsb_destroy_one()
598 tp->tsb = NULL; in tsb_destroy_one()
599 tp->tsb_reg_val = 0UL; in tsb_destroy_one()
602 void destroy_context(struct mm_struct *mm) in destroy_context() argument
606 for (i = 0; i < MM_NUM_TSBS; i++) in destroy_context()
607 tsb_destroy_one(&mm->context.tsb_block[i]); in destroy_context()
611 if (CTX_VALID(mm->context)) { in destroy_context()
612 unsigned long nr = CTX_NRBITS(mm->context); in destroy_context()
619 if (mm->context.tag_store) { in destroy_context()
624 tag_desc = mm->context.tag_store; in destroy_context()
626 for (i = 0; i < max_desc; i++) { in destroy_context()
627 tags = tag_desc->tags; in destroy_context()
628 tag_desc->tags = NULL; in destroy_context()
632 kfree(mm->context.tag_store); in destroy_context()
633 mm->context.tag_store = NULL; in destroy_context()