Lines Matching +full:broken-turn-around
1 // SPDX-License-Identifier: GPL-2.0
29 #include <asm/io-unit.h>
75 #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
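FLUSH_BEGIN() opens an if-block that its counterpart FLUSH_END (a bare closing brace defined next to it in srmmu.c) closes, so a flush body is skipped for any mm that never received a hardware context. A minimal usage sketch; the function name is illustrative and flush_user_windows() merely stands in for the real per-CPU flush work:

#define FLUSH_END	}

static void example_flush_cache_mm(struct mm_struct *mm)	/* illustrative name */
{
	FLUSH_BEGIN(mm)
	flush_user_windows();		/* stand-in for the actual flush body */
	FLUSH_END
}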
98 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
109 /* XXX should we hyper_flush_whole_icache here - Anton */
158 if (size & (minsz - 1)) { in __srmmu_get_nocache()
161 size += minsz - 1; in __srmmu_get_nocache()
168 if (offset == -1) { in __srmmu_get_nocache()
215 if (vaddr & (size - 1)) { in srmmu_free_nocache()
220 offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; in srmmu_free_nocache()
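The fragments above come from the nocache allocator: __srmmu_get_nocache() rounds every request up to the allocator's granule, and srmmu_free_nocache() converts a virtual address back into a bitmap offset. A minimal sketch of just those two calculations, with hypothetical helper names (the real code operates directly on the srmmu_nocache bitmap):

static unsigned long nocache_round_up(unsigned long size, unsigned long minsz)
{
	if (size & (minsz - 1)) {		/* not already a multiple of minsz */
		size += minsz - 1;
		size &= ~(minsz - 1);
	}
	return size;
}

static int nocache_bitmap_offset(unsigned long vaddr)
{
	/* One bitmap bit covers 2^SRMMU_NOCACHE_BITMAP_SHIFT bytes,
	 * i.e. PAGE_SIZE / 16 with the definition shown above. */
	return (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
}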
243 * system RAM. -- Tomas Szepe <[email protected]>, June 2002
329 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); in get_pgd_fast()
351 spin_lock(&mm->page_table_lock); in pte_alloc_one()
357 spin_unlock(&mm->page_table_lock); in pte_alloc_one()
367 spin_lock(&mm->page_table_lock); in pte_free()
370 spin_unlock(&mm->page_table_lock); in pte_free()
375 /* context handling - a dynamically sized pool is used */
376 #define NO_CONTEXT -1
394 entry->next->prev = entry->prev; in remove_from_ctx_list()
395 entry->prev->next = entry->next; in remove_from_ctx_list()
400 entry->next = head; in add_to_ctx_list()
401 (entry->prev = head->prev)->next = entry; in add_to_ctx_list()
402 head->prev = entry; in add_to_ctx_list()
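remove_from_ctx_list() and add_to_ctx_list() maintain a circular doubly-linked list of contexts. Reassembled from the fragments above into a self-contained sketch; the struct ctx_list fields follow the sparc32 code, though their exact order here is incidental:

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;	/* splice in before head */
	head->prev = entry;
}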
416 mm->context = ctxp->ctx_number; in alloc_context()
417 ctxp->ctx_mm = mm; in alloc_context()
421 if (ctxp->ctx_mm == old_mm) in alloc_context()
422 ctxp = ctxp->next; in alloc_context()
425 flush_cache_mm(ctxp->ctx_mm); in alloc_context()
426 flush_tlb_mm(ctxp->ctx_mm); in alloc_context()
429 ctxp->ctx_mm->context = NO_CONTEXT; in alloc_context()
430 ctxp->ctx_mm = mm; in alloc_context()
431 mm->context = ctxp->ctx_number; in alloc_context()
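The alloc_context() fragments above are the "dynamically sized pool" in action: hand out a free context if one exists, otherwise steal the oldest used one, flushing the victim mm so its stale translations cannot leak to the new owner. A simplified reconstruction under that reading; ctx_free, ctx_used and add_to_used_ctxlist() are not in the excerpt and are assumed from the surrounding file, and error handling is elided:

static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {		/* a free context is available */
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}

	ctxp = ctx_used.next;			/* steal the oldest used context */
	if (ctxp->ctx_mm == old_mm)		/* but never the caller's own */
		ctxp = ctxp->next;
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}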
455 clist->ctx_number = ctx; in sparc_context_init()
456 clist->ctx_mm = NULL; in sparc_context_init()
469 if (mm->context == NO_CONTEXT) { in switch_mm()
473 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); in switch_mm()
482 srmmu_set_context(mm->context); in switch_mm()
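switch_mm() ties the pool to the hardware: an mm that has never run gets a context lazily, the matching entry in srmmu_context_table is pointed at its pgd, and the MMU is switched to that context number. A compact sketch of that flow; the function name is illustrative, and the real code takes the context spinlock around alloc_context():

static void switch_mm_sketch(struct mm_struct *old_mm, struct mm_struct *mm)
{
	if (mm->context == NO_CONTEXT) {
		alloc_context(old_mm, mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}
	srmmu_set_context(mm->context);
}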
506 * 36-bit physical address on the I/O space lines... in srmmu_mapioaddr()
518 len -= PAGE_SIZE; in srmmu_mapiorange()
548 len -= PAGE_SIZE; in srmmu_unmapiorange()
590 if ((ctx1 = vma->vm_mm->context) != -1) {
663 * around 8mb mapped for us.
701 if (start > (0xffffffffUL - PMD_SIZE)) in srmmu_early_allocate_ptable_skeleton()
736 if (start > (0xffffffffUL - PMD_SIZE)) in srmmu_allocate_ptable_skeleton()
760 * This is much cleaner than poking around physical address space
774 int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ in srmmu_inherit_prom_mappings()
778 break; /* probably wrap around */ in srmmu_inherit_prom_mappings()
790 addr = start - PAGE_SIZE; in srmmu_inherit_prom_mappings()
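Both ptable-skeleton builders and srmmu_inherit_prom_mappings() walk virtual addresses in PMD_SIZE (or larger) steps, and each carries the guard shown above so an unsigned 32-bit address never wraps past 0xffffffff. The shape of such a loop, with the page-table work itself elided (PMD_MASK is the usual kernel macro, not shown in the excerpt):

while (start < end) {
	if (start > (0xffffffffUL - PMD_SIZE))
		break;				/* the next step would wrap around */
	/* ... allocate or copy the pgd/pmd/pte entries covering 'start' ... */
	start = (start + PMD_SIZE) & PMD_MASK;
}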
839 /* Create a third-level SRMMU 16MB page mapping. */
911 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); in srmmu_paging_init()
928 srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE)); in srmmu_paging_init()
942 local_ops->tlb_all(); in srmmu_paging_init()
952 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP); in srmmu_paging_init()
993 mm->context = NO_CONTEXT; in init_new_context()
1001 if (mm->context != NO_CONTEXT) { in destroy_context()
1003 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); in destroy_context()
1006 free_context(mm->context); in destroy_context()
1008 mm->context = NO_CONTEXT; in destroy_context()
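init_new_context() starts every mm at NO_CONTEXT; destroy_context() undoes what switch_mm() wired up, pointing the context-table entry back at the kernel's swapper page tables before returning the context number to the pool. A hedged sketch of the teardown order suggested by the fragments above; the function name is illustrative and locking around free_context() is elided:

static void destroy_context_sketch(struct mm_struct *mm)	/* illustrative name */
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		free_context(mm->context);
		mm->context = NO_CONTEXT;
	}
}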
1034 vac_line_size = prom_getint(nd, "cache-line-size"); in init_vac_layout()
1035 if (vac_line_size == -1) { in init_vac_layout()
1036 prom_printf("can't determine cache-line-size, halting.\n"); in init_vac_layout()
1039 cache_lines = prom_getint(nd, "cache-nlines"); in init_vac_layout()
1040 if (cache_lines == -1) { in init_vac_layout()
1041 prom_printf("can't determine cache-nlines, halting.\n"); in init_vac_layout()
1085 #if 0 /* XXX I think this is bad news... -DaveM */ in poke_hypersparc()
1135 * The Swift branch folding logic is completely broken. At in poke_swift()
1138 * it is coming from user mode (it mis-executes the branch in in poke_swift()
1140 * hosing your machine which is completely unacceptable. Turn in poke_swift()
1192 * broken hardware, send it back and we'll give you in init_swift()
1240 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_range()
1248 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_cache_page()
1250 if (vma->vm_flags & VM_EXEC) in turbosparc_flush_cache_page()
1256 /* TurboSparc is copy-back if we turn it on, but this does not work. */
1291 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_range()
1298 FLUSH_BEGIN(vma->vm_mm) in turbosparc_flush_tlb_page()
1321 /* Write-back D-cache, emulate VLSI in poke_turbosparc()
1326 /* Do DVMA snooping in Dcache, Write-thru D-cache */ in poke_turbosparc()
1433 /* Must disable mixed-cmd mode here for other CPUs. */ in poke_viking()
1475 * Our workaround is to take a global spinlock around the TLB flushes,
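The comment describes the Viking/MXCC erratum workaround: every CPU funnels its TLB flushes through one global spinlock so that no two flushes overlap on the bus. A hedged sketch of that shape only; the lock and wrapper names are hypothetical, and viking_flush_tlb_all() stands in for the per-CPU flush primitive:

static DEFINE_SPINLOCK(viking_flush_lock);		/* hypothetical name */

static void viking_flush_tlb_all_serialized(void)	/* hypothetical name */
{
	unsigned long flags;

	spin_lock_irqsave(&viking_flush_lock, flags);
	viking_flush_tlb_all();
	spin_unlock_irqrestore(&viking_flush_lock, flags);
}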
1509 * "load from non-cacheable memory" interrupt bug. in init_viking()
1550 /* First, check for sparc-leon. */ in get_srmmu_type()
1572 prom_printf("Sparc-Linux Cypress support does not longer exit.\n"); in get_srmmu_type()
1597 if (!prom_getintdefault(cpunode, "psr-implementation", 1) && in get_srmmu_type()
1598 prom_getintdefault(cpunode, "psr-version", 1) == 5) { in get_srmmu_type()
1629 /* Local cross-calls. */
1632 xc1(local_ops->page_for_dma, page); in smp_flush_page_for_dma()
1633 local_ops->page_for_dma(page); in smp_flush_page_for_dma()
1638 xc0(local_ops->cache_all); in smp_flush_cache_all()
1639 local_ops->cache_all(); in smp_flush_cache_all()
1644 xc0(local_ops->tlb_all); in smp_flush_tlb_all()
1645 local_ops->tlb_all(); in smp_flush_tlb_all()
1655 if (mm->context != NO_CONTEXT) { in smp_flush_cache_mm()
1657 xc1(local_ops->cache_mm, (unsigned long)mm); in smp_flush_cache_mm()
1658 local_ops->cache_mm(mm); in smp_flush_cache_mm()
1664 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_mm()
1666 xc1(local_ops->tlb_mm, (unsigned long)mm); in smp_flush_tlb_mm()
1667 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) in smp_flush_tlb_mm()
1671 local_ops->tlb_mm(mm); in smp_flush_tlb_mm()
1679 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_range()
1681 if (mm->context != NO_CONTEXT) { in smp_flush_cache_range()
1683 xc3(local_ops->cache_range, (unsigned long)vma, start, in smp_flush_cache_range()
1685 local_ops->cache_range(vma, start, end); in smp_flush_cache_range()
1693 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_range()
1695 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_range()
1697 xc3(local_ops->tlb_range, (unsigned long)vma, start, in smp_flush_tlb_range()
1699 local_ops->tlb_range(vma, start, end); in smp_flush_tlb_range()
1705 struct mm_struct *mm = vma->vm_mm; in smp_flush_cache_page()
1707 if (mm->context != NO_CONTEXT) { in smp_flush_cache_page()
1709 xc2(local_ops->cache_page, (unsigned long)vma, page); in smp_flush_cache_page()
1710 local_ops->cache_page(vma, page); in smp_flush_cache_page()
1716 struct mm_struct *mm = vma->vm_mm; in smp_flush_tlb_page()
1718 if (mm->context != NO_CONTEXT) { in smp_flush_tlb_page()
1720 xc2(local_ops->tlb_page, (unsigned long)vma, page); in smp_flush_tlb_page()
1721 local_ops->tlb_page(vma, page); in smp_flush_tlb_page()
1731 * XXX This experiment failed, research further... -DaveM in smp_flush_page_to_ram()
1734 xc1(local_ops->page_to_ram, page); in smp_flush_page_to_ram()
1736 local_ops->page_to_ram(page); in smp_flush_page_to_ram()
1742 xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr); in smp_flush_sig_insns()
1743 local_ops->sig_insns(mm, insn_addr); in smp_flush_sig_insns()
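All of the smp_flush_* wrappers above follow one shape: skip mms with no context, cross-call the flush to the other CPUs via xcN(), then run the local implementation through local_ops. Assembled into one complete example; the cpumask bookkeeping that avoids cross-calling CPUs which never ran this mm is assumed from the surrounding file, and the mm_users shortcut seen in smp_flush_tlb_mm() is elided:

static void smp_flush_cache_mm_example(struct mm_struct *mm)	/* illustrative name */
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1(local_ops->cache_mm, (unsigned long)mm);	/* remote CPUs */
		local_ops->cache_mm(mm);				/* this CPU */
	}
}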
1772 smp_cachetlb_ops.tlb_all = local_ops->tlb_all; in load_mmu()
1773 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm; in load_mmu()
1774 smp_cachetlb_ops.tlb_range = local_ops->tlb_range; in load_mmu()
1775 smp_cachetlb_ops.tlb_page = local_ops->tlb_page; in load_mmu()
1780 smp_cachetlb_ops.cache_all = local_ops->cache_all; in load_mmu()
1781 smp_cachetlb_ops.cache_mm = local_ops->cache_mm; in load_mmu()
1782 smp_cachetlb_ops.cache_range = local_ops->cache_range; in load_mmu()
1783 smp_cachetlb_ops.cache_page = local_ops->cache_page; in load_mmu()
1785 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram; in load_mmu()
1786 smp_cachetlb_ops.sig_insns = local_ops->sig_insns; in load_mmu()
1787 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma; in load_mmu()