Lines Matching +full:tcs +full:- +full:wait

File-scope matches (source line number, then the matched line):

    1  // SPDX-License-Identifier: GPL-2.0-or-later
   34  #include <asm/r4k-timer.h>
   35  #include <asm/mips-cps.h>
   47  /* Number of TCs (or siblings in Intel speak) per CPU core */
   51  /* representing the TCs (or siblings in Intel speak) of each logical CPU */
   55  /* representing the core map of multi-core chips of each logical CPU */
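The three file-scope comments at lines 47, 51 and 55 annotate the sibling/core bookkeeping of the MIPS SMP code (the matches read like arch/mips/kernel/smp.c). As a hedged sketch only, the kind of declarations those comments sit above would look like this; the names, initial value and attributes are assumptions, not copied from the file:

    #include <linux/cache.h>
    #include <linux/cpumask.h>

    /* Sketch of the declarations the comments at lines 47/51/55 describe. */
    int smp_num_siblings = 1;                          /* TCs (siblings) per core      */
    cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;  /* TCs of each logical CPU      */
    cpumask_t cpu_core_map[NR_CPUS] __read_mostly;     /* core map of each logical CPU */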
in calculate_cpu_foreign_map():
  140  /* Re-calculate the mask */

in start_secondary():
  361  mp_ops->init_secondary();
  396  * irq will be enabled in ->smp_finish(), enabling it too early
  400  mp_ops->smp_finish();

in smp_prepare_cpus():
  430  current_thread_info()->cpu = 0;
  431  mp_ops->prepare_cpus(max_cpus);

in smp_prepare_boot_cpu():
  444  if (mp_ops->prepare_boot_cpu)
  445          mp_ops->prepare_boot_cpu();
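Every mp_ops-> call in this listing (init_secondary, smp_finish, prepare_cpus, prepare_boot_cpu above; boot_secondary and cleanup_dead_cpu further down) goes through one platform callback table, struct plat_smp_ops from asm/smp-ops.h in the kernel tree. The sketch below is limited to the members visible in these matches, with signatures inferred from the call sites rather than copied from that header:

    struct task_struct;

    /* Hedged sketch of the platform SMP callback table implied by the
     * mp_ops-> dereferences in this listing. Real implementations carry
     * more hooks (IPI send, hotplug, ...); signatures are inferred from
     * the matched call sites, not copied from asm/smp-ops.h. */
    struct plat_smp_ops_sketch {
            void (*init_secondary)(void);               /* per-CPU init on the new CPU       */
            void (*smp_finish)(void);                   /* last step; IRQs enabled here      */
            void (*prepare_cpus)(unsigned int max_cpus);
            void (*prepare_boot_cpu)(void);             /* optional, NULL-checked (line 444) */
            int  (*boot_secondary)(int cpu, struct task_struct *idle);
            void (*cleanup_dead_cpu)(unsigned int cpu); /* optional, NULL-checked (line 715) */
    };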
in __cpu_up():
  454  err = mp_ops->boot_secondary(cpu, tidle);
  458  /* Wait for CPU to start and be ready to sync counters */
  462  return -EIO;
  465  /* Wait for CPU to finish startup & mark itself online before return */
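The __cpu_up() fragments above show the boot-CPU side of bringing a secondary core online: kick it through mp_ops->boot_secondary(), wait for it to check in so the r4k counters can be synchronised, fail with -EIO if it never does, then wait again until the new CPU has marked itself online. A minimal sketch of that shape, assuming completion-based signalling; the completion names, the 1-second timeout and the function name are illustrative, not copied from the source:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <asm/r4k-timer.h>

    /* Posted by the secondary CPU at two stages of its startup; both
     * completion objects are assumptions made for this sketch. */
    static DECLARE_COMPLETION(cpu_starting_sketch);
    static DECLARE_COMPLETION(cpu_running_sketch);

    static int cpu_up_sketch(int cpu, struct task_struct *tidle)
    {
            /* mp_ops is the platform callback table from this listing. */
            int err = mp_ops->boot_secondary(cpu, tidle);

            if (err)
                    return err;

            /* Wait for CPU to start and be ready to sync counters */
            if (!wait_for_completion_timeout(&cpu_starting_sketch,
                                             msecs_to_jiffies(1000)))
                    return -EIO;

            /* Boot CPU drives the count synchronisation (asm/r4k-timer.h). */
            synchronise_count_master(cpu);

            /* Wait for CPU to finish startup & mark itself online before return */
            wait_for_completion(&cpu_running_sketch);
            return 0;
    }

In this sketch, the secondary half of the handshake would live in start_secondary() (lines 361-400 above), which completes the first completion before the counter sync and the second once it is online.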
File-scope comment block (TLB flush rationale):
  531  * multithreaded address spaces, inter-CPU interrupts have to be sent.
  532  * Another case where inter-CPU interrupts are required is when the target

in flush_tlb_mm():
  543  if (atomic_read(&mm->mm_users) == 0)
  550  * No need to worry about other CPUs - the ginvt in
  553  } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {

in flush_tlb_range_ipi():
  578  local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);

in flush_tlb_range():
  583  struct mm_struct *mm = vma->vm_mm;
  603  } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
  614  int exec = vma->vm_flags & VM_EXEC;

in flush_tlb_kernel_range_ipi():
  635  local_flush_tlb_kernel_range(fd->addr1, fd->addr2);

in flush_tlb_page_ipi():
  652  local_flush_tlb_page(fd->vma, fd->addr1);

in flush_tlb_page():
  663  write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
  670  } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
  671             (current->mm != vma->vm_mm)) {
  689  if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
  690          set_cpu_context(cpu, vma->vm_mm, 1);
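All of the flush_tlb_*() matches above follow the pattern the comment block at lines 531-532 describes: if the mm may be live on other CPUs, the flush arguments are bundled into a small struct and an inter-CPU interrupt runs the local flush on every CPU; otherwise the other CPUs' contexts are merely marked stale so they pick up a fresh ASID at the next switch_mm, and only the local TLB is flushed. A hedged sketch of that pattern follows; the struct layout and the *_ipi helper mirror the fd->vma/addr1/addr2 uses above, while the smp_call_function() broadcast, the preempt handling and the stale-context value are assumptions:

    #include <linux/mm_types.h>
    #include <linux/sched.h>
    #include <linux/smp.h>
    #include <asm/mmu_context.h>
    #include <asm/tlbflush.h>

    /* Argument bundle implied by the fd->vma / fd->addr1 / fd->addr2 uses above. */
    struct flush_tlb_data {
            struct vm_area_struct *vma;
            unsigned long addr1;
            unsigned long addr2;
    };

    /* Runs on every CPU that receives the flush IPI. */
    static void flush_tlb_range_ipi(void *info)
    {
            struct flush_tlb_data *fd = info;

            local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
    }

    /* Caller side, sketched: broadcast only when another CPU may hold live
     * translations for this mm; otherwise invalidate lazily and flush locally. */
    static void flush_tlb_range_sketch(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
    {
            struct mm_struct *mm = vma->vm_mm;

            preempt_disable();
            if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                    struct flush_tlb_data fd = {
                            .vma = vma, .addr1 = start, .addr2 = end,
                    };

                    /* Run the flush on all other CPUs and wait for completion. */
                    smp_call_function(flush_tlb_range_ipi, &fd, 1);
            } else {
                    unsigned int cpu;

                    /* Mark the mm's context stale elsewhere so a new ASID is
                     * allocated at the next switch_mm on that CPU (the real
                     * code derives the value from VM_EXEC, see line 614). */
                    for_each_online_cpu(cpu) {
                            if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                    set_cpu_context(cpu, mm, 1);
                    }
            }
            local_flush_tlb_range(vma, start, end);
            preempt_enable();
    }

Bundling the arguments into one struct is what lets smp_call_function() carry them through its single void *info parameter; the matched *_ipi helpers at lines 578, 635 and 652 all unpack the same kind of bundle.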
in arch_cpuhp_cleanup_dead_cpu():
  715  if (mp_ops->cleanup_dead_cpu)
  716          mp_ops->cleanup_dead_cpu(cpu);