Lines matching +full:itc +full:-setting

1 // SPDX-License-Identifier: GPL-2.0
63 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
66 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
69 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
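The initializers above use GNU C range designators: every element from 0 through NR_CPUS-1 receives the same initializer. A standalone illustration of the construct (N is invented):

#define N 8
/* GNU extension: elements 0..N-1 all get the value 1. */
static const int all_ones[N] = { [0 ... N - 1] = 1 };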
127 current_thread_info()->new_child = 0; in smp_callin()
131 current->active_mm = &init_mm; in smp_callin()
156 * initiates the synchronization instead of the slave. -DaveM
177 t0 = tick_ops->get_tick(); in get_delta()
184 t1 = tick_ops->get_tick(); in get_delta()
186 if (t1 - t0 < best_t1 - best_t0) in get_delta()
190 *rt = best_t1 - best_t0; in get_delta()
191 *master = best_tm - best_t0; in get_delta()
197 return tcenter - best_tm; in get_delta()
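get_delta() above estimates the slave's offset from the master by sampling the master's tick between two local reads and keeping the sample with the tightest round trip; the midpoint of that round trip approximates when the master value was captured. A minimal userspace sketch of the same idea (mock_get_tick() and mock_read_master_tick() are made-up stand-ins for tick_ops->get_tick() and the master's published value; the skews are invented):

#include <stdint.h>

#define NUM_ITERS 5

/* Made-up clocks: local ticks advance by 3 per read, master runs 7 ahead. */
static uint64_t ticks;
static uint64_t mock_get_tick(void)         { return ticks += 3; }
static uint64_t mock_read_master_tick(void) { return ticks + 7; }

static int64_t estimate_delta(void)
{
	uint64_t best_t0 = 0, best_t1 = ~0ULL, best_tm = 0;
	int i;

	for (i = 0; i < NUM_ITERS; i++) {
		uint64_t t0 = mock_get_tick();
		uint64_t tm = mock_read_master_tick();
		uint64_t t1 = mock_get_tick();

		/* Keep the sample with the smallest round trip: it
		 * brackets the master read most tightly. */
		if (t1 - t0 < best_t1 - best_t0) {
			best_t0 = t0;
			best_t1 = t1;
			best_tm = tm;
		}
	}

	/* The round trip's midpoint approximates the local time at
	 * which the master sample was taken. */
	return (int64_t)(best_t0 / 2 + best_t1 / 2) - (int64_t)best_tm;
}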
209 long lat; /* estimate of itc adjustment latency */ in smp_synchronize_tick_client()
227 adjust_latency += -delta; in smp_synchronize_tick_client()
228 adj = -delta + adjust_latency/4; in smp_synchronize_tick_client()
230 adj = -delta; in smp_synchronize_tick_client()
232 tick_ops->add_tick(adj); in smp_synchronize_tick_client()
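Lines 227-232 implement the correction filter: the first pass applies the raw -delta, and later passes fold in a quarter of the accumulated residual as a crude latency compensation. A sketch of that loop, with made-up measure_delta()/add_tick() helpers in place of get_delta() and tick_ops->add_tick():

#include <stdint.h>

#define NUM_ROUNDS 64

static int64_t local_skew = -1234;	/* invented initial error */
static void add_tick(int64_t adj)	{ local_skew += adj; }
static int64_t measure_delta(void)	{ return local_skew; }

static void synchronize_tick_client(void)
{
	int64_t adjust_latency = 0, delta, adj;
	int i, done = 0;

	for (i = 0; i < NUM_ROUNDS && !done; i++) {
		delta = measure_delta();
		if (delta == 0) {
			done = 1;	/* converged */
			continue;
		}
		if (i > 0) {
			/* Accumulate the residual error; damp it by 4
			 * before folding it into the adjustment. */
			adjust_latency += -delta;
			adj = -delta + adjust_latency / 4;
		} else {
			adj = -delta;
		}
		add_tick(adj);
	}
}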
280 go[SLAVE] = tick_ops->get_tick(); in smp_synchronize_one_tick()
309 hdesc->cpu = cpu; in ldom_startcpu_cpuid()
310 hdesc->num_mappings = num_kernel_image_mappings; in ldom_startcpu_cpuid()
314 hdesc->fault_info_va = (unsigned long) &tb->fault_info; in ldom_startcpu_cpuid()
315 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); in ldom_startcpu_cpuid()
317 hdesc->thread_reg = thread_reg; in ldom_startcpu_cpuid()
322 for (i = 0; i < hdesc->num_mappings; i++) { in ldom_startcpu_cpuid()
323 hdesc->maps[i].vaddr = tte_vaddr; in ldom_startcpu_cpuid()
324 hdesc->maps[i].tte = tte_data; in ldom_startcpu_cpuid()
343 * 32 bits (I think), so to be safe we have it read the pointer
344 * contained here; that way we work on >4GB machines. -DaveM
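The comment at lines 343-344 describes an indirection: because the firmware callback may truncate its argument to 32 bits, the kernel hands over the address of a 32-bit-reachable slot that holds the full 64-bit pointer, and the new cpu dereferences the slot. An illustrative-only sketch (names and layout invented):

#include <stdint.h>

static uint64_t hdesc_slot;	/* imagine this lives below 4GB */

/* Store the real 64-bit pointer and hand out only the slot's
 * 32-bit-safe address. */
static uint32_t publish_descriptor(void *hdesc)
{
	hdesc_slot = (uint64_t)(uintptr_t)hdesc;
	return (uint32_t)(uintptr_t)&hdesc_slot;
}

/* On the freshly started cpu: widen the slot address back and
 * load the full pointer from it. */
static void *fetch_descriptor(uint32_t slot_addr)
{
	return (void *)(uintptr_t)*(uint64_t *)(uintptr_t)slot_addr;
}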
372 prom_startcpu(dp->phandle, entry, cookie); in smp_boot_one_cpu()
385 ret = -ENODEV; in smp_boot_one_cpu()
413 * ADDR 0x20) for the dummy read. -DaveM in spitfire_xcall_helper()
445 stuck -= 1; in spitfire_xcall_helper()
468 cpu_list = __va(tb->cpu_list_pa); in spitfire_xcall_deliver()
469 mondo = __va(tb->cpu_mondo_block_pa); in spitfire_xcall_deliver()
477 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
487 cpu_list = __va(tb->cpu_list_pa); in cheetah_xcall_deliver()
488 mondo = __va(tb->cpu_mondo_block_pa); in cheetah_xcall_deliver()
491 * busy/nack fields hard-coded by ITID number for this Ultra-III in cheetah_xcall_deliver()
576 if (!--stuck) in cheetah_xcall_deliver()
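Both the spitfire and cheetah delivery paths share one shape: spin on a dispatch status word, retry any NACKed targets once the busy bits clear, and bail out after a fixed budget (the !--stuck test at line 576). A schematic sketch with an invented status layout (low half busy bits, high half nack bits) and stub hardware accessors:

#include <stdint.h>

static uint64_t read_dispatch_status(void)  { return 0; }	/* stub */
static void resend_to_nacked(uint64_t nack) { (void)nack; }	/* stub */

static int deliver_with_timeout(void)
{
	long stuck = 100000;

	while (1) {
		uint64_t status = read_dispatch_status();

		if (!(status & 0xffffffffULL)) {	/* busy bits clear */
			uint64_t nack = status >> 32;

			if (!nack)
				return 0;		/* all cpus took it */
			resend_to_nacked(nack);		/* retry NACKs */
		}
		if (!--stuck)
			return -1;			/* hardware wedged */
	}
}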
629 /* Multi-cpu list version.
632 * Sometimes not all cpus receive the mondo, requiring us to re-send
638 * Here two in-between mondo check wait times are defined: 2 usec for
656 cpu_list = __va(tb->cpu_list_pa); in hypervisor_xcall_deliver()
668 tb->cpu_list_pa, in hypervisor_xcall_deliver()
669 tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
675 /* If the error is not one of these non-fatal ones, panic */ in hypervisor_xcall_deliver()
685 * Re-pack cpu_list with the cpus that remain to be retried at the in hypervisor_xcall_deliver()
686 * front - this simplifies tracking the truly stalled cpus. in hypervisor_xcall_deliver()
688 * The hypervisor indicates successful sends by setting in hypervisor_xcall_deliver()
762 this_cpu, ecpuerror_id - 1); in hypervisor_xcall_deliver()
765 this_cpu, enocpu_id - 1); in hypervisor_xcall_deliver()
772 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
776 /* some cpus were non-responsive to the cpu mondo */ in hypervisor_xcall_deliver()
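The re-pack step at lines 685-686 relies on the hypervisor overwriting each successfully delivered cpu_list entry (the full source comment gives the sentinel 0xffff); compacting the survivors to the front keeps a simple count of cpus still to retry. A minimal sketch of that compaction, assuming the 0xffff sentinel:

#include <stdint.h>

#define CPU_SENT 0xffffu	/* sentinel the hypervisor writes over
				 * entries it delivered */

/* Slide undelivered cpu ids to the front, in order, and return
 * how many remain to be retried. */
static int repack_cpu_list(uint16_t *cpu_list, int cnt)
{
	int i, sent = 0;

	for (i = 0; i < cnt; i++) {
		if (cpu_list[i] == CPU_SENT)
			sent++;
		else if (sent)
			cpu_list[i - sent] = cpu_list[i];
	}
	return cnt - sent;
}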
807 mondo = __va(tb->cpu_mondo_block_pa); in xcall_deliver()
813 cpu_list = __va(tb->cpu_list_pa); in xcall_deliver()
890 /* It is not valid to test "current->active_mm == mm" here. in tsb_sync()
896 if (tp->pgd_paddr == __pa(mm->pgd)) in tsb_sync()
1053 * mm->cpu_vm_mask is a bit mask of which cpus an address
1058 /* This is currently only used by the hugetlb arch pre-fault
1059 * hook on UltraSPARC-III+ and later when changing the pagesize
1064 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_mm()
1087 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); in tlb_pending_func()
1092 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_pending()
1111 unsigned long context = CTX_HWBITS(mm->context); in smp_flush_tlb_page()
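The flushes above all follow the pattern the comment at line 1053 sets up: mm->cpu_vm_mask records which cpus have run the address space, so the cross-call only needs to target those cpus before flushing locally. A simplified stand-alone sketch (the struct and helpers are invented, with the mask shrunk to one word):

#include <stdint.h>

#define NR_CPUS 64

struct mm_sketch {
	uint64_t cpu_vm_mask;	/* bit per cpu that has used this mm */
	uint32_t ctx;		/* hardware context number */
};

static void xcall_flush(int cpu, uint32_t ctx) { (void)cpu; (void)ctx; }
static void local_flush(uint32_t ctx)          { (void)ctx; }

static void flush_tlb_mm_sketch(struct mm_sketch *mm, int this_cpu)
{
	int cpu;

	/* Cross-call only the cpus that could hold stale entries. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != this_cpu && (mm->cpu_vm_mask & (1ULL << cpu)))
			xcall_flush(cpu, mm->ctx);

	local_flush(mm->ctx);	/* and this cpu last */
}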
1254 if (cpu_data(i).proc_id == -1) { in smp_fill_in_sib_core_maps()
1276 ret = -ENODEV; in __cpu_up()
1300 tb->cpu_mondo_pa, 0); in cpu_play_dead()
1302 tb->dev_mondo_pa, 0); in cpu_play_dead()
1304 tb->resum_mondo_pa, 0); in cpu_play_dead()
1306 tb->nonresum_mondo_pa, 0); in cpu_play_dead()
1340 c->core_id = 0; in __cpu_disable()
1341 c->proc_id = -1; in __cpu_disable()
1382 } while (--limit > 0); in __cpu_die()
1447 * - cpu poke not supported in arch_smp_send_reschedule()
1448 * - cpu not idle in arch_smp_send_reschedule()
1449 * - send_cpu_poke() returns with error in arch_smp_send_reschedule()
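The three bullets above are the fall-back conditions: an idle cpu is woken with a lightweight poke when possible, and a full reschedule IPI is sent otherwise. A sketch of that decision ladder (every helper here is an invented stand-in, including the poke):

#include <stdbool.h>

static bool poke_supported;			/* stand-in capability flag */
static bool cpu_is_idle(int cpu)      { (void)cpu; return false; }
static int  send_poke(int cpu)        { (void)cpu; return -1; }
static void send_resched_ipi(int cpu) { (void)cpu; }

static void send_reschedule_sketch(int cpu)
{
	if (poke_supported && cpu_is_idle(cpu) && send_poke(cpu) == 0)
		return;		/* poke woke the idle cpu, no IPI */

	/* poke unsupported, cpu not idle, or the poke failed */
	send_resched_ipi(cpu);
}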
1536 int rc = -EINVAL; in setup_per_cpu_areas()
1554 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; in setup_per_cpu_areas()
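The delta at line 1554 is the distance between the linker's static per-cpu section and wherever the percpu allocator actually placed the first copy; each cpu's final offset is that delta plus its unit offset within the allocated chunk. A sketch with stand-in symbols for the linker and allocator state:

#include <stdint.h>

#define NR_CPUS 64

static char  __per_cpu_start_sketch[1];		/* stand-in linker symbol */
static char *pcpu_base_addr_sketch = __per_cpu_start_sketch; /* stand-in base */
static unsigned long pcpu_unit_offsets_sketch[NR_CPUS];

static unsigned long per_cpu_offset[NR_CPUS];

static void fill_per_cpu_offsets(void)
{
	unsigned long delta = (unsigned long)pcpu_base_addr_sketch -
			      (unsigned long)__per_cpu_start_sketch;
	int cpu;

	/* A static per-cpu variable at &var is reached on cpu N at
	 * &var + per_cpu_offset[N]. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_offset[cpu] = delta + pcpu_unit_offsets_sketch[cpu];
}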