// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller ([email protected])
 */
6
7 #include <linux/export.h>
8 #include <linux/kernel.h>
9 #include <linux/sched/mm.h>
10 #include <linux/sched/hotplug.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/threads.h>
14 #include <linux/smp.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/spinlock.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/cache.h>
23 #include <linux/jiffies.h>
24 #include <linux/profile.h>
25 #include <linux/memblock.h>
26 #include <linux/vmalloc.h>
27 #include <linux/ftrace.h>
28 #include <linux/cpu.h>
29 #include <linux/slab.h>
30 #include <linux/kgdb.h>
31
32 #include <asm/head.h>
33 #include <asm/ptrace.h>
34 #include <linux/atomic.h>
35 #include <asm/tlbflush.h>
36 #include <asm/mmu_context.h>
37 #include <asm/cpudata.h>
38 #include <asm/hvtramp.h>
39 #include <asm/io.h>
40 #include <asm/timer.h>
41 #include <asm/setup.h>
42
43 #include <asm/irq.h>
44 #include <asm/irq_regs.h>
45 #include <asm/page.h>
46 #include <asm/oplib.h>
47 #include <linux/uaccess.h>
48 #include <asm/starfire.h>
49 #include <asm/tlb.h>
50 #include <asm/pgalloc.h>
51 #include <asm/sections.h>
52 #include <asm/prom.h>
53 #include <asm/mdesc.h>
54 #include <asm/ldc.h>
55 #include <asm/hypervisor.h>
56 #include <asm/pcr.h>
57
58 #include "cpumap.h"
59 #include "kernel.h"
60
61 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
62 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
63 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
64
65 cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
66 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
67
68 cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
69 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
70
71 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
72 EXPORT_SYMBOL(cpu_core_map);
73 EXPORT_SYMBOL(cpu_core_sib_map);
74 EXPORT_SYMBOL(cpu_core_sib_cache_map);
75
76 static cpumask_t smp_commenced_mask;
77
78 static DEFINE_PER_CPU(bool, poke);
79 static bool cpu_poke;
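/* cpu_poke is set in smp_init_cpu_poke() when the sun4v hypervisor
 * advertises the CPU_POKE API (HV_GRP_CORE >= 1.6).  The per-cpu
 * 'poke' flag marks a pending poke that the target cpu consumes in
 * scheduler_poke() before raising its local reschedule softint.
 */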
80
void smp_info(struct seq_file *m)
82 {
83 int i;
84
85 seq_printf(m, "State:\n");
86 for_each_online_cpu(i)
87 seq_printf(m, "CPU%d:\t\tonline\n", i);
88 }
89
void smp_bogo(struct seq_file *m)
91 {
92 int i;
93
94 for_each_online_cpu(i)
95 seq_printf(m,
96 "Cpu%dClkTck\t: %016lx\n",
97 i, cpu_data(i).clock_tick);
98 }
99
100 extern void setup_sparc64_timer(void);
101
102 static volatile unsigned long callin_flag = 0;
103
void smp_callin(void)
105 {
106 int cpuid = hard_smp_processor_id();
107
108 __local_per_cpu_offset = __per_cpu_offset(cpuid);
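/* %g5 now points at this cpu's per-cpu area (see setup_per_cpu_areas()),
 * so per-cpu accesses work from here on.
 */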
109
110 if (tlb_type == hypervisor)
111 sun4v_ktsb_register();
112
113 __flush_tlb_all();
114
115 setup_sparc64_timer();
116
117 if (cheetah_pcache_forced_on)
118 cheetah_enable_pcache();
119
120 callin_flag = 1;
121 __asm__ __volatile__("membar #Sync\n\t"
122 "flush %%g6" : : : "memory");
123
124 /* Clear this or we will die instantly when we
125 * schedule back to this idler...
126 */
127 current_thread_info()->new_child = 0;
128
129 /* Attach to the address space of init_task. */
130 mmgrab(&init_mm);
131 current->active_mm = &init_mm;
132
133 /* inform the notifiers about the new cpu */
134 notify_cpu_starting(cpuid);
135
136 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
137 rmb();
138
139 set_cpu_online(cpuid, true);
140
141 local_irq_enable();
142
143 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
144 }
145
void cpu_panic(void)
147 {
148 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
149 panic("SMP bolixed\n");
150 }
151
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
158
159 #define MASTER 0
160 #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
161
162 #define NUM_ROUNDS 64 /* magic value */
163 #define NUM_ITERS 5 /* likewise */
164
165 static DEFINE_RAW_SPINLOCK(itc_sync_lock);
166 static unsigned long go[SLAVE + 1];
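/* go[MASTER] and go[SLAVE] are a cache line apart
 * (SLAVE == SMP_CACHE_BYTES / sizeof(unsigned long)), so the master
 * and the slave each spin on a flag in their own cache line during
 * the handshake below.
 */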
167
168 #define DEBUG_TICK_SYNC 0
169
static inline long get_delta (long *rt, long *master)
171 {
172 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
173 unsigned long tcenter, t0, t1, tm;
174 unsigned long i;
175
176 for (i = 0; i < NUM_ITERS; i++) {
177 t0 = tick_ops->get_tick();
178 go[MASTER] = 1;
179 membar_safe("#StoreLoad");
180 while (!(tm = go[SLAVE]))
181 rmb();
182 go[SLAVE] = 0;
183 wmb();
184 t1 = tick_ops->get_tick();
185
186 if (t1 - t0 < best_t1 - best_t0)
187 best_t0 = t0, best_t1 = t1, best_tm = tm;
188 }
189
190 *rt = best_t1 - best_t0;
191 *master = best_tm - best_t0;
192
193 /* average best_t0 and best_t1 without overflow: */
194 tcenter = (best_t0/2 + best_t1/2);
195 if (best_t0 % 2 + best_t1 % 2 == 2)
196 tcenter++;
197 return tcenter - best_tm;
198 }
199
void smp_synchronize_tick_client(void)
201 {
202 long i, delta, adj, adjust_latency = 0, done = 0;
203 unsigned long flags, rt, master_time_stamp;
204 #if DEBUG_TICK_SYNC
205 struct {
206 long rt; /* roundtrip time */
207 long master; /* master's timestamp */
208 long diff; /* difference between midpoint and master's timestamp */
209 long lat; /* estimate of itc adjustment latency */
210 } t[NUM_ROUNDS];
211 #endif
212
213 go[MASTER] = 1;
214
215 while (go[MASTER])
216 rmb();
217
218 local_irq_save(flags);
219 {
220 for (i = 0; i < NUM_ROUNDS; i++) {
221 delta = get_delta(&rt, &master_time_stamp);
222 if (delta == 0)
223 done = 1; /* let's lock on to this... */
224
225 if (!done) {
226 if (i > 0) {
227 adjust_latency += -delta;
228 adj = -delta + adjust_latency/4;
229 } else
230 adj = -delta;
231
232 tick_ops->add_tick(adj);
233 }
234 #if DEBUG_TICK_SYNC
235 t[i].rt = rt;
236 t[i].master = master_time_stamp;
237 t[i].diff = delta;
238 t[i].lat = adjust_latency/4;
239 #endif
240 }
241 }
242 local_irq_restore(flags);
243
244 #if DEBUG_TICK_SYNC
245 for (i = 0; i < NUM_ROUNDS; i++)
246 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
247 t[i].rt, t[i].master, t[i].diff, t[i].lat);
248 #endif
249
250 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
251 "(last diff %ld cycles, maxerr %lu cycles)\n",
252 smp_processor_id(), delta, rt);
253 }
254
255 static void smp_start_sync_tick_client(int cpu);
256
static void smp_synchronize_one_tick(int cpu)
258 {
259 unsigned long flags, i;
260
261 go[MASTER] = 0;
262
263 smp_start_sync_tick_client(cpu);
264
265 /* wait for client to be ready */
266 while (!go[MASTER])
267 rmb();
268
269 /* now let the client proceed into his loop */
270 go[MASTER] = 0;
271 membar_safe("#StoreLoad");
272
273 raw_spin_lock_irqsave(&itc_sync_lock, flags);
274 {
275 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
276 while (!go[MASTER])
277 rmb();
278 go[MASTER] = 0;
279 wmb();
280 go[SLAVE] = tick_ops->get_tick();
281 membar_safe("#StoreLoad");
282 }
283 }
284 raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
285 }
286
287 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
290 {
291 extern unsigned long sparc64_ttable_tl0;
292 extern unsigned long kern_locked_tte_data;
293 struct hvtramp_descr *hdesc;
294 unsigned long trampoline_ra;
295 struct trap_per_cpu *tb;
296 u64 tte_vaddr, tte_data;
297 unsigned long hv_err;
298 int i;
299
300 hdesc = kzalloc(struct_size(hdesc, maps, num_kernel_image_mappings),
301 GFP_KERNEL);
302 if (!hdesc) {
303 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
304 "hvtramp_descr.\n");
305 return;
306 }
307 *descrp = hdesc;
308
309 hdesc->cpu = cpu;
310 hdesc->num_mappings = num_kernel_image_mappings;
311
312 tb = &trap_block[cpu];
313
314 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
315 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
316
317 hdesc->thread_reg = thread_reg;
318
319 tte_vaddr = (unsigned long) KERNBASE;
320 tte_data = kern_locked_tte_data;
321
322 for (i = 0; i < hdesc->num_mappings; i++) {
323 hdesc->maps[i].vaddr = tte_vaddr;
324 hdesc->maps[i].tte = tte_data;
325 tte_vaddr += 0x400000;
326 tte_data += 0x400000;
327 }
328
329 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
330
331 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
332 kimage_addr_to_ra(&sparc64_ttable_tl0),
333 __pa(hdesc));
334 if (hv_err)
335 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
336 "gives error %lu\n", hv_err);
337 }
338 #endif
339
340 extern unsigned long sparc64_cpu_startup;
341
342 /* The OBP cpu startup callback truncates the 3rd arg cookie to
343 * 32-bits (I think) so to be safe we have it read the pointer
344 * contained here so we work on >4GB machines. -DaveM
345 */
346 static struct thread_info *cpu_new_thread = NULL;
347
static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
349 {
350 unsigned long entry =
351 (unsigned long)(&sparc64_cpu_startup);
352 unsigned long cookie =
353 (unsigned long)(&cpu_new_thread);
354 void *descr = NULL;
355 int timeout, ret;
356
357 callin_flag = 0;
358 cpu_new_thread = task_thread_info(idle);
359
360 if (tlb_type == hypervisor) {
361 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
362 if (ldom_domaining_enabled)
363 ldom_startcpu_cpuid(cpu,
364 (unsigned long) cpu_new_thread,
365 &descr);
366 else
367 #endif
368 prom_startcpu_cpuid(cpu, entry, cookie);
369 } else {
370 struct device_node *dp = of_find_node_by_cpuid(cpu);
371
372 prom_startcpu(dp->phandle, entry, cookie);
373 }
374
375 for (timeout = 0; timeout < 50000; timeout++) {
376 if (callin_flag)
377 break;
378 udelay(100);
379 }
380
381 if (callin_flag) {
382 ret = 0;
383 } else {
384 printk("Processor %d is stuck.\n", cpu);
385 ret = -ENODEV;
386 }
387 cpu_new_thread = NULL;
388
389 kfree(descr);
390
391 return ret;
392 }
393
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
395 {
396 u64 result, target;
397 int stuck, tmp;
398
399 if (this_is_starfire) {
400 /* map to real upaid */
401 cpu = (((cpu & 0x3c) << 1) |
402 ((cpu & 0x40) >> 4) |
403 (cpu & 0x3));
404 }
405
406 target = (cpu << 14) | 0x70;
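/* The dispatch address encodes the target's UPA module ID in bits
 * <18:14>; offset 0x70 is the interrupt dispatch register, while the
 * three data words go to offsets 0x40/0x50/0x60 in the stxa sequence
 * below.
 */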
407 again:
408 /* Ok, this is the real Spitfire Errata #54.
409 * One must read back from a UDB internal register
410 * after writes to the UDB interrupt dispatch, but
411 * before the membar Sync for that write.
412 * So we use the high UDB control register (ASI 0x7f,
413 * ADDR 0x20) for the dummy read. -DaveM
414 */
415 tmp = 0x40;
416 __asm__ __volatile__(
417 "wrpr %1, %2, %%pstate\n\t"
418 "stxa %4, [%0] %3\n\t"
419 "stxa %5, [%0+%8] %3\n\t"
420 "add %0, %8, %0\n\t"
421 "stxa %6, [%0+%8] %3\n\t"
422 "membar #Sync\n\t"
423 "stxa %%g0, [%7] %3\n\t"
424 "membar #Sync\n\t"
425 "mov 0x20, %%g1\n\t"
426 "ldxa [%%g1] 0x7f, %%g0\n\t"
427 "membar #Sync"
428 : "=r" (tmp)
429 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
430 "r" (data0), "r" (data1), "r" (data2), "r" (target),
431 "r" (0x10), "0" (tmp)
432 : "g1");
433
434 /* NOTE: PSTATE_IE is still clear. */
435 stuck = 100000;
436 do {
437 __asm__ __volatile__("ldxa [%%g0] %1, %0"
438 : "=r" (result)
439 : "i" (ASI_INTR_DISPATCH_STAT));
440 if (result == 0) {
441 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
442 : : "r" (pstate));
443 return;
444 }
445 stuck -= 1;
446 if (stuck == 0)
447 break;
448 } while (result & 0x1);
449 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
450 : : "r" (pstate));
451 if (stuck == 0) {
452 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
453 smp_processor_id(), result);
454 } else {
455 udelay(2);
456 goto again;
457 }
458 }
459
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
461 {
462 u64 *mondo, data0, data1, data2;
463 u16 *cpu_list;
464 u64 pstate;
465 int i;
466
467 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
468 cpu_list = __va(tb->cpu_list_pa);
469 mondo = __va(tb->cpu_mondo_block_pa);
470 data0 = mondo[0];
471 data1 = mondo[1];
472 data2 = mondo[2];
473 for (i = 0; i < cnt; i++)
474 spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
475 }
476
/* Cheetah now allows us to send the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However, we do take
 * advantage of the new pipelining feature (i.e. dispatch to multiple
 * cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
482 {
483 int nack_busy_id, is_jbus, need_more;
484 u64 *mondo, pstate, ver, busy_mask;
485 u16 *cpu_list;
486
487 cpu_list = __va(tb->cpu_list_pa);
488 mondo = __va(tb->cpu_mondo_block_pa);
489
490 /* Unfortunately, someone at Sun had the brilliant idea to make the
491 * busy/nack fields hard-coded by ITID number for this Ultra-III
492 * derivative processor.
493 */
494 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
495 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
496 (ver >> 32) == __SERRANO_ID);
497
498 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
499
500 retry:
501 need_more = 0;
502 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
503 : : "r" (pstate), "i" (PSTATE_IE));
504
505 /* Setup the dispatch data registers. */
506 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
507 "stxa %1, [%4] %6\n\t"
508 "stxa %2, [%5] %6\n\t"
509 "membar #Sync\n\t"
510 : /* no outputs */
511 : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
512 "r" (0x40), "r" (0x50), "r" (0x60),
513 "i" (ASI_INTR_W));
514
515 nack_busy_id = 0;
516 busy_mask = 0;
517 {
518 int i;
519
520 for (i = 0; i < cnt; i++) {
521 u64 target, nr;
522
523 nr = cpu_list[i];
524 if (nr == 0xffff)
525 continue;
526
527 target = (nr << 14) | 0x70;
528 if (is_jbus) {
529 busy_mask |= (0x1UL << (nr * 2));
530 } else {
531 target |= (nack_busy_id << 24);
532 busy_mask |= (0x1UL <<
533 (nack_busy_id * 2));
534 }
535 __asm__ __volatile__(
536 "stxa %%g0, [%0] %1\n\t"
537 "membar #Sync\n\t"
538 : /* no outputs */
539 : "r" (target), "i" (ASI_INTR_W));
540 nack_busy_id++;
541 if (nack_busy_id == 32) {
542 need_more = 1;
543 break;
544 }
545 }
546 }
547
548 /* Now, poll for completion. */
549 {
550 u64 dispatch_stat, nack_mask;
551 long stuck;
552
553 stuck = 100000 * nack_busy_id;
554 nack_mask = busy_mask << 1;
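/* In the dispatch status register each outstanding dispatch owns a
 * pair of bits: the even bit is BUSY, the odd bit just above it is
 * NACK.  busy_mask was built from the even bits, so shifting it left
 * by one yields the corresponding NACK mask.
 */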
555 do {
556 __asm__ __volatile__("ldxa [%%g0] %1, %0"
557 : "=r" (dispatch_stat)
558 : "i" (ASI_INTR_DISPATCH_STAT));
559 if (!(dispatch_stat & (busy_mask | nack_mask))) {
560 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
561 : : "r" (pstate));
562 if (unlikely(need_more)) {
563 int i, this_cnt = 0;
564 for (i = 0; i < cnt; i++) {
565 if (cpu_list[i] == 0xffff)
566 continue;
567 cpu_list[i] = 0xffff;
568 this_cnt++;
569 if (this_cnt == 32)
570 break;
571 }
572 goto retry;
573 }
574 return;
575 }
576 if (!--stuck)
577 break;
578 } while (dispatch_stat & busy_mask);
579
580 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
581 : : "r" (pstate));
582
583 if (dispatch_stat & busy_mask) {
584 /* Busy bits will not clear, continue instead
585 * of freezing up on this cpu.
586 */
587 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
588 smp_processor_id(), dispatch_stat);
589 } else {
590 int i, this_busy_nack = 0;
591
592 /* Delay some random time with interrupts enabled
593 * to prevent deadlock.
594 */
595 udelay(2 * nack_busy_id);
596
597 /* Clear out the mask bits for cpus which did not
598 * NACK us.
599 */
600 for (i = 0; i < cnt; i++) {
601 u64 check_mask, nr;
602
603 nr = cpu_list[i];
604 if (nr == 0xffff)
605 continue;
606
607 if (is_jbus)
608 check_mask = (0x2UL << (2*nr));
609 else
610 check_mask = (0x2UL <<
611 this_busy_nack);
612 if ((dispatch_stat & check_mask) == 0)
613 cpu_list[i] = 0xffff;
614 this_busy_nack += 2;
615 if (this_busy_nack == 64)
616 break;
617 }
618
619 goto retry;
620 }
621 }
622 }
623
624 #define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
625 #define MONDO_USEC_WAIT_MIN 2
626 #define MONDO_USEC_WAIT_MAX 100
627 #define MONDO_RETRY_LIMIT 500000
628
/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until all cpus have received it, or until the cpus are truly
 * stuck and unable to receive mondos, and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the
 * hypervisor to perform a guest service, such as PCIe error handling.
 * Considering that service time, an overall wait of 1 second is
 * reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined: 2 usec for a
 * single cpu (quick turn around) and up to 100 usec for a large cpu
 * count.  Delivering the mondo to a large number of cpus can take
 * longer, so we keep adjusting the retry count as long as the target
 * cpus are making forward progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
644 {
645 int this_cpu, tot_cpus, prev_sent, i, rem;
646 int usec_wait, retries, tot_retries;
647 u16 first_cpu = 0xffff;
648 unsigned long xc_rcvd = 0;
649 unsigned long status;
650 int ecpuerror_id = 0;
651 int enocpu_id = 0;
652 u16 *cpu_list;
653 u16 cpu;
654
655 this_cpu = smp_processor_id();
656 cpu_list = __va(tb->cpu_list_pa);
657 usec_wait = cnt * MONDO_USEC_WAIT_MIN;
658 if (usec_wait > MONDO_USEC_WAIT_MAX)
659 usec_wait = MONDO_USEC_WAIT_MAX;
660 retries = tot_retries = 0;
661 tot_cpus = cnt;
662 prev_sent = 0;
663
664 do {
665 int n_sent, mondo_delivered, target_cpu_busy;
666
667 status = sun4v_cpu_mondo_send(cnt,
668 tb->cpu_list_pa,
669 tb->cpu_mondo_block_pa);
670
671 /* HV_EOK means all cpus received the xcall, we're done. */
672 if (likely(status == HV_EOK))
673 goto xcall_done;
674
/* If the error is anything other than these non-fatal errors, panic. */
676 if (unlikely((status != HV_EWOULDBLOCK) &&
677 (status != HV_ECPUERROR) &&
678 (status != HV_ENOCPU)))
679 goto fatal_errors;
680
/* First, see if we made any forward progress.
 *
 * Go through the cpu_list, count the target cpus that have
 * received our mondo (n_sent), and those that did not (rem).
 * Re-pack cpu_list with the cpus that remain to be retried at
 * the front - this simplifies tracking the truly stalled cpus.
 *
 * The hypervisor indicates successful sends by setting
 * cpu list entries to the value 0xffff.
 *
 * EWOULDBLOCK means some target cpus did not receive the
 * mondo and retrying usually helps.
 *
 * ECPUERROR means at least one target cpu is in an error
 * state, and it's usually safe to skip the faulty cpu and
 * retry.
 *
 * ENOCPU means one of the target cpus doesn't belong to the
 * domain, perhaps because it was offlined, which is unexpected
 * but not fatal; it's okay to skip the offlined cpu.
 */
701 rem = 0;
702 n_sent = 0;
703 for (i = 0; i < cnt; i++) {
704 cpu = cpu_list[i];
705 if (likely(cpu == 0xffff)) {
706 n_sent++;
707 } else if ((status == HV_ECPUERROR) &&
708 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
709 ecpuerror_id = cpu + 1;
710 } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
711 enocpu_id = cpu + 1;
712 } else {
713 cpu_list[rem++] = cpu;
714 }
715 }
716
717 /* No cpu remained, we're done. */
718 if (rem == 0)
719 break;
720
721 /* Otherwise, update the cpu count for retry. */
722 cnt = rem;
723
724 /* Record the overall number of mondos received by the
725 * first of the remaining cpus.
726 */
727 if (first_cpu != cpu_list[0]) {
728 first_cpu = cpu_list[0];
729 xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
730 }
731
732 /* Was any mondo delivered successfully? */
733 mondo_delivered = (n_sent > prev_sent);
734 prev_sent = n_sent;
735
736 /* or, was any target cpu busy processing other mondos? */
737 target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
738 xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
739
740 /* Retry count is for no progress. If we're making progress,
741 * reset the retry count.
742 */
743 if (likely(mondo_delivered || target_cpu_busy)) {
744 tot_retries += retries;
745 retries = 0;
746 } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
747 goto fatal_mondo_timeout;
748 }
749
750 /* Delay a little bit to let other cpus catch up on
751 * their cpu mondo queue work.
752 */
753 if (!mondo_delivered)
754 udelay(usec_wait);
755
756 retries++;
757 } while (1);
758
759 xcall_done:
760 if (unlikely(ecpuerror_id > 0)) {
761 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
762 this_cpu, ecpuerror_id - 1);
763 } else if (unlikely(enocpu_id > 0)) {
764 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
765 this_cpu, enocpu_id - 1);
766 }
767 return;
768
769 fatal_errors:
770 /* fatal errors include bad alignment, etc */
771 pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
772 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
773 panic("Unexpected SUN4V mondo error %lu\n", status);
774
775 fatal_mondo_timeout:
776 /* some cpus being non-responsive to the cpu mondo */
777 pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
778 this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
779 panic("SUN4V mondo timeout panic\n");
780 }
781
782 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
783
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
785 {
786 struct trap_per_cpu *tb;
787 int this_cpu, i, cnt;
788 unsigned long flags;
789 u16 *cpu_list;
790 u64 *mondo;
791
792 /* We have to do this whole thing with interrupts fully disabled.
793 * Otherwise if we send an xcall from interrupt context it will
794 * corrupt both our mondo block and cpu list state.
795 *
796 * One consequence of this is that we cannot use timeout mechanisms
797 * that depend upon interrupts being delivered locally. So, for
798 * example, we cannot sample jiffies and expect it to advance.
799 *
800 * Fortunately, udelay() uses %stick/%tick so we can use that.
801 */
802 local_irq_save(flags);
803
804 this_cpu = smp_processor_id();
805 tb = &trap_block[this_cpu];
806
807 mondo = __va(tb->cpu_mondo_block_pa);
808 mondo[0] = data0;
809 mondo[1] = data1;
810 mondo[2] = data2;
811 wmb();
812
813 cpu_list = __va(tb->cpu_list_pa);
814
815 /* Setup the initial cpu list. */
816 cnt = 0;
817 for_each_cpu(i, mask) {
818 if (i == this_cpu || !cpu_online(i))
819 continue;
820 cpu_list[cnt++] = i;
821 }
822
823 if (cnt)
824 xcall_deliver_impl(tb, cnt);
825
826 local_irq_restore(flags);
827 }
828
829 /* Send cross call to all processors mentioned in MASK_P
830 * except self. Really, there are only two cases currently,
831 * "cpu_online_mask" and "mm_cpumask(mm)".
832 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
834 {
835 u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
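/* The first mondo word packs the MMU context in the upper 32 bits
 * and the low 32 bits of the xcall handler's kernel-image address in
 * the lower half; the receiving trap vector unpacks both.
 */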
836
837 xcall_deliver(data0, data1, data2, mask);
838 }
839
840 /* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
842 {
843 smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
844 }
845
846 extern unsigned long xcall_sync_tick;
847
static void smp_start_sync_tick_client(int cpu)
849 {
850 xcall_deliver((u64) &xcall_sync_tick, 0, 0,
851 cpumask_of(cpu));
852 }
853
854 extern unsigned long xcall_call_function;
855
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
857 {
858 xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
859 }
860
861 extern unsigned long xcall_call_function_single;
862
void arch_send_call_function_single_ipi(int cpu)
864 {
865 xcall_deliver((u64) &xcall_call_function_single, 0, 0,
866 cpumask_of(cpu));
867 }
868
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
870 {
871 clear_softint(1 << irq);
872 irq_enter();
873 generic_smp_call_function_interrupt();
874 irq_exit();
875 }
876
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
878 {
879 clear_softint(1 << irq);
880 irq_enter();
881 generic_smp_call_function_single_interrupt();
882 irq_exit();
883 }
884
static void tsb_sync(void *info)
886 {
887 struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
888 struct mm_struct *mm = info;
889
890 /* It is not valid to test "current->active_mm == mm" here.
891 *
892 * The value of "current" is not changed atomically with
893 * switch_mm(). But that's OK, we just need to check the
894 * current cpu's trap block PGD physical address.
895 */
896 if (tp->pgd_paddr == __pa(mm->pgd))
897 tsb_context_switch(mm);
898 }
899
void smp_tsb_sync(struct mm_struct *mm)
901 {
902 smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
903 }
904
905 extern unsigned long xcall_flush_tlb_mm;
906 extern unsigned long xcall_flush_tlb_page;
907 extern unsigned long xcall_flush_tlb_kernel_range;
908 extern unsigned long xcall_fetch_glob_regs;
909 extern unsigned long xcall_fetch_glob_pmu;
910 extern unsigned long xcall_fetch_glob_pmu_n4;
911 extern unsigned long xcall_receive_signal;
912 extern unsigned long xcall_new_mmu_context_version;
913 #ifdef CONFIG_KGDB
914 extern unsigned long xcall_kgdb_capture;
915 #endif
916
917 #ifdef DCACHE_ALIASING_POSSIBLE
918 extern unsigned long xcall_flush_dcache_page_cheetah;
919 #endif
920 extern unsigned long xcall_flush_dcache_page_spitfire;
921
static inline void __local_flush_dcache_folio(struct folio *folio)
923 {
924 unsigned int i, nr = folio_nr_pages(folio);
925
926 #ifdef DCACHE_ALIASING_POSSIBLE
927 for (i = 0; i < nr; i++)
928 __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
929 ((tlb_type == spitfire) &&
930 folio_flush_mapping(folio) != NULL));
931 #else
932 if (folio_flush_mapping(folio) != NULL &&
933 tlb_type == spitfire) {
unsigned long pfn = folio_pfn(folio);
935 for (i = 0; i < nr; i++)
936 __flush_icache_page((pfn + i) * PAGE_SIZE);
937 }
938 #endif
939 }
940
void smp_flush_dcache_folio_impl(struct folio *folio, int cpu)
942 {
943 int this_cpu;
944
945 if (tlb_type == hypervisor)
946 return;
947
948 #ifdef CONFIG_DEBUG_DCFLUSH
949 atomic_inc(&dcpage_flushes);
950 #endif
951
952 this_cpu = get_cpu();
953
954 if (cpu == this_cpu) {
955 __local_flush_dcache_folio(folio);
956 } else if (cpu_online(cpu)) {
957 void *pg_addr = folio_address(folio);
958 u64 data0 = 0;
959
960 if (tlb_type == spitfire) {
961 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
962 if (folio_flush_mapping(folio) != NULL)
963 data0 |= ((u64)1 << 32);
964 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
965 #ifdef DCACHE_ALIASING_POSSIBLE
966 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
967 #endif
968 }
969 if (data0) {
970 unsigned int i, nr = folio_nr_pages(folio);
971
972 for (i = 0; i < nr; i++) {
973 xcall_deliver(data0, __pa(pg_addr),
974 (u64) pg_addr, cpumask_of(cpu));
975 #ifdef CONFIG_DEBUG_DCFLUSH
976 atomic_inc(&dcpage_flushes_xcall);
977 #endif
978 pg_addr += PAGE_SIZE;
979 }
980 }
981 }
982
983 put_cpu();
984 }
985
void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio)
987 {
988 void *pg_addr;
989 u64 data0;
990
991 if (tlb_type == hypervisor)
992 return;
993
994 preempt_disable();
995
996 #ifdef CONFIG_DEBUG_DCFLUSH
997 atomic_inc(&dcpage_flushes);
998 #endif
999 data0 = 0;
1000 pg_addr = folio_address(folio);
1001 if (tlb_type == spitfire) {
1002 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
1003 if (folio_flush_mapping(folio) != NULL)
1004 data0 |= ((u64)1 << 32);
1005 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1006 #ifdef DCACHE_ALIASING_POSSIBLE
1007 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
1008 #endif
1009 }
1010 if (data0) {
1011 unsigned int i, nr = folio_nr_pages(folio);
1012
1013 for (i = 0; i < nr; i++) {
1014 xcall_deliver(data0, __pa(pg_addr),
1015 (u64) pg_addr, cpu_online_mask);
1016 #ifdef CONFIG_DEBUG_DCFLUSH
1017 atomic_inc(&dcpage_flushes_xcall);
1018 #endif
1019 pg_addr += PAGE_SIZE;
1020 }
1021 }
1022 __local_flush_dcache_folio(folio);
1023
1024 preempt_enable();
1025 }
1026
1027 #ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
1029 {
1030 smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1031 }
1032 #endif
1033
void smp_fetch_global_regs(void)
1035 {
1036 smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1037 }
1038
void smp_fetch_global_pmu(void)
1040 {
1041 if (tlb_type == hypervisor &&
1042 sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1043 smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1044 else
1045 smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1046 }
1047
1048 /* We know that the window frames of the user have been flushed
1049 * to the stack before we get here because all callers of us
1050 * are flush_tlb_*() routines, and these run after flush_cache_*()
1051 * which performs the flushw.
1052 *
1053 * mm->cpu_vm_mask is a bit mask of which cpus an address
1054 * space has (potentially) executed on, this is the heuristic
1055 * we use to limit cross calls.
1056 */
1057
1058 /* This currently is only used by the hugetlb arch pre-fault
1059 * hook on UltraSPARC-III+ and later when changing the pagesize
1060 * bits of the context register for an address space.
1061 */
void smp_flush_tlb_mm(struct mm_struct *mm)
1063 {
1064 u32 ctx = CTX_HWBITS(mm->context);
1065
1066 get_cpu();
1067
1068 smp_cross_call_masked(&xcall_flush_tlb_mm,
1069 ctx, 0, 0,
1070 mm_cpumask(mm));
1071
1072 __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1073
1074 put_cpu();
1075 }
1076
1077 struct tlb_pending_info {
1078 unsigned long ctx;
1079 unsigned long nr;
1080 unsigned long *vaddrs;
1081 };
1082
static void tlb_pending_func(void *info)
1084 {
1085 struct tlb_pending_info *t = info;
1086
1087 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1088 }
1089
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1091 {
1092 u32 ctx = CTX_HWBITS(mm->context);
1093 struct tlb_pending_info info;
1094
1095 get_cpu();
1096
1097 info.ctx = ctx;
1098 info.nr = nr;
1099 info.vaddrs = vaddrs;
1100
1101 smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1102 &info, 1);
1103
1104 __flush_tlb_pending(ctx, nr, vaddrs);
1105
1106 put_cpu();
1107 }
1108
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1110 {
1111 unsigned long context = CTX_HWBITS(mm->context);
1112
1113 get_cpu();
1114
1115 smp_cross_call_masked(&xcall_flush_tlb_page,
1116 context, vaddr, 0,
1117 mm_cpumask(mm));
1118
1119 __flush_tlb_page(context, vaddr);
1120
1121 put_cpu();
1122 }
1123
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1125 {
1126 start &= PAGE_MASK;
1127 end = PAGE_ALIGN(end);
1128 if (start != end) {
1129 smp_cross_call(&xcall_flush_tlb_kernel_range,
1130 0, start, end);
1131
1132 __flush_tlb_kernel_range(start, end);
1133 }
1134 }
1135
1136 /* CPU capture. */
1137 /* #define CAPTURE_DEBUG */
1138 extern unsigned long xcall_capture;
1139
1140 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1141 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1142 static unsigned long penguins_are_doing_time;
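/* smp_capture() corrals every other online cpu into
 * smp_penguin_jailcell(), where it spins with the cpu switched into
 * the PROM's world until smp_release() clears penguins_are_doing_time.
 * smp_capture_depth makes nested capture/release pairs safe, and
 * smp_capture_registry counts the cpus currently captured.
 */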
1143
void smp_capture(void)
1145 {
1146 int result = atomic_add_return(1, &smp_capture_depth);
1147
1148 if (result == 1) {
1149 int ncpus = num_online_cpus();
1150
1151 #ifdef CAPTURE_DEBUG
1152 printk("CPU[%d]: Sending penguins to jail...",
1153 smp_processor_id());
1154 #endif
1155 penguins_are_doing_time = 1;
1156 atomic_inc(&smp_capture_registry);
1157 smp_cross_call(&xcall_capture, 0, 0, 0);
1158 while (atomic_read(&smp_capture_registry) != ncpus)
1159 rmb();
1160 #ifdef CAPTURE_DEBUG
1161 printk("done\n");
1162 #endif
1163 }
1164 }
1165
void smp_release(void)
1167 {
1168 if (atomic_dec_and_test(&smp_capture_depth)) {
1169 #ifdef CAPTURE_DEBUG
1170 printk("CPU[%d]: Giving pardon to "
1171 "imprisoned penguins\n",
1172 smp_processor_id());
1173 #endif
1174 penguins_are_doing_time = 0;
1175 membar_safe("#StoreLoad");
1176 atomic_dec(&smp_capture_registry);
1177 }
1178 }
1179
1180 /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1181 * set, so they can service tlb flush xcalls...
1182 */
1183 extern void prom_world(int);
1184
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1186 {
1187 clear_softint(1 << irq);
1188
1189 preempt_disable();
1190
1191 __asm__ __volatile__("flushw");
1192 prom_world(1);
1193 atomic_inc(&smp_capture_registry);
1194 membar_safe("#StoreLoad");
1195 while (penguins_are_doing_time)
1196 rmb();
1197 atomic_dec(&smp_capture_registry);
1198 prom_world(0);
1199
1200 preempt_enable();
1201 }
1202
void __init smp_prepare_cpus(unsigned int max_cpus)
1204 {
1205 }
1206
void __init smp_setup_processor_id(void)
1208 {
1209 if (tlb_type == spitfire)
1210 xcall_deliver_impl = spitfire_xcall_deliver;
1211 else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1212 xcall_deliver_impl = cheetah_xcall_deliver;
1213 else
1214 xcall_deliver_impl = hypervisor_xcall_deliver;
1215 }
1216
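/* Rebuild the thread-sibling, core, socket and shared-cache maps from
 * the per-cpu identifiers (proc_id, core_id, sock_id, max_cache_id)
 * filled in from OBP / the machine description.
 */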
void smp_fill_in_sib_core_maps(void)
1218 {
1219 unsigned int i;
1220
1221 for_each_present_cpu(i) {
1222 unsigned int j;
1223
1224 cpumask_clear(&cpu_core_map[i]);
1225 if (cpu_data(i).core_id == 0) {
1226 cpumask_set_cpu(i, &cpu_core_map[i]);
1227 continue;
1228 }
1229
1230 for_each_present_cpu(j) {
1231 if (cpu_data(i).core_id ==
1232 cpu_data(j).core_id)
1233 cpumask_set_cpu(j, &cpu_core_map[i]);
1234 }
1235 }
1236
1237 for_each_present_cpu(i) {
1238 unsigned int j;
1239
1240 for_each_present_cpu(j) {
1241 if (cpu_data(i).max_cache_id ==
1242 cpu_data(j).max_cache_id)
1243 cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
1244
1245 if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1246 cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1247 }
1248 }
1249
1250 for_each_present_cpu(i) {
1251 unsigned int j;
1252
1253 cpumask_clear(&per_cpu(cpu_sibling_map, i));
1254 if (cpu_data(i).proc_id == -1) {
1255 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1256 continue;
1257 }
1258
1259 for_each_present_cpu(j) {
1260 if (cpu_data(i).proc_id ==
1261 cpu_data(j).proc_id)
1262 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1263 }
1264 }
1265 }
1266
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1268 {
1269 int ret = smp_boot_one_cpu(cpu, tidle);
1270
1271 if (!ret) {
1272 cpumask_set_cpu(cpu, &smp_commenced_mask);
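/* Setting the commenced bit releases the new cpu from its wait
 * loop in smp_callin(); it will mark itself online shortly after.
 */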
1273 while (!cpu_online(cpu))
1274 mb();
1275 if (!cpu_online(cpu)) {
1276 ret = -ENODEV;
1277 } else {
1278 /* On SUN4V, writes to %tick and %stick are
1279 * not allowed.
1280 */
1281 if (tlb_type != hypervisor)
1282 smp_synchronize_one_tick(cpu);
1283 }
1284 }
1285 return ret;
1286 }
1287
1288 #ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
1290 {
1291 int cpu = smp_processor_id();
1292 unsigned long pstate;
1293
1294 idle_task_exit();
1295
1296 if (tlb_type == hypervisor) {
1297 struct trap_per_cpu *tb = &trap_block[cpu];
1298
1299 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1300 tb->cpu_mondo_pa, 0);
1301 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1302 tb->dev_mondo_pa, 0);
1303 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1304 tb->resum_mondo_pa, 0);
1305 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1306 tb->nonresum_mondo_pa, 0);
1307 }
1308
1309 cpumask_clear_cpu(cpu, &smp_commenced_mask);
1310 membar_safe("#Sync");
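/* Clearing ourselves from smp_commenced_mask is what __cpu_die()
 * polls for to learn that this cpu has quiesced.
 */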
1311
1312 local_irq_disable();
1313
1314 __asm__ __volatile__(
1315 "rdpr %%pstate, %0\n\t"
1316 "wrpr %0, %1, %%pstate"
1317 : "=r" (pstate)
1318 : "i" (PSTATE_IE));
1319
1320 while (1)
1321 barrier();
1322 }
1323
int __cpu_disable(void)
1325 {
1326 int cpu = smp_processor_id();
1327 cpuinfo_sparc *c;
1328 int i;
1329
1330 for_each_cpu(i, &cpu_core_map[cpu])
1331 cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1332 cpumask_clear(&cpu_core_map[cpu]);
1333
1334 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1335 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1336 cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1337
1338 c = &cpu_data(cpu);
1339
1340 c->core_id = 0;
1341 c->proc_id = -1;
1342
1343 smp_wmb();
1344
1345 /* Make sure no interrupts point to this cpu. */
1346 fixup_irqs();
1347
1348 local_irq_enable();
1349 mdelay(1);
1350 local_irq_disable();
1351
1352 set_cpu_online(cpu, false);
1353
1354 cpu_map_rebuild();
1355
1356 return 0;
1357 }
1358
void __cpu_die(unsigned int cpu)
1360 {
1361 int i;
1362
1363 for (i = 0; i < 100; i++) {
1364 smp_rmb();
1365 if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1366 break;
1367 msleep(100);
1368 }
1369 if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1370 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1371 } else {
1372 #if defined(CONFIG_SUN_LDOMS)
1373 unsigned long hv_err;
1374 int limit = 100;
1375
1376 do {
1377 hv_err = sun4v_cpu_stop(cpu);
1378 if (hv_err == HV_EOK) {
1379 set_cpu_present(cpu, false);
1380 break;
1381 }
1382 } while (--limit > 0);
1383 if (limit <= 0) {
1384 printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1385 hv_err);
1386 }
1387 #endif
1388 }
1389 }
1390 #endif
1391
void __init smp_cpus_done(unsigned int max_cpus)
1393 {
1394 }
1395
static void send_cpu_ipi(int cpu)
1397 {
1398 xcall_deliver((u64) &xcall_receive_signal,
1399 0, 0, cpumask_of(cpu));
1400 }
1401
void scheduler_poke(void)
1403 {
1404 if (!cpu_poke)
1405 return;
1406
1407 if (!__this_cpu_read(poke))
1408 return;
1409
1410 __this_cpu_write(poke, false);
1411 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1412 }
1413
static unsigned long send_cpu_poke(int cpu)
1415 {
1416 unsigned long hv_err;
1417
1418 per_cpu(poke, cpu) = true;
1419 hv_err = sun4v_cpu_poke(cpu);
1420 if (hv_err != HV_EOK) {
1421 per_cpu(poke, cpu) = false;
1422 pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
1423 __func__, hv_err);
1424 }
1425
1426 return hv_err;
1427 }
1428
void arch_smp_send_reschedule(int cpu)
1430 {
1431 if (cpu == smp_processor_id()) {
1432 WARN_ON_ONCE(preemptible());
1433 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1434 return;
1435 }
1436
1437 /* Use cpu poke to resume idle cpu if supported. */
1438 if (cpu_poke && idle_cpu(cpu)) {
1439 unsigned long ret;
1440
1441 ret = send_cpu_poke(cpu);
1442 if (ret == HV_EOK)
1443 return;
1444 }
1445
1446 /* Use IPI in following cases:
1447 * - cpu poke not supported
1448 * - cpu not idle
1449 * - send_cpu_poke() returns with error
1450 */
1451 send_cpu_ipi(cpu);
1452 }
1453
void smp_init_cpu_poke(void)
1455 {
1456 unsigned long major;
1457 unsigned long minor;
1458 int ret;
1459
1460 if (tlb_type != hypervisor)
1461 return;
1462
1463 ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
1464 if (ret) {
1465 pr_debug("HV_GRP_CORE is not registered\n");
1466 return;
1467 }
1468
1469 if (major == 1 && minor >= 6) {
1470 /* CPU POKE is registered. */
1471 cpu_poke = true;
1472 return;
1473 }
1474
1475 pr_debug("CPU_POKE not supported\n");
1476 }
1477
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1479 {
1480 clear_softint(1 << irq);
1481 scheduler_ipi();
1482 }
1483
static void stop_this_cpu(void *dummy)
1485 {
1486 set_cpu_online(smp_processor_id(), false);
1487 prom_stopself();
1488 }
1489
void smp_send_stop(void)
1491 {
1492 int cpu;
1493
1494 if (tlb_type == hypervisor) {
1495 int this_cpu = smp_processor_id();
1496 #ifdef CONFIG_SERIAL_SUNHV
1497 sunhv_migrate_hvcons_irq(this_cpu);
1498 #endif
1499 for_each_online_cpu(cpu) {
1500 if (cpu == this_cpu)
1501 continue;
1502
1503 set_cpu_online(cpu, false);
1504 #ifdef CONFIG_SUN_LDOMS
1505 if (ldom_domaining_enabled) {
1506 unsigned long hv_err;
1507 hv_err = sun4v_cpu_stop(cpu);
1508 if (hv_err)
1509 printk(KERN_ERR "sun4v_cpu_stop() "
1510 "failed err=%lu\n", hv_err);
1511 } else
1512 #endif
1513 prom_stopcpu_cpuid(cpu);
1514 }
1515 } else
1516 smp_call_function(stop_this_cpu, NULL, 0);
1517 }
1518
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1520 {
1521 if (cpu_to_node(from) == cpu_to_node(to))
1522 return LOCAL_DISTANCE;
1523 else
1524 return REMOTE_DISTANCE;
1525 }
1526
static int __init pcpu_cpu_to_node(int cpu)
1528 {
1529 return cpu_to_node(cpu);
1530 }
1531
void __init setup_per_cpu_areas(void)
1533 {
1534 unsigned long delta;
1535 unsigned int cpu;
1536 int rc = -EINVAL;
1537
1538 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1539 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1540 PERCPU_DYNAMIC_RESERVE, 4 << 20,
1541 pcpu_cpu_distance,
1542 pcpu_cpu_to_node);
1543 if (rc)
1544 pr_warn("PERCPU: %s allocator failed (%d), "
1545 "falling back to page size\n",
1546 pcpu_fc_names[pcpu_chosen_fc], rc);
1547 }
1548 if (rc < 0)
1549 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1550 pcpu_cpu_to_node);
1551 if (rc < 0)
1552 panic("cannot initialize percpu area (err=%d)", rc);
1553
1554 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1555 for_each_possible_cpu(cpu)
1556 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1557
1558 /* Setup %g5 for the boot cpu. */
1559 __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1560
1561 of_fill_in_cpu_data();
1562 if (tlb_type == hypervisor)
1563 mdesc_fill_in_cpu_data(cpu_all_mask);
1564 }
1565