// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
 * Copyright (C) 2004, 2005 Dominik Brodowski <[email protected]>
 * Copyright (C) 2004  Anil S Keshavamurthy <[email protected]>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <[email protected]>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

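/*
 * With CONFIG_ARCH_HAS_CPU_RELAX, cpuidle_poll_state_init() installs a
 * polling state at index 0 (see acpi_processor_setup_cstates()), so the
 * ACPI C-states start at index 1.
 */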
#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);

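/*
 * A state's target residency is estimated as latency * latency_factor
 * (see acpi_processor_setup_cstates()); tunable via this module parameter.
 */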
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		raw_safe_halt();
		raw_local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

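	/* Nothing to do if the APIC timer keeps running in deep C-states. */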
	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
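	/* (cx - pr->power.states) is the index of @cx in the states array. */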
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	if (!pr->power.states[ACPI_STATE_C2].address &&
	    !pr->power.states[ACPI_STATE_C3].address)
		return -ENODEV;

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

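/*
 * Insertion-sort-style pass over the valid states: whenever a shallower
 * valid state reports a larger latency than the next deeper valid one,
 * swap just the latency values so they end up monotonically non-decreasing
 * with state index. Only the latencies move; type and address stay put.
 */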
static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
{
	int i, j, k;

	for (i = 1; i < length; i++) {
		if (!states[i].valid)
			continue;

		for (j = i - 1, k = i; j >= 0; j--) {
			if (!states[j].valid)
				continue;

			if (states[j].latency > states[k].latency)
				swap(states[j].latency, states[k].latency);

			k = j;
		}
	}
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
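		/*
		 * A deeper (or equal-type) state should not claim a smaller
		 * latency than the previous valid one; if it does, the
		 * firmware tables are out of order.
		 */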
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static __cpuidle void io_idle(unsigned long addr)
{
	/* IO port based C-state */
	inb(addr);

#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code. Assume that any Intel systems using this
	 * are ancient and may need the dummy wait. This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	perf_lopwr_cb(true);

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		io_idle(cx->address);
	}

	perf_lopwr_cb(false);
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

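	/*
	 * Loop in the idle method until the CPU is taken down; only an
	 * unsupported entry method makes this return.
	 */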
	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			raw_safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			io_idle(cx->address);
		} else
			return;
	}
}

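/*
 * With CONFIG_HOTPLUG_CPU, CPUs can come online after the C-states were
 * probed, so the P_LVL2_UP check in acpi_processor_get_power_info_fadt()
 * cannot be done once at probe time; instead, acpi_idle_enter() demotes to
 * C1 whenever _CST is absent and the FADT does not flag C2 as MP-safe.
 */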
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

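/*
 * ARB_DIS may only be set once every online CPU is in C3, so C3 entries
 * are counted under c3_lock and ARB_DIS is flipped when the count reaches
 * num_online_cpus().
 */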
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
					struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	instrumentation_begin();

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	ct_cpuidle_enter();

	acpi_idle_do_entry(cx);

	ct_cpuidle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	instrumentation_end();

	return index;
}

static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
					    struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;

		state->enter_dead = acpi_idle_play_dead;

		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
			drv->safe_state_index = count;

		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

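	/*
	 * Per the ACPI spec, _LPI returns { Revision, Level ID, Count,
	 * <Count LPI state packages> }, so element [2] is the state count.
	 */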
	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now, i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strscpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED		BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

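/*
 * Build the flattened list: every enabled state at this level is merged
 * with each composite state of the previous (child) level that allows it
 * as a parent (t->index <= p->enable_parent_state), and the result is
 * appended to pr->power.lpi_states. For the leaf (processor) level the
 * states are copied verbatim.
 */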
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

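/*
 * Weak stub: architectures with FFH-based LPI support (e.g. arm64)
 * provide their own acpi_processor_ffh_lpi_probe()/_enter().
 */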
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		if (!d)
			break;

		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters any ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register per-cpu cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);

		kfree(dev);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}
