// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

/*
 * control - Report/change current runtime PM setting of the device
 *
 * Runtime power management of a device can be blocked with the help of
 * this attribute. All devices have one of the following two values for
 * the power/control file:
 *
 *  + "auto\n" to allow the device to be power managed at run time;
 *  + "on\n" to prevent the device from being power managed at run time;
 *
 * The default for all devices is "auto", which means that devices may be
 * subject to automatic power management, depending on their drivers.
 * Changing this attribute to "on" prevents the driver from power managing
 * the device at run time. Doing that while the device is suspended causes
 * it to be woken up.
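 *
 * Writing to this file maps directly onto the runtime PM core helpers:
 * "auto" calls pm_runtime_allow() and "on" calls pm_runtime_forbid()
 * (see control_store() below).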
 *
 * wakeup - Report/change current wakeup option for device
 *
 * Some devices support "wakeup" events, which are hardware signals
 * used to activate devices from suspended or low power states. Such
 * devices have one of three values for the sysfs power/wakeup file:
 *
 *  + "enabled\n" to issue the events;
 *  + "disabled\n" not to do so; or
 *  + "\n" for temporary or permanent inability to issue wakeup.
 *
 * (For example, unconfigured USB devices can't issue wakeups.)
 *
 * Familiar examples of devices that can issue wakeup events include
 * keyboards and mice (both PS2 and USB styles), power buttons, modems,
 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
 * will wake the entire system from a suspend state; others may just
 * wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special
 * out-of-band signaling.
 *
 * It is the responsibility of device drivers to enable (or disable)
 * wakeup signaling as part of changing device power states, respecting
 * the policy choices provided through the driver model.
 *
 * Devices may not be able to generate wakeup events from all power
 * states. Also, the events may be ignored in some configurations;
 * for example, they might need help from other devices that aren't
 * active, or which may have wakeup disabled. Some drivers rely on
 * wakeup events internally (unless they are disabled), keeping
 * their hardware in low power modes whenever they're unused. This
 * saves runtime power, without requiring system-wide sleep states.
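 *
 * A driver for wakeup-capable hardware typically declares the capability
 * at probe time with device_init_wakeup(dev, true) and then honors the
 * user's policy in its suspend path, roughly like this (an illustrative
 * sketch, not code from this file):
 *
 *        if (device_may_wakeup(dev))
 *                enable_irq_wake(irq);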
 *
 * async - Report/change current async suspend setting for the device
 *
 * Asynchronous suspend and resume of the device during system-wide power
 * state transitions can be enabled by writing "enabled" to this file.
 * Analogously, if "disabled" is written to this file, the device will be
 * suspended and resumed synchronously.
 *
 * All devices have one of the following two values for power/async:
 *
 *  + "enabled\n" to permit the asynchronous suspend/resume of the device;
 *  + "disabled\n" to forbid it;
 *
 * NOTE: It is generally unsafe to permit the asynchronous suspend/resume
 * of a device unless it is certain that all of the PM dependencies of the
 * device are known to the PM core. However, for some devices this
 * attribute is set to "enabled" by bus type code or device drivers and in
 * those cases it should be safe to leave the default value.
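 *
 * Writing to this file maps onto device_enable_async_suspend() and
 * device_disable_async_suspend() respectively (see async_store() below).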
 *
 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
 *
 * Some drivers don't want to carry out a runtime suspend as soon as a
 * device becomes idle; they want it always to remain idle for some period
 * of time before suspending it. This period is the autosuspend_delay
 * value (expressed in milliseconds) and it can be controlled by the user.
 * If the value is negative then the device will never be runtime
 * suspended.
 *
 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
 * value are used only if the driver calls pm_runtime_use_autosuspend().
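 *
 * A driver opting in to autosuspend typically does something along these
 * lines (an illustrative sketch, not code from this file); at probe time:
 *
 *        pm_runtime_set_autosuspend_delay(dev, 1000);
 *        pm_runtime_use_autosuspend(dev);
 *
 * and whenever the device goes idle:
 *
 *        pm_runtime_mark_last_busy(dev);
 *        pm_runtime_put_autosuspend(dev);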
 *
 * wakeup_count - Report the number of wakeup events related to the device
 */

const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

static ssize_t control_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t n)
{
        device_lock(dev);
        if (sysfs_streq(buf, ctrl_auto))
                pm_runtime_allow(dev);
        else if (sysfs_streq(buf, ctrl_on))
                pm_runtime_forbid(dev);
        else
                n = -EINVAL;
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(control);

static ssize_t runtime_active_time_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        u64 tmp = pm_runtime_active_time(dev);

        do_div(tmp, NSEC_PER_MSEC);

        return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        u64 tmp = pm_runtime_suspended_time(dev);

        do_div(tmp, NSEC_PER_MSEC);

        return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_suspended_time);

static ssize_t runtime_status_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        const char *output;

        if (dev->power.runtime_error) {
                output = "error";
        } else if (dev->power.disable_depth) {
                output = "unsupported";
        } else {
                switch (dev->power.runtime_status) {
                case RPM_SUSPENDED:
                        output = "suspended";
                        break;
                case RPM_SUSPENDING:
                        output = "suspending";
                        break;
                case RPM_RESUMING:
                        output = "resuming";
                        break;
                case RPM_ACTIVE:
                        output = "active";
                        break;
                default:
                        return -EIO;
                }
        }
        return sysfs_emit(buf, "%s\n", output);
}

static DEVICE_ATTR_RO(runtime_status);

static ssize_t autosuspend_delay_ms_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        if (!dev->power.use_autosuspend)
                return -EIO;

        return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
{
        long delay;

        if (!dev->power.use_autosuspend)
                return -EIO;

        if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
                return -EINVAL;

        device_lock(dev);
        pm_runtime_set_autosuspend_delay(dev, delay);
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);

static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        s32 value = dev_pm_qos_requested_resume_latency(dev);

        if (value == 0)
                return sysfs_emit(buf, "n/a\n");
        if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                value = 0;

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (!kstrtos32(buf, 0, &value)) {
                /*
                 * Prevent users from writing negative or "no constraint"
                 * values directly.
                 */
                if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                        return -EINVAL;

                if (value == 0)
                        value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        } else if (sysfs_streq(buf, "n/a")) {
                value = 0;
        } else {
                return -EINVAL;
        }

        ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
                                        value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

        if (value < 0)
                return sysfs_emit(buf, "%s\n", "auto");
        if (value == PM_QOS_LATENCY_ANY)
                return sysfs_emit(buf, "%s\n", "any");

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
                                                 struct device_attribute *attr,
                                                 const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (kstrtos32(buf, 0, &value) == 0) {
                /* Users can't write negative values directly */
                if (value < 0)
                        return -EINVAL;
        } else {
                if (sysfs_streq(buf, "auto"))
                        value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
                else if (sysfs_streq(buf, "any"))
                        value = PM_QOS_LATENCY_ANY;
                else
                        return -EINVAL;
        }
        ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
                                          & PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t n)
{
        int ret;

        if (kstrtoint(buf, 0, &ret))
                return -EINVAL;

        if (ret != 0 && ret != 1)
                return -EINVAL;

        ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
                ? (device_may_wakeup(dev) ? _enabled : _disabled)
                : "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t n)
{
        if (!device_can_wakeup(dev))
                return -EINVAL;

        if (sysfs_streq(buf, _enabled))
                device_set_wakeup_enable(dev, 1);
        else if (sysfs_streq(buf, _disabled))
                device_set_wakeup_enable(dev, 0);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(wakeup);

static ssize_t wakeup_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long count;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->wakeup_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->active_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        unsigned long count;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->wakeup_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->expire_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        unsigned int active;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                active = dev->power.wakeup->active;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%u\n", active);
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        s64 msec;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->total_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        s64 msec;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->max_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        s64 msec;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->last_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        s64 msec;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);

        if (!enabled)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */

static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
                                                kgid_t kgid)
{
        if (dev->power.wakeup && dev->power.wakeup->dev)
                return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
        return 0;
}

#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
                                                kgid_t kgid)
{
        return 0;
}
#endif

#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
                          0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        const char *output;

        if (dev->power.disable_depth && !dev->power.runtime_auto)
                output = "disabled & forbidden";
        else if (dev->power.disable_depth)
                output = "disabled";
        else if (!dev->power.runtime_auto)
                output = "forbidden";
        else
                output = "enabled";

        return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          device_async_suspend_enabled(dev) ?
                          _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t n)
{
        if (sysfs_streq(buf, _enabled))
                device_enable_async_suspend(dev);
        else if (sysfs_streq(buf, _disabled))
                device_disable_async_suspend(dev);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */

static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
        &dev_attr_async.attr,
#endif
        &dev_attr_runtime_status.attr,
        &dev_attr_runtime_usage.attr,
        &dev_attr_runtime_active_kids.attr,
        &dev_attr_runtime_enabled.attr,
#endif /* CONFIG_PM_ADVANCED_DEBUG */
        NULL,
};
static const struct attribute_group pm_attr_group = {
        .name = power_group_name,
        .attrs = power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
        &dev_attr_wakeup.attr,
        &dev_attr_wakeup_count.attr,
        &dev_attr_wakeup_active_count.attr,
        &dev_attr_wakeup_abort_count.attr,
        &dev_attr_wakeup_expire_count.attr,
        &dev_attr_wakeup_active.attr,
        &dev_attr_wakeup_total_time_ms.attr,
        &dev_attr_wakeup_max_time_ms.attr,
        &dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
        &dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
        NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
        .name = power_group_name,
        .attrs = wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
        &dev_attr_runtime_status.attr,
#endif
        &dev_attr_control.attr,
        &dev_attr_runtime_suspended_time.attr,
        &dev_attr_runtime_active_time.attr,
        &dev_attr_autosuspend_delay_ms.attr,
        NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
        .name = power_group_name,
        .attrs = runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
        &dev_attr_pm_qos_resume_latency_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
        &dev_attr_pm_qos_latency_tolerance_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
        &dev_attr_pm_qos_no_power_off.attr,
        NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_flags_attrs,
};

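/*
 * dpm_sysfs_add - Create the "power" sysfs group for a device.
 *
 * The basic group is always created (unless PM is not required for the
 * device); the runtime PM, wakeup and latency tolerance attributes are
 * merged into it only when the device supports the corresponding
 * functionality.  On failure, everything created so far is removed again.
 */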
int dpm_sysfs_add(struct device *dev)
{
        int rc;

        /* No need to create PM sysfs if explicitly disabled. */
        if (device_pm_not_required(dev))
                return 0;

        rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
        if (rc)
                return rc;

        if (!pm_runtime_has_no_callbacks(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
                if (rc)
                        goto err_out;
        }
        if (device_can_wakeup(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
                if (rc)
                        goto err_runtime;
        }
        if (dev->power.set_latency_tolerance) {
                rc = sysfs_merge_group(&dev->kobj,
                                       &pm_qos_latency_tolerance_attr_group);
                if (rc)
                        goto err_wakeup;
        }
        rc = pm_wakeup_source_sysfs_add(dev);
        if (rc)
                goto err_latency;
        return 0;

 err_latency:
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 err_wakeup:
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 err_runtime:
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
 err_out:
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
        return rc;
}

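/*
 * dpm_sysfs_change_owner - Change the ownership of a device's PM sysfs
 * attributes, including the sysfs entry of its wakeup source (if any),
 * to the given kuid/kgid.
 */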
int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
        int rc;

        if (device_pm_not_required(dev))
                return 0;

        rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
        if (rc)
                return rc;

        if (!pm_runtime_has_no_callbacks(dev)) {
                rc = sysfs_group_change_owner(
                        &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
                if (rc)
                        return rc;
        }

        if (device_can_wakeup(dev)) {
                rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
                                              kuid, kgid);
                if (rc)
                        return rc;

                rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
                if (rc)
                        return rc;
        }

        if (dev->power.set_latency_tolerance) {
                rc = sysfs_group_change_owner(
                        &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
                        kgid);
                if (rc)
                        return rc;
        }
        return 0;
}

int wakeup_sysfs_add(struct device *dev)
{
        int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);

        if (!ret)
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);

        return ret;
}

void wakeup_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj,
                                 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

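/*
 * dpm_sysfs_remove - Counterpart of dpm_sysfs_add(): drop the device's PM QoS
 * constraints and remove its "power" sysfs attribute groups.
 */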
void dpm_sysfs_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
        dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
}