Lines Matching full:policy
83 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
84 static int cpufreq_init_governor(struct cpufreq_policy *policy);
85 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
86 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
87 static int cpufreq_set_policy(struct cpufreq_policy *policy,
93 * Two notifier lists: the "policy" list is involved in the
94 * validation process for a new CPU frequency policy; the
121 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) in get_governor_parent_kobj() argument
124 return &policy->kobj; in get_governor_parent_kobj()
173 * - policy->cpus with all possible CPUs
175 void cpufreq_generic_init(struct cpufreq_policy *policy, in cpufreq_generic_init() argument
179 policy->freq_table = table; in cpufreq_generic_init()
180 policy->cpuinfo.transition_latency = transition_latency; in cpufreq_generic_init()
186 cpumask_setall(policy->cpus); in cpufreq_generic_init()
192 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_cpu_get_raw() local
194 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; in cpufreq_cpu_get_raw()
200 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); in cpufreq_generic_get() local
202 if (!policy || IS_ERR(policy->clk)) { in cpufreq_generic_get()
204 __func__, policy ? "clk" : "policy", cpu); in cpufreq_generic_get()
208 return clk_get_rate(policy->clk) / 1000; in cpufreq_generic_get()
213 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
214 * @cpu: CPU to find the policy for.
216 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
217 * the kobject reference counter of that policy. Return a valid policy on
220 * The policy returned by this function has to be released with the help of
225 struct cpufreq_policy *policy = NULL; in cpufreq_cpu_get() local
236 policy = cpufreq_cpu_get_raw(cpu); in cpufreq_cpu_get()
237 if (policy) in cpufreq_cpu_get()
238 kobject_get(&policy->kobj); in cpufreq_cpu_get()
243 return policy; in cpufreq_cpu_get()
248 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
249 * @policy: cpufreq policy returned by cpufreq_cpu_get().
251 void cpufreq_cpu_put(struct cpufreq_policy *policy) in cpufreq_cpu_put() argument
253 kobject_put(&policy->kobj); in cpufreq_cpu_put()
258 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
259 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
261 void cpufreq_cpu_release(struct cpufreq_policy *policy) in cpufreq_cpu_release() argument
263 if (WARN_ON(!policy)) in cpufreq_cpu_release()
266 lockdep_assert_held(&policy->rwsem); in cpufreq_cpu_release()
268 up_write(&policy->rwsem); in cpufreq_cpu_release()
270 cpufreq_cpu_put(policy); in cpufreq_cpu_release()
274 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
275 * @cpu: CPU to find the policy for.
277 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
278 * if the policy returned by it is not NULL, acquire its rwsem for writing.
279 * Return the policy if it is active or release it and return NULL otherwise.
281 * The policy returned by this function has to be released with the help of
287 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_cpu_acquire() local
289 if (!policy) in cpufreq_cpu_acquire()
292 down_write(&policy->rwsem); in cpufreq_cpu_acquire()
294 if (policy_is_inactive(policy)) { in cpufreq_cpu_acquire()
295 cpufreq_cpu_release(policy); in cpufreq_cpu_acquire()
299 return policy; in cpufreq_cpu_acquire()
342 * @policy: cpufreq policy to enable fast frequency switching for.
350 static void cpufreq_notify_transition(struct cpufreq_policy *policy, in cpufreq_notify_transition() argument
361 freqs->policy = policy; in cpufreq_notify_transition()
373 if (policy->cur && policy->cur != freqs->old) { in cpufreq_notify_transition()
375 freqs->old, policy->cur); in cpufreq_notify_transition()
376 freqs->old = policy->cur; in cpufreq_notify_transition()
388 cpumask_pr_args(policy->cpus)); in cpufreq_notify_transition()
390 for_each_cpu(cpu, policy->cpus) in cpufreq_notify_transition()
396 cpufreq_stats_record_transition(policy, freqs->new); in cpufreq_notify_transition()
397 policy->cur = freqs->new; in cpufreq_notify_transition()
402 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, in cpufreq_notify_post_transition() argument
405 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
410 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_notify_post_transition()
411 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
414 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, in cpufreq_freq_transition_begin() argument
427 && current == policy->transition_task); in cpufreq_freq_transition_begin()
430 wait_event(policy->transition_wait, !policy->transition_ongoing); in cpufreq_freq_transition_begin()
432 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_begin()
434 if (unlikely(policy->transition_ongoing)) { in cpufreq_freq_transition_begin()
435 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
439 policy->transition_ongoing = true; in cpufreq_freq_transition_begin()
440 policy->transition_task = current; in cpufreq_freq_transition_begin()
442 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
444 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_freq_transition_begin()
448 void cpufreq_freq_transition_end(struct cpufreq_policy *policy, in cpufreq_freq_transition_end() argument
451 if (WARN_ON(!policy->transition_ongoing)) in cpufreq_freq_transition_end()
454 cpufreq_notify_post_transition(policy, freqs, transition_failed); in cpufreq_freq_transition_end()
456 arch_set_freq_scale(policy->related_cpus, in cpufreq_freq_transition_end()
457 policy->cur, in cpufreq_freq_transition_end()
458 arch_scale_freq_ref(policy->cpu)); in cpufreq_freq_transition_end()
460 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_end()
461 policy->transition_ongoing = false; in cpufreq_freq_transition_end()
462 policy->transition_task = NULL; in cpufreq_freq_transition_end()
463 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_end()
465 wake_up(&policy->transition_wait); in cpufreq_freq_transition_end()
491 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
492 * @policy: cpufreq policy to enable fast frequency switching for.
494 * Try to enable fast frequency switching for @policy.
501 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) in cpufreq_enable_fast_switch() argument
503 lockdep_assert_held(&policy->rwsem); in cpufreq_enable_fast_switch()
505 if (!policy->fast_switch_possible) in cpufreq_enable_fast_switch()
511 policy->fast_switch_enabled = true; in cpufreq_enable_fast_switch()
514 policy->cpu); in cpufreq_enable_fast_switch()
522 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
523 * @policy: cpufreq policy to disable fast frequency switching for.
525 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) in cpufreq_disable_fast_switch() argument
528 if (policy->fast_switch_enabled) { in cpufreq_disable_fast_switch()
529 policy->fast_switch_enabled = false; in cpufreq_disable_fast_switch()
537 static unsigned int __resolve_freq(struct cpufreq_policy *policy, in __resolve_freq() argument
542 target_freq = clamp_val(target_freq, policy->min, policy->max); in __resolve_freq()
544 if (!policy->freq_table) in __resolve_freq()
547 idx = cpufreq_frequency_table_target(policy, target_freq, relation); in __resolve_freq()
548 policy->cached_resolved_idx = idx; in __resolve_freq()
549 policy->cached_target_freq = target_freq; in __resolve_freq()
550 return policy->freq_table[idx].frequency; in __resolve_freq()
556 * @policy: associated policy to interrogate
559 * The target to driver frequency mapping is cached in the policy.
562 * given target_freq, subject to policy (min/max) and driver limitations.
564 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, in cpufreq_driver_resolve_freq() argument
567 return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE); in cpufreq_driver_resolve_freq()
571 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) in cpufreq_policy_transition_delay_us() argument
575 if (policy->transition_delay_us) in cpufreq_policy_transition_delay_us()
576 return policy->transition_delay_us; in cpufreq_policy_transition_delay_us()
578 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; in cpufreq_policy_transition_delay_us()
617 static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf) in show_local_boost() argument
619 return sysfs_emit(buf, "%d\n", policy->boost_enabled); in show_local_boost()
622 static ssize_t store_local_boost(struct cpufreq_policy *policy, in store_local_boost() argument
634 if (policy->boost_enabled == enable) in store_local_boost()
637 policy->boost_enabled = enable; in store_local_boost()
640 ret = cpufreq_driver->set_boost(policy, enable); in store_local_boost()
644 policy->boost_enabled = !policy->boost_enabled; in store_local_boost()
715 * Write out information from cpufreq_driver->policy[cpu]; object must be
721 (struct cpufreq_policy *policy, char *buf) \
723 return sysfs_emit(buf, "%u\n", policy->object); \
737 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) in show_scaling_cur_freq() argument
742 freq = arch_freq_get_on_cpu(policy->cpu); in show_scaling_cur_freq()
746 ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu)); in show_scaling_cur_freq()
748 ret = sysfs_emit(buf, "%u\n", policy->cur); in show_scaling_cur_freq()
757 (struct cpufreq_policy *policy, const char *buf, size_t count) \
766 ret = freq_qos_update_request(policy->object##_freq_req, val);\
776 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, in show_cpuinfo_cur_freq() argument
779 unsigned int cur_freq = __cpufreq_get(policy); in show_cpuinfo_cur_freq()
788 * show_scaling_governor - show the current policy for the specified CPU
790 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) in show_scaling_governor() argument
792 if (policy->policy == CPUFREQ_POLICY_POWERSAVE) in show_scaling_governor()
794 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) in show_scaling_governor()
796 else if (policy->governor) in show_scaling_governor()
797 return sysfs_emit(buf, "%s\n", policy->governor->name); in show_scaling_governor()
802 * store_scaling_governor - store policy for the specified CPU
804 static ssize_t store_scaling_governor(struct cpufreq_policy *policy, in store_scaling_governor() argument
821 ret = cpufreq_set_policy(policy, NULL, new_pol); in store_scaling_governor()
829 ret = cpufreq_set_policy(policy, new_gov, in store_scaling_governor()
841 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) in show_scaling_driver() argument
849 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, in show_scaling_available_governors() argument
896 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) in show_related_cpus() argument
898 return cpufreq_show_cpus(policy->related_cpus, buf); in show_related_cpus()
904 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) in show_affected_cpus() argument
906 return cpufreq_show_cpus(policy->cpus, buf); in show_affected_cpus()
909 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, in store_scaling_setspeed() argument
915 if (!policy->governor || !policy->governor->store_setspeed) in store_scaling_setspeed()
922 policy->governor->store_setspeed(policy, freq); in store_scaling_setspeed()
927 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) in show_scaling_setspeed() argument
929 if (!policy->governor || !policy->governor->show_setspeed) in show_scaling_setspeed()
932 return policy->governor->show_setspeed(policy, buf); in show_scaling_setspeed()
938 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) in show_bios_limit() argument
942 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); in show_bios_limit()
945 return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq); in show_bios_limit()
984 struct cpufreq_policy *policy = to_policy(kobj); in show() local
991 down_read(&policy->rwsem); in show()
992 if (likely(!policy_is_inactive(policy))) in show()
993 ret = fattr->show(policy, buf); in show()
994 up_read(&policy->rwsem); in show()
1002 struct cpufreq_policy *policy = to_policy(kobj); in store() local
1009 down_write(&policy->rwsem); in store()
1010 if (likely(!policy_is_inactive(policy))) in store()
1011 ret = fattr->store(policy, buf, count); in store()
1012 up_write(&policy->rwsem); in store()
1019 struct cpufreq_policy *policy = to_policy(kobj); in cpufreq_sysfs_release() local
1021 complete(&policy->kobj_unregister); in cpufreq_sysfs_release()
1035 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu, in add_cpu_dev_symlink() argument
1041 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) in add_cpu_dev_symlink()
1045 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) in add_cpu_dev_symlink()
1049 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu, in remove_cpu_dev_symlink() argument
1054 cpumask_clear_cpu(cpu, policy->real_cpus); in remove_cpu_dev_symlink()
1057 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) in cpufreq_add_dev_interface() argument
1065 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); in cpufreq_add_dev_interface()
1071 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); in cpufreq_add_dev_interface()
1076 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); in cpufreq_add_dev_interface()
1081 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); in cpufreq_add_dev_interface()
1087 ret = sysfs_create_file(&policy->kobj, &local_boost.attr); in cpufreq_add_dev_interface()
1095 static int cpufreq_init_policy(struct cpufreq_policy *policy) in cpufreq_init_policy() argument
1102 /* Update policy governor to the one used before hotplug. */ in cpufreq_init_policy()
1103 gov = get_governor(policy->last_governor); in cpufreq_init_policy()
1106 gov->name, policy->cpu); in cpufreq_init_policy()
1118 /* Use the default policy if there is no last_policy. */ in cpufreq_init_policy()
1119 if (policy->last_policy) { in cpufreq_init_policy()
1120 pol = policy->last_policy; in cpufreq_init_policy()
1125 * nor "powersave", fall back to the initial policy in cpufreq_init_policy()
1129 pol = policy->policy; in cpufreq_init_policy()
1136 ret = cpufreq_set_policy(policy, gov, pol); in cpufreq_init_policy()
1143 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) in cpufreq_add_policy_cpu() argument
1148 if (cpumask_test_cpu(cpu, policy->cpus)) in cpufreq_add_policy_cpu()
1151 down_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1153 cpufreq_stop_governor(policy); in cpufreq_add_policy_cpu()
1155 cpumask_set_cpu(cpu, policy->cpus); in cpufreq_add_policy_cpu()
1158 ret = cpufreq_start_governor(policy); in cpufreq_add_policy_cpu()
1162 up_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1166 void refresh_frequency_limits(struct cpufreq_policy *policy) in refresh_frequency_limits() argument
1168 if (!policy_is_inactive(policy)) { in refresh_frequency_limits()
1169 pr_debug("updating policy for CPU %u\n", policy->cpu); in refresh_frequency_limits()
1171 cpufreq_set_policy(policy, policy->governor, policy->policy); in refresh_frequency_limits()
1178 struct cpufreq_policy *policy = in handle_update() local
1181 pr_debug("handle_update for cpu %u called\n", policy->cpu); in handle_update()
1182 down_write(&policy->rwsem); in handle_update()
1183 refresh_frequency_limits(policy); in handle_update()
1184 up_write(&policy->rwsem); in handle_update()
1190 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min); in cpufreq_notifier_min() local
1192 schedule_work(&policy->update); in cpufreq_notifier_min()
1199 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max); in cpufreq_notifier_max() local
1201 schedule_work(&policy->update); in cpufreq_notifier_max()
1205 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) in cpufreq_policy_put_kobj() argument
1210 down_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1211 cpufreq_stats_free_table(policy); in cpufreq_policy_put_kobj()
1212 kobj = &policy->kobj; in cpufreq_policy_put_kobj()
1213 cmp = &policy->kobj_unregister; in cpufreq_policy_put_kobj()
1214 up_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1229 struct cpufreq_policy *policy; in cpufreq_policy_alloc() local
1236 policy = kzalloc(sizeof(*policy), GFP_KERNEL); in cpufreq_policy_alloc()
1237 if (!policy) in cpufreq_policy_alloc()
1240 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1243 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1246 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1249 init_completion(&policy->kobj_unregister); in cpufreq_policy_alloc()
1250 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, in cpufreq_policy_alloc()
1251 cpufreq_global_kobject, "policy%u", cpu); in cpufreq_policy_alloc()
1253 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret); in cpufreq_policy_alloc()
1255 * The entire policy object will be freed below, but the extra in cpufreq_policy_alloc()
1259 kobject_put(&policy->kobj); in cpufreq_policy_alloc()
1263 freq_constraints_init(&policy->constraints); in cpufreq_policy_alloc()
1265 policy->nb_min.notifier_call = cpufreq_notifier_min; in cpufreq_policy_alloc()
1266 policy->nb_max.notifier_call = cpufreq_notifier_max; in cpufreq_policy_alloc()
1268 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_alloc()
1269 &policy->nb_min); in cpufreq_policy_alloc()
1276 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX, in cpufreq_policy_alloc()
1277 &policy->nb_max); in cpufreq_policy_alloc()
1284 INIT_LIST_HEAD(&policy->policy_list); in cpufreq_policy_alloc()
1285 init_rwsem(&policy->rwsem); in cpufreq_policy_alloc()
1286 spin_lock_init(&policy->transition_lock); in cpufreq_policy_alloc()
1287 init_waitqueue_head(&policy->transition_wait); in cpufreq_policy_alloc()
1288 INIT_WORK(&policy->update, handle_update); in cpufreq_policy_alloc()
1290 policy->cpu = cpu; in cpufreq_policy_alloc()
1291 return policy; in cpufreq_policy_alloc()
1294 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_alloc()
1295 &policy->nb_min); in cpufreq_policy_alloc()
1297 cpufreq_policy_put_kobj(policy); in cpufreq_policy_alloc()
1299 free_cpumask_var(policy->real_cpus); in cpufreq_policy_alloc()
1301 free_cpumask_var(policy->related_cpus); in cpufreq_policy_alloc()
1303 free_cpumask_var(policy->cpus); in cpufreq_policy_alloc()
1305 kfree(policy); in cpufreq_policy_alloc()
1310 static void cpufreq_policy_free(struct cpufreq_policy *policy) in cpufreq_policy_free() argument
1316 * The callers must ensure the policy is inactive by now, to avoid any in cpufreq_policy_free()
1319 if (unlikely(!policy_is_inactive(policy))) in cpufreq_policy_free()
1320 pr_warn("%s: Freeing active policy\n", __func__); in cpufreq_policy_free()
1322 /* Remove policy from list */ in cpufreq_policy_free()
1324 list_del(&policy->policy_list); in cpufreq_policy_free()
1326 for_each_cpu(cpu, policy->related_cpus) in cpufreq_policy_free()
1330 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX, in cpufreq_policy_free()
1331 &policy->nb_max); in cpufreq_policy_free()
1332 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_free()
1333 &policy->nb_min); in cpufreq_policy_free()
1335 /* Cancel any pending policy->update work before freeing the policy. */ in cpufreq_policy_free()
1336 cancel_work_sync(&policy->update); in cpufreq_policy_free()
1338 if (policy->max_freq_req) { in cpufreq_policy_free()
1345 CPUFREQ_REMOVE_POLICY, policy); in cpufreq_policy_free()
1346 freq_qos_remove_request(policy->max_freq_req); in cpufreq_policy_free()
1349 freq_qos_remove_request(policy->min_freq_req); in cpufreq_policy_free()
1350 kfree(policy->min_freq_req); in cpufreq_policy_free()
1352 cpufreq_policy_put_kobj(policy); in cpufreq_policy_free()
1353 free_cpumask_var(policy->real_cpus); in cpufreq_policy_free()
1354 free_cpumask_var(policy->related_cpus); in cpufreq_policy_free()
1355 free_cpumask_var(policy->cpus); in cpufreq_policy_free()
1356 kfree(policy); in cpufreq_policy_free()
1361 struct cpufreq_policy *policy; in cpufreq_online() local
1369 /* Check if this CPU already has a policy to manage it */ in cpufreq_online()
1370 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_online()
1371 if (policy) { in cpufreq_online()
1372 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); in cpufreq_online()
1373 if (!policy_is_inactive(policy)) in cpufreq_online()
1374 return cpufreq_add_policy_cpu(policy, cpu); in cpufreq_online()
1376 /* This is the only online CPU for the policy. Start over. */ in cpufreq_online()
1378 down_write(&policy->rwsem); in cpufreq_online()
1379 policy->cpu = cpu; in cpufreq_online()
1380 policy->governor = NULL; in cpufreq_online()
1383 policy = cpufreq_policy_alloc(cpu); in cpufreq_online()
1384 if (!policy) in cpufreq_online()
1386 down_write(&policy->rwsem); in cpufreq_online()
1390 /* Recover policy->cpus using related_cpus */ in cpufreq_online()
1391 cpumask_copy(policy->cpus, policy->related_cpus); in cpufreq_online()
1393 ret = cpufreq_driver->online(policy); in cpufreq_online()
1400 cpumask_copy(policy->cpus, cpumask_of(cpu)); in cpufreq_online()
1406 ret = cpufreq_driver->init(policy); in cpufreq_online()
1414 * The initialization has succeeded and the policy is online. in cpufreq_online()
1418 ret = cpufreq_table_validate_and_sort(policy); in cpufreq_online()
1422 /* related_cpus should at least include policy->cpus. */ in cpufreq_online()
1423 cpumask_copy(policy->related_cpus, policy->cpus); in cpufreq_online()
1430 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); in cpufreq_online()
1433 for_each_cpu(j, policy->related_cpus) { in cpufreq_online()
1434 per_cpu(cpufreq_cpu_data, j) = policy; in cpufreq_online()
1435 add_cpu_dev_symlink(policy, j, get_cpu_device(j)); in cpufreq_online()
1438 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req), in cpufreq_online()
1440 if (!policy->min_freq_req) { in cpufreq_online()
1445 ret = freq_qos_add_request(&policy->constraints, in cpufreq_online()
1446 policy->min_freq_req, FREQ_QOS_MIN, in cpufreq_online()
1453 kfree(policy->min_freq_req); in cpufreq_online()
1454 policy->min_freq_req = NULL; in cpufreq_online()
1463 policy->max_freq_req = policy->min_freq_req + 1; in cpufreq_online()
1465 ret = freq_qos_add_request(&policy->constraints, in cpufreq_online()
1466 policy->max_freq_req, FREQ_QOS_MAX, in cpufreq_online()
1469 policy->max_freq_req = NULL; in cpufreq_online()
1474 CPUFREQ_CREATE_POLICY, policy); in cpufreq_online()
1476 ret = freq_qos_update_request(policy->max_freq_req, policy->max); in cpufreq_online()
1482 policy->cur = cpufreq_driver->get(policy->cpu); in cpufreq_online()
1483 if (!policy->cur) { in cpufreq_online()
1500 * for the next freq which is >= policy->cur ('cur' must be set by now, in cpufreq_online()
1504 * We are passing target-freq as "policy->cur - 1" otherwise in cpufreq_online()
1505 * __cpufreq_driver_target() would simply fail, as policy->cur will be in cpufreq_online()
1510 unsigned int old_freq = policy->cur; in cpufreq_online()
1513 ret = cpufreq_frequency_table_get_index(policy, old_freq); in cpufreq_online()
1515 ret = __cpufreq_driver_target(policy, old_freq - 1, in cpufreq_online()
1525 __func__, policy->cpu, old_freq, policy->cur); in cpufreq_online()
1530 ret = cpufreq_add_dev_interface(policy); in cpufreq_online()
1534 cpufreq_stats_create_table(policy); in cpufreq_online()
1537 list_add(&policy->policy_list, &cpufreq_policy_list); in cpufreq_online()
1544 * once the energy model is properly initialized for the policy in cpufreq_online()
1547 * Also, this should be called before the policy is registered in cpufreq_online()
1551 cpufreq_driver->register_em(policy); in cpufreq_online()
1554 ret = cpufreq_init_policy(policy); in cpufreq_online()
1556 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", in cpufreq_online()
1561 up_write(&policy->rwsem); in cpufreq_online()
1563 kobject_uevent(&policy->kobj, KOBJ_ADD); in cpufreq_online()
1565 /* Callback for handling stuff after policy is ready */ in cpufreq_online()
1567 cpufreq_driver->ready(policy); in cpufreq_online()
1569 /* Register cpufreq cooling only for a new policy */ in cpufreq_online()
1571 policy->cdev = of_cpufreq_cooling_register(policy); in cpufreq_online()
1573 /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */ in cpufreq_online()
1575 policy->boost_enabled != cpufreq_boost_enabled()) { in cpufreq_online()
1576 policy->boost_enabled = cpufreq_boost_enabled(); in cpufreq_online()
1577 ret = cpufreq_driver->set_boost(policy, policy->boost_enabled); in cpufreq_online()
1580 pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu, in cpufreq_online()
1581 policy->boost_enabled ? "enable" : "disable"); in cpufreq_online()
1582 policy->boost_enabled = !policy->boost_enabled; in cpufreq_online()
1591 for_each_cpu(j, policy->real_cpus) in cpufreq_online()
1592 remove_cpu_dev_symlink(policy, j, get_cpu_device(j)); in cpufreq_online()
1596 cpufreq_driver->offline(policy); in cpufreq_online()
1600 cpufreq_driver->exit(policy); in cpufreq_online()
1603 cpumask_clear(policy->cpus); in cpufreq_online()
1604 up_write(&policy->rwsem); in cpufreq_online()
1606 cpufreq_policy_free(policy); in cpufreq_online()
1617 struct cpufreq_policy *policy; in cpufreq_add_dev() local
1630 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_add_dev()
1631 if (policy) in cpufreq_add_dev()
1632 add_cpu_dev_symlink(policy, cpu, dev); in cpufreq_add_dev()
1637 static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) in __cpufreq_offline() argument
1642 cpufreq_stop_governor(policy); in __cpufreq_offline()
1644 cpumask_clear_cpu(cpu, policy->cpus); in __cpufreq_offline()
1646 if (!policy_is_inactive(policy)) { in __cpufreq_offline()
1648 if (cpu == policy->cpu) in __cpufreq_offline()
1649 policy->cpu = cpumask_any(policy->cpus); in __cpufreq_offline()
1651 /* Start the governor again for the active policy. */ in __cpufreq_offline()
1653 ret = cpufreq_start_governor(policy); in __cpufreq_offline()
1662 strscpy(policy->last_governor, policy->governor->name, in __cpufreq_offline()
1665 policy->last_policy = policy->policy; in __cpufreq_offline()
1668 cpufreq_exit_governor(policy); in __cpufreq_offline()
1675 cpufreq_driver->offline(policy); in __cpufreq_offline()
1680 cpufreq_driver->exit(policy); in __cpufreq_offline()
1682 policy->freq_table = NULL; in __cpufreq_offline()
1687 struct cpufreq_policy *policy; in cpufreq_offline() local
1691 policy = cpufreq_cpu_get_raw(cpu); in cpufreq_offline()
1692 if (!policy) { in cpufreq_offline()
1697 down_write(&policy->rwsem); in cpufreq_offline()
1699 __cpufreq_offline(cpu, policy); in cpufreq_offline()
1701 up_write(&policy->rwsem); in cpufreq_offline()
1713 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_remove_dev() local
1715 if (!policy) in cpufreq_remove_dev()
1718 down_write(&policy->rwsem); in cpufreq_remove_dev()
1721 __cpufreq_offline(cpu, policy); in cpufreq_remove_dev()
1723 remove_cpu_dev_symlink(policy, cpu, dev); in cpufreq_remove_dev()
1725 if (!cpumask_empty(policy->real_cpus)) { in cpufreq_remove_dev()
1726 up_write(&policy->rwsem); in cpufreq_remove_dev()
1731 * Unregister cpufreq cooling once all the CPUs of the policy are in cpufreq_remove_dev()
1735 cpufreq_cooling_unregister(policy->cdev); in cpufreq_remove_dev()
1736 policy->cdev = NULL; in cpufreq_remove_dev()
1741 cpufreq_driver->exit(policy); in cpufreq_remove_dev()
1743 up_write(&policy->rwsem); in cpufreq_remove_dev()
1745 cpufreq_policy_free(policy); in cpufreq_remove_dev()
1750 * @policy: Policy managing CPUs.
1756 static void cpufreq_out_of_sync(struct cpufreq_policy *policy, in cpufreq_out_of_sync() argument
1762 policy->cur, new_freq); in cpufreq_out_of_sync()
1764 freqs.old = policy->cur; in cpufreq_out_of_sync()
1767 cpufreq_freq_transition_begin(policy, &freqs); in cpufreq_out_of_sync()
1768 cpufreq_freq_transition_end(policy, &freqs, 0); in cpufreq_out_of_sync()
1771 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update) in cpufreq_verify_current_freq() argument
1775 new_freq = cpufreq_driver->get(policy->cpu); in cpufreq_verify_current_freq()
1780 * If fast frequency switching is used with the given policy, the check in cpufreq_verify_current_freq()
1781 * against policy->cur is pointless, so skip it in that case. in cpufreq_verify_current_freq()
1783 if (policy->fast_switch_enabled || !has_target()) in cpufreq_verify_current_freq()
1786 if (policy->cur != new_freq) { in cpufreq_verify_current_freq()
1794 if (abs(policy->cur - new_freq) < KHZ_PER_MHZ) in cpufreq_verify_current_freq()
1795 return policy->cur; in cpufreq_verify_current_freq()
1797 cpufreq_out_of_sync(policy, new_freq); in cpufreq_verify_current_freq()
1799 schedule_work(&policy->update); in cpufreq_verify_current_freq()
1806 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1814 struct cpufreq_policy *policy; in cpufreq_quick_get() local
1828 policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get()
1829 if (policy) { in cpufreq_quick_get()
1830 ret_freq = policy->cur; in cpufreq_quick_get()
1831 cpufreq_cpu_put(policy); in cpufreq_quick_get()
1846 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get_max() local
1849 if (policy) { in cpufreq_quick_get_max()
1850 ret_freq = policy->max; in cpufreq_quick_get_max()
1851 cpufreq_cpu_put(policy); in cpufreq_quick_get_max()
1866 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_get_hw_max_freq() local
1869 if (policy) { in cpufreq_get_hw_max_freq()
1870 ret_freq = policy->cpuinfo.max_freq; in cpufreq_get_hw_max_freq()
1871 cpufreq_cpu_put(policy); in cpufreq_get_hw_max_freq()
1878 static unsigned int __cpufreq_get(struct cpufreq_policy *policy) in __cpufreq_get() argument
1880 if (unlikely(policy_is_inactive(policy))) in __cpufreq_get()
1883 return cpufreq_verify_current_freq(policy, true); in __cpufreq_get()
1894 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_get() local
1897 if (policy) { in cpufreq_get()
1898 down_read(&policy->rwsem); in cpufreq_get()
1900 ret_freq = __cpufreq_get(policy); in cpufreq_get()
1901 up_read(&policy->rwsem); in cpufreq_get()
1903 cpufreq_cpu_put(policy); in cpufreq_get()
1921 int cpufreq_generic_suspend(struct cpufreq_policy *policy) in cpufreq_generic_suspend() argument
1925 if (!policy->suspend_freq) { in cpufreq_generic_suspend()
1931 policy->suspend_freq); in cpufreq_generic_suspend()
1933 ret = __cpufreq_driver_target(policy, policy->suspend_freq, in cpufreq_generic_suspend()
1937 __func__, policy->suspend_freq, ret); in cpufreq_generic_suspend()
1953 struct cpufreq_policy *policy; in cpufreq_suspend() local
1963 for_each_active_policy(policy) { in cpufreq_suspend()
1965 down_write(&policy->rwsem); in cpufreq_suspend()
1966 cpufreq_stop_governor(policy); in cpufreq_suspend()
1967 up_write(&policy->rwsem); in cpufreq_suspend()
1970 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) in cpufreq_suspend()
1987 struct cpufreq_policy *policy; in cpufreq_resume() local
2003 for_each_active_policy(policy) { in cpufreq_resume()
2004 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { in cpufreq_resume()
2008 down_write(&policy->rwsem); in cpufreq_resume()
2009 ret = cpufreq_start_governor(policy); in cpufreq_resume()
2010 up_write(&policy->rwsem); in cpufreq_resume()
2013 pr_err("%s: Failed to start governor for CPU%u's policy\n", in cpufreq_resume()
2014 __func__, policy->cpu); in cpufreq_resume()
2072 * of notifiers that run on cpufreq policy changes. in cpufreq_resume()
2158 * @policy: cpufreq policy to switch the frequency for.
2168 * This function must not be called if policy->fast_switch_enabled is unset.
2171 * twice in parallel for the same policy and that it will never be called in
2172 * parallel with either ->target() or ->target_index() for the same policy.
2179 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, in cpufreq_driver_fast_switch() argument
2185 target_freq = clamp_val(target_freq, policy->min, policy->max); in cpufreq_driver_fast_switch()
2186 freq = cpufreq_driver->fast_switch(policy, target_freq); in cpufreq_driver_fast_switch()
2191 policy->cur = freq; in cpufreq_driver_fast_switch()
2192 arch_set_freq_scale(policy->related_cpus, freq, in cpufreq_driver_fast_switch()
2193 arch_scale_freq_ref(policy->cpu)); in cpufreq_driver_fast_switch()
2194 cpufreq_stats_record_transition(policy, freq); in cpufreq_driver_fast_switch()
2197 for_each_cpu(cpu, policy->cpus) in cpufreq_driver_fast_switch()
2219 * This function must not be called if policy->fast_switch_enabled is unset.
2246 static int __target_intermediate(struct cpufreq_policy *policy, in __target_intermediate() argument
2251 freqs->new = cpufreq_driver->get_intermediate(policy, index); in __target_intermediate()
2258 __func__, policy->cpu, freqs->old, freqs->new); in __target_intermediate()
2260 cpufreq_freq_transition_begin(policy, freqs); in __target_intermediate()
2261 ret = cpufreq_driver->target_intermediate(policy, index); in __target_intermediate()
2262 cpufreq_freq_transition_end(policy, freqs, ret); in __target_intermediate()
2271 static int __target_index(struct cpufreq_policy *policy, int index) in __target_index() argument
2273 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; in __target_index()
2275 unsigned int newfreq = policy->freq_table[index].frequency; in __target_index()
2279 if (newfreq == policy->cur) in __target_index()
2283 restore_freq = policy->cur; in __target_index()
2289 retval = __target_intermediate(policy, &freqs, index); in __target_index()
2301 __func__, policy->cpu, freqs.old, freqs.new); in __target_index()
2303 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
2306 retval = cpufreq_driver->target_index(policy, index); in __target_index()
2312 cpufreq_freq_transition_end(policy, &freqs, retval); in __target_index()
2323 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
2324 cpufreq_freq_transition_end(policy, &freqs, 0); in __target_index()
2331 int __cpufreq_driver_target(struct cpufreq_policy *policy, in __cpufreq_driver_target() argument
2340 target_freq = __resolve_freq(policy, target_freq, relation); in __cpufreq_driver_target()
2343 policy->cpu, target_freq, relation, old_target_freq); in __cpufreq_driver_target()
2351 if (target_freq == policy->cur && in __cpufreq_driver_target()
2360 if (!policy->efficiencies_available) in __cpufreq_driver_target()
2363 return cpufreq_driver->target(policy, target_freq, relation); in __cpufreq_driver_target()
2369 return __target_index(policy, policy->cached_resolved_idx); in __cpufreq_driver_target()
2373 int cpufreq_driver_target(struct cpufreq_policy *policy, in cpufreq_driver_target() argument
2379 down_write(&policy->rwsem); in cpufreq_driver_target()
2381 ret = __cpufreq_driver_target(policy, target_freq, relation); in cpufreq_driver_target()
2383 up_write(&policy->rwsem); in cpufreq_driver_target()
2394 static int cpufreq_init_governor(struct cpufreq_policy *policy) in cpufreq_init_governor() argument
2405 if (!policy->governor) in cpufreq_init_governor()
2409 if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING && in cpufreq_init_governor()
2415 policy->governor->name, gov->name); in cpufreq_init_governor()
2416 policy->governor = gov; in cpufreq_init_governor()
2422 if (!try_module_get(policy->governor->owner)) in cpufreq_init_governor()
2425 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_init_governor()
2427 if (policy->governor->init) { in cpufreq_init_governor()
2428 ret = policy->governor->init(policy); in cpufreq_init_governor()
2430 module_put(policy->governor->owner); in cpufreq_init_governor()
2435 policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET); in cpufreq_init_governor()
2440 static void cpufreq_exit_governor(struct cpufreq_policy *policy) in cpufreq_exit_governor() argument
2442 if (cpufreq_suspended || !policy->governor) in cpufreq_exit_governor()
2445 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_exit_governor()
2447 if (policy->governor->exit) in cpufreq_exit_governor()
2448 policy->governor->exit(policy); in cpufreq_exit_governor()
2450 module_put(policy->governor->owner); in cpufreq_exit_governor()
2453 int cpufreq_start_governor(struct cpufreq_policy *policy) in cpufreq_start_governor() argument
2460 if (!policy->governor) in cpufreq_start_governor()
2463 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_start_governor()
2466 cpufreq_verify_current_freq(policy, false); in cpufreq_start_governor()
2468 if (policy->governor->start) { in cpufreq_start_governor()
2469 ret = policy->governor->start(policy); in cpufreq_start_governor()
2474 if (policy->governor->limits) in cpufreq_start_governor()
2475 policy->governor->limits(policy); in cpufreq_start_governor()
2480 void cpufreq_stop_governor(struct cpufreq_policy *policy) in cpufreq_stop_governor() argument
2482 if (cpufreq_suspended || !policy->governor) in cpufreq_stop_governor()
2485 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_stop_governor()
2487 if (policy->governor->stop) in cpufreq_stop_governor()
2488 policy->governor->stop(policy); in cpufreq_stop_governor()
2491 static void cpufreq_governor_limits(struct cpufreq_policy *policy) in cpufreq_governor_limits() argument
2493 if (cpufreq_suspended || !policy->governor) in cpufreq_governor_limits()
2496 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_governor_limits()
2498 if (policy->governor->limits) in cpufreq_governor_limits()
2499 policy->governor->limits(policy); in cpufreq_governor_limits()
2527 struct cpufreq_policy *policy; in cpufreq_unregister_governor() local
2538 for_each_inactive_policy(policy) { in cpufreq_unregister_governor()
2539 if (!strcmp(policy->last_governor, governor->name)) { in cpufreq_unregister_governor()
2540 policy->governor = NULL; in cpufreq_unregister_governor()
2541 strcpy(policy->last_governor, "\0"); in cpufreq_unregister_governor()
2554 * POLICY INTERFACE *
2559 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2561 * @cpu: CPU to find the policy for
2563 * Reads the current cpufreq policy.
2565 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) in cpufreq_get_policy() argument
2568 if (!policy) in cpufreq_get_policy()
2575 memcpy(policy, cpu_policy, sizeof(*policy)); in cpufreq_get_policy()
2586 * @policy: cpufreq policy of the CPUs.
2588 * Update the value of cpufreq pressure for all @cpus in the policy.
2590 static void cpufreq_update_pressure(struct cpufreq_policy *policy) in cpufreq_update_pressure() argument
2596 cpu = cpumask_first(policy->related_cpus); in cpufreq_update_pressure()
2598 capped_freq = policy->max; in cpufreq_update_pressure()
2612 for_each_cpu(cpu, policy->related_cpus) in cpufreq_update_pressure()
2617 * cpufreq_set_policy - Modify cpufreq policy parameters.
2618 * @policy: Policy object to modify.
2619 * @new_gov: Policy governor pointer.
2620 * @new_pol: Policy value (for drivers with built-in governors).
2623 * limits to be set for the policy, update @policy with the verified limits
2625 * carry out a governor update for @policy. That is, run the current governor's
2627 * @policy) or replace the governor for @policy with @new_gov.
2629 * The cpuinfo part of @policy is not updated by this function.
2631 static int cpufreq_set_policy(struct cpufreq_policy *policy, in cpufreq_set_policy() argument
2639 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); in cpufreq_set_policy()
2640 new_data.freq_table = policy->freq_table; in cpufreq_set_policy()
2641 new_data.cpu = policy->cpu; in cpufreq_set_policy()
2646 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN); in cpufreq_set_policy()
2647 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX); in cpufreq_set_policy()
2649 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", in cpufreq_set_policy()
2661 * Resolve policy min/max to available frequencies. It ensures in cpufreq_set_policy()
2665 policy->min = new_data.min; in cpufreq_set_policy()
2666 policy->max = new_data.max; in cpufreq_set_policy()
2667 policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); in cpufreq_set_policy()
2668 policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); in cpufreq_set_policy()
2669 trace_cpu_frequency_limits(policy); in cpufreq_set_policy()
2671 cpufreq_update_pressure(policy); in cpufreq_set_policy()
2673 policy->cached_target_freq = UINT_MAX; in cpufreq_set_policy()
2676 policy->min, policy->max); in cpufreq_set_policy()
2679 policy->policy = new_pol; in cpufreq_set_policy()
2681 return cpufreq_driver->setpolicy(policy); in cpufreq_set_policy()
2684 if (new_gov == policy->governor) { in cpufreq_set_policy()
2686 cpufreq_governor_limits(policy); in cpufreq_set_policy()
2693 old_gov = policy->governor; in cpufreq_set_policy()
2696 cpufreq_stop_governor(policy); in cpufreq_set_policy()
2697 cpufreq_exit_governor(policy); in cpufreq_set_policy()
2701 policy->governor = new_gov; in cpufreq_set_policy()
2702 ret = cpufreq_init_governor(policy); in cpufreq_set_policy()
2704 ret = cpufreq_start_governor(policy); in cpufreq_set_policy()
2709 cpufreq_exit_governor(policy); in cpufreq_set_policy()
2713 pr_debug("starting governor %s failed\n", policy->governor->name); in cpufreq_set_policy()
2715 policy->governor = old_gov; in cpufreq_set_policy()
2716 if (cpufreq_init_governor(policy)) in cpufreq_set_policy()
2717 policy->governor = NULL; in cpufreq_set_policy()
2719 cpufreq_start_governor(policy); in cpufreq_set_policy()
2726 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2727 * @cpu: CPU to re-evaluate the policy for.
2729 * Update the current frequency for the cpufreq policy of @cpu and use
2731 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2732 * for the policy in question, among other things.
2736 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); in cpufreq_update_policy() local
2738 if (!policy) in cpufreq_update_policy()
2746 (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) in cpufreq_update_policy()
2749 refresh_frequency_limits(policy); in cpufreq_update_policy()
2752 cpufreq_cpu_release(policy); in cpufreq_update_policy()
2757 * cpufreq_update_limits - Update policy limits for a given CPU.
2758 * @cpu: CPU to update the policy limits for.
2765 struct cpufreq_policy *policy; in cpufreq_update_limits() local
2767 policy = cpufreq_cpu_get(cpu); in cpufreq_update_limits()
2768 if (!policy) in cpufreq_update_limits()
2776 cpufreq_cpu_put(policy); in cpufreq_update_limits()
2783 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state) in cpufreq_boost_set_sw() argument
2787 if (!policy->freq_table) in cpufreq_boost_set_sw()
2790 ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); in cpufreq_boost_set_sw()
2792 pr_err("%s: Policy frequency update failed\n", __func__); in cpufreq_boost_set_sw()
2796 ret = freq_qos_update_request(policy->max_freq_req, policy->max); in cpufreq_boost_set_sw()
2805 struct cpufreq_policy *policy; in cpufreq_boost_trigger_state() local
2817 for_each_active_policy(policy) { in cpufreq_boost_trigger_state()
2818 policy->boost_enabled = state; in cpufreq_boost_trigger_state()
2819 ret = cpufreq_driver->set_boost(policy, state); in cpufreq_boost_trigger_state()
2821 policy->boost_enabled = !policy->boost_enabled; in cpufreq_boost_trigger_state()