Lines Matching +full:cpu +full:- +full:ns
1 // SPDX-License-Identifier: GPL-2.0-only
3 #include <linux/cpu.h>
34 static DEFINE_PER_CPU(struct cpu, cpu_devices);
40 * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
46 * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
56 current->comm, current->pid); in store_smt_snooze_delay()
65 current->comm, current->pid); in show_smt_snooze_delay()
77 pr_warn("smt-snooze-delay command line option has no effect\n"); in setup_smt_snooze_delay()
80 __setup("smt-snooze-delay=", setup_smt_snooze_delay);
100 struct cpu *cpu = container_of(dev, struct cpu, dev); \
102 smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
109 struct cpu *cpu = container_of(dev, struct cpu, dev); \
113 return -EINVAL; \
114 smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
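The show/store helpers generated above follow one pattern: recover the enclosing struct cpu from its embedded struct device with container_of(), then run the actual register read or write on the owning CPU via smp_call_function_single(). A minimal userspace sketch of the container_of() step only (names here are illustrative, not kernel code):

/*
 * Sketch of the container_of() pattern used by the SYSFS_* macros:
 * given a pointer to an embedded member, recover the enclosing struct.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct cpu { int hotpluggable; struct device dev; };

static void show_attr(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	/* In the kernel, this is where smp_call_function_single() would
	 * run the SPR access on cpu->dev.id; here we just print the id. */
	printf("cpu%d\n", cpu->dev.id);
}

int main(void)
{
	struct cpu c = { .hotpluggable = 1, .dev = { .id = 3 } };

	show_attr(&c.dev);	/* prints "cpu3" */
	return 0;
}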
133 * will update all per cpu DSCR default values across the
139 * read_dscr() - Fetch the cpu specific DSCR default
140 * @val: Returned cpu specific DSCR default value
142 * This function returns the per cpu DSCR default value
143 * for any cpu which is contained in its PACA structure.
147 *(unsigned long *)val = get_paca()->dscr_default; in read_dscr()
152 * write_dscr() - Update the cpu specific DSCR default
153 * @val: New cpu specific DSCR default value to update
155 * This function updates the per cpu DSCR default value
156 * for any cpu which is contained in its PACA structure.
160 get_paca()->dscr_default = *(unsigned long *)val; in write_dscr()
161 if (!current->thread.dscr_inherit) { in write_dscr()
162 current->thread.dscr = *(unsigned long *)val; in write_dscr()
172 attr->attr.mode |= 0200; in add_write_permission_dev_attr()
176 * show_dscr_default() - Fetch the system wide DSCR default
190 * store_dscr_default() - Update the system wide DSCR default
207 return -EINVAL; in store_dscr_default()
222 int cpu; in sysfs_create_dscr_default() local
225 for_each_possible_cpu(cpu) in sysfs_create_dscr_default()
226 paca_ptrs[cpu]->dscr_default = dscr_default; in sysfs_create_dscr_default()
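The DSCR plumbing above keeps a system-wide default, mirrored into every CPU's PACA, while threads that explicitly set their own DSCR (dscr_inherit) keep their value. From userspace the visible ends are /sys/devices/system/cpu/dscr_default and, where the CPU supports the DSCR, a per-CPU cpuN/dscr file. A hedged reader sketch, assuming a powerpc system where these files exist:

/*
 * Read the system-wide DSCR default and, if present, cpu0's value.
 * On systems without DSCR support the files are simply absent and the
 * reads fail gracefully.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%s: not available\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	dump_file("/sys/devices/system/cpu/dscr_default");
	dump_file("/sys/devices/system/cpu/cpu0/dscr");
	return 0;
}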
243 static unsigned int get_idle_ticks_bit(u64 ns) in get_idle_ticks_bit() argument
247 if (ns >= 10000) in get_idle_ticks_bit()
248 cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec; in get_idle_ticks_bit()
250 cycle = div_u64(ns * tb_ticks_per_usec, 1000); in get_idle_ticks_bit()
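get_idle_ticks_bit() turns a nanosecond budget into timebase cycles, rounding to the nearest microsecond for values of 10000 ns or more so the multiply cannot overflow, and multiplying before dividing for small values to keep precision. A standalone rework of that arithmetic; tb_ticks_per_usec is a made-up 40 ticks/us purely for illustration, and the final log2 step stands in for the bit-index reduction done by the rest of the function (not matched above):

/*
 * ns -> timebase-cycles conversion, as in get_idle_ticks_bit(), followed
 * by an integer log2 to pick a timebase bit.
 */
#include <stdio.h>
#include <stdint.h>

static const uint64_t tb_ticks_per_usec = 40;	/* assumed, not from sysfs.c */

static unsigned int idle_ticks_bit(uint64_t ns)
{
	uint64_t cycle;

	if (ns >= 10000)
		/* round to the nearest microsecond first */
		cycle = (ns + 500) / 1000 * tb_ticks_per_usec;
	else
		cycle = ns * tb_ticks_per_usec / 1000;

	if (!cycle)
		return 0;

	return 63 - __builtin_clzll(cycle);	/* ilog2(cycle) */
}

int main(void)
{
	uint64_t samples[] = { 100, 1000, 10000, 1000000 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%8llu ns -> bit %u\n",
		       (unsigned long long)samples[i],
		       idle_ticks_bit(samples[i]));
	return 0;
}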
269 unsigned int cpu = dev->id; in show_pw20_state() local
271 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); in show_pw20_state()
298 unsigned int cpu = dev->id; in store_pw20_state() local
301 return -EINVAL; in store_pw20_state()
304 return -EINVAL; in store_pw20_state()
306 smp_call_function_single(cpu, do_store_pw20_state, &value, 1); in store_pw20_state()
318 unsigned int cpu = dev->id; in show_pw20_wait_time() local
321 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); in show_pw20_wait_time()
325 tb_cycle = (tb_cycle << (MAX_BIT - value + 1)); in show_pw20_wait_time()
326 /* convert TB cycles to ns */ in show_pw20_wait_time()
355 pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT); in set_pw20_wait_entry_bit()
367 unsigned int cpu = dev->id; in store_pw20_wait_time() local
370 return -EINVAL; in store_pw20_wait_time()
373 return -EINVAL; in store_pw20_wait_time()
377 return -EINVAL; in store_pw20_wait_time()
381 smp_call_function_single(cpu, set_pw20_wait_entry_bit, in store_pw20_wait_time()
391 unsigned int cpu = dev->id; in show_altivec_idle() local
393 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); in show_altivec_idle()
420 unsigned int cpu = dev->id; in store_altivec_idle() local
423 return -EINVAL; in store_altivec_idle()
426 return -EINVAL; in store_altivec_idle()
428 smp_call_function_single(cpu, do_store_altivec_idle, &value, 1); in store_altivec_idle()
440 unsigned int cpu = dev->id; in show_altivec_idle_wait_time() local
443 smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1); in show_altivec_idle_wait_time()
447 tb_cycle = (tb_cycle << (MAX_BIT - value + 1)); in show_altivec_idle_wait_time()
448 /* convert TB cycles to ns */ in show_altivec_idle_wait_time()
477 altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT); in set_altivec_idle_wait_entry_bit()
489 unsigned int cpu = dev->id; in store_altivec_idle_wait_time() local
492 return -EINVAL; in store_altivec_idle_wait_time()
495 return -EINVAL; in store_altivec_idle_wait_time()
499 return -EINVAL; in store_altivec_idle_wait_time()
503 smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit, in store_altivec_idle_wait_time()
519 * 1~48(ns): TB[63]
520 * 49~97(ns): TB[62]
521 * 98~195(ns): TB[61]
522 * 196~390(ns): TB[60]
523 * 391~780(ns): TB[59]
524 * 781~1560(ns): TB[58]
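The table above documents how wait-time ranges map onto timebase bits. The resulting pw20_state, pw20_wait_time, altivec_idle and altivec_idle_wait_time attributes are created by register_cpu_online() only when the CPU reports an e6500 PVR, so from userspace they have to be treated as optional. A hedged sketch that probes them on cpu0:

/*
 * Probe the e6500-only power-management attributes on cpu0.  On any
 * other CPU every fopen() here fails with ENOENT.
 */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu0/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%-22s: not present\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-22s: %s", name, buf);
	fclose(f);
}

int main(void)
{
	const char *attrs[] = {
		"pw20_state", "pw20_wait_time",
		"altivec_idle", "altivec_idle_wait_time",
	};

	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		show(attrs[i]);
	return 0;
}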
779 struct cpu *cpu = container_of(dev, struct cpu, dev); in idle_purr_show() local
782 smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1); in idle_purr_show()
809 struct cpu *cpu = container_of(dev, struct cpu, dev); in idle_spurr_show() local
812 smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1); in idle_spurr_show()
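idle_purr_show() and idle_spurr_show() read the per-CPU idle PURR/SPURR accumulators on the owning CPU. The matching sysfs files are created conditionally (on pseries LPARs), so userspace has to treat them as optional. A hedged sampling sketch, assuming the kernel prints the counter in hex and that cpu0 is online:

/*
 * Sample cpu0's idle_purr twice, one second apart, and print the delta.
 * A missing file is reported as "not supported" rather than an error.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static unsigned long long read_hex(const char *path)
{
	FILE *f = fopen(path, "r");
	unsigned long long v = 0;

	if (!f) {
		fprintf(stderr, "%s: not supported here\n", path);
		exit(1);
	}
	if (fscanf(f, "%llx", &v) != 1)
		v = 0;
	fclose(f);
	return v;
}

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/idle_purr";
	unsigned long long a = read_hex(path);

	sleep(1);
	printf("idle_purr delta over 1s: %llu ticks\n", read_hex(path) - a);
	return 0;
}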
836 static int register_cpu_online(unsigned int cpu) in register_cpu_online() argument
838 struct cpu *c = &per_cpu(cpu_devices, cpu); in register_cpu_online()
839 struct device *s = &c->dev; in register_cpu_online()
844 if (!s->of_node) in register_cpu_online()
845 s->of_node = of_get_cpu_node(cpu, NULL); in register_cpu_online()
853 switch (cur_cpu_spec->pmc_type) { in register_cpu_online()
886 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) in register_cpu_online()
922 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { in register_cpu_online()
930 cacheinfo_cpu_online(cpu); in register_cpu_online()
935 static int unregister_cpu_online(unsigned int cpu) in unregister_cpu_online() argument
937 struct cpu *c = &per_cpu(cpu_devices, cpu); in unregister_cpu_online()
938 struct device *s = &c->dev; in unregister_cpu_online()
942 if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu)) in unregister_cpu_online()
943 return -EBUSY; in unregister_cpu_online()
951 switch (cur_cpu_spec->pmc_type) { in unregister_cpu_online()
984 for (i = 0; i < cur_cpu_spec->num_pmcs; i++) in unregister_cpu_online()
1018 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { in unregister_cpu_online()
1026 cacheinfo_cpu_offline(cpu); in unregister_cpu_online()
1027 of_node_put(s->of_node); in unregister_cpu_online()
1028 s->of_node = NULL; in unregister_cpu_online()
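register_cpu_online() and unregister_cpu_online() add and remove the per-CPU attribute files as CPUs come and go; online/offline pairs like this are typically wired up through the CPU hotplug state machine. A minimal, hedged module-style sketch of that wiring, not taken from sysfs.c and with purely illustrative names:

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Register a startup/teardown callback pair with a dynamically
 * allocated CPU hotplug state; the startup callback also runs once per
 * already-online CPU at setup time.
 */
#include <linux/module.h>
#include <linux/cpuhotplug.h>
#include <linux/printk.h>

static enum cpuhp_state hp_state;

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u came online\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: cpu%u going offline\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo/cpu:online",
				    demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;
	hp_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");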
1041 return -EINVAL; in arch_cpu_probe()
1049 return -EINVAL; in arch_cpu_release()
1057 int cpu; in cpu_add_dev_attr() local
1061 for_each_possible_cpu(cpu) { in cpu_add_dev_attr()
1062 device_create_file(get_cpu_device(cpu), attr); in cpu_add_dev_attr()
1072 int cpu; in cpu_add_dev_attr_group() local
1078 for_each_possible_cpu(cpu) { in cpu_add_dev_attr_group()
1079 dev = get_cpu_device(cpu); in cpu_add_dev_attr_group()
1080 ret = sysfs_create_group(&dev->kobj, attrs); in cpu_add_dev_attr_group()
1092 int cpu; in cpu_remove_dev_attr() local
1096 for_each_possible_cpu(cpu) { in cpu_remove_dev_attr()
1097 device_remove_file(get_cpu_device(cpu), attr); in cpu_remove_dev_attr()
1106 int cpu; in cpu_remove_dev_attr_group() local
1111 for_each_possible_cpu(cpu) { in cpu_remove_dev_attr_group()
1112 dev = get_cpu_device(cpu); in cpu_remove_dev_attr_group()
1113 sysfs_remove_group(&dev->kobj, attrs); in cpu_remove_dev_attr_group()
1127 return sysfs_create_link(&node->dev.kobj, &dev->kobj, in sysfs_add_device_to_node()
1128 kobject_name(&dev->kobj)); in sysfs_add_device_to_node()
1135 sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj)); in sysfs_remove_device_from_node()
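sysfs_add_device_to_node() and sysfs_remove_device_from_node() simply create and delete a symlink, named after the device's kobject, inside the NUMA node's sysfs directory. From userspace the result shows up as node/nodeN/cpuM links. A hedged sketch that lists them for node0, assuming a NUMA-enabled kernel (otherwise the directory may not exist):

/*
 * List the cpuN symlinks under /sys/devices/system/node/node0.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *dirpath = "/sys/devices/system/node/node0";
	struct dirent *de;
	DIR *d = opendir(dirpath);

	if (!d) {
		perror(dirpath);
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		/* the links created for CPUs are named "cpuN" */
		if (strncmp(de->d_name, "cpu", 3) == 0 &&
		    de->d_name[3] >= '0' && de->d_name[3] <= '9')
			printf("%s/%s\n", dirpath, de->d_name);
	}
	closedir(d);
	return 0;
}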
1140 /* Only valid if CPU is present. */
1144 struct cpu *cpu = container_of(dev, struct cpu, dev); in show_physical_id() local
1146 return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id)); in show_physical_id()
1152 int cpu, r; in topology_init() local
1154 for_each_possible_cpu(cpu) { in topology_init()
1155 struct cpu *c = &per_cpu(cpu_devices, cpu); in topology_init()
1160 * the RTAS calls for CPU hotplug. But, there may be a in topology_init()
1162 * CPU. For instance, the boot cpu might never be valid in topology_init()
1165 if (smp_ops && smp_ops->cpu_offline_self) in topology_init()
1166 c->hotpluggable = 1; in topology_init()
1169 if (cpu_online(cpu) || c->hotpluggable) { in topology_init()
1170 register_cpu(c, cpu); in topology_init()
1172 device_create_file(&c->dev, &dev_attr_physical_id); in topology_init()
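topology_init() registers a device for every online or hotpluggable CPU and attaches the powerpc-specific physical_id attribute, which reports the hardware thread number from get_hard_smp_processor_id(). A hedged sketch that dumps the logical-to-hardware mapping for the first few CPUs; the attribute is powerpc-specific, so expect it to be missing elsewhere:

/*
 * Print the cpuN -> hardware id mapping exposed via physical_id.
 */
#include <stdio.h>

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++) {	/* first 8 CPUs, arbitrary */
		char path[96];
		int phys;
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/physical_id", cpu);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%d", &phys) == 1)
			printf("cpu%d -> hw id %d\n", cpu, phys);
		fclose(f);
	}
	return 0;
}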