Lines Matching +full:static +full:- +full:trace +full:- +full:id
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * In-Memory Collection (IMC) Performance Monitor counter support.
13 #include <asm/imc-pmu.h>
22 * Used to avoid races in counting the nest-pmu units during hotplug
25 static DEFINE_MUTEX(nest_init_lock);
26 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
27 static struct imc_pmu **per_nest_pmu_arr;
28 static cpumask_t nest_imc_cpumask;
29 static struct imc_pmu_ref *nest_imc_refc;
30 static int nest_pmus;
34 static cpumask_t core_imc_cpumask;
35 static struct imc_pmu_ref *core_imc_refc;
36 static struct imc_pmu *core_imc_pmu;
40 static DEFINE_PER_CPU(u64 *, thread_imc_mem);
41 static struct imc_pmu *thread_imc_pmu;
42 static int thread_imc_mem_size;
44 /* Trace IMC data structures */
45 static DEFINE_PER_CPU(u64 *, trace_imc_mem);
46 static struct imc_pmu_ref *trace_imc_refc;
47 static int trace_imc_mem_size;
51 * core and trace-imc
53 static struct imc_pmu_ref imc_global_refc = {
55 .id = 0,
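
For context, struct imc_pmu_ref (arch/powerpc/include/asm/imc-pmu.h) pairs a spinlock with a domain id and a reference count, and imc_global_refc is the instance that keeps core-, thread- and trace-imc mutually exclusive, since all three program the same per-thread CPMC hardware. A minimal sketch of the claim pattern that the event_init paths below open-code (imc_try_take_domain is a hypothetical helper, not a function in this file):

struct imc_pmu_ref {
	spinlock_t lock;	/* serializes id/refc updates */
	unsigned int id;	/* IMC_DOMAIN_* that owns the hardware; 0 = free */
	int refc;		/* number of live events in the owning domain */
};

/* Hypothetical helper mirroring core/thread/trace_imc_event_init below. */
static int imc_try_take_domain(unsigned int domain)
{
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == domain) {
		imc_global_refc.id = domain;	/* first event claims it */
		imc_global_refc.refc++;
		spin_unlock(&imc_global_refc.lock);
		return 0;
	}
	spin_unlock(&imc_global_refc.lock);
	return -EBUSY;	/* another IMC mode is active */
}

reset_global_refc() further down is the matching release: it decrements refc under the same lock and clears id once the count reaches zero.
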
59 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event) in imc_event_to_pmu()
61 return container_of(event->pmu, struct imc_pmu, pmu); in imc_event_to_pmu()
64 PMU_FORMAT_ATTR(event, "config:0-61");
65 PMU_FORMAT_ATTR(offset, "config:0-31");
67 PMU_FORMAT_ATTR(mode, "config:33-40");
68 static struct attribute *imc_format_attrs[] = {
76 static const struct attribute_group imc_format_group = {
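
Each PMU_FORMAT_ATTR() above expands (roughly, per include/linux/perf_event.h) to a read-only device attribute whose show routine prints the literal bit-range string:

/* Approximate expansion of PMU_FORMAT_ATTR(event, "config:0-61"): */
static ssize_t event_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	return sprintf(page, "config:0-61");
}
static struct device_attribute format_attr_event = __ATTR_RO(event);

The perf tool reads these files from /sys/bus/event_source/devices/<pmu>/format/ so that a command-line term such as event=0x230 is placed into bits 0-61 of perf_event_attr.config.
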
81 /* Format attribute for imc trace-mode */
82 PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
83 PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
84 PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
85 PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
86 static struct attribute *trace_imc_format_attrs[] = {
95 static const struct attribute_group trace_imc_format_group = {
101 static ssize_t imc_pmu_cpumask_get_attr(struct device *dev, in imc_pmu_cpumask_get_attr()
109 switch (imc_pmu->domain) { in imc_pmu_cpumask_get_attr()
123 static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);
125 static struct attribute *imc_pmu_cpumask_attrs[] = {
130 static const struct attribute_group imc_pmu_cpumask_attr_group = {
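
The matched lines above elide the body of imc_pmu_cpumask_get_attr(); reconstructed roughly (treat as a sketch), it maps the sysfs device back to its struct imc_pmu and prints the active cpumask for the nest or core domain:

static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

perf reads this "cpumask" file so that per-node (nest) and per-core (core) events are opened on exactly one designated CPU.
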
135 static struct attribute *device_str_attr_create(const char *name, const char *str) in device_str_attr_create()
142 sysfs_attr_init(&attr->attr.attr); in device_str_attr_create()
144 attr->event_str = str; in device_str_attr_create()
145 attr->attr.attr.name = name; in device_str_attr_create()
146 attr->attr.attr.mode = 0444; in device_str_attr_create()
147 attr->attr.show = perf_event_sysfs_show; in device_str_attr_create()
149 return &attr->attr.attr; in device_str_attr_create()
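
device_str_attr_create() fills a struct perf_pmu_events_attr (linux/perf_event.h); its show routine, perf_event_sysfs_show(), simply prints event_str, so reading events/<name> yields the stored string:

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;	/* e.g. "event=0x180", built in update_events_in_group() */
};
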
152 static int imc_parse_event(struct device_node *np, const char *scale, in imc_parse_event()
162 event->value = base + reg; in imc_parse_event()
164 if (of_property_read_string(np, "event-name", &s)) in imc_parse_event()
167 event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s); in imc_parse_event()
168 if (!event->name) in imc_parse_event()
175 event->scale = kstrdup(s, GFP_KERNEL); in imc_parse_event()
176 if (!event->scale) in imc_parse_event()
184 event->unit = kstrdup(s, GFP_KERNEL); in imc_parse_event()
185 if (!event->unit) in imc_parse_event()
191 kfree(event->unit); in imc_parse_event()
192 kfree(event->scale); in imc_parse_event()
193 kfree(event->name); in imc_parse_event()
194 return -EINVAL; in imc_parse_event()
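
imc_parse_event() fills one of these per device-tree child node; only the name is mandatory, scale and unit are optional, and the error path above frees whatever was already allocated. The structure (from asm/imc-pmu.h) plus an illustrative, hypothetical result:

struct imc_events {
	u32 value;	/* parent base + "reg", exported as "event=0x%x" */
	char *name;	/* events-prefix + "event-name" */
	char *unit;	/* optional "unit" property */
	char *scale;	/* optional "scale" property */
};

/* A hypothetical node { event-name = "PM_CYC"; reg = <0x80>; } under a
 * parent with base 0x100 and prefix "nest_" would parse to roughly: */
static struct imc_events example = {
	.value = 0x180,
	.name  = "nest_PM_CYC",
};
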
201 static void imc_free_events(struct imc_events *events, int nr_entries) in imc_free_events()
221 static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) in update_events_in_group()
244 if (of_property_read_string(node, "events-prefix", &prefix)) { in update_events_in_group()
260 pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); in update_events_in_group()
261 if (!pmu->events) { in update_events_in_group()
263 return -ENOMEM; in update_events_in_group()
269 ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]); in update_events_in_group()
279 imc_free_events(pmu->events, ct); in update_events_in_group()
280 return -ENOMEM; in update_events_in_group()
287 * "ct" has the total event structs added from the events-parent node. in update_events_in_group()
294 imc_free_events(pmu->events, ct); in update_events_in_group()
295 return -ENOMEM; in update_events_in_group()
298 attr_group->name = "events"; in update_events_in_group()
299 attr_group->attrs = attrs; in update_events_in_group()
301 ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); in update_events_in_group()
304 dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); in update_events_in_group()
309 if (pmu->events[i].scale) { in update_events_in_group()
310 ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); in update_events_in_group()
313 dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); in update_events_in_group()
320 if (pmu->events[i].unit) { in update_events_in_group()
321 ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); in update_events_in_group()
324 dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); in update_events_in_group()
333 pmu->attr_groups[IMC_EVENT_ATTR] = attr_group; in update_events_in_group()
339 static struct imc_pmu_ref *get_nest_pmu_ref(int cpu) in get_nest_pmu_ref()
344 static void nest_change_cpu_context(int old_cpu, int new_cpu) in nest_change_cpu_context()
352 perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu); in nest_change_cpu_context()
357 static int ppc_nest_imc_cpu_offline(unsigned int cpu) in ppc_nest_imc_cpu_offline()
359 int nid, target = -1; in ppc_nest_imc_cpu_offline()
414 return -EINVAL; in ppc_nest_imc_cpu_offline()
416 ref->refc = 0; in ppc_nest_imc_cpu_offline()
421 static int ppc_nest_imc_cpu_online(unsigned int cpu) in ppc_nest_imc_cpu_online()
424 static struct cpumask tmp_mask; in ppc_nest_imc_cpu_online()
451 static int nest_pmu_cpumask_init(void) in nest_pmu_cpumask_init()
459 static void nest_imc_counters_release(struct perf_event *event) in nest_imc_counters_release()
464 if (event->cpu < 0) in nest_imc_counters_release()
467 node_id = cpu_to_node(event->cpu); in nest_imc_counters_release()
475 ref = get_nest_pmu_ref(event->cpu); in nest_imc_counters_release()
480 spin_lock(&ref->lock); in nest_imc_counters_release()
481 if (ref->refc == 0) { in nest_imc_counters_release()
487 * function sets the ref->count to zero, if the cpu which is in nest_imc_counters_release()
492 spin_unlock(&ref->lock); in nest_imc_counters_release()
495 ref->refc--; in nest_imc_counters_release()
496 if (ref->refc == 0) { in nest_imc_counters_release()
498 get_hard_smp_processor_id(event->cpu)); in nest_imc_counters_release()
500 spin_unlock(&ref->lock); in nest_imc_counters_release()
501 pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id); in nest_imc_counters_release()
504 } else if (ref->refc < 0) { in nest_imc_counters_release()
505 WARN(1, "nest-imc: Invalid event reference count\n"); in nest_imc_counters_release()
506 ref->refc = 0; in nest_imc_counters_release()
508 spin_unlock(&ref->lock); in nest_imc_counters_release()
511 static int nest_imc_event_init(struct perf_event *event) in nest_imc_event_init()
514 u32 l_config, config = event->attr.config; in nest_imc_event_init()
520 if (event->attr.type != event->pmu->type) in nest_imc_event_init()
521 return -ENOENT; in nest_imc_event_init()
524 if (event->hw.sample_period) in nest_imc_event_init()
525 return -EINVAL; in nest_imc_event_init()
527 if (event->cpu < 0) in nest_imc_event_init()
528 return -EINVAL; in nest_imc_event_init()
533 if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size) in nest_imc_event_init()
534 return -EINVAL; in nest_imc_event_init()
537 * Nest HW counter memory resides in a per-chip reserved-memory region (HOMER). in nest_imc_event_init()
540 chip_id = cpu_to_chip_id(event->cpu); in nest_imc_event_init()
544 return -ENODEV; in nest_imc_event_init()
546 pcni = pmu->mem_info; in nest_imc_event_init()
548 if (pcni->id == chip_id) { in nest_imc_event_init()
553 } while (pcni->vbase); in nest_imc_event_init()
556 return -ENODEV; in nest_imc_event_init()
562 event->hw.event_base = (u64)pcni->vbase + l_config; in nest_imc_event_init()
563 node_id = cpu_to_node(event->cpu); in nest_imc_event_init()
569 ref = get_nest_pmu_ref(event->cpu); in nest_imc_event_init()
571 return -EINVAL; in nest_imc_event_init()
573 spin_lock(&ref->lock); in nest_imc_event_init()
574 if (ref->refc == 0) { in nest_imc_event_init()
576 get_hard_smp_processor_id(event->cpu)); in nest_imc_event_init()
578 spin_unlock(&ref->lock); in nest_imc_event_init()
579 pr_err("nest-imc: Unable to start the counters for node %d\n", in nest_imc_event_init()
584 ++ref->refc; in nest_imc_event_init()
585 spin_unlock(&ref->lock); in nest_imc_event_init()
587 event->destroy = nest_imc_counters_release; in nest_imc_event_init()
599 static int core_imc_mem_init(int cpu, int size) in core_imc_mem_init()
610 mem_info = &core_imc_pmu->mem_info[core_id]; in core_imc_mem_init()
611 mem_info->id = core_id; in core_imc_mem_init()
618 return -ENOMEM; in core_imc_mem_init()
619 mem_info->vbase = page_address(page); in core_imc_mem_init()
621 core_imc_refc[core_id].id = core_id; in core_imc_mem_init()
625 __pa((void *)mem_info->vbase), in core_imc_mem_init()
628 free_pages((u64)mem_info->vbase, get_order(size)); in core_imc_mem_init()
629 mem_info->vbase = NULL; in core_imc_mem_init()
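
The fragments of core_imc_mem_init() above show the common allocation pattern for counter memory: zeroed pages from the core's local node, whose physical address is handed to firmware, freed again if the OPAL call fails. A condensed sketch (declarations trimmed):

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size));
	if (!page)
		return -ENOMEM;
	mem_info->vbase = page_address(page);

	/* Tell OPAL where this core's counter memory lives. */
	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}
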
635 static bool is_core_imc_mem_inited(int cpu) in is_core_imc_mem_inited()
640 mem_info = &core_imc_pmu->mem_info[core_id]; in is_core_imc_mem_inited()
641 if (!mem_info->vbase) in is_core_imc_mem_inited()
647 static int ppc_core_imc_cpu_online(unsigned int cpu) in ppc_core_imc_cpu_online()
650 static struct cpumask tmp_mask; in ppc_core_imc_cpu_online()
661 ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size); in ppc_core_imc_cpu_online()
673 static int ppc_core_imc_cpu_offline(unsigned int cpu) in ppc_core_imc_cpu_offline()
697 if (!core_imc_pmu->pmu.event_init) in ppc_core_imc_cpu_offline()
708 perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu); in ppc_core_imc_cpu_offline()
719 return -EINVAL; in ppc_core_imc_cpu_offline()
721 ref->refc = 0; in ppc_core_imc_cpu_offline()
724 * last cpu in this core and core-imc event running in ppc_core_imc_cpu_offline()
728 if (imc_global_refc.id == IMC_DOMAIN_CORE) in ppc_core_imc_cpu_offline()
729 imc_global_refc.refc--; in ppc_core_imc_cpu_offline()
736 static int core_imc_pmu_cpumask_init(void) in core_imc_pmu_cpumask_init()
744 static void reset_global_refc(struct perf_event *event) in reset_global_refc()
747 imc_global_refc.refc--; in reset_global_refc()
751 * event for this domain (thread/core/trace), in reset_global_refc()
752 * set the global id to zero. in reset_global_refc()
756 imc_global_refc.id = 0; in reset_global_refc()
761 static void core_imc_counters_release(struct perf_event *event) in core_imc_counters_release()
766 if (event->cpu < 0) in core_imc_counters_release()
774 core_id = event->cpu / threads_per_core; in core_imc_counters_release()
781 spin_lock(&ref->lock); in core_imc_counters_release()
782 if (ref->refc == 0) { in core_imc_counters_release()
788 * function sets the ref->count to zero, if the cpu which is in core_imc_counters_release()
793 spin_unlock(&ref->lock); in core_imc_counters_release()
796 ref->refc--; in core_imc_counters_release()
797 if (ref->refc == 0) { in core_imc_counters_release()
799 get_hard_smp_processor_id(event->cpu)); in core_imc_counters_release()
801 spin_unlock(&ref->lock); in core_imc_counters_release()
805 } else if (ref->refc < 0) { in core_imc_counters_release()
806 WARN(1, "core-imc: Invalid event reference count\n"); in core_imc_counters_release()
807 ref->refc = 0; in core_imc_counters_release()
809 spin_unlock(&ref->lock); in core_imc_counters_release()
814 static int core_imc_event_init(struct perf_event *event) in core_imc_event_init()
817 u64 config = event->attr.config; in core_imc_event_init()
822 if (event->attr.type != event->pmu->type) in core_imc_event_init()
823 return -ENOENT; in core_imc_event_init()
826 if (event->hw.sample_period) in core_imc_event_init()
827 return -EINVAL; in core_imc_event_init()
829 if (event->cpu < 0) in core_imc_event_init()
830 return -EINVAL; in core_imc_event_init()
832 event->hw.idx = -1; in core_imc_event_init()
836 if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size) in core_imc_event_init()
837 return -EINVAL; in core_imc_event_init()
839 if (!is_core_imc_mem_inited(event->cpu)) in core_imc_event_init()
840 return -ENODEV; in core_imc_event_init()
842 core_id = event->cpu / threads_per_core; in core_imc_event_init()
843 pcmi = &core_imc_pmu->mem_info[core_id]; in core_imc_event_init()
844 if (!pcmi->vbase) in core_imc_event_init()
845 return -ENODEV; in core_imc_event_init()
849 return -EINVAL; in core_imc_event_init()
857 spin_lock(&ref->lock); in core_imc_event_init()
858 if (ref->refc == 0) { in core_imc_event_init()
860 get_hard_smp_processor_id(event->cpu)); in core_imc_event_init()
862 spin_unlock(&ref->lock); in core_imc_event_init()
863 pr_err("core-imc: Unable to start the counters for core %d\n", in core_imc_event_init()
868 ++ref->refc; in core_imc_event_init()
869 spin_unlock(&ref->lock); in core_imc_event_init()
872 * Since the system can run either in accumulation or trace-mode in core_imc_event_init()
873 * of IMC at a time, core-imc events are allowed only if no other in core_imc_event_init()
874 * trace/thread imc events are enabled/monitored. in core_imc_event_init()
876 * Take the global lock, and check the refc.id in core_imc_event_init()
877 * to know whether any other trace/thread imc in core_imc_event_init()
881 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) { in core_imc_event_init()
883 * No other trace/thread imc events are running in in core_imc_event_init()
884 * the system, so set the refc.id to core-imc. in core_imc_event_init()
886 imc_global_refc.id = IMC_DOMAIN_CORE; in core_imc_event_init()
890 return -EBUSY; in core_imc_event_init()
894 event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); in core_imc_event_init()
895 event->destroy = core_imc_counters_release; in core_imc_event_init()
903 * written to the LDBAR for that cpu, when the thread-imc event
909 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
916 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
920 static int thread_imc_mem_alloc(int cpu_id, int size) in thread_imc_mem_alloc()
935 return -ENOMEM; in thread_imc_mem_alloc()
945 static int ppc_thread_imc_cpu_online(unsigned int cpu) in ppc_thread_imc_cpu_online()
950 static int ppc_thread_imc_cpu_offline(unsigned int cpu) in ppc_thread_imc_cpu_offline()
957 * For thread-imc, bit 0 of LDBAR will be set to 1 in the in ppc_thread_imc_cpu_offline()
963 /* Reduce the refc if thread-imc event running on this cpu */ in ppc_thread_imc_cpu_offline()
965 if (imc_global_refc.id == IMC_DOMAIN_THREAD) in ppc_thread_imc_cpu_offline()
966 imc_global_refc.refc--; in ppc_thread_imc_cpu_offline()
972 static int thread_imc_cpu_init(void) in thread_imc_cpu_init()
980 static int thread_imc_event_init(struct perf_event *event) in thread_imc_event_init()
982 u32 config = event->attr.config; in thread_imc_event_init()
986 if (event->attr.type != event->pmu->type) in thread_imc_event_init()
987 return -ENOENT; in thread_imc_event_init()
990 return -EACCES; in thread_imc_event_init()
993 if (event->hw.sample_period) in thread_imc_event_init()
994 return -EINVAL; in thread_imc_event_init()
996 event->hw.idx = -1; in thread_imc_event_init()
1000 if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size) in thread_imc_event_init()
1001 return -EINVAL; in thread_imc_event_init()
1003 target = event->hw.target; in thread_imc_event_init()
1005 return -EINVAL; in thread_imc_event_init()
1009 * Check if any other trace/core imc events are running in the in thread_imc_event_init()
1010 * system; if not, set the global id to thread-imc. in thread_imc_event_init()
1012 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) { in thread_imc_event_init()
1013 imc_global_refc.id = IMC_DOMAIN_THREAD; in thread_imc_event_init()
1017 return -EBUSY; in thread_imc_event_init()
1021 event->pmu->task_ctx_nr = perf_sw_context; in thread_imc_event_init()
1022 event->destroy = reset_global_refc; in thread_imc_event_init()
1026 static bool is_thread_imc_pmu(struct perf_event *event) in is_thread_imc_pmu()
1028 if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc"))) in is_thread_imc_pmu()
1034 static __be64 *get_event_base_addr(struct perf_event *event) in get_event_base_addr()
1040 return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK)); in get_event_base_addr()
1043 return (__be64 *)event->hw.event_base; in get_event_base_addr()
1046 static void thread_imc_pmu_start_txn(struct pmu *pmu, in thread_imc_pmu_start_txn()
1054 static void thread_imc_pmu_cancel_txn(struct pmu *pmu) in thread_imc_pmu_cancel_txn()
1059 static int thread_imc_pmu_commit_txn(struct pmu *pmu) in thread_imc_pmu_commit_txn()
1065 static u64 imc_read_counter(struct perf_event *event) in imc_read_counter()
1071 * In-Memory Collection (IMC) counters are free-flowing counters. in imc_read_counter()
1078 local64_set(&event->hw.prev_count, data); in imc_read_counter()
1083 static void imc_event_update(struct perf_event *event) in imc_event_update()
1087 counter_prev = local64_read(&event->hw.prev_count); in imc_event_update()
1089 final_count = counter_new - counter_prev; in imc_event_update()
1092 local64_add(final_count, &event->count); in imc_event_update()
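
imc_read_counter() snapshots the free-flowing 64-bit counter (stored big-endian, hence the be64_to_cpu above) into hw.prev_count; imc_event_update() then accumulates the difference. Because the subtraction is unsigned, the delta stays correct even across a counter wrap. The pair, restated compactly:

	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new  = imc_read_counter(event);	/* also re-arms prev_count */
	final_count  = counter_new - counter_prev;	/* wrap-safe, unsigned */
	local64_add(final_count, &event->count);
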
1095 static void imc_event_start(struct perf_event *event, int flags) in imc_event_start()
1106 static void imc_event_stop(struct perf_event *event, int flags) in imc_event_stop()
1115 static int imc_event_add(struct perf_event *event, int flags) in imc_event_add()
1123 static int thread_imc_event_add(struct perf_event *event, int flags) in thread_imc_event_add()
1133 return -EINVAL; in thread_imc_event_add()
1147 return -EINVAL; in thread_imc_event_add()
1149 spin_lock(&ref->lock); in thread_imc_event_add()
1150 if (ref->refc == 0) { in thread_imc_event_add()
1153 spin_unlock(&ref->lock); in thread_imc_event_add()
1154 pr_err("thread-imc: Unable to start the counter\ in thread_imc_event_add()
1156 return -EINVAL; in thread_imc_event_add()
1159 ++ref->refc; in thread_imc_event_add()
1160 spin_unlock(&ref->lock); in thread_imc_event_add()
1164 static void thread_imc_event_del(struct perf_event *event, int flags) in thread_imc_event_del()
1177 spin_lock(&ref->lock); in thread_imc_event_del()
1178 ref->refc--; in thread_imc_event_del()
1179 if (ref->refc == 0) { in thread_imc_event_del()
1182 spin_unlock(&ref->lock); in thread_imc_event_del()
1183 pr_err("thread-imc: Unable to stop the counters\ in thread_imc_event_del()
1187 } else if (ref->refc < 0) { in thread_imc_event_del()
1188 ref->refc = 0; in thread_imc_event_del()
1190 spin_unlock(&ref->lock); in thread_imc_event_del()
1205 static int trace_imc_mem_alloc(int cpu_id, int size) in trace_imc_mem_alloc()
1218 return -ENOMEM; in trace_imc_mem_alloc()
1222 /* Initialise the counters for trace mode */ in trace_imc_mem_alloc()
1226 pr_info("IMC:opal init failed for trace imc\n"); in trace_imc_mem_alloc()
1231 trace_imc_refc[core_id].id = core_id; in trace_imc_mem_alloc()
1238 static int ppc_trace_imc_cpu_online(unsigned int cpu) in ppc_trace_imc_cpu_online()
1243 static int ppc_trace_imc_cpu_offline(unsigned int cpu) in ppc_trace_imc_cpu_offline()
1247 * it is set to zero for imc trace-mode in ppc_trace_imc_cpu_offline()
1249 * Reduce the refc if any trace-imc event running in ppc_trace_imc_cpu_offline()
1253 if (imc_global_refc.id == IMC_DOMAIN_TRACE) in ppc_trace_imc_cpu_offline()
1254 imc_global_refc.refc--; in ppc_trace_imc_cpu_offline()
1260 static int trace_imc_cpu_init(void) in trace_imc_cpu_init()
1268 static u64 get_trace_imc_event_base_addr(void) in get_trace_imc_event_base_addr()
1274 * Function to parse trace-imc data obtained
1277 static int trace_imc_prepare_sample(struct trace_imc_data *mem, in trace_imc_prepare_sample()
1284 if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb) in trace_imc_prepare_sample()
1285 *prev_tb = be64_to_cpu(READ_ONCE(mem->tb1)); in trace_imc_prepare_sample()
1287 return -EINVAL; in trace_imc_prepare_sample()
1289 if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) != in trace_imc_prepare_sample()
1290 be64_to_cpu(READ_ONCE(mem->tb2))) in trace_imc_prepare_sample()
1291 return -EINVAL; in trace_imc_prepare_sample()
1294 data->ip = be64_to_cpu(READ_ONCE(mem->ip)); in trace_imc_prepare_sample()
1295 data->period = event->hw.last_period; in trace_imc_prepare_sample()
1297 header->type = PERF_RECORD_SAMPLE; in trace_imc_prepare_sample()
1298 header->size = sizeof(*header) + event->header_size; in trace_imc_prepare_sample()
1299 header->misc = 0; in trace_imc_prepare_sample()
1302 switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) { in trace_imc_prepare_sample()
1303 case 0:/* when MSR HV and PR not set in the trace-record */ in trace_imc_prepare_sample()
1304 header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; in trace_imc_prepare_sample()
1307 header->misc |= PERF_RECORD_MISC_GUEST_USER; in trace_imc_prepare_sample()
1310 header->misc |= PERF_RECORD_MISC_KERNEL; in trace_imc_prepare_sample()
1313 header->misc |= PERF_RECORD_MISC_USER; in trace_imc_prepare_sample()
1320 if (is_kernel_addr(data->ip)) in trace_imc_prepare_sample()
1321 header->misc |= PERF_RECORD_MISC_KERNEL; in trace_imc_prepare_sample()
1323 header->misc |= PERF_RECORD_MISC_USER; in trace_imc_prepare_sample()
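
For reference, the record layout being validated here is struct trace_imc_data (asm/imc-pmu.h); the timebase appears first and last, so a record counts as complete only when tb1 has advanced past the previous record and its masked value matches tb2:

/* One trace entry as written by the hardware; fields are raw big-endian
 * words, which is why the code above wraps them in be64_to_cpu(). */
struct trace_imc_data {
	u64 tb1;	/* timebase, written first */
	u64 ip;		/* sampled instruction pointer */
	u64 val;	/* MSR HV/PR bits, used for header->misc above */
	u64 cpmc1;
	u64 cpmc2;
	u64 cpmc3;
	u64 cpmc4;
	u64 tb2;	/* timebase again, written last */
};
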
1330 static void dump_trace_imc_data(struct perf_event *event) in dump_trace_imc_data()
1358 static int trace_imc_event_add(struct perf_event *event, int flags) in trace_imc_event_add()
1364 /* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */ in trace_imc_event_add()
1368 /* trace-imc reference count */ in trace_imc_event_add()
1373 return -EINVAL; in trace_imc_event_add()
1377 spin_lock(&ref->lock); in trace_imc_event_add()
1378 if (ref->refc == 0) { in trace_imc_event_add()
1381 spin_unlock(&ref->lock); in trace_imc_event_add()
1382 pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); in trace_imc_event_add()
1383 return -EINVAL; in trace_imc_event_add()
1386 ++ref->refc; in trace_imc_event_add()
1387 spin_unlock(&ref->lock); in trace_imc_event_add()
1391 static void trace_imc_event_read(struct perf_event *event) in trace_imc_event_read()
1396 static void trace_imc_event_stop(struct perf_event *event, int flags) in trace_imc_event_stop()
1403 static void trace_imc_event_start(struct perf_event *event, int flags) in trace_imc_event_start()
1408 static void trace_imc_event_del(struct perf_event *event, int flags) in trace_imc_event_del()
1420 spin_lock(&ref->lock); in trace_imc_event_del()
1421 ref->refc--; in trace_imc_event_del()
1422 if (ref->refc == 0) { in trace_imc_event_del()
1425 spin_unlock(&ref->lock); in trace_imc_event_del()
1426 pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id); in trace_imc_event_del()
1429 } else if (ref->refc < 0) { in trace_imc_event_del()
1430 ref->refc = 0; in trace_imc_event_del()
1432 spin_unlock(&ref->lock); in trace_imc_event_del()
1437 static int trace_imc_event_init(struct perf_event *event) in trace_imc_event_init()
1439 if (event->attr.type != event->pmu->type) in trace_imc_event_init()
1440 return -ENOENT; in trace_imc_event_init()
1443 return -EACCES; in trace_imc_event_init()
1446 if (event->attr.sample_period == 0) in trace_imc_event_init()
1447 return -ENOENT; in trace_imc_event_init()
1455 if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) { in trace_imc_event_init()
1458 * system, so set the refc.id to trace-imc. in trace_imc_event_init()
1460 imc_global_refc.id = IMC_DOMAIN_TRACE; in trace_imc_event_init()
1464 return -EBUSY; in trace_imc_event_init()
1468 event->hw.idx = -1; in trace_imc_event_init()
1474 event->pmu->task_ctx_nr = perf_sw_context; in trace_imc_event_init()
1475 event->destroy = reset_global_refc; in trace_imc_event_init()
1480 static int update_pmu_ops(struct imc_pmu *pmu) in update_pmu_ops()
1482 pmu->pmu.task_ctx_nr = perf_invalid_context; in update_pmu_ops()
1483 pmu->pmu.add = imc_event_add; in update_pmu_ops()
1484 pmu->pmu.del = imc_event_stop; in update_pmu_ops()
1485 pmu->pmu.start = imc_event_start; in update_pmu_ops()
1486 pmu->pmu.stop = imc_event_stop; in update_pmu_ops()
1487 pmu->pmu.read = imc_event_update; in update_pmu_ops()
1488 pmu->pmu.attr_groups = pmu->attr_groups; in update_pmu_ops()
1489 pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; in update_pmu_ops()
1490 pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group; in update_pmu_ops()
1492 switch (pmu->domain) { in update_pmu_ops()
1494 pmu->pmu.event_init = nest_imc_event_init; in update_pmu_ops()
1495 pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; in update_pmu_ops()
1498 pmu->pmu.event_init = core_imc_event_init; in update_pmu_ops()
1499 pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group; in update_pmu_ops()
1502 pmu->pmu.event_init = thread_imc_event_init; in update_pmu_ops()
1503 pmu->pmu.add = thread_imc_event_add; in update_pmu_ops()
1504 pmu->pmu.del = thread_imc_event_del; in update_pmu_ops()
1505 pmu->pmu.start_txn = thread_imc_pmu_start_txn; in update_pmu_ops()
1506 pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn; in update_pmu_ops()
1507 pmu->pmu.commit_txn = thread_imc_pmu_commit_txn; in update_pmu_ops()
1510 pmu->pmu.event_init = trace_imc_event_init; in update_pmu_ops()
1511 pmu->pmu.add = trace_imc_event_add; in update_pmu_ops()
1512 pmu->pmu.del = trace_imc_event_del; in update_pmu_ops()
1513 pmu->pmu.start = trace_imc_event_start; in update_pmu_ops()
1514 pmu->pmu.stop = trace_imc_event_stop; in update_pmu_ops()
1515 pmu->pmu.read = trace_imc_event_read; in update_pmu_ops()
1516 pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group; in update_pmu_ops()
1526 static int init_nest_pmu_ref(void) in init_nest_pmu_ref()
1534 return -ENOMEM; in init_nest_pmu_ref()
1545 * Loop to init the "id" with the node_id. Variable "i" initialized to in init_nest_pmu_ref()
1550 nest_imc_refc[i++].id = nid; in init_nest_pmu_ref()
1560 if (nest_imc_refc[i].id == nid) { in init_nest_pmu_ref()
1569 static void cleanup_all_core_imc_memory(void) in cleanup_all_core_imc_memory()
1572 struct imc_mem_info *ptr = core_imc_pmu->mem_info; in cleanup_all_core_imc_memory()
1573 int size = core_imc_pmu->counter_mem_size; in cleanup_all_core_imc_memory()
1585 static void thread_imc_ldbar_disable(void *dummy) in thread_imc_ldbar_disable()
1588 * By setting 0th bit of LDBAR to zero, we disable thread-imc in thread_imc_ldbar_disable()
1599 static void cleanup_all_thread_imc_memory(void) in cleanup_all_thread_imc_memory()
1610 static void cleanup_all_trace_imc_memory(void) in cleanup_all_trace_imc_memory()
1623 static void imc_common_mem_free(struct imc_pmu *pmu_ptr) in imc_common_mem_free()
1625 if (pmu_ptr->attr_groups[IMC_EVENT_ATTR]) in imc_common_mem_free()
1626 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); in imc_common_mem_free()
1627 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); in imc_common_mem_free()
1636 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) in imc_common_cpuhp_mem_free()
1638 if (pmu_ptr->domain == IMC_DOMAIN_NEST) { in imc_common_cpuhp_mem_free()
1648 nest_pmus--; in imc_common_cpuhp_mem_free()
1653 if (pmu_ptr->domain == IMC_DOMAIN_CORE) { in imc_common_cpuhp_mem_free()
1659 if (pmu_ptr->domain == IMC_DOMAIN_THREAD) { in imc_common_cpuhp_mem_free()
1664 if (pmu_ptr->domain == IMC_DOMAIN_TRACE) { in imc_common_cpuhp_mem_free()
1671 * Function to unregister thread-imc if core-imc
1678 perf_pmu_unregister(&thread_imc_pmu->pmu); in unregister_thread_imc()
1684 static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, in imc_mem_init()
1688 int nr_cores, cpu, res = -ENOMEM; in imc_mem_init()
1691 return -ENODEV; in imc_mem_init()
1693 switch (pmu_ptr->domain) { in imc_mem_init()
1696 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s); in imc_mem_init()
1697 if (!pmu_ptr->pmu.name) in imc_mem_init()
1712 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1713 if (!pmu_ptr->pmu.name) in imc_mem_init()
1717 pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), in imc_mem_init()
1720 if (!pmu_ptr->mem_info) in imc_mem_init()
1727 kfree(pmu_ptr->mem_info); in imc_mem_init()
1735 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1736 if (!pmu_ptr->pmu.name) in imc_mem_init()
1739 thread_imc_mem_size = pmu_ptr->counter_mem_size; in imc_mem_init()
1741 res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size); in imc_mem_init()
1752 pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc"); in imc_mem_init()
1753 if (!pmu_ptr->pmu.name) in imc_mem_init()
1754 return -ENOMEM; in imc_mem_init()
1760 return -ENOMEM; in imc_mem_init()
1762 trace_imc_mem_size = pmu_ptr->counter_mem_size; in imc_mem_init()
1772 return -EINVAL; in imc_mem_init()
1798 switch (pmu_ptr->domain) { in init_imc_pmu()
1853 return -EINVAL; /* Unknown domain */ in init_imc_pmu()
1864 ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1); in init_imc_pmu()
1869 pmu_ptr->pmu.name); in init_imc_pmu()
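
Tying the pieces together: init_imc_pmu() (only fragments are matched above) performs the per-domain memory setup, builds the sysfs groups, installs the domain-specific callbacks, and finally registers the PMU. A condensed sketch of that flow, error unwinding omitted:

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);	/* name + counter pages */
	if (ret)
		return ret;

	/* per-domain cpuhotplug and refcount setup (the switch above) ... */

	ret = update_events_in_group(parent, pmu_ptr);	/* "events" group */
	if (!ret)
		ret = update_pmu_ops(pmu_ptr);		/* callbacks + formats */
	if (!ret)
		ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);

On success the PMU shows up under /sys/bus/event_source/devices/ with the name built in imc_mem_init(), e.g. thread_imc or trace_imc.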