Lines matching the identifier "event" (full-word matches)
211 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
213 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
233 * - removing the last event from a task ctx; this is relatively straight
236 * - adding the first event to a task ctx; this is tricky because we cannot
247 struct perf_event *event; member
255 struct perf_event *event = efs->event; in event_function() local
256 struct perf_event_context *ctx = event->ctx; in event_function()
291 efs->func(event, cpuctx, ctx, efs->data); in event_function()
298 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
300 struct perf_event_context *ctx = event->ctx; in event_function_call()
304 .event = event, in event_function_call()
309 if (!event->parent) { in event_function_call()
311 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
312 * stabilize the event->ctx relation. See in event_function_call()
319 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
345 func(event, NULL, ctx, data); in event_function_call()
355 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
357 struct perf_event_context *ctx = event->ctx; in event_function_local()
394 func(event, cpuctx, ctx, data); in event_function_local()
447 * perf event paranoia level:
459 * max perf event sample rate
609 static u64 perf_event_time(struct perf_event *event);
618 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
620 return event->clock(); in perf_event_clock()
624 * State based event timekeeping...
626 * The basic idea is to use event->state to determine which (if any) time
631 * Event groups make things a little more complicated, but not terribly so. The
646 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
648 struct perf_event *leader = event->group_leader; in __perf_effective_state()
653 return event->state; in __perf_effective_state()
657 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
659 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
660 u64 delta = now - event->tstamp; in __perf_update_times()
662 *enabled = event->total_time_enabled; in __perf_update_times()
666 *running = event->total_time_running; in __perf_update_times()
671 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
673 u64 now = perf_event_time(event); in perf_event_update_time()
675 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
676 &event->total_time_running); in perf_event_update_time()
677 event->tstamp = now; in perf_event_update_time()
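The two conditionals inside __perf_update_times() fall outside the matched lines above; as a reconstructed sketch (not a verbatim excerpt) of the rule those helpers implement, an event accrues "enabled" time whenever its effective state is INACTIVE or better, and "running" time only while it is ACTIVE:

	u64 delta = now - event->tstamp;

	*enabled = event->total_time_enabled;
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;	/* counted while merely enabled */

	*running = event->total_time_running;
	if (state == PERF_EVENT_STATE_ACTIVE)
		*running += delta;	/* counted only while on the PMU */

So an event that sits INACTIVE for 2 ms and then ACTIVE for 3 ms reports time_enabled = 5 ms and time_running = 3 ms, which is exactly the scaling information later exported to user space.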
689 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
691 if (event->state == state) in perf_event_set_state()
694 perf_event_update_time(event); in perf_event_set_state()
699 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
700 perf_event_update_sibling_time(event); in perf_event_set_state()
702 WRITE_ONCE(event->state, state); in perf_event_set_state()
752 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
756 /* @event doesn't care about cgroup */ in perf_cgroup_match()
757 if (!event->cgrp) in perf_cgroup_match()
765 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
767 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
771 event->cgrp->css.cgroup); in perf_cgroup_match()
774 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
776 css_put(&event->cgrp->css); in perf_detach_cgroup()
777 event->cgrp = NULL; in perf_detach_cgroup()
780 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
782 return event->cgrp != NULL; in is_cgroup_event()
785 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
789 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
793 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
797 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
835 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
843 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
846 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
889  * cpuctx->cgrp is set when the first cgroup event is enabled, in perf_cgroup_switch()
890  * and is cleared when the last cgroup event is disabled. in perf_cgroup_switch()
922 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
963 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
980 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
985 event->cgrp = cgrp; in perf_cgroup_connect()
993 perf_detach_cgroup(event); in perf_cgroup_connect()
1000 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1004 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1007 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1022 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1026 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1029 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1046 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1051 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1054 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1059 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1068 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1080 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1085 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1091 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1096 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1264 * because the sys_perf_event_open() case will install a new event and break
1275 * quiesce the event, after which we can install it in the new location. This
1276 * means that only external vectors (perf_fops, prctl) can perturb the event
1280 * However; because event->ctx can change while we're waiting to acquire
1300 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1306 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1314 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1324 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1326 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1329 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
1355 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1362 if (event->parent) in perf_event_pid_type()
1363 event = event->parent; in perf_event_pid_type()
1365 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1372 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1374 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1377 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1379 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1383 * If we inherit events we want to return the parent event id
1386 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1388 u64 id = event->id; in primary_event_id()
1390 if (event->parent) in primary_event_id()
1391 id = event->parent->id; in primary_event_id()
1511 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1513 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1518 if (is_cgroup_event(event)) in perf_event_time()
1519 return perf_cgroup_event_time(event); in perf_event_time()
1524 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1526 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1531 if (is_cgroup_event(event)) in perf_event_time_now()
1532 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1541 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1543 struct perf_event_context *ctx = event->ctx; in get_event_type()
1552 if (event->group_leader != event) in get_event_type()
1553 event = event->group_leader; in get_event_type()
1555 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1563 * Helper function to initialize event group nodes.
1565 static void init_event_group(struct perf_event *event) in init_event_group() argument
1567 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1568 event->group_index = 0; in init_event_group()
1573 * based on event attrs bits.
1576 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1578 if (event->attr.pinned) in get_event_groups()
1593 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1598 if (event->cgrp) in event_cgroup()
1599 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1606 * Compare function for event groups;
1701 * Insert @event into @groups' tree; using
1702 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1707 struct perf_event *event) in perf_event_groups_insert() argument
1709 event->group_index = ++groups->index; in perf_event_groups_insert()
1711 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1715 * Helper function to insert event into the pinned or flexible groups.
1718 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1722 groups = get_event_groups(event, ctx); in add_event_to_groups()
1723 perf_event_groups_insert(groups, event); in add_event_to_groups()
1731 struct perf_event *event) in perf_event_groups_delete() argument
1733 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1736 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1737 init_event_group(event); in perf_event_groups_delete()
1741 * Helper function to delete event from its groups.
1744 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1748 groups = get_event_groups(event, ctx); in del_event_from_groups()
1749 perf_event_groups_delete(groups, event); in del_event_from_groups()
1753 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1774 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1777 .cpu = event->cpu, in perf_event_groups_next()
1779 .cgroup = event_cgroup(event), in perf_event_groups_next()
1783 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1790 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1791 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1792 event; event = perf_event_groups_next(event, pmu))
1797 #define perf_event_groups_for_each(event, groups) \ argument
1798 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1799 typeof(*event), group_node); event; \
1800 event = rb_entry_safe(rb_next(&event->group_node), \
1801 typeof(*event), group_node))
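For context, the two iteration macros above are used elsewhere in this file roughly as in the following sketch (the loop bodies are hypothetical):

	struct perf_event *event;

	/* Walk every group leader in a context's pinned tree, ordered by
	 * {cpu, pmu, cgroup, group_index}: */
	perf_event_groups_for_each(event, &ctx->pinned_groups) {
		/* ... visit each group leader ... */
	}

	/* Or only the leaders eligible for a given CPU and PMU: */
	perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) {
		/* ... schedule in, count, etc. ... */
	}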
1804 * Does the event attribute request inherit with PERF_SAMPLE_READ
1812  * Add an event to the lists for its context.
1816 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1820 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1821 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1823 event->tstamp = perf_event_time(event); in list_add_event()
1826 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1830 if (event->group_leader == event) { in list_add_event()
1831 event->group_caps = event->event_caps; in list_add_event()
1832 add_event_to_groups(event, ctx); in list_add_event()
1835 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1837 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1839 if (event->attr.inherit_stat) in list_add_event()
1841 if (has_inherit_and_sample_read(&event->attr)) in list_add_event()
1844 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1845 perf_cgroup_event_enable(event, ctx); in list_add_event()
1848 event->pmu_ctx->nr_events++; in list_add_event()
1852 * Initialize event state based on the perf_event_attr::disabled.
1854 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1856 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1890 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1908 size += event->read_size; in __perf_event_header_size()
1928 event->header_size = size; in __perf_event_header_size()
1935 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1937 event->read_size = in perf_event__header_size()
1938 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1939 event->group_leader->nr_siblings); in perf_event__header_size()
1940 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1943 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1946 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1967 event->id_header_size = size; in perf_event__id_header_size()
1971 * Check that adding an event to the group does not result in anybody
1972 * overflowing the 64k event limit imposed by the output buffer.
1974 * Specifically, check that the read_size for the event does not exceed 16k,
1976 * depends on per-event read_format, also (re)check the existing events.
1981 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1983 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
1985 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
2000 if (event == group_leader) in perf_event_validate_size()
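As a worked size check under the group read layout documented in perf_event_open(2) (this arithmetic assumes PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and no PERF_FORMAT_LOST):

	/* Documented pseudo-layout of a group read: */
	struct read_format {
		u64 nr;
		u64 time_enabled;
		u64 time_running;
		struct { u64 value; u64 id; } values[/* nr */];
	};
	/* size = 24 + 16 * nr bytes, so the 16k per-event read_size cap
	 * is only reached once a group approaches roughly a thousand
	 * members; the validation above also re-checks existing events
	 * against the 64k output-record limit. */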
2012 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2014 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2016 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2022 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2025 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2027 if (group_leader == event) in perf_group_attach()
2030 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2032 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2034 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2045 * Remove an event from the lists for its context.
2049 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2051 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2057 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2060 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2063 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2065 if (event->attr.inherit_stat) in list_del_event()
2067 if (has_inherit_and_sample_read(&event->attr)) in list_del_event()
2070 list_del_rcu(&event->event_entry); in list_del_event()
2072 if (event->group_leader == event) in list_del_event()
2073 del_event_from_groups(event, ctx); in list_del_event()
2076 * If event was in error state, then keep it in list_del_event()
2080 * of the event in list_del_event()
2082 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2083 perf_cgroup_event_disable(event, ctx); in list_del_event()
2084 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2088 event->pmu_ctx->nr_events--; in list_del_event()
2092 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2097 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2100 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2103 static void put_event(struct perf_event *event);
2104 static void event_sched_out(struct perf_event *event,
2107 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2109 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2113 * If event uses aux_event tear down the link in perf_put_aux_event()
2115 if (event->aux_event) { in perf_put_aux_event()
2116 iter = event->aux_event; in perf_put_aux_event()
2117 event->aux_event = NULL; in perf_put_aux_event()
2123 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2126 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2127 if (iter->aux_event != event) in perf_put_aux_event()
2131 put_event(event); in perf_put_aux_event()
2139 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2143 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2145 return event->attr.aux_output || has_aux_action(event); in perf_need_aux_event()
2148 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2152 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2153 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2163 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2166 if (event->attr.aux_output && in perf_get_aux_event()
2167 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2170 if ((event->attr.aux_pause || event->attr.aux_resume) && in perf_get_aux_event()
2174 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2181 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2186 event->aux_event = group_leader; in perf_get_aux_event()
2191 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2193 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2194 &event->pmu_ctx->flexible_active; in get_event_list()
2203 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2205 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2206 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2209 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2211 struct perf_event *leader = event->group_leader; in perf_group_detach()
2213 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2220 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2223 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2225 perf_put_aux_event(event); in perf_group_detach()
2230 if (leader != event) { in perf_group_detach()
2231 list_del_init(&event->sibling_list); in perf_group_detach()
2232 event->group_leader->nr_siblings--; in perf_group_detach()
2233 event->group_leader->group_generation++; in perf_group_detach()
2238 * If this was a group event with sibling events then in perf_group_detach()
2242 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2251 sibling->group_caps = event->group_caps; in perf_group_detach()
2254 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2260 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2272 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2274 struct perf_event *parent_event = event->parent; in perf_child_detach()
2276 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2279 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2286 sync_child_event(event); in perf_child_detach()
2287 list_del_init(&event->child_list); in perf_child_detach()
2290 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2292 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2296 event_filter_match(struct perf_event *event) in event_filter_match() argument
2298 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2299 perf_cgroup_match(event); in event_filter_match()
2303 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2305 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2311 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2314 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2322 list_del_init(&event->active_list); in event_sched_out()
2324 perf_pmu_disable(event->pmu); in event_sched_out()
2326 event->pmu->del(event, 0); in event_sched_out()
2327 event->oncpu = -1; in event_sched_out()
2329 if (event->pending_disable) { in event_sched_out()
2330 event->pending_disable = 0; in event_sched_out()
2331 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2335 perf_event_set_state(event, state); in event_sched_out()
2337 if (!is_software_event(event)) in event_sched_out()
2339 if (event->attr.freq && event->attr.sample_freq) { in event_sched_out()
2343 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2346 perf_pmu_enable(event->pmu); in event_sched_out()
2352 struct perf_event *event; in group_sched_out() local
2364 for_each_sibling_event(event, group_event) in group_sched_out()
2365 event_sched_out(event, ctx); in group_sched_out()
2397 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) in ctx_time_update_event() argument
2403 update_cgrp_time_from_event(event); in ctx_time_update_event()
2413 * Cross CPU call to remove a performance event
2415 * We disable the event on the hardware level first. After that we
2419 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2424 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2437 event->pending_disable = 1; in __perf_remove_from_context()
2440 event_sched_out(event, ctx); in __perf_remove_from_context()
2441 perf_event_set_state(event, min(event->state, state)); in __perf_remove_from_context()
2443 perf_group_detach(event); in __perf_remove_from_context()
2445 perf_child_detach(event); in __perf_remove_from_context()
2446 list_del_event(event, ctx); in __perf_remove_from_context()
2473 * Remove the event from a task's (or a CPU's) list of events.
2475 * If event->ctx is a cloned context, callers must make sure that
2476 * every task struct that event->ctx->task could possibly point to
2482 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2484 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2495 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2502 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2506 * Cross CPU call to disable a performance event
2508 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2513 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2516 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2517 ctx_time_update_event(ctx, event); in __perf_event_disable()
2519 if (event == event->group_leader) in __perf_event_disable()
2520 group_sched_out(event, ctx); in __perf_event_disable()
2522 event_sched_out(event, ctx); in __perf_event_disable()
2524 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2525 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2527 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2531 * Disable an event.
2533 * If event->ctx is a cloned context, callers must make sure that
2534 * every task struct that event->ctx->task could possibly point to
2537 * hold the top-level event's child_mutex, so any descendant that
2540 * When called from perf_pending_disable it's OK because event->ctx
2544 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2546 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2549 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2555 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2558 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2560 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2567 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2571 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2572 _perf_event_disable(event); in perf_event_disable()
2573 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2577 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2579 event->pending_disable = 1; in perf_event_disable_inatomic()
2580 irq_work_queue(&event->pending_disable_irq); in perf_event_disable_inatomic()
2585 static void perf_log_throttle(struct perf_event *event, int enable);
2586 static void perf_log_itrace_start(struct perf_event *event);
2589 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2591 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2595 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2599 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2602 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2604 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2609 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2616 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2617 perf_log_throttle(event, 1); in event_sched_in()
2618 event->hw.interrupts = 0; in event_sched_in()
2621 perf_pmu_disable(event->pmu); in event_sched_in()
2623 perf_log_itrace_start(event); in event_sched_in()
2625 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2626 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2627 event->oncpu = -1; in event_sched_in()
2632 if (!is_software_event(event)) in event_sched_in()
2634 if (event->attr.freq && event->attr.sample_freq) { in event_sched_in()
2638 if (event->attr.exclusive) in event_sched_in()
2642 perf_pmu_enable(event->pmu); in event_sched_in()
2650 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2664 for_each_sibling_event(event, group_event) { in group_sched_in()
2665 if (event_sched_in(event, ctx)) { in group_sched_in()
2666 partial_group = event; in group_sched_in()
2678 * The events up to the failed event are scheduled out normally. in group_sched_in()
2680 for_each_sibling_event(event, group_event) { in group_sched_in()
2681 if (event == partial_group) in group_sched_in()
2684 event_sched_out(event, ctx); in group_sched_in()
2694 * Work out whether we can put this event group on the CPU now.
2696 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2698 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2704 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2716 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2725 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2728 list_add_event(event, ctx); in add_event_to_ctx()
2729 perf_group_attach(event); in add_event_to_ctx()
2767 * time an event is added, only do it for the groups of equal priority and
2834 * Cross CPU call to install and enable a performance event
2841 struct perf_event *event = info; in __perf_install_in_context() local
2842 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2873 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2875 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2880 event->cgrp->css.cgroup); in __perf_install_in_context()
2886 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2887 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
2888 get_event_type(event)); in __perf_install_in_context()
2890 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2899 static bool exclusive_event_installable(struct perf_event *event,
2903 * Attach a performance event to a context.
2909 struct perf_event *event, in perf_install_in_context() argument
2916 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2918 if (event->cpu != -1) in perf_install_in_context()
2919 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2922 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2925 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2929 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2933 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2935 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2936 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2942 add_event_to_ctx(event, ctx); in perf_install_in_context()
2948 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2990 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
3006 * thus we can safely install the event. in perf_install_in_context()
3012 add_event_to_ctx(event, ctx); in perf_install_in_context()
3017 * Cross CPU call to enable a performance event
3019 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
3024 struct perf_event *leader = event->group_leader; in __perf_event_enable()
3027 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
3028 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
3033 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
3034 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3039 if (!event_filter_match(event)) in __perf_event_enable()
3043 * If the event is in a group and isn't the group leader, in __perf_event_enable()
3046 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
3053 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3057 * Enable an event.
3059 * If event->ctx is a cloned context, callers must make sure that
3060 * every task struct that event->ctx->task could possibly point to
3065 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3067 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3070 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3071 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3078 * If the event is in error state, clear that first. in _perf_event_enable()
3080 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3084 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3088 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3089 event->group_leader == event) in _perf_event_enable()
3092 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3096 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3102 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3106 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3107 _perf_event_enable(event); in perf_event_enable()
3108 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
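To make the enable/disable pair concrete, here is a minimal in-kernel sketch; it assumes perf_event_create_kernel_counter(), which lives elsewhere in this file and is not part of the matched lines, and error handling is elided:

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,
	};
	struct perf_event *event;
	u64 count, enabled, running;

	/* Kernel-owned counter on CPU 0; such events have
	 * owner == TASK_TOMBSTONE, cf. is_kernel_event() above. */
	event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
	if (!IS_ERR(event)) {
		perf_event_enable(event);
		/* ... measured section ... */
		perf_event_disable(event);
		count = perf_event_read_value(event, &enabled, &running);
		perf_event_release_kernel(event);
	}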
3113 struct perf_event *event; member
3120 struct perf_event *event = sd->event; in __perf_event_stop() local
3123 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3131 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3133 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3136 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3144 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3148 event->pmu->start(event, 0); in __perf_event_stop()
3153 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3156 .event = event, in perf_event_stop()
3162 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3169 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3170 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3173 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3186 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3187 * (p2) when an event is scheduled in (pmu::add), it calls
3191 * If (p1) happens while the event is active, we restart it to force (p2).
3202 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3204 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3206 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3210 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3211 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3212 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
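For reference, the (p1) side above is normally driven from user space through the set-filter ioctl; a hedged sketch follows (the path and addresses are invented, and the exact filter-string grammar belongs to the PMU driver's documentation):

	/* Hypothetical: constrain an AUX event (e.g. Intel PT) to one
	 * object's text range; this feeds the (p1)/(p2) dance described
	 * above and ultimately perf_event_addr_filters_sync(). */
	ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
	      "filter 0x1000/0x2000@/usr/lib/libfoo.so");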
3218 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3223 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3226 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3227 _perf_event_enable(event); in _perf_event_refresh()
3235 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3240 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3241 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3242 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
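A hedged user-space sketch of how the refresh count above is typically driven (it assumes an already-opened, non-inherited sampling event fd, with overflow notification set up separately):

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* Re-arm the event for exactly one more overflow: the argument is
	 * added to event->event_limit (see _perf_event_refresh() above)
	 * and the event is enabled; once the limit is consumed the event
	 * is disabled again. */
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);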
3264 * Copy event-type-independent attributes that may be modified.
3272 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3279 if (event->attr.type != attr->type) in perf_event_modify_attr()
3282 switch (event->attr.type) { in perf_event_modify_attr()
3291 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3293 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3295 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3299 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3300 err = func(event, attr); in perf_event_modify_attr()
3303 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3310 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3318 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3334 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3337 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3341 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3344 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3466 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3471 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3475 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3478 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3481 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3482 event->pmu->read(event); in __perf_event_sync_stat()
3484 perf_event_update_time(event); in __perf_event_sync_stat()
3487 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3491 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3494 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3495 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3500 perf_event_update_userpage(event); in __perf_event_sync_stat()
3507 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3514 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3520 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3523 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3525 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3707 * This callback is relevant even to per-cpu events; for example multi event
3754 * We stop each event and update the event value in event->count.
3757 * sets the disabled bit in the control field of event _before_
3758 * accessing the event control register. If a NMI hits, then it will
3759 * not restart the event.
3775 * cgroup event are system-wide mode only in __perf_event_task_sched_out()
3795 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) in __heap_add() argument
3799 if (event) { in __heap_add()
3800 itrs[heap->nr] = event; in __heap_add()
3827 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3891 * Because the userpage is strictly per-event (there is no concept of context,
3897 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3899 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3902 perf_event_update_time(event); in event_update_userpage()
3903 perf_event_update_userpage(event); in event_update_userpage()
3910 struct perf_event *event; in group_update_userpage() local
3915 for_each_sibling_event(event, group_event) in group_update_userpage()
3916 event_update_userpage(event); in group_update_userpage()
3919 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3921 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3924 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3927 if (!event_filter_match(event)) in merge_sched_in()
3930 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3931 if (!group_sched_in(event, ctx)) in merge_sched_in()
3932 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3935 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3937 if (event->attr.pinned) { in merge_sched_in()
3938 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3939 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3943 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3944 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3946 group_update_userpage(event); in merge_sched_in()
4087 * We restore the event value and then enable it.
4090 * sets the enabled bit in the control field of event _before_
4091 * accessing the event control register. If a NMI hits, then it will
4092 * keep the event running.
4106 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4108 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4182 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4184 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4188 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4206 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4211 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
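The frequency-to-period conversion above solves a simple proportion; ignoring the overflow-safe fixed-point arithmetic in the real helper, observing count events in nsec nanoseconds with a target of attr.sample_freq samples per second gives roughly:

	period ≈ (count * NSEC_PER_SEC) / (sample_freq * nsec)

perf_adjust_period() then nudges hwc->sample_period toward that value, optionally stopping and restarting the event so the PMU picks up the new period.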
4217 struct perf_event *event; in perf_adjust_freq_unthr_events() local
4222 list_for_each_entry(event, event_list, active_list) { in perf_adjust_freq_unthr_events()
4223 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_events()
4227 if (!event_filter_match(event)) in perf_adjust_freq_unthr_events()
4230 hwc = &event->hw; in perf_adjust_freq_unthr_events()
4234 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_events()
4235 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4236 event->pmu->start(event, 0); in perf_adjust_freq_unthr_events()
4239 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4243 * stop the event and update event->count in perf_adjust_freq_unthr_events()
4245 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_events()
4247 now = local64_read(&event->count); in perf_adjust_freq_unthr_events()
4252 * restart the event in perf_adjust_freq_unthr_events()
4254 * we have stopped the event so tell that in perf_adjust_freq_unthr_events()
4259 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_events()
4261 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_events()
4303  * Move @event to the tail of the @ctx's eligible events.
4305 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4314 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4315 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4318 /* pick an event from the flexible_groups to rotate */
4322 struct perf_event *event; in ctx_event_to_rotate() local
4329 /* pick the first active flexible event */ in ctx_event_to_rotate()
4330 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4332 if (event) in ctx_event_to_rotate()
4335 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4343 event = __node_2_pe(node); in ctx_event_to_rotate()
4350 event = __node_2_pe(node); in ctx_event_to_rotate()
4357 event = __node_2_pe(node); in ctx_event_to_rotate()
4366 return event; in ctx_event_to_rotate()
4379 * events, thus the event count values are stable. in perf_rotate_context()
4449 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4452 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4455 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4456 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4459 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4473 struct perf_event *event; in perf_event_enable_on_exec() local
4488 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4489 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4490 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4494 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4509 static void perf_remove_from_owner(struct perf_event *event);
4510 static void perf_event_exit_event(struct perf_event *event,
4520 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4529 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4530 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4533 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4534 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4538 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4554 struct perf_event *event; member
4561 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4569 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) { in __perf_event_read_cpu()
4570 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu); in __perf_event_read_cpu()
4576 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4588 * Cross CPU call to read the hardware event
4593 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4594 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4596 struct pmu *pmu = event->pmu; in __perf_event_read()
4602 * event->count would have been updated to a recent sample in __perf_event_read()
4603 * when the event was scheduled out. in __perf_event_read()
4609 ctx_time_update_event(ctx, event); in __perf_event_read()
4611 perf_event_update_time(event); in __perf_event_read()
4613 perf_event_update_sibling_time(event); in __perf_event_read()
4615 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4619 pmu->read(event); in __perf_event_read()
4626 pmu->read(event); in __perf_event_read()
4628 for_each_sibling_event(sub, event) { in __perf_event_read()
4631 * Use sibling's PMU rather than @event's since in __perf_event_read()
4644 static inline u64 perf_event_count(struct perf_event *event, bool self) in perf_event_count() argument
4647 return local64_read(&event->count); in perf_event_count()
4649 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4652 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4660 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4661 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4665 * NMI-safe method to read a local event, that is an event that
4672 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4687 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4690 if (event->attr.inherit) { in perf_event_read_local()
4695 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4696 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4697 event->hw.target != current) { in perf_event_read_local()
4703 * Get the event CPU numbers, and adjust them to local if the event is in perf_event_read_local()
4704 * a per-package event that can be read locally in perf_event_read_local()
4706 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4707 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4709 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4710 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4716 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4717 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4723 * If the event is currently on this CPU, its either a per-task event, in perf_event_read_local()
4728 event->pmu->read(event); in perf_event_read_local()
4730 *value = local64_read(&event->count); in perf_event_read_local()
4734 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
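A minimal in-kernel caller sketch for the NMI-safe read path above, assuming event is already known to be local (a per-task event on the current task, or a per-CPU event on this CPU); this is the kind of access used, for example, by BPF's perf-event read helpers:

	u64 value, enabled, running;

	/* Returns 0 on success; rejects inherited events and events that
	 * are not local to this task/CPU, as checked above. */
	if (!perf_event_read_local(event, &value, &enabled, &running)) {
		/* value is the raw count; enabled/running carry the usual
		 * time-multiplexing scaling information. */
	}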
4746 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4748 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4752 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4753 * value in the event structure: in perf_event_read()
4767 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4772 .event = event, in perf_event_read()
4778 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4784 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4785 * scheduled out and that will have updated the event count. in perf_event_read()
4787 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4795 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4799 state = event->state; in perf_event_read()
4809 ctx_time_update_event(ctx, event); in perf_event_read()
4811 perf_event_update_time(event); in perf_event_read()
4813 perf_event_update_sibling_time(event); in perf_event_read()
4884 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4892 /* Must be root to operate on a CPU event: */ in find_get_context()
4893 err = perf_allow_cpu(&event->attr); in find_get_context()
4897 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4958 struct perf_event *event) in find_get_pmu_context() argument
4971 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4991 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_pmu_context()
5091 static void perf_event_free_filter(struct perf_event *event);
5095 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
5097 if (event->ns) in free_event_rcu()
5098 put_pid_ns(event->ns); in free_event_rcu()
5099 perf_event_free_filter(event); in free_event_rcu()
5100 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
5103 static void ring_buffer_attach(struct perf_event *event,
5106 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
5108 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
5111 list_del_rcu(&event->sb_list); in detach_sb_event()
5115 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5117 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5119 if (event->parent) in is_sb_event()
5122 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5134 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5136 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5137 detach_sb_event(event); in unaccount_pmu_sb_event()
5162 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5166 if (event->parent) in unaccount_event()
5169 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5171 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5173 if (event->attr.build_id) in unaccount_event()
5175 if (event->attr.comm) in unaccount_event()
5177 if (event->attr.namespaces) in unaccount_event()
5179 if (event->attr.cgroup) in unaccount_event()
5181 if (event->attr.task) in unaccount_event()
5183 if (event->attr.freq) in unaccount_event()
5185 if (event->attr.context_switch) { in unaccount_event()
5189 if (is_cgroup_event(event)) in unaccount_event()
5191 if (has_branch_stack(event)) in unaccount_event()
5193 if (event->attr.ksymbol) in unaccount_event()
5195 if (event->attr.bpf_event) in unaccount_event()
5197 if (event->attr.text_poke) in unaccount_event()
5205 unaccount_pmu_sb_event(event); in unaccount_event()
5218 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5228 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5230 struct pmu *pmu = event->pmu; in exclusive_event_init()
5243 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
5245 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
5248 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5256 event->attach_state |= PERF_ATTACH_EXCLUSIVE; in exclusive_event_init()
5261 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5263 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5266 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5271 event->attach_state &= ~PERF_ATTACH_EXCLUSIVE; in exclusive_event_destroy()
5284 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5288 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5296 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
5303 static void perf_addr_filters_splice(struct perf_event *event,
5307 static void __free_event(struct perf_event *event) in __free_event() argument
5309 if (event->attach_state & PERF_ATTACH_CALLCHAIN) in __free_event()
5312 kfree(event->addr_filter_ranges); in __free_event()
5314 if (event->attach_state & PERF_ATTACH_EXCLUSIVE) in __free_event()
5315 exclusive_event_destroy(event); in __free_event()
5317 if (is_cgroup_event(event)) in __free_event()
5318 perf_detach_cgroup(event); in __free_event()
5320 if (event->destroy) in __free_event()
5321 event->destroy(event); in __free_event()
5327 if (event->hw.target) in __free_event()
5328 put_task_struct(event->hw.target); in __free_event()
5330 if (event->pmu_ctx) { in __free_event()
5332 * put_pmu_ctx() needs an event->ctx reference, because of in __free_event()
5335 WARN_ON_ONCE(!event->ctx); in __free_event()
5336 WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx); in __free_event()
5337 put_pmu_ctx(event->pmu_ctx); in __free_event()
5344 if (event->ctx) in __free_event()
5345 put_ctx(event->ctx); in __free_event()
5347 if (event->pmu) in __free_event()
5348 module_put(event->pmu->module); in __free_event()
5350 call_rcu(&event->rcu_head, free_event_rcu); in __free_event()
5354 static void _free_event(struct perf_event *event) in _free_event() argument
5356 irq_work_sync(&event->pending_irq); in _free_event()
5357 irq_work_sync(&event->pending_disable_irq); in _free_event()
5359 unaccount_event(event); in _free_event()
5361 security_perf_event_free(event); in _free_event()
5363 if (event->rb) { in _free_event()
5365 * Can happen when we close an event with re-directed output. in _free_event()
5370 mutex_lock(&event->mmap_mutex); in _free_event()
5371 ring_buffer_attach(event, NULL); in _free_event()
5372 mutex_unlock(&event->mmap_mutex); in _free_event()
5375 perf_event_free_bpf_prog(event); in _free_event()
5376 perf_addr_filters_splice(event, NULL); in _free_event()
5378 __free_event(event); in _free_event()
5383 * where the event isn't exposed yet and inherited events.
5385 static void free_event(struct perf_event *event) in free_event() argument
5387 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5388 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5389 atomic_long_read(&event->refcount), event)) { in free_event()
5394 _free_event(event); in free_event()
5398 * Remove user event from the owner task.
5400 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5408 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5411 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5434 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5437 * event. in perf_remove_from_owner()
5439 if (event->owner) { in perf_remove_from_owner()
5440 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5441 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5448 static void put_event(struct perf_event *event) in put_event() argument
5452 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5455 parent = event->parent; in put_event()
5456 _free_event(event); in put_event()
5464 * Kill an event dead; while event:refcount will preserve the event
5468 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5470 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5475 * If we got here through err_alloc: free_event(event); we will not in perf_event_release_kernel()
5479 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5484 if (!is_kernel_event(event)) in perf_event_release_kernel()
5485 perf_remove_from_owner(event); in perf_event_release_kernel()
5487 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5491 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5494 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5501 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5503 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5506 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5507 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5519 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5530 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5532 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5539 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5548 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5563 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5573 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5582 * Last reference unless ->pending_task work is pending on this event in perf_event_release_kernel()
5585 put_event(event); in perf_event_release_kernel()
5599 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5607 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5609 (void)perf_event_read(event, false); in __perf_event_read_value()
5610 total += perf_event_count(event, false); in __perf_event_read_value()
5612 *enabled += event->total_time_enabled + in __perf_event_read_value()
5613 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5614 *running += event->total_time_running + in __perf_event_read_value()
5615 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5617 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5623 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5628 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5633 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5634 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5635 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
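
A minimal userspace sketch (not part of this file) of what __perf_event_read_value() ultimately serves: a read(2) on a counting event opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING returns the aggregated count plus the enabled/running times, which the reader can use to scale away multiplexing. It assumes a PMU exposing the generic "instructions" event and a perf_event_paranoid setting that allows self-monitoring.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        struct { uint64_t value, time_enabled, time_running; } rf;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;

        int fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
                         -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
                return 1;

        /* Scale away multiplexing, mirroring the enabled/running sums. */
        double scaled = rf.time_running ?
                (double)rf.value * rf.time_enabled / rf.time_running : 0.0;
        printf("instructions: %llu (scaled %.0f)\n",
               (unsigned long long)rf.value, scaled);
        close(fd);
        return 0;
}

The three u64 fields arrive in the order value, time_enabled, time_running, matching the layout perf_read_one() emits for this read_format.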
5720 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5723 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5730 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5750 ret = event->read_size; in perf_read_group()
5751 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5762 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5769 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5775 values[n++] = primary_event_id(event); in perf_read_one()
5777 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
5785 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5789 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5792 mutex_lock(&event->child_mutex); in is_event_hup()
5793 no_children = list_empty(&event->child_list); in is_event_hup()
5794 mutex_unlock(&event->child_mutex); in is_event_hup()
5799 * Read the performance event - simple non blocking version for now
5802 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5804 u64 read_format = event->attr.read_format; in __perf_read()
5808 * Return end-of-file for a read on an event that is in in __perf_read()
5812 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5815 if (count < event->read_size) in __perf_read()
5818 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5820 ret = perf_read_group(event, read_format, buf); in __perf_read()
5822 ret = perf_read_one(event, read_format, buf); in __perf_read()
5830 struct perf_event *event = file->private_data; in perf_read() local
5834 ret = security_perf_event_read(event); in perf_read()
5838 ctx = perf_event_ctx_lock(event); in perf_read()
5839 ret = __perf_read(event, buf, count); in perf_read()
5840 perf_event_ctx_unlock(event, ctx); in perf_read()
5847 struct perf_event *event = file->private_data; in perf_poll() local
5851 poll_wait(file, &event->waitq, wait); in perf_poll()
5853 if (is_event_hup(event)) in perf_poll()
5857 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5860 mutex_lock(&event->mmap_mutex); in perf_poll()
5861 rb = event->rb; in perf_poll()
5864 mutex_unlock(&event->mmap_mutex); in perf_poll()
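
A rough userspace counterpart to perf_poll(), assuming perf_event_open(2) is permitted for the caller's own tasks: poll(2) blocks until the ring buffer crosses the wakeup threshold (POLLIN) and reports POLLHUP once the monitored task has exited and is_event_hup() holds. A sketch that samples a forked child:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        long ps = sysconf(_SC_PAGESIZE);
        struct perf_event_attr attr;
        int wakeups = 0;
        pid_t child = fork();

        if (child == 0) {                       /* busy child generating samples */
                for (volatile long i = 0; i < (1L << 27); i++)
                        ;
                _exit(0);
        }

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.sample_period = 1000000;           /* one sample per ms of task time */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
        attr.wakeup_events = 1;                 /* wake the poller on every sample */
        attr.exclude_kernel = 1;

        int fd = syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
        if (fd < 0)
                return 1;

        /* A ring buffer must be mapped for samples (and wakeups) to happen. */
        struct perf_event_mmap_page *mp =
                mmap(NULL, (size_t)(1 + 8) * ps, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
        if (mp == MAP_FAILED)
                return 1;

        for (;;) {
                struct pollfd pfd = { .fd = fd, .events = POLLIN };

                poll(&pfd, 1, -1);
                if (pfd.revents & POLLHUP)      /* child gone: is_event_hup() */
                        break;
                if (pfd.revents & POLLIN) {     /* count the wakeup, discard data */
                        wakeups++;
                        __sync_synchronize();
                        mp->data_tail = mp->data_head;
                }
        }
        printf("%d wakeups before hangup\n", wakeups);
        waitpid(child, NULL, 0);
        return 0;
}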
5868 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5870 (void)perf_event_read(event, false); in _perf_event_reset()
5871 local64_set(&event->count, 0); in _perf_event_reset()
5872 perf_event_update_userpage(event); in _perf_event_reset()
5875 /* Assume it's not an event with inherit set. */
5876 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5881 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5882 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5883 _perf_event_disable(event); in perf_event_pause()
5884 count = local64_read(&event->count); in perf_event_pause()
5886 local64_set(&event->count, 0); in perf_event_pause()
5887 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5894 * Holding the top-level event's child_mutex means that any
5895 * descendant process that has inherited this event will block
5899 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5904 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5906 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5907 func(event); in perf_event_for_each_child()
5908 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5910 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5913 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5916 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5921 event = event->group_leader; in perf_event_for_each()
5923 perf_event_for_each_child(event, func); in perf_event_for_each()
5924 for_each_sibling_event(sibling, event) in perf_event_for_each()
5928 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5936 if (event->attr.freq) { in __perf_event_period()
5937 event->attr.sample_freq = value; in __perf_event_period()
5939 event->attr.sample_period = value; in __perf_event_period()
5940 event->hw.sample_period = value; in __perf_event_period()
5943 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5945 perf_pmu_disable(event->pmu); in __perf_event_period()
5948 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5950 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5951 event->hw.interrupts = 0; in __perf_event_period()
5952 perf_log_throttle(event, 1); in __perf_event_period()
5954 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5957 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5960 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5961 perf_pmu_enable(event->pmu); in __perf_event_period()
5965 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5967 return event->pmu->check_period(event, value); in perf_event_check_period()
5970 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5972 if (!is_sampling_event(event)) in _perf_event_period()
5978 if (event->attr.freq) { in _perf_event_period()
5982 if (perf_event_check_period(event, value)) in _perf_event_period()
5988 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5993 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5998 ctx = perf_event_ctx_lock(event); in perf_event_period()
5999 ret = _perf_event_period(event, value); in perf_event_period()
6000 perf_event_ctx_unlock(event, ctx); in perf_event_period()
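
The userspace side of _perf_event_period(), as a sketch that assumes an already-opened sampling event descriptor (the helper name is illustrative): PERF_EVENT_IOC_PERIOD takes a pointer to a u64 and updates sample_period (or sample_freq when attr.freq is set) on the live event.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

int set_sample_period(int perf_fd, uint64_t period)
{
        /* Routed through perf_ioctl() -> _perf_event_period(); fails with
         * -EINVAL for non-sampling events or for a period the PMU rejects
         * via its check_period() callback. */
        uint64_t value = period;

        return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &value);
}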
6013 static int perf_event_set_output(struct perf_event *event,
6015 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6019 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
6036 return _perf_event_refresh(event, arg); in _perf_ioctl()
6045 return _perf_event_period(event, value); in _perf_ioctl()
6049 u64 id = primary_event_id(event); in _perf_ioctl()
6065 return perf_event_set_output(event, output_event); in _perf_ioctl()
6069 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
6080 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
6093 rb = rcu_dereference(event->rb); in _perf_ioctl()
6104 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6114 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6121 perf_event_for_each(event, func); in _perf_ioctl()
6123 perf_event_for_each_child(event, func); in _perf_ioctl()
6130 struct perf_event *event = file->private_data; in perf_ioctl() local
6135 ret = security_perf_event_write(event); in perf_ioctl()
6139 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6140 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6141 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
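
A hedged sketch of part of the ioctl surface dispatched by _perf_ioctl(); the helper name is made up, and fd is assumed to be the file descriptor of a perf event group leader.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>

void perf_fd_restart_group(int fd)
{
        uint64_t id;

        ioctl(fd, PERF_EVENT_IOC_ID, &id);      /* primary_event_id() */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
        ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}

Passing PERF_IOC_FLAG_GROUP as the ioctl argument makes enable/disable/reset walk the whole group via perf_event_for_each() instead of only the event and its children via perf_event_for_each_child().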
6171 struct perf_event *event; in perf_event_task_enable() local
6174 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6175 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6176 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6177 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6187 struct perf_event *event; in perf_event_task_disable() local
6190 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6191 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6192 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6193 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
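
perf_event_task_enable() and perf_event_task_disable() are reached from prctl(2). A small sketch (the wrapper function is illustrative only):

#include <sys/prctl.h>

void measure_region(void (*workload)(void))
{
        /* Counters this task created stay disabled except around the
         * workload; maps to perf_event_task_{disable,enable}(). */
        prctl(PR_TASK_PERF_EVENTS_DISABLE);
        /* ... uninteresting setup ... */
        prctl(PR_TASK_PERF_EVENTS_ENABLE);
        workload();
        prctl(PR_TASK_PERF_EVENTS_DISABLE);
}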
6200 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6202 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6205 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6208 return event->pmu->event_idx(event); in perf_event_index()
6211 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6217 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6234 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6243 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6250 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6256 * based on snapshot values taken when the event in perf_event_update_userpage()
6263 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6273 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6274 userpg->offset = perf_event_count(event, false); in perf_event_update_userpage()
6276 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6279 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6282 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6284 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
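
The page that perf_event_update_userpage() maintains is struct perf_event_mmap_page, published to userspace through the mapping set up in perf_mmap(). A sketch of the documented seqlock-style read side, assuming pc points at that mapped control page:

#include <linux/perf_event.h>
#include <stdint.h>

/* Sketch: read the count snapshot published by perf_event_update_userpage(). */
uint64_t read_user_page_count(volatile struct perf_event_mmap_page *pc,
                              uint64_t *enabled, uint64_t *running)
{
        uint32_t seq;
        uint64_t count;

        do {
                seq = pc->lock;
                __sync_synchronize();           /* pairs with the kernel's barriers */

                count = pc->offset;             /* event count at last update */
                *enabled = pc->time_enabled;    /* includes child totals      */
                *running = pc->time_running;
                /* if pc->cap_user_rdpmc && pc->index, a self-monitoring fast
                 * path could add rdpmc(pc->index - 1) here */

                __sync_synchronize();
        } while (pc->lock != seq || (seq & 1)); /* retry on a torn snapshot */
        return count;
}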
6294 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6300 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6302 if (event->rb) { in ring_buffer_attach()
6305 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
6307 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6309 old_rb = event->rb; in ring_buffer_attach()
6311 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6314 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6315 event->rcu_pending = 1; in ring_buffer_attach()
6319 if (event->rcu_pending) { in ring_buffer_attach()
6320 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6321 event->rcu_pending = 0; in ring_buffer_attach()
6325 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6330 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
6331 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
6339 if (has_aux(event)) in ring_buffer_attach()
6340 perf_event_stop(event, 0); in ring_buffer_attach()
6342 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6351 wake_up_all(&event->waitq); in ring_buffer_attach()
6355 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6359 if (event->parent) in ring_buffer_wakeup()
6360 event = event->parent; in ring_buffer_wakeup()
6363 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6365 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6366 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6371 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6375 if (event->parent) in ring_buffer_get()
6376 event = event->parent; in ring_buffer_get()
6379 rb = rcu_dereference(event->rb); in ring_buffer_get()
6401 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6403 atomic_inc(&event->mmap_count); in perf_mmap_open()
6404 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6407 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6409 if (event->pmu->event_mapped) in perf_mmap_open()
6410 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6413 static void perf_pmu_output_stop(struct perf_event *event);
6417 * event, or through other events by use of perf_event_set_output().
6425 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6426 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6432 if (event->pmu->event_unmapped) in perf_mmap_close()
6433 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6447 perf_pmu_output_stop(event); in perf_mmap_close()
6463 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6466 ring_buffer_attach(event, NULL); in perf_mmap_close()
6467 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6480 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6481 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6483 * This event is en-route to free_event() which will in perf_mmap_close()
6490 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6496 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6501 if (event->rb == rb) in perf_mmap_close()
6502 ring_buffer_attach(event, NULL); in perf_mmap_close()
6504 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6505 put_event(event); in perf_mmap_close()
6616 struct perf_event *event = file->private_data; in perf_mmap() local
6632 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6638 ret = security_perf_event_read(event); in perf_mmap()
6654 if (!event->rb) in perf_mmap()
6661 mutex_lock(&event->mmap_mutex); in perf_mmap()
6664 rb = event->rb; in perf_mmap()
6719 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6721 mutex_lock(&event->mmap_mutex); in perf_mmap()
6722 if (event->rb) { in perf_mmap()
6723 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6728 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6731 * event and try again. in perf_mmap()
6733 ring_buffer_attach(event, NULL); in perf_mmap()
6734 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6739 rb = event->rb; in perf_mmap()
6782 WARN_ON(!rb && event->rb); in perf_mmap()
6789 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6790 event->cpu, flags); in perf_mmap()
6801 ring_buffer_attach(event, rb); in perf_mmap()
6803 perf_event_update_time(event); in perf_mmap()
6804 perf_event_init_userpage(event); in perf_mmap()
6805 perf_event_update_userpage(event); in perf_mmap()
6807 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6808 event->attr.aux_watermark, flags); in perf_mmap()
6818 atomic_inc(&event->mmap_count); in perf_mmap()
6825 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6837 if (event->pmu->event_mapped) in perf_mmap()
6838 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
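
Beyond the control page, perf_mmap() maps a power-of-two number of data pages that the kernel fills with PERF_RECORD_* records. A hedged consumer sketch using the data_head/data_tail protocol (DATA_PAGES and the helper names are illustrative):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DATA_PAGES 8    /* must be a power of two (see perf_mmap()) */

void *map_ring(int fd, size_t *data_size)
{
        long ps = sysconf(_SC_PAGESIZE);

        *data_size = (size_t)DATA_PAGES * ps;
        return mmap(NULL, (DATA_PAGES + 1) * ps,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

void drain_ring(struct perf_event_mmap_page *mp, size_t data_size)
{
        char *data = (char *)mp + sysconf(_SC_PAGESIZE);
        uint64_t tail = mp->data_tail;
        uint64_t head = __atomic_load_n(&mp->data_head, __ATOMIC_ACQUIRE);

        while (tail < head) {
                struct perf_event_header hdr;
                size_t off = tail % data_size;

                /* records may wrap around the end of the data area */
                if (off + sizeof(hdr) <= data_size) {
                        memcpy(&hdr, data + off, sizeof(hdr));
                } else {
                        size_t first = data_size - off;

                        memcpy(&hdr, data + off, first);
                        memcpy((char *)&hdr + first, data, sizeof(hdr) - first);
                }
                if (!hdr.size)
                        break;
                if (hdr.type == PERF_RECORD_SAMPLE)
                        printf("sample record, %u bytes\n", (unsigned)hdr.size);
                tail += hdr.size;
        }
        /* tell the producer this data has been consumed */
        __atomic_store_n(&mp->data_tail, tail, __ATOMIC_RELEASE);
}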
6846 struct perf_event *event = filp->private_data; in perf_fasync() local
6850 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6870 * Perf event wakeup
6876 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6878 ring_buffer_wakeup(event); in perf_event_wakeup()
6880 if (event->pending_kill) { in perf_event_wakeup()
6881 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6882 event->pending_kill = 0; in perf_event_wakeup()
6886 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6893 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6903 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6904 event->orig_type, event->attr.sig_data); in perf_sigtrap()
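
perf_sigtrap() is the delivery end of attr.sigtrap. A sketch of the receiving side, assuming a kernel and libc new enough to expose TRAP_PERF and the si_perf_* siginfo fields (v5.13 or later); fprintf in a signal handler is not async-signal-safe and is acceptable only for a demo.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <stdio.h>

static void trap_handler(int sig, siginfo_t *info, void *ucontext)
{
        (void)sig; (void)ucontext;
        if (info->si_code == TRAP_PERF)         /* filled by send_sig_perf() */
                fprintf(stderr, "perf SIGTRAP: addr=%p cookie=%llu\n",
                        info->si_addr,
                        (unsigned long long)info->si_perf_data);
}

int main(void)
{
        struct sigaction sa = { .sa_sigaction = trap_handler,
                                .sa_flags = SA_SIGINFO };
        struct perf_event_attr attr;

        sigaction(SIGTRAP, &sa, NULL);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        attr.sample_period = 100;               /* trap every 100 page faults */
        attr.sigtrap = 1;                       /* requires remove_on_exec    */
        attr.remove_on_exec = 1;
        attr.exclude_kernel = 1;
        attr.sig_data = 0xdeadbeef;             /* shows up as si_perf_data   */

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        /* touch fresh memory to generate page faults, and thus traps */
        for (int i = 0; i < 1000; i++) {
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p != MAP_FAILED)
                        p[0] = 1;
        }
        close(fd);
        return 0;
}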
6908 * Deliver the pending work in-event-context or follow the context.
6910 static void __perf_pending_disable(struct perf_event *event) in __perf_pending_disable() argument
6912 int cpu = READ_ONCE(event->oncpu); in __perf_pending_disable()
6915 * If the event isn't running, we're done. event_sched_out() will have in __perf_pending_disable()
6922 * Yay, we hit home and are in the context of the event. in __perf_pending_disable()
6925 if (event->pending_disable) { in __perf_pending_disable()
6926 event->pending_disable = 0; in __perf_pending_disable()
6927 perf_event_disable_local(event); in __perf_pending_disable()
6950 * But the event runs on CPU-B and wants disabling there. in __perf_pending_disable()
6952 irq_work_queue_on(&event->pending_disable_irq, cpu); in __perf_pending_disable()
6957 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq); in perf_pending_disable() local
6965 __perf_pending_disable(event); in perf_pending_disable()
6972 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6982 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
6983 * irrespective of where the event is. in perf_pending_irq()
6985 if (event->pending_wakeup) { in perf_pending_irq()
6986 event->pending_wakeup = 0; in perf_pending_irq()
6987 perf_event_wakeup(event); in perf_pending_irq()
6996 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
7005 if (event->pending_work) { in perf_pending_task()
7006 event->pending_work = 0; in perf_pending_task()
7007 perf_sigtrap(event); in perf_pending_task()
7008 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task()
7010 put_event(event); in perf_pending_task()
7054 static bool should_sample_guest(struct perf_event *event) in should_sample_guest() argument
7056 return !event->attr.exclude_guest && perf_guest_state(); in should_sample_guest()
7059 unsigned long perf_misc_flags(struct perf_event *event, in perf_misc_flags() argument
7062 if (should_sample_guest(event)) in perf_misc_flags()
7068 unsigned long perf_instruction_pointer(struct perf_event *event, in perf_instruction_pointer() argument
7071 if (should_sample_guest(event)) in perf_instruction_pointer()
7210 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7214 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7249 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7259 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
7261 * doesn't change the event state. in perf_pmu_snapshot_aux()
7273 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7282 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7286 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7328 * when event->attr.sample_id_all is set.
7335 struct perf_event *event, in __perf_event_header__init_id() argument
7338 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7343 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7344 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7348 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7351 data->id = primary_event_id(event); in __perf_event_header__init_id()
7354 data->stream_id = event->id; in __perf_event_header__init_id()
7364 struct perf_event *event) in perf_event_header__init_id() argument
7366 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7367 header->size += event->id_header_size; in perf_event_header__init_id()
7368 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7396 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7400 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7405 struct perf_event *event, in perf_output_read_one() argument
7408 u64 read_format = event->attr.read_format; in perf_output_read_one()
7412 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr)); in perf_output_read_one()
7415 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7419 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7422 values[n++] = primary_event_id(event); in perf_output_read_one()
7424 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7430 struct perf_event *event, in perf_output_read_group() argument
7433 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7434 u64 read_format = event->attr.read_format; in perf_output_read_group()
7438 bool self = has_inherit_and_sample_read(&event->attr); in perf_output_read_group()
7454 if ((leader != event) && in perf_output_read_group()
7469 if ((sub != event) && in perf_output_read_group()
7500 struct perf_event *event) in perf_output_read() argument
7503 u64 read_format = event->attr.read_format; in perf_output_read()
7507 * based on snapshot values taken when the event in perf_output_read()
7515 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7517 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7518 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7520 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
7526 struct perf_event *event) in perf_output_sample() argument
7560 perf_output_read(handle, event); in perf_output_sample()
7611 if (branch_sample_hw_index(event)) in perf_output_sample()
7641 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7672 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7696 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7699 if (!event->attr.watermark) { in perf_output_sample()
7700 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7843 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7845 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7846 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7848 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7849 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7866 struct perf_event *event, in perf_prepare_sample() argument
7869 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7887 data->type = event->attr.sample_type; in perf_prepare_sample()
7891 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
7894 data->ip = perf_instruction_pointer(event, regs); in perf_prepare_sample()
7899 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
7926 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7941 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7942 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7988 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
8030 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
8041 event->attr.aux_sample_size); in perf_prepare_sample()
8043 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
8053 struct perf_event *event, in perf_prepare_header() argument
8057 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
8058 header->misc = perf_misc_flags(event, regs); in perf_prepare_header()
8071 static void __perf_event_aux_pause(struct perf_event *event, bool pause) in __perf_event_aux_pause() argument
8074 if (!event->hw.aux_paused) { in __perf_event_aux_pause()
8075 event->hw.aux_paused = 1; in __perf_event_aux_pause()
8076 event->pmu->stop(event, PERF_EF_PAUSE); in __perf_event_aux_pause()
8079 if (event->hw.aux_paused) { in __perf_event_aux_pause()
8080 event->hw.aux_paused = 0; in __perf_event_aux_pause()
8081 event->pmu->start(event, PERF_EF_RESUME); in __perf_event_aux_pause()
8086 static void perf_event_aux_pause(struct perf_event *event, bool pause) in perf_event_aux_pause() argument
8090 if (WARN_ON_ONCE(!event)) in perf_event_aux_pause()
8093 rb = ring_buffer_get(event); in perf_event_aux_pause()
8099 * Guard against self-recursion here. Another event could trip in perf_event_aux_pause()
8107 __perf_event_aux_pause(event, pause); in perf_event_aux_pause()
8115 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
8130 perf_prepare_sample(data, event, regs); in __perf_event_output()
8131 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
8133 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
8137 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
8147 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
8151 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
8155 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
8159 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
8163 perf_event_output(struct perf_event *event, in perf_event_output() argument
8167 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
8182 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
8191 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
8193 .pid = perf_event_pid(event, task), in perf_event_read_event()
8194 .tid = perf_event_tid(event, task), in perf_event_read_event()
8198 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
8199 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
8204 perf_output_read(&handle, event); in perf_event_read_event()
8205 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
8210 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8217 struct perf_event *event; in perf_iterate_ctx() local
8219 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8221 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8223 if (!event_filter_match(event)) in perf_iterate_ctx()
8227 output(event, data); in perf_iterate_ctx()
8234 struct perf_event *event; in perf_iterate_sb_cpu() local
8236 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8239 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
8242 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8245 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8247 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8249 output(event, data); in perf_iterate_sb_cpu()
8257 * your event, otherwise it might not get delivered.
8292 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8294 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8299 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8305 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8306 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8314 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8318 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8343 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8345 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8349 .event = event, in __perf_event_output_stop()
8352 if (!has_aux(event)) in __perf_event_output_stop()
8356 parent = event; in __perf_event_output_stop()
8362 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
8364 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
8365 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
8374 struct perf_event *event = info; in __perf_pmu_output_stop() local
8377 .rb = event->rb, in __perf_pmu_output_stop()
8390 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8397 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8401 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
8411 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8441 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8443 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8444 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8445 event->attr.task; in perf_event_task_match()
8448 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8457 if (!perf_event_task_match(event)) in perf_event_task_output()
8460 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8462 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8467 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8468 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8471 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8473 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8476 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8477 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8480 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8484 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8547 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8549 return event->attr.comm; in perf_event_comm_match()
8552 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8561 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8564 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8565 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8571 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8572 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8578 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8646 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8648 return event->attr.namespaces; in perf_event_namespaces_match()
8651 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8660 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8664 &sample, event); in perf_event_namespaces_output()
8665 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8670 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8672 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8677 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8774 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8776 return event->attr.cgroup; in perf_event_cgroup_match()
8779 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8787 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8791 &sample, event); in perf_event_cgroup_output()
8792 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8800 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8885 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8892 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8893 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8896 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8907 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8910 if (event->attr.mmap2) { in perf_event_mmap_output()
8920 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8921 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8926 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8927 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8929 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8931 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8936 if (event->attr.mmap2) { in perf_event_mmap_output()
8955 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
9116 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
9118 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
9124 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
9133 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
9140 event->addr_filters_gen++; in __perf_addr_filters_adjust()
9144 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
9203 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9225 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9226 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9232 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9240 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9258 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9260 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9266 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9285 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9287 return event->attr.context_switch; in perf_event_switch_match()
9290 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9297 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9301 if (event->ctx->task) { in perf_event_switch_output()
9308 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9310 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9313 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9315 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9319 if (event->ctx->task) in perf_event_switch_output()
9324 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9362 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9379 .time = perf_event_clock(event), in perf_log_throttle()
9380 .id = primary_event_id(event), in perf_log_throttle()
9381 .stream_id = event->id, in perf_log_throttle()
9387 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9389 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9395 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9415 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9417 return event->attr.ksymbol; in perf_event_ksymbol_match()
9420 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9427 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9431 &sample, event); in perf_event_ksymbol_output()
9432 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9439 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9505 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9507 return event->attr.bpf_event; in perf_event_bpf_match()
9510 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9517 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9521 &sample, event); in perf_event_bpf_output()
9522 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9528 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9607 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9609 return event->attr.text_poke; in perf_event_text_poke_match()
9612 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9620 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9623 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9625 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9640 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9677 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9679 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9682 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9693 if (event->parent) in perf_log_itrace_start()
9694 event = event->parent; in perf_log_itrace_start()
9696 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9697 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9703 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9704 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9706 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9707 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9713 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9718 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9728 if (event->parent) in perf_report_aux_output_id()
9729 event = event->parent; in perf_report_aux_output_id()
9736 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9737 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9743 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9750 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9752 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9767 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9772 if (event->attr.freq) { in __perf_event_account_interrupt()
9779 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9785 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9787 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
9790 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9797 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9804 static int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9810 .event = event, in bpf_overflow_handler()
9819 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
9821 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
9831 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9835 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9839 if (event->prog) in perf_event_set_bpf_handler()
9845 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9847 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
9848 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9849 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9862 event->prog = prog; in perf_event_set_bpf_handler()
9863 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
9867 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9869 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9874 event->prog = NULL; in perf_event_free_bpf_handler()
9878 static inline int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9885 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9892 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9898 * Generic event overflow handling, sampling.
9901 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9905 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9912 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9915 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9917 if (event->attr.aux_pause) in __perf_event_overflow()
9918 perf_event_aux_pause(event->aux_event, true); in __perf_event_overflow()
9920 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && in __perf_event_overflow()
9921 !bpf_overflow_handler(event, data, regs)) in __perf_event_overflow()
9929 event->pending_kill = POLL_IN; in __perf_event_overflow()
9930 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9932 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9933 perf_event_disable_inatomic(event); in __perf_event_overflow()
9936 if (event->attr.sigtrap) { in __perf_event_overflow()
9940 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
9943 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9952 if (!event->pending_work && in __perf_event_overflow()
9953 !task_work_add(current, &event->pending_task, notify_mode)) { in __perf_event_overflow()
9954 event->pending_work = pending_id; in __perf_event_overflow()
9955 local_inc(&event->ctx->nr_no_switch_fast); in __perf_event_overflow()
9956 WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount)); in __perf_event_overflow()
9958 event->pending_addr = 0; in __perf_event_overflow()
9960 event->pending_addr = data->addr; in __perf_event_overflow()
9962 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9975 WARN_ON_ONCE(event->pending_work != pending_id); in __perf_event_overflow()
9979 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9981 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9982 event->pending_wakeup = 1; in __perf_event_overflow()
9983 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9986 if (event->attr.aux_resume) in __perf_event_overflow()
9987 perf_event_aux_pause(event->aux_event, false); in __perf_event_overflow()
9992 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9996 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
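
The pending_kill/fasync handling above also backs signal-driven notification. A sketch, assuming an already-opened sampling event with attr.inherit clear: O_ASYNC plus F_SETSIG routes each overflow to a chosen signal, and PERF_EVENT_IOC_REFRESH sets event_limit so the event auto-disables after that many overflows, at which point si_code is POLL_HUP.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t overflows;

static void on_overflow(int sig, siginfo_t *si, void *uc)
{
        (void)sig; (void)uc;
        overflows++;
        /* si->si_code is POLL_IN for a normal overflow and POLL_HUP once the
         * refresh budget is exhausted and the event has been auto-disabled. */
        (void)si;
}

int arm_overflow_signal(int perf_fd)
{
        struct sigaction sa = { .sa_sigaction = on_overflow,
                                .sa_flags = SA_SIGINFO };

        if (sigaction(SIGRTMIN, &sa, NULL))
                return -1;
        fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
        fcntl(perf_fd, F_SETSIG, SIGRTMIN);
        fcntl(perf_fd, F_SETOWN, getpid());

        /* enable and allow one overflow before the automatic disable */
        return ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
}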
10000 * Generic software event infrastructure
10011 * We directly increment event->count and keep a second value in
10012 * event->hw.period_left to count intervals. This period event
10017 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
10019 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
10040 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
10044 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
10048 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
10054 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
10066 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
10070 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
10072 local64_add(nr, &event->count); in perf_swevent_event()
10077 if (!is_sampling_event(event)) in perf_swevent_event()
10080 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
10082 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10084 data->period = event->hw.last_period; in perf_swevent_event()
10086 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
10087 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10092 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
10095 int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) in perf_exclude_event() argument
10097 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
10101 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
10104 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
10111 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
10117 if (event->attr.type != type) in perf_swevent_match()
10120 if (event->attr.config != event_id) in perf_swevent_match()
10123 if (perf_exclude_event(event, regs)) in perf_swevent_match()
10157 /* For the event head insertion and removal in the hlist */
10159 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
10162 u32 event_id = event->attr.config; in find_swevent_head()
10163 u64 type = event->attr.type; in find_swevent_head()
10166 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
10171 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
10184 struct perf_event *event; in do_perf_sw_event() local
10192 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
10193 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
10194 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
10240 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
10244 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
10247 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
10250 if (is_sampling_event(event)) { in perf_swevent_add()
10252 perf_swevent_set_period(event); in perf_swevent_add()
10257 head = find_swevent_head(swhash, event); in perf_swevent_add()
10261 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
10262 perf_event_update_userpage(event); in perf_swevent_add()
10267 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
10269 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
10272 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
10274 event->hw.state = 0; in perf_swevent_start()
10277 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
10279 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10371 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10373 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10375 WARN_ON(event->parent); in sw_perf_event_destroy()
10384 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10386 u64 event_id = event->attr.config; in perf_swevent_init()
10388 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10394 if (has_branch_stack(event)) in perf_swevent_init()
10399 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10402 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10412 if (!event->parent) { in perf_swevent_init()
10420 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
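
A plain PERF_TYPE_SOFTWARE counter ends up in perf_swevent_init(); a minimal counting sketch using page faults, assuming self-monitoring is permitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count = 0;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;         /* handled by perf_swevent_init() */
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        char *buf = malloc(1 << 22);            /* fault in ~1024 pages */
        for (size_t i = 0; buf && i < (1 << 22); i += 4096)
                buf[i] = 1;
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("page faults: %llu\n", (unsigned long long)count);
        free(buf);
        close(fd);
        return 0;
}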
10441 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10443 perf_trace_destroy(event); in tp_perf_event_destroy()
10446 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10450 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10456 if (has_branch_stack(event)) in perf_tp_event_init()
10459 err = perf_trace_init(event); in perf_tp_event_init()
10463 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10479 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10485 if (event->parent) in perf_tp_filter_match()
10486 event = event->parent; in perf_tp_filter_match()
10488 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10493 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10497 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10502 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10505 if (!perf_tp_filter_match(event, raw)) in perf_tp_event_match()
10523 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10532 struct perf_event *event) in __perf_tp_event_target_task() argument
10536 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10539 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10541 if (perf_tp_event_match(event, raw, regs)) { in __perf_tp_event_target_task()
10543 perf_sample_save_raw_data(data, event, raw); in __perf_tp_event_target_task()
10544 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10556 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10558 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10559 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10560 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10564 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10565 __perf_tp_event_target_task(count, record, regs, data, raw, event); in perf_tp_event_target_task()
10566 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10576 struct perf_event *event; in perf_tp_event() local
10587 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10588 if (perf_tp_event_match(event, &raw, regs)) { in perf_tp_event()
10591 * some members in data are event-specific and in perf_tp_event()
10594 * the problem that next event skips preparing data in perf_tp_event()
10598 perf_sample_save_raw_data(&data, event, &raw); in perf_tp_event()
10599 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10605 * deliver this event there too. in perf_tp_event()
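
perf_tp_event_init() handles PERF_TYPE_TRACEPOINT events whose config is a tracepoint id. A sketch that reads the id from tracefs; the mount point, the chosen tracepoint, and the required privileges (typically CAP_PERFMON or a low perf_event_paranoid) are assumptions.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

long read_tracepoint_id(const char *path)
{
        long id = -1;
        FILE *f = fopen(path, "r");

        if (f && fscanf(f, "%ld", &id) != 1)
                id = -1;
        if (f)
                fclose(f);
        return id;
}

int main(void)
{
        long id = read_tracepoint_id(
                "/sys/kernel/tracing/events/sched/sched_switch/id");
        struct perf_event_attr attr;
        uint64_t count = 0;

        if (id < 0)
                return 1;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;       /* handled by perf_tp_event_init() */
        attr.config = id;
        attr.disabled = 1;

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (int i = 0; i < 10; i++)
                usleep(1000);                   /* force a few sched_switch hits */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("sched_switch hits: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}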
10666 static int perf_kprobe_event_init(struct perf_event *event);
10678 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10683 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10692 if (has_branch_stack(event)) in perf_kprobe_event_init()
10695 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10696 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10700 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10725 static int perf_uprobe_event_init(struct perf_event *event);
10737 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10743 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10752 if (has_branch_stack(event)) in perf_uprobe_event_init()
10755 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10756 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10757 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10761 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
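
perf_kprobe_event_init() and perf_uprobe_event_init() back the dynamic "kprobe" and "uprobe" PMUs, whose type value must be read from sysfs; bit 0 of attr.config (PERF_PROBE_CONFIG_IS_RETPROBE) selects the return-probe flavour. A hedged sketch: the probed symbol is only an example and the call usually needs CAP_SYS_ADMIN.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        unsigned int type;
        FILE *f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");

        if (!f || fscanf(f, "%u", &type) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;                        /* perf_kprobe.type */
        attr.kprobe_func = (uint64_t)(uintptr_t)"schedule";  /* example symbol */
        attr.probe_offset = 0;
        attr.config = 0;    /* set PERF_PROBE_CONFIG_IS_RETPROBE for a kretprobe */

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open(kprobe)");
                return 1;
        }
        /* read(2) on fd now returns the number of hits seen while this task runs */
        close(fd);
        return 0;
}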
10778 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10780 ftrace_profile_free_filter(event); in perf_event_free_filter()
10784 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10787 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10789 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10792 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10796 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10802 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10807 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10808 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10810 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10811 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10812 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10813 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10832 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10838 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10841 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10843 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10844 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10847 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10856 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10860 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10866 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
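Editorial note: perf_event_set_bpf_prog() is reached from the PERF_EVENT_IOC_SET_BPF ioctl. A minimal userspace sketch, assuming perf_fd refers to a kprobe/uprobe/tracepoint event and bpf_fd to an already-loaded BPF program of a compatible type:

        #include <sys/ioctl.h>
        #include <linux/perf_event.h>

        static int attach_bpf_prog(int perf_fd, int bpf_fd)
        {
                if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_fd))
                        return -1;
                /* Start the event so the program actually runs on each hit. */
                return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
        }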
10888 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10890 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10917 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10923 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10927 if (event->parent) in perf_addr_filters_splice()
10930 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10932 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10934 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10936 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10963 * Update event's address range filters based on the
10966 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10968 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10969 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10976 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10997 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10998 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
11000 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
11002 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
11003 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
11009 event->addr_filters_gen++; in perf_event_addr_filters_apply()
11019 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
11073 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
11100 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
11159 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
11186 if (!event->ctx->task) in perf_event_parse_addr_filter()
11201 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11230 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11239 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11241 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11244 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11248 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11253 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11256 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11264 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11269 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11279 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11280 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11290 * This can result in the event getting moved to a different ctx, in perf_event_set_filter()
11294 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11298 if (has_addr_filter(event)) in perf_event_set_filter()
11299 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
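Editorial note: address filters arrive through the PERF_EVENT_IOC_SET_FILTER ioctl as a text string of the form "filter|start|stop <addr>[/<size>][@</path/to/object>]". A sketch with a made-up binary path, for an event on a PMU that supports address filtering (for example Intel PT):

        #include <sys/ioctl.h>
        #include <linux/perf_event.h>

        static int limit_trace_to_range(int perf_fd)
        {
                /* Trace only 0x100 bytes starting at offset 0x1000 of the object. */
                const char *filter = "filter 0x1000/0x100@/usr/bin/myapp";

                return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, filter);
        }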
11314 struct perf_event *event; in perf_swevent_hrtimer() local
11317 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11319 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11322 event->pmu->read(event); in perf_swevent_hrtimer()
11324 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11327 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11328 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11329 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11333 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11339 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11341 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11344 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11360 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11362 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11364 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11372 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11374 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11376 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11386 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11387 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11389 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11390 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11393 event->attr.freq = 0; in perf_swevent_init_hrtimer()
11398 * Software event: cpu wall time clock
11401 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11407 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11408 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11411 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11413 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11414 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11417 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11419 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11420 cpu_clock_event_update(event); in cpu_clock_event_stop()
11423 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11426 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11427 perf_event_update_userpage(event); in cpu_clock_event_add()
11432 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11434 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11437 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11439 cpu_clock_event_update(event); in cpu_clock_event_read()
11442 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11444 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11447 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11453 if (has_branch_stack(event)) in cpu_clock_event_init()
11456 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
11476 * Software event: task time clock
11479 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11484 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11486 local64_add(delta, &event->count); in task_clock_event_update()
11489 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11491 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11492 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11495 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11497 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11498 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11501 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11504 task_clock_event_start(event, flags); in task_clock_event_add()
11505 perf_event_update_userpage(event); in task_clock_event_add()
11510 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11512 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11515 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11518 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11519 u64 time = event->ctx->time + delta; in task_clock_event_read()
11521 task_clock_event_update(event, time); in task_clock_event_read()
11524 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11526 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11529 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11535 if (has_branch_stack(event)) in task_clock_event_init()
11538 perf_swevent_init_hrtimer(event); in task_clock_event_init()
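Editorial note: both clock events above use the same update idiom: remember the previous timestamp, atomically swap in the current one, and accumulate the difference, so no interval is ever counted twice. A userspace analogue with C11 atomics (a sketch, not kernel code):

        #include <stdatomic.h>
        #include <stdint.h>
        #include <time.h>

        struct sw_clock_counter {
                _Atomic uint64_t prev;   /* last observed clock value, in ns */
                _Atomic uint64_t count;  /* accumulated time, in ns */
        };

        static uint64_t now_ns(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
        }

        static void sw_clock_update(struct sw_clock_counter *c)
        {
                uint64_t now = now_ns();
                uint64_t prev = atomic_exchange(&c->prev, now); /* like local64_xchg() */

                atomic_fetch_add(&c->count, now - prev);        /* like local64_add() */
        }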
11570 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11612 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11987 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11989 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11990 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11993 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
12004 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
12007 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
12012 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
12017 event->pmu = pmu; in perf_try_init_event()
12018 ret = pmu->event_init(event); in perf_try_init_event()
12021 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
12025 has_extended_regs(event)) in perf_try_init_event()
12029 event_has_any_exclude_flag(event)) in perf_try_init_event()
12032 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { in perf_try_init_event()
12033 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); in perf_try_init_event()
12042 event->event_caps |= PERF_EV_CAP_READ_SCOPE; in perf_try_init_event()
12048 if (ret && event->destroy) in perf_try_init_event()
12049 event->destroy(event); in perf_try_init_event()
12053 event->pmu = NULL; in perf_try_init_event()
12060 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
12070 * pmus overwrite event->attr.type to forward the event to another pmu. in perf_init_event()
12072 event->orig_type = event->attr.type; in perf_init_event()
12075 if (event->parent && event->parent->pmu) { in perf_init_event()
12076 pmu = event->parent->pmu; in perf_init_event()
12077 ret = perf_try_init_event(pmu, event); in perf_init_event()
12086 type = event->attr.type; in perf_init_event()
12088 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
12093 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
12102 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
12106 ret = perf_try_init_event(pmu, event); in perf_init_event()
12107 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
12108 type = event->attr.type; in perf_init_event()
12119 ret = perf_try_init_event(pmu, event); in perf_init_event()
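Editorial note: for PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE, a specific PMU (for example one core type on a hybrid CPU) can be selected by putting its id into the upper 32 bits of attr.config; perf_init_event() extracts it via PERF_PMU_TYPE_SHIFT and strips it with PERF_HW_EVENT_MASK. The encoding side, assuming a uapi header that exposes those two constants:

        #include <linux/perf_event.h>
        #include <stdint.h>

        static uint64_t extended_hw_config(uint32_t pmu_type, uint64_t hw_event_id)
        {
                /* pmu_type comes from /sys/bus/event_source/devices/<pmu>/type */
                return ((uint64_t)pmu_type << PERF_PMU_TYPE_SHIFT) |
                       (hw_event_id & PERF_HW_EVENT_MASK);
        }

Used with attr.type = PERF_TYPE_HARDWARE and a generic event id such as PERF_COUNT_HW_INSTRUCTIONS in the low 32 bits.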
12136 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
12138 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
12141 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
12152 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
12154 if (is_sb_event(event)) in account_pmu_sb_event()
12155 attach_sb_event(event); in account_pmu_sb_event()
12179 static void account_event(struct perf_event *event) in account_event() argument
12183 if (event->parent) in account_event()
12186 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
12188 if (event->attr.mmap || event->attr.mmap_data) in account_event()
12190 if (event->attr.build_id) in account_event()
12192 if (event->attr.comm) in account_event()
12194 if (event->attr.namespaces) in account_event()
12196 if (event->attr.cgroup) in account_event()
12198 if (event->attr.task) in account_event()
12200 if (event->attr.freq) in account_event()
12202 if (event->attr.context_switch) { in account_event()
12206 if (has_branch_stack(event)) in account_event()
12208 if (is_cgroup_event(event)) in account_event()
12210 if (event->attr.ksymbol) in account_event()
12212 if (event->attr.bpf_event) in account_event()
12214 if (event->attr.text_poke) in account_event()
12245 account_pmu_sb_event(event); in account_event()
12249 * Allocate and initialize an event structure
12260 struct perf_event *event; in perf_event_alloc() local
12275 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
12277 if (!event) in perf_event_alloc()
12285 group_leader = event; in perf_event_alloc()
12287 mutex_init(&event->child_mutex); in perf_event_alloc()
12288 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
12290 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
12291 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
12292 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
12293 init_event_group(event); in perf_event_alloc()
12294 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
12295 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
12296 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
12297 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
12300 init_waitqueue_head(&event->waitq); in perf_event_alloc()
12301 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
12302 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); in perf_event_alloc()
12303 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
12305 mutex_init(&event->mmap_mutex); in perf_event_alloc()
12306 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
12308 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
12309 event->cpu = cpu; in perf_event_alloc()
12310 event->attr = *attr; in perf_event_alloc()
12311 event->group_leader = group_leader; in perf_event_alloc()
12312 event->pmu = NULL; in perf_event_alloc()
12313 event->oncpu = -1; in perf_event_alloc()
12315 event->parent = parent_event; in perf_event_alloc()
12317 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
12318 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
12320 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12323 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12326 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12332 event->hw.target = get_task_struct(task); in perf_event_alloc()
12335 event->clock = &local_clock; in perf_event_alloc()
12337 event->clock = parent_event->clock; in perf_event_alloc()
12347 event->prog = prog; in perf_event_alloc()
12353 event->overflow_handler = overflow_handler; in perf_event_alloc()
12354 event->overflow_handler_context = context; in perf_event_alloc()
12355 } else if (is_write_backward(event)) { in perf_event_alloc()
12356 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12357 event->overflow_handler_context = NULL; in perf_event_alloc()
12359 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12360 event->overflow_handler_context = NULL; in perf_event_alloc()
12363 perf_event__state_init(event); in perf_event_alloc()
12367 hwc = &event->hw; in perf_event_alloc()
12384 if (!has_branch_stack(event)) in perf_event_alloc()
12385 event->attr.branch_sample_type = 0; in perf_event_alloc()
12387 pmu = perf_init_event(event); in perf_event_alloc()
12403 if (event->attr.aux_output && in perf_event_alloc()
12405 event->attr.aux_pause || event->attr.aux_resume)) { in perf_event_alloc()
12410 if (event->attr.aux_pause && event->attr.aux_resume) { in perf_event_alloc()
12415 if (event->attr.aux_start_paused) { in perf_event_alloc()
12420 event->hw.aux_paused = 1; in perf_event_alloc()
12424 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12429 err = exclusive_event_init(event); in perf_event_alloc()
12433 if (has_addr_filter(event)) { in perf_event_alloc()
12434 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12437 if (!event->addr_filter_ranges) { in perf_event_alloc()
12446 if (event->parent) { in perf_event_alloc()
12447 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12450 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12451 event->parent->addr_filter_ranges, in perf_event_alloc()
12457 event->addr_filters_gen = 1; in perf_event_alloc()
12460 if (!event->parent) { in perf_event_alloc()
12461 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12465 event->attach_state |= PERF_ATTACH_CALLCHAIN; in perf_event_alloc()
12469 err = security_perf_event_alloc(event); in perf_event_alloc()
12474 account_event(event); in perf_event_alloc()
12476 return event; in perf_event_alloc()
12479 __free_event(event); in perf_event_alloc()
12619 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12625 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12630 if (event == output_event) in perf_event_set_output()
12636 if (output_event->cpu != event->cpu) in perf_event_set_output()
12642 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12648 if (output_event->clock != event->clock) in perf_event_set_output()
12655 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12661 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12662 event->pmu != output_event->pmu) in perf_event_set_output()
12668 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12672 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12675 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12691 ring_buffer_attach(event, rb); in perf_event_set_output()
12695 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
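Editorial note: perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl, which lets several events share one ring buffer subject to the compatibility checks above (same CPU or task, same clock, same write direction). A userspace sketch:

        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include <unistd.h>
        #include <linux/perf_event.h>

        /* Map fd_leader's ring buffer and make fd_other write into it too;
         * data_pages must be a power of two. */
        static void *share_ring_buffer(int fd_leader, int fd_other, size_t data_pages)
        {
                size_t page = (size_t)sysconf(_SC_PAGESIZE);
                size_t len = (data_pages + 1) * page;   /* +1 for the header page */
                void *rb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                                fd_leader, 0);

                if (rb == MAP_FAILED)
                        return NULL;
                if (ioctl(fd_other, PERF_EVENT_IOC_SET_OUTPUT, fd_leader))
                        return NULL;
                return rb;
        }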
12703 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12709 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12714 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12719 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12723 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12727 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12734 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
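Editorial note: the clock is chosen at open time through attr.use_clockid and attr.clockid, and maps onto the kernel accessors listed above. A sketch:

        #include <linux/perf_event.h>
        #include <time.h>

        static void request_monotonic_raw(struct perf_event_attr *attr)
        {
                attr->use_clockid = 1;
                attr->clockid = CLOCK_MONOTONIC_RAW; /* served by ktime_get_raw_fast_ns() */
        }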
12772 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12777 * @group_fd: group leader event fd
12778 * @flags: perf event open flags
12786 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12890 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12892 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12893 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12897 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12898 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12908 pmu = event->pmu; in SYSCALL_DEFINE5()
12911 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12917 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12926 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12938 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
12953 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12958 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
12977 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12985 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
13000 if (is_software_event(event) && in SYSCALL_DEFINE5()
13003 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
13014 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
13019 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
13035 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
13040 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
13043 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
13048 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
13053 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
13060 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
13062 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
13069 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
13109 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
13121 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
13124 perf_event__header_size(event); in SYSCALL_DEFINE5()
13125 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
13127 event->owner = current; in SYSCALL_DEFINE5()
13129 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
13140 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
13145 * kept alive until we place the new event on the sibling_list. in SYSCALL_DEFINE5()
13153 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
13154 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
13163 free_event(event); in SYSCALL_DEFINE5()
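Editorial note: for reference, the userspace side of this syscall in its simplest counting form, roughly the perf_event_open(2) man page example; glibc has no wrapper, so it goes through syscall(2):

        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <string.h>
        #include <stdio.h>
        #include <stdint.h>

        static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                    int cpu, int group_fd, unsigned long flags)
        {
                return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
        }

        int main(void)
        {
                struct perf_event_attr attr;
                uint64_t count;
                int fd;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_HARDWARE;
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                attr.disabled = 1;
                attr.exclude_kernel = 1;

                fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
                if (fd < 0)
                        return 1;

                ioctl(fd, PERF_EVENT_IOC_RESET, 0);
                ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
                /* ... code under measurement ... */
                ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

                if (read(fd, &count, sizeof(count)) == sizeof(count))
                        printf("instructions: %llu\n", (unsigned long long)count);
                close(fd);
                return 0;
        }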
13178 * @overflow_handler: callback to trigger when we hit the event
13189 struct perf_event *event; in perf_event_create_kernel_counter() local
13200 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
13202 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
13203 err = PTR_ERR(event); in perf_event_create_kernel_counter()
13208 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
13209 pmu = event->pmu; in perf_event_create_kernel_counter()
13212 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
13217 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
13230 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
13235 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
13239 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
13252 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
13257 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
13261 return event; in perf_event_create_kernel_counter()
13265 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
13271 free_event(event); in perf_event_create_kernel_counter()
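Editorial note: in-kernel users create counters the same way, only without a file descriptor; the hardlockup watchdog is one such caller. A minimal kernel-side sketch under that assumption (my_overflow and create_cycle_counter are illustrative names, not from this file):

        #include <linux/perf_event.h>

        static void my_overflow(struct perf_event *event,
                                struct perf_sample_data *data, struct pt_regs *regs)
        {
                /* Runs from NMI/IRQ context on counter overflow; keep it minimal. */
        }

        static struct perf_event *create_cycle_counter(int cpu, u64 period)
        {
                struct perf_event_attr attr = {
                        .type           = PERF_TYPE_HARDWARE,
                        .config         = PERF_COUNT_HW_CPU_CYCLES,
                        .size           = sizeof(attr),
                        .sample_period  = period,
                        .pinned         = 1,
                };

                /* task == NULL: per-CPU counter; event->owner is set to TASK_TOMBSTONE above. */
                return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                        my_overflow, NULL);
        }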
13282 struct perf_event *event, *sibling; in __perf_pmu_remove() local
13284 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
13285 perf_remove_from_context(event, 0); in __perf_pmu_remove()
13286 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
13287 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
13289 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
13299 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
13302 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
13306 event->cpu = cpu; in __perf_pmu_install_event()
13307 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
13308 event->pmu_ctx = epc; in __perf_pmu_install_event()
13310 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
13311 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
13312 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13315 * Now that event->ctx is updated and visible, put the old ctx. in __perf_pmu_install_event()
13323 struct perf_event *event, *tmp; in __perf_pmu_install() local
13333 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13334 if (event->group_leader == event) in __perf_pmu_install()
13337 list_del(&event->migrate_entry); in __perf_pmu_install()
13338 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13345 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13346 list_del(&event->migrate_entry); in __perf_pmu_install()
13347 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13411 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13413 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13433 perf_remove_from_context(event, detach_flags | DETACH_EXIT); in perf_event_exit_event()
13444 put_event(event); in perf_event_exit_event()
13451 perf_event_wakeup(event); in perf_event_exit_event()
13516 * When a child task exits, feed back event values to parent events.
13523 struct perf_event *event, *tmp; in perf_event_exit_task() local
13526 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13528 list_del_init(&event->owner_entry); in perf_event_exit_task()
13535 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13550 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13553 struct perf_event *parent = event->parent; in perf_free_event()
13559 list_del_init(&event->child_list); in perf_free_event()
13563 perf_group_detach(event); in perf_free_event()
13564 list_del_event(event, ctx); in perf_free_event()
13566 put_event(event); in perf_free_event()
13579 struct perf_event *event, *tmp; in perf_event_free_task() local
13599 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13600 perf_free_event(event, ctx); in perf_event_free_task()
13613 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13649 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13651 if (!event) in perf_event_attrs()
13654 return &event->attr; in perf_event_attrs()
13667 * Inherit an event from parent task to child task.
13729 * Make the child state follow the state of the parent event, in inherit_event()
13768 * Link this into the parent event's child list in inherit_event()
13777 * Inherits an event group.
13821 * Creates the child task context and tries to inherit the event-group.
13824 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13832 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13840 if (!event->attr.inherit || in inherit_task_group()
13841 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13843 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13863 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
13877 struct perf_event *event; in perf_event_init_context() local
13911 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13912 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13927 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13928 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
14043 struct perf_event *event; in __perf_event_exit_context() local
14047 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
14048 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()