Lines matching refs:event — reference search over the perf events core (kernel/events/core.c); each hit gives the source line number, the line itself, and the enclosing function plus the symbol's role (argument, local, or member).
211 static bool is_kernel_event(struct perf_event *event) in is_kernel_event() argument
213 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
247 struct perf_event *event; member
255 struct perf_event *event = efs->event; in event_function() local
256 struct perf_event_context *ctx = event->ctx; in event_function()
291 efs->func(event, cpuctx, ctx, efs->data); in event_function()
298 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
300 struct perf_event_context *ctx = event->ctx; in event_function_call()
304 .event = event, in event_function_call()
309 if (!event->parent) { in event_function_call()
319 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
345 func(event, NULL, ctx, data); in event_function_call()
355 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
357 struct perf_event_context *ctx = event->ctx; in event_function_local()
394 func(event, cpuctx, ctx, data); in event_function_local()
609 static u64 perf_event_time(struct perf_event *event);
618 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
620 return event->clock(); in perf_event_clock()
646 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
648 struct perf_event *leader = event->group_leader; in __perf_effective_state()
653 return event->state; in __perf_effective_state()
657 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
659 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
660 u64 delta = now - event->tstamp; in __perf_update_times()
662 *enabled = event->total_time_enabled; in __perf_update_times()
666 *running = event->total_time_running; in __perf_update_times()
671 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
673 u64 now = perf_event_time(event); in perf_event_update_time()
675 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
676 &event->total_time_running); in perf_event_update_time()
677 event->tstamp = now; in perf_event_update_time()
689 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
691 if (event->state == state) in perf_event_set_state()
694 perf_event_update_time(event); in perf_event_set_state()
699 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
700 perf_event_update_sibling_time(event); in perf_event_set_state()
702 WRITE_ONCE(event->state, state); in perf_event_set_state()
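Read together, 646-702 are the enabled/running clock: a sibling's effective state is capped by its group leader (646-653), and every state change first folds the time elapsed since the last change into the two totals (657-677) before the new state is stored (689-702). A minimal standalone sketch of that bookkeeping, with simplified names rather than the kernel's structs:

    /* Sketch of __perf_update_times() above: time since the last state
     * change counts as "enabled" while the event is at least INACTIVE,
     * and as "running" only while it is ACTIVE. Not kernel code. */
    enum ev_state { EV_OFF = -1, EV_INACTIVE = 0, EV_ACTIVE = 1 };

    struct ev_times {
        enum ev_state state;
        unsigned long long tstamp;        /* time of last state change */
        unsigned long long total_enabled;
        unsigned long long total_running;
    };

    static void ev_update_times(struct ev_times *t, unsigned long long now)
    {
        unsigned long long delta = now - t->tstamp;

        if (t->state >= EV_INACTIVE)
            t->total_enabled += delta;
        if (t->state >= EV_ACTIVE)
            t->total_running += delta;
        t->tstamp = now;
    }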
752 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
757 if (!event->cgrp) in perf_cgroup_match()
771 event->cgrp->css.cgroup); in perf_cgroup_match()
774 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
776 css_put(&event->cgrp->css); in perf_detach_cgroup()
777 event->cgrp = NULL; in perf_detach_cgroup()
780 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
782 return event->cgrp != NULL; in is_cgroup_event()
785 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
789 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
793 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
797 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
835 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
843 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
846 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
922 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
963 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
980 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
985 event->cgrp = cgrp; in perf_cgroup_connect()
993 perf_detach_cgroup(event); in perf_cgroup_connect()
1000 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1004 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1007 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1022 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1026 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1029 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1046 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1051 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1054 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1059 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1068 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1080 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1085 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1091 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1096 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1300 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1306 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1314 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1324 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1326 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1329 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
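perf_event_ctx_lock_nested() (1300-1326) is a lock-then-revalidate loop: it snapshots event->ctx, takes that context's mutex, and retries if the event migrated to another context while it was blocking on the lock. A userspace rendering of the same pattern; pthreads stand in for the kernel mutex, and the kernel version additionally pins ctx with a refcount so it cannot be freed during the race:

    #include <pthread.h>
    #include <stdatomic.h>

    struct ctx   { pthread_mutex_t mutex; };
    struct event { _Atomic(struct ctx *) ctx; };

    /* Lock the context the event currently belongs to, retrying if the
     * event is moved to a different context before we get the lock. */
    static struct ctx *event_ctx_lock(struct event *ev)
    {
        for (;;) {
            struct ctx *ctx = atomic_load(&ev->ctx);   /* cf. READ_ONCE() */

            pthread_mutex_lock(&ctx->mutex);
            if (atomic_load(&ev->ctx) == ctx)          /* cf. event->ctx != ctx */
                return ctx;                            /* still the right one */
            pthread_mutex_unlock(&ctx->mutex);         /* raced; try again */
        }
    }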
1355 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1362 if (event->parent) in perf_event_pid_type()
1363 event = event->parent; in perf_event_pid_type()
1365 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1372 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1374 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1377 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1379 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1386 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1388 u64 id = event->id; in primary_event_id()
1390 if (event->parent) in primary_event_id()
1391 id = event->parent->id; in primary_event_id()
1511 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1513 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1518 if (is_cgroup_event(event)) in perf_event_time()
1519 return perf_cgroup_event_time(event); in perf_event_time()
1524 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1526 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1531 if (is_cgroup_event(event)) in perf_event_time_now()
1532 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1541 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1543 struct perf_event_context *ctx = event->ctx; in get_event_type()
1552 if (event->group_leader != event) in get_event_type()
1553 event = event->group_leader; in get_event_type()
1555 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1565 static void init_event_group(struct perf_event *event) in init_event_group() argument
1567 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1568 event->group_index = 0; in init_event_group()
1576 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1578 if (event->attr.pinned) in get_event_groups()
1593 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1598 if (event->cgrp) in event_cgroup()
1599 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1707 struct perf_event *event) in perf_event_groups_insert() argument
1709 event->group_index = ++groups->index; in perf_event_groups_insert()
1711 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1718 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1722 groups = get_event_groups(event, ctx); in add_event_to_groups()
1723 perf_event_groups_insert(groups, event); in add_event_to_groups()
1731 struct perf_event *event) in perf_event_groups_delete() argument
1733 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1736 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1737 init_event_group(event); in perf_event_groups_delete()
1744 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1748 groups = get_event_groups(event, ctx); in del_event_from_groups()
1749 perf_event_groups_delete(groups, event); in del_event_from_groups()
1774 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1777 .cpu = event->cpu, in perf_event_groups_next()
1779 .cgroup = event_cgroup(event), in perf_event_groups_next()
1783 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1790 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1791 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1792 event; event = perf_event_groups_next(event, pmu))
1797 #define perf_event_groups_for_each(event, groups) \ argument
1798 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1799 typeof(*event), group_node); event; \
1800 event = rb_entry_safe(rb_next(&event->group_node), \
1801 typeof(*event), group_node))
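The rbtree these helpers and macros walk (1707-1801) keeps a context's pinned and flexible groups sorted by a composite key, which is why perf_event_groups_for_cpu_pmu() can visit exactly the groups of one {cpu, pmu} pair. A sketch of the implied ordering; note the kernel compares cgroups by cgroup ID rather than by pointer, so this is a simplification:

    /* Composite sort key behind __group_less()/__group_cmp() above:
     * {cpu, pmu, cgroup, group_index}. group_index is the insertion
     * counter (1709), so it breaks ties in arrival order. */
    struct group_key {
        int cpu;
        const void *pmu;
        const void *cgroup;        /* kernel: compared via cgroup_id() */
        unsigned long long index;
    };

    static int group_cmp(const struct group_key *a, const struct group_key *b)
    {
        if (a->cpu != b->cpu)       return a->cpu < b->cpu ? -1 : 1;
        if (a->pmu != b->pmu)       return a->pmu < b->pmu ? -1 : 1;
        if (a->cgroup != b->cgroup) return a->cgroup < b->cgroup ? -1 : 1;
        if (a->index != b->index)   return a->index < b->index ? -1 : 1;
        return 0;
    }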
1816 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1820 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1821 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1823 event->tstamp = perf_event_time(event); in list_add_event()
1830 if (event->group_leader == event) { in list_add_event()
1831 event->group_caps = event->event_caps; in list_add_event()
1832 add_event_to_groups(event, ctx); in list_add_event()
1835 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1837 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1839 if (event->attr.inherit_stat) in list_add_event()
1841 if (has_inherit_and_sample_read(&event->attr)) in list_add_event()
1844 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1845 perf_cgroup_event_enable(event, ctx); in list_add_event()
1848 event->pmu_ctx->nr_events++; in list_add_event()
1854 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1856 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1890 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1908 size += event->read_size; in __perf_event_header_size()
1928 event->header_size = size; in __perf_event_header_size()
1935 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1937 event->read_size = in perf_event__header_size()
1938 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1939 event->group_leader->nr_siblings); in perf_event__header_size()
1940 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1943 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1946 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1967 event->id_header_size = size; in perf_event__id_header_size()
1981 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1983 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
1985 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
2000 if (event == group_leader) in perf_event_validate_size()
2012 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2014 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2016 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2022 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2025 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2027 if (group_leader == event) in perf_group_attach()
2030 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2032 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2034 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2049 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2051 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2057 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2060 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2063 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2065 if (event->attr.inherit_stat) in list_del_event()
2067 if (has_inherit_and_sample_read(&event->attr)) in list_del_event()
2070 list_del_rcu(&event->event_entry); in list_del_event()
2072 if (event->group_leader == event) in list_del_event()
2073 del_event_from_groups(event, ctx); in list_del_event()
2082 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2083 perf_cgroup_event_disable(event, ctx); in list_del_event()
2084 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2088 event->pmu_ctx->nr_events--; in list_del_event()
2092 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2097 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2100 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2103 static void put_event(struct perf_event *event);
2104 static void event_sched_out(struct perf_event *event,
2107 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2109 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2115 if (event->aux_event) { in perf_put_aux_event()
2116 iter = event->aux_event; in perf_put_aux_event()
2117 event->aux_event = NULL; in perf_put_aux_event()
2126 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2127 if (iter->aux_event != event) in perf_put_aux_event()
2131 put_event(event); in perf_put_aux_event()
2139 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2143 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2145 return event->attr.aux_output || has_aux_action(event); in perf_need_aux_event()
2148 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2163 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2166 if (event->attr.aux_output && in perf_get_aux_event()
2167 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2170 if ((event->attr.aux_pause || event->attr.aux_resume) && in perf_get_aux_event()
2174 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2186 event->aux_event = group_leader; in perf_get_aux_event()
2191 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2193 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2194 &event->pmu_ctx->flexible_active; in get_event_list()
2203 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2205 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2206 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2209 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2211 struct perf_event *leader = event->group_leader; in perf_group_detach()
2213 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2220 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2223 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2225 perf_put_aux_event(event); in perf_group_detach()
2230 if (leader != event) { in perf_group_detach()
2231 list_del_init(&event->sibling_list); in perf_group_detach()
2232 event->group_leader->nr_siblings--; in perf_group_detach()
2233 event->group_leader->group_generation++; in perf_group_detach()
2242 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2251 sibling->group_caps = event->group_caps; in perf_group_detach()
2254 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2260 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2272 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2274 struct perf_event *parent_event = event->parent; in perf_child_detach()
2276 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2279 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2286 sync_child_event(event); in perf_child_detach()
2287 list_del_init(&event->child_list); in perf_child_detach()
2290 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2292 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2296 event_filter_match(struct perf_event *event) in event_filter_match() argument
2298 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2299 perf_cgroup_match(event); in event_filter_match()
2303 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2305 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2311 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2314 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2322 list_del_init(&event->active_list); in event_sched_out()
2324 perf_pmu_disable(event->pmu); in event_sched_out()
2326 event->pmu->del(event, 0); in event_sched_out()
2327 event->oncpu = -1; in event_sched_out()
2329 if (event->pending_disable) { in event_sched_out()
2330 event->pending_disable = 0; in event_sched_out()
2331 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2335 perf_event_set_state(event, state); in event_sched_out()
2337 if (!is_software_event(event)) in event_sched_out()
2339 if (event->attr.freq && event->attr.sample_freq) { in event_sched_out()
2343 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2346 perf_pmu_enable(event->pmu); in event_sched_out()
2352 struct perf_event *event; in group_sched_out() local
2364 for_each_sibling_event(event, group_event) in group_sched_out()
2365 event_sched_out(event, ctx); in group_sched_out()
2397 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) in ctx_time_update_event() argument
2403 update_cgrp_time_from_event(event); in ctx_time_update_event()
2418 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2423 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2433 event->pending_disable = 1; in __perf_remove_from_context()
2434 event_sched_out(event, ctx); in __perf_remove_from_context()
2436 perf_group_detach(event); in __perf_remove_from_context()
2438 perf_child_detach(event); in __perf_remove_from_context()
2439 list_del_event(event, ctx); in __perf_remove_from_context()
2441 event->state = PERF_EVENT_STATE_DEAD; in __perf_remove_from_context()
2477 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2479 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2490 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2497 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2503 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2508 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2511 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2512 ctx_time_update_event(ctx, event); in __perf_event_disable()
2514 if (event == event->group_leader) in __perf_event_disable()
2515 group_sched_out(event, ctx); in __perf_event_disable()
2517 event_sched_out(event, ctx); in __perf_event_disable()
2519 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2520 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2522 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2539 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2541 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2544 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2550 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2553 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2555 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2562 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2566 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2567 _perf_event_disable(event); in perf_event_disable()
2568 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2572 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2574 event->pending_disable = 1; in perf_event_disable_inatomic()
2575 irq_work_queue(&event->pending_disable_irq); in perf_event_disable_inatomic()
2580 static void perf_log_throttle(struct perf_event *event, int enable);
2581 static void perf_log_itrace_start(struct perf_event *event);
2584 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2586 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2590 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2594 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2597 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2604 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2611 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2612 perf_log_throttle(event, 1); in event_sched_in()
2613 event->hw.interrupts = 0; in event_sched_in()
2616 perf_pmu_disable(event->pmu); in event_sched_in()
2618 perf_log_itrace_start(event); in event_sched_in()
2620 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2621 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2622 event->oncpu = -1; in event_sched_in()
2627 if (!is_software_event(event)) in event_sched_in()
2629 if (event->attr.freq && event->attr.sample_freq) { in event_sched_in()
2633 if (event->attr.exclusive) in event_sched_in()
2637 perf_pmu_enable(event->pmu); in event_sched_in()
2645 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2659 for_each_sibling_event(event, group_event) { in group_sched_in()
2660 if (event_sched_in(event, ctx)) { in group_sched_in()
2661 partial_group = event; in group_sched_in()
2675 for_each_sibling_event(event, group_event) { in group_sched_in()
2676 if (event == partial_group) in group_sched_in()
2679 event_sched_out(event, ctx); in group_sched_in()
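group_sched_in() (2645-2679) shows the all-or-nothing group policy: if any sibling fails to schedule, everything already added is unwound (2675-2679). A sketch of that rollback shape with hypothetical stand-in helpers (the kernel also wraps the whole attempt in a PMU transaction via start_txn/commit_txn/cancel_txn):

    /* All-or-nothing group add: unwind on partial failure.
     * ev_add()/ev_remove() are illustrative, not kernel APIs. */
    static int group_add_all(struct ev *leader, struct ev *sibling[], int n)
    {
        int i;

        if (ev_add(leader))
            return -1;

        for (i = 0; i < n; i++) {
            if (ev_add(sibling[i]))
                goto rollback;
        }
        return 0;

    rollback:
        while (i-- > 0)            /* undo the siblings already added */
            ev_remove(sibling[i]);
        ev_remove(leader);
        return -1;
    }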
2691 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2693 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2699 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2711 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2720 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2723 list_add_event(event, ctx); in add_event_to_ctx()
2724 perf_group_attach(event); in add_event_to_ctx()
2836 struct perf_event *event = info; in __perf_install_in_context() local
2837 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2868 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2875 event->cgrp->css.cgroup); in __perf_install_in_context()
2881 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2882 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
2883 get_event_type(event)); in __perf_install_in_context()
2885 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2894 static bool exclusive_event_installable(struct perf_event *event,
2904 struct perf_event *event, in perf_install_in_context() argument
2911 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2913 if (event->cpu != -1) in perf_install_in_context()
2914 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2920 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2930 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2931 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2937 add_event_to_ctx(event, ctx); in perf_install_in_context()
2943 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2985 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
3007 add_event_to_ctx(event, ctx); in perf_install_in_context()
3014 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
3019 struct perf_event *leader = event->group_leader; in __perf_event_enable()
3022 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
3023 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
3028 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
3029 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3034 if (!event_filter_match(event)) in __perf_event_enable()
3041 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
3048 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3060 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3062 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3065 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3066 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3079 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3083 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3084 event->group_leader == event) in _perf_event_enable()
3087 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3091 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3097 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3101 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3102 _perf_event_enable(event); in perf_event_enable()
3103 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3108 struct perf_event *event; member
3115 struct perf_event *event = sd->event; in __perf_event_stop() local
3118 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3128 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3131 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3143 event->pmu->start(event, 0); in __perf_event_stop()
3148 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3151 .event = event, in perf_event_stop()
3157 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3168 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3197 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3199 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3201 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3205 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3206 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3207 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3213 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3218 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3221 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3222 _perf_event_enable(event); in _perf_event_refresh()
3230 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3235 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3236 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3237 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
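_perf_event_refresh() (3213-3222) adds `refresh` to event->event_limit and enables the event, i.e. it arms the counter for that many more overflows before it auto-disables. A hedged userspace sketch of the usual signal-driven use, per the pattern documented for perf_event_open(2); opening the fd with attr.disabled = 1 and a sample_period is assumed and elided:

    #include <fcntl.h>
    #include <signal.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/perf_event.h>

    static int perf_fd;            /* from perf_event_open(); setup elided */

    static void on_overflow(int sig)
    {
        (void)sig;
        /* event_limit hit zero and the event auto-disabled; re-arm it
         * for one more overflow (IOC_REFRESH -> _perf_event_refresh()) */
        ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
    }

    static void arm_overflow_signal(void)
    {
        signal(SIGIO, on_overflow);
        fcntl(perf_fd, F_SETFL, O_ASYNC);    /* SIGIO on counter overflow */
        fcntl(perf_fd, F_SETOWN, getpid());
        ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
    }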
3267 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3274 if (event->attr.type != attr->type) in perf_event_modify_attr()
3277 switch (event->attr.type) { in perf_event_modify_attr()
3286 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3288 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3294 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3295 err = func(event, attr); in perf_event_modify_attr()
3298 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3305 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3313 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3329 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3332 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3336 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3339 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3461 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3466 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3476 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3477 event->pmu->read(event); in __perf_event_sync_stat()
3479 perf_event_update_time(event); in __perf_event_sync_stat()
3486 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3489 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3490 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3495 perf_event_update_userpage(event); in __perf_event_sync_stat()
3502 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3509 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3515 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3518 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3520 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3788 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) in __heap_add() argument
3792 if (event) { in __heap_add()
3793 itrs[heap->nr] = event; in __heap_add()
3890 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3892 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3895 perf_event_update_time(event); in event_update_userpage()
3896 perf_event_update_userpage(event); in event_update_userpage()
3903 struct perf_event *event; in group_update_userpage() local
3908 for_each_sibling_event(event, group_event) in group_update_userpage()
3909 event_update_userpage(event); in group_update_userpage()
3912 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3914 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3917 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3920 if (!event_filter_match(event)) in merge_sched_in()
3923 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3924 if (!group_sched_in(event, ctx)) in merge_sched_in()
3925 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3928 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3930 if (event->attr.pinned) { in merge_sched_in()
3931 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3932 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3936 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3937 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3939 group_update_userpage(event); in merge_sched_in()
4099 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4101 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4175 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4177 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4181 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4199 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4204 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4210 struct perf_event *event; in perf_adjust_freq_unthr_events() local
4215 list_for_each_entry(event, event_list, active_list) { in perf_adjust_freq_unthr_events()
4216 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_events()
4220 if (!event_filter_match(event)) in perf_adjust_freq_unthr_events()
4223 hwc = &event->hw; in perf_adjust_freq_unthr_events()
4227 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_events()
4228 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4229 event->pmu->start(event, 0); in perf_adjust_freq_unthr_events()
4232 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4238 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_events()
4240 now = local64_read(&event->count); in perf_adjust_freq_unthr_events()
4252 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_events()
4254 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_events()
4298 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4307 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4308 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4315 struct perf_event *event; in ctx_event_to_rotate() local
4323 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4325 if (event) in ctx_event_to_rotate()
4336 event = __node_2_pe(node); in ctx_event_to_rotate()
4343 event = __node_2_pe(node); in ctx_event_to_rotate()
4350 event = __node_2_pe(node); in ctx_event_to_rotate()
4359 return event; in ctx_event_to_rotate()
4442 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4445 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4448 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4449 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4452 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4466 struct perf_event *event; in perf_event_enable_on_exec() local
4481 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4482 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4483 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4502 static void perf_remove_from_owner(struct perf_event *event);
4503 static void perf_event_exit_event(struct perf_event *event,
4513 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4522 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4523 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4526 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4527 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4531 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4547 struct perf_event *event; member
4554 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4562 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) { in __perf_event_read_cpu()
4563 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu); in __perf_event_read_cpu()
4569 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4586 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4587 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4589 struct pmu *pmu = event->pmu; in __perf_event_read()
4602 ctx_time_update_event(ctx, event); in __perf_event_read()
4604 perf_event_update_time(event); in __perf_event_read()
4606 perf_event_update_sibling_time(event); in __perf_event_read()
4608 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4612 pmu->read(event); in __perf_event_read()
4619 pmu->read(event); in __perf_event_read()
4621 for_each_sibling_event(sub, event) { in __perf_event_read()
4637 static inline u64 perf_event_count(struct perf_event *event, bool self) in perf_event_count() argument
4640 return local64_read(&event->count); in perf_event_count()
4642 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4645 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4653 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4654 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4665 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4683 if (event->attr.inherit) { in perf_event_read_local()
4689 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4690 event->hw.target != current) { in perf_event_read_local()
4699 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4700 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4703 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4710 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4721 event->pmu->read(event); in perf_event_read_local()
4723 *value = local64_read(&event->count); in perf_event_read_local()
4727 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
4739 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4741 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4760 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4765 .event = event, in perf_event_read()
4771 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4788 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4792 state = event->state; in perf_event_read()
4802 ctx_time_update_event(ctx, event); in perf_event_read()
4804 perf_event_update_time(event); in perf_event_read()
4806 perf_event_update_sibling_time(event); in perf_event_read()
4877 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4886 err = perf_allow_cpu(&event->attr); in find_get_context()
4890 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4951 struct perf_event *event) in find_get_pmu_context() argument
4964 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4984 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_pmu_context()
5077 static void perf_event_free_filter(struct perf_event *event);
5081 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
5083 if (event->ns) in free_event_rcu()
5084 put_pid_ns(event->ns); in free_event_rcu()
5085 perf_event_free_filter(event); in free_event_rcu()
5086 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
5089 static void ring_buffer_attach(struct perf_event *event,
5092 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
5094 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
5097 list_del_rcu(&event->sb_list); in detach_sb_event()
5101 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5103 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5105 if (event->parent) in is_sb_event()
5108 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5120 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5122 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5123 detach_sb_event(event); in unaccount_pmu_sb_event()
5148 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5152 if (event->parent) in unaccount_event()
5155 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5157 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5159 if (event->attr.build_id) in unaccount_event()
5161 if (event->attr.comm) in unaccount_event()
5163 if (event->attr.namespaces) in unaccount_event()
5165 if (event->attr.cgroup) in unaccount_event()
5167 if (event->attr.task) in unaccount_event()
5169 if (event->attr.freq) in unaccount_event()
5171 if (event->attr.context_switch) { in unaccount_event()
5175 if (is_cgroup_event(event)) in unaccount_event()
5177 if (has_branch_stack(event)) in unaccount_event()
5179 if (event->attr.ksymbol) in unaccount_event()
5181 if (event->attr.bpf_event) in unaccount_event()
5183 if (event->attr.text_poke) in unaccount_event()
5191 unaccount_pmu_sb_event(event); in unaccount_event()
5214 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5216 struct pmu *pmu = event->pmu; in exclusive_event_init()
5234 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5245 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5247 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5253 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5269 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5273 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5281 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
5288 static void perf_addr_filters_splice(struct perf_event *event,
5291 static void perf_pending_task_sync(struct perf_event *event) in perf_pending_task_sync() argument
5293 struct callback_head *head = &event->pending_task; in perf_pending_task_sync()
5295 if (!event->pending_work) in perf_pending_task_sync()
5302 event->pending_work = 0; in perf_pending_task_sync()
5303 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task_sync()
5312 rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE); in perf_pending_task_sync()
5315 static void _free_event(struct perf_event *event) in _free_event() argument
5317 irq_work_sync(&event->pending_irq); in _free_event()
5318 irq_work_sync(&event->pending_disable_irq); in _free_event()
5319 perf_pending_task_sync(event); in _free_event()
5321 unaccount_event(event); in _free_event()
5323 security_perf_event_free(event); in _free_event()
5325 if (event->rb) { in _free_event()
5332 mutex_lock(&event->mmap_mutex); in _free_event()
5333 ring_buffer_attach(event, NULL); in _free_event()
5334 mutex_unlock(&event->mmap_mutex); in _free_event()
5337 if (is_cgroup_event(event)) in _free_event()
5338 perf_detach_cgroup(event); in _free_event()
5340 if (!event->parent) { in _free_event()
5341 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
5345 perf_event_free_bpf_prog(event); in _free_event()
5346 perf_addr_filters_splice(event, NULL); in _free_event()
5347 kfree(event->addr_filter_ranges); in _free_event()
5349 if (event->destroy) in _free_event()
5350 event->destroy(event); in _free_event()
5356 if (event->hw.target) in _free_event()
5357 put_task_struct(event->hw.target); in _free_event()
5359 if (event->pmu_ctx) in _free_event()
5360 put_pmu_ctx(event->pmu_ctx); in _free_event()
5366 if (event->ctx) in _free_event()
5367 put_ctx(event->ctx); in _free_event()
5369 exclusive_event_destroy(event); in _free_event()
5370 module_put(event->pmu->module); in _free_event()
5372 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5379 static void free_event(struct perf_event *event) in free_event() argument
5381 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5383 atomic_long_read(&event->refcount), event)) { in free_event()
5388 _free_event(event); in free_event()
5394 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5405 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5433 if (event->owner) { in perf_remove_from_owner()
5434 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5435 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5442 static void put_event(struct perf_event *event) in put_event() argument
5444 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5447 _free_event(event); in put_event()
5455 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5457 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5466 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5471 if (!is_kernel_event(event)) in perf_event_release_kernel()
5472 perf_remove_from_owner(event); in perf_event_release_kernel()
5474 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5488 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5490 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5493 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5494 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5517 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5519 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5526 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5535 put_event(event); in perf_event_release_kernel()
5540 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5555 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5572 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
5586 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5594 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5596 (void)perf_event_read(event, false); in __perf_event_read_value()
5597 total += perf_event_count(event, false); in __perf_event_read_value()
5599 *enabled += event->total_time_enabled + in __perf_event_read_value()
5600 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5601 *running += event->total_time_running + in __perf_event_read_value()
5602 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5604 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5610 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5615 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5620 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5621 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5622 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
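perf_event_read_value() (5615-5622) and perf_event_release_kernel() (5455 onward) are exported halves of the in-kernel counter API whose internals dominate this stretch of the listing. A hedged sketch of how a module might drive them; the counter placement (CPU 0, not task-bound, no overflow handler) is illustrative:

    #include <linux/perf_event.h>
    #include <linux/printk.h>
    #include <linux/err.h>

    static struct perf_event *demo_event;

    static int demo_start(void)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_HARDWARE,
            .config = PERF_COUNT_HW_CPU_CYCLES,
            .size   = sizeof(attr),
        };

        demo_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
                                NULL /* per-CPU, not task-bound */,
                                NULL /* no overflow handler */, NULL);
        return IS_ERR(demo_event) ? PTR_ERR(demo_event) : 0;
    }

    static void demo_sample(void)
    {
        u64 enabled, running;
        u64 count = perf_event_read_value(demo_event, &enabled, &running);

        pr_info("cycles=%llu enabled=%llu running=%llu\n",
                count, enabled, running);
    }

    static void demo_stop(void)
    {
        perf_event_release_kernel(demo_event);  /* tears down via put_event() */
    }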
5707 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5710 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5717 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5737 ret = event->read_size; in perf_read_group()
5738 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5749 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5756 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5762 values[n++] = primary_event_id(event); in perf_read_one()
5764 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
5772 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5776 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5779 mutex_lock(&event->child_mutex); in is_event_hup()
5780 no_children = list_empty(&event->child_list); in is_event_hup()
5781 mutex_unlock(&event->child_mutex); in is_event_hup()
5789 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5791 u64 read_format = event->attr.read_format; in __perf_read()
5799 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5802 if (count < event->read_size) in __perf_read()
5805 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5807 ret = perf_read_group(event, read_format, buf); in __perf_read()
5809 ret = perf_read_one(event, read_format, buf); in __perf_read()
5817 struct perf_event *event = file->private_data; in perf_read() local
5821 ret = security_perf_event_read(event); in perf_read()
5825 ctx = perf_event_ctx_lock(event); in perf_read()
5826 ret = __perf_read(event, buf, count); in perf_read()
5827 perf_event_ctx_unlock(event, ctx); in perf_read()
5834 struct perf_event *event = file->private_data; in perf_poll() local
5838 poll_wait(file, &event->waitq, wait); in perf_poll()
5840 if (is_event_hup(event)) in perf_poll()
5847 mutex_lock(&event->mmap_mutex); in perf_poll()
5848 rb = event->rb; in perf_poll()
5851 mutex_unlock(&event->mmap_mutex); in perf_poll()
5855 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5857 (void)perf_event_read(event, false); in _perf_event_reset()
5858 local64_set(&event->count, 0); in _perf_event_reset()
5859 perf_event_update_userpage(event); in _perf_event_reset()
5863 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5868 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5869 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5870 _perf_event_disable(event); in perf_event_pause()
5871 count = local64_read(&event->count); in perf_event_pause()
5873 local64_set(&event->count, 0); in perf_event_pause()
5874 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5886 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5891 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5893 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5894 func(event); in perf_event_for_each_child()
5895 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5897 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5900 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5903 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5908 event = event->group_leader; in perf_event_for_each()
5910 perf_event_for_each_child(event, func); in perf_event_for_each()
5911 for_each_sibling_event(sibling, event) in perf_event_for_each()
5915 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5923 if (event->attr.freq) { in __perf_event_period()
5924 event->attr.sample_freq = value; in __perf_event_period()
5926 event->attr.sample_period = value; in __perf_event_period()
5927 event->hw.sample_period = value; in __perf_event_period()
5930 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5932 perf_pmu_disable(event->pmu); in __perf_event_period()
5937 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5938 event->hw.interrupts = 0; in __perf_event_period()
5939 perf_log_throttle(event, 1); in __perf_event_period()
5941 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5944 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5947 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5948 perf_pmu_enable(event->pmu); in __perf_event_period()
5952 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5954 return event->pmu->check_period(event, value); in perf_event_check_period()
5957 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5959 if (!is_sampling_event(event)) in _perf_event_period()
5965 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5968 if (perf_event_check_period(event, value)) in _perf_event_period()
5971 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5974 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5979 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5984 ctx = perf_event_ctx_lock(event); in perf_event_period()
5985 ret = _perf_event_period(event, value); in perf_event_period()
5986 perf_event_ctx_unlock(event, ctx); in perf_event_period()
5999 static int perf_event_set_output(struct perf_event *event,
6001 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6005 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
6022 return _perf_event_refresh(event, arg); in _perf_ioctl()
6031 return _perf_event_period(event, value); in _perf_ioctl()
6035 u64 id = primary_event_id(event); in _perf_ioctl()
6051 return perf_event_set_output(event, output_event); in _perf_ioctl()
6055 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
6066 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
6079 rb = rcu_dereference(event->rb); in _perf_ioctl()
6090 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6100 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6107 perf_event_for_each(event, func); in _perf_ioctl()
6109 perf_event_for_each_child(event, func); in _perf_ioctl()
6116 struct perf_event *event = file->private_data; in perf_ioctl() local
6121 ret = security_perf_event_write(event); in perf_ioctl()
6125 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6126 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6127 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
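perf_ioctl() (6116-6127) is where the file-descriptor interface lands: security check, ctx lock, then _perf_ioctl() (6005-6109) dispatches the PERF_EVENT_IOC_* commands to the internal helpers seen earlier (_perf_event_enable, _perf_event_disable, _perf_event_period, _perf_event_refresh, ...). A minimal userspace counterpart exercising that path; error handling trimmed:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type     = PERF_TYPE_HARDWARE;
        attr.size     = sizeof(attr);
        attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;

        fd = syscall(SYS_perf_event_open, &attr, 0 /* this thread */,
                     -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);    /* _perf_event_reset()   */
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);   /* _perf_event_enable()  */
        /* ... workload under measurement ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);  /* _perf_event_disable() */

        read(fd, &count, sizeof(count));       /* __perf_read() path    */
        printf("instructions: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }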
6157 struct perf_event *event; in perf_event_task_enable() local
6160 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6161 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6162 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6163 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6173 struct perf_event *event; in perf_event_task_disable() local
6176 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6177 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6178 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6179 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
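perf_event_task_enable() and perf_event_task_disable() above walk current->perf_event_list and are reached from prctl(2); a sketch of the userspace side:

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Toggle every perf event owned by the calling task. */
    static void toggle_own_events(int on)
    {
            prctl(on ? PR_TASK_PERF_EVENTS_ENABLE : PR_TASK_PERF_EVENTS_DISABLE,
                  0, 0, 0, 0);
    }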
6186 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6188 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6191 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6194 return event->pmu->event_idx(event); in perf_event_index()
6197 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6203 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6220 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6229 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6236 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6249 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6259 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6260 userpg->offset = perf_event_count(event, false); in perf_event_update_userpage()
6262 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6265 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6268 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6270 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
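perf_event_update_userpage() bumps the page's ->lock field around each update, so userspace must treat it as a seqcount when reading the self-monitoring page. A sketch, assuming the first mmap'ed page of the event fd; __sync_synchronize() stands in for the barrier() in the documented read loop:

    #include <linux/perf_event.h>
    #include <stdint.h>

    static uint64_t read_userpage_offset(volatile struct perf_event_mmap_page *pc)
    {
            uint32_t seq;
            uint64_t offset;

            do {
                    seq = pc->lock;
                    __sync_synchronize();
                    offset = pc->offset;        /* see userpg->offset above */
                    __sync_synchronize();
            } while (pc->lock != seq);          /* retry if an update raced us */

            return offset;
    }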
6282 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
6293 rb = rcu_dereference(event->rb); in perf_mmap_fault()
6315 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6321 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6323 if (event->rb) { in ring_buffer_attach()
6328 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6330 old_rb = event->rb; in ring_buffer_attach()
6332 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6335 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6336 event->rcu_pending = 1; in ring_buffer_attach()
6340 if (event->rcu_pending) { in ring_buffer_attach()
6341 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6342 event->rcu_pending = 0; in ring_buffer_attach()
6346 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6360 if (has_aux(event)) in ring_buffer_attach()
6361 perf_event_stop(event, 0); in ring_buffer_attach()
6363 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6372 wake_up_all(&event->waitq); in ring_buffer_attach()
6376 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6380 if (event->parent) in ring_buffer_wakeup()
6381 event = event->parent; in ring_buffer_wakeup()
6384 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6386 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6387 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6392 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6396 if (event->parent) in ring_buffer_get()
6397 event = event->parent; in ring_buffer_get()
6400 rb = rcu_dereference(event->rb); in ring_buffer_get()
6422 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6424 atomic_inc(&event->mmap_count); in perf_mmap_open()
6425 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6428 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6430 if (event->pmu->event_mapped) in perf_mmap_open()
6431 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6434 static void perf_pmu_output_stop(struct perf_event *event);
6446 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6447 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6453 if (event->pmu->event_unmapped) in perf_mmap_close()
6454 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6468 perf_pmu_output_stop(event); in perf_mmap_close()
6484 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6487 ring_buffer_attach(event, NULL); in perf_mmap_close()
6488 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6501 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6502 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6511 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6522 if (event->rb == rb) in perf_mmap_close()
6523 ring_buffer_attach(event, NULL); in perf_mmap_close()
6525 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6526 put_event(event); in perf_mmap_close()
6563 struct perf_event *event = file->private_data; in perf_mmap() local
6579 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6585 ret = security_perf_event_read(event); in perf_mmap()
6601 if (!event->rb) in perf_mmap()
6608 mutex_lock(&event->mmap_mutex); in perf_mmap()
6611 rb = event->rb; in perf_mmap()
6666 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6668 mutex_lock(&event->mmap_mutex); in perf_mmap()
6669 if (event->rb) { in perf_mmap()
6670 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6675 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6680 ring_buffer_attach(event, NULL); in perf_mmap()
6681 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6727 WARN_ON(!rb && event->rb); in perf_mmap()
6734 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6735 event->cpu, flags); in perf_mmap()
6746 ring_buffer_attach(event, rb); in perf_mmap()
6748 perf_event_update_time(event); in perf_mmap()
6749 perf_event_init_userpage(event); in perf_mmap()
6750 perf_event_update_userpage(event); in perf_mmap()
6752 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6753 event->attr.aux_watermark, flags); in perf_mmap()
6763 atomic_inc(&event->mmap_count); in perf_mmap()
6770 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6779 if (event->pmu->event_mapped) in perf_mmap()
6780 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
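perf_mmap() above accepts one metadata page plus a power-of-two number of data pages; a sketch of the matching userspace mapping:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <stddef.h>

    /* data_pages must be a power of two, e.g. 8. */
    static void *map_perf_ring(int perf_fd, size_t data_pages)
    {
            size_t len = (1 + data_pages) * (size_t)sysconf(_SC_PAGESIZE);

            return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                        perf_fd, 0);
    }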
6788 struct perf_event *event = filp->private_data; in perf_fasync() local
6792 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6818 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6820 ring_buffer_wakeup(event); in perf_event_wakeup()
6822 if (event->pending_kill) { in perf_event_wakeup()
6823 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6824 event->pending_kill = 0; in perf_event_wakeup()
6828 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6835 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6845 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6846 event->orig_type, event->attr.sig_data); in perf_sigtrap()
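perf_sigtrap() above delivers the synchronous SIGTRAP for events opened with attr.sigtrap = 1, carrying attr.sig_data in the siginfo. A sketch of the receiving side, assuming a libc that exposes the perf siginfo members (si_perf_data and friends):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <string.h>
    #include <stdio.h>

    /* Demo only: fprintf is not async-signal-safe. */
    static void sigtrap_handler(int sig, siginfo_t *info, void *uc)
    {
            /* si_addr = sampled address, si_perf_data = attr.sig_data. */
            fprintf(stderr, "perf SIGTRAP at %p, cookie %llu\n",
                    info->si_addr, (unsigned long long)info->si_perf_data);
    }

    static void install_handler(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = sigtrap_handler;
            sa.sa_flags = SA_SIGINFO;     /* required for siginfo delivery */
            sigaction(SIGTRAP, &sa, NULL);
    }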
6852 static void __perf_pending_disable(struct perf_event *event) in __perf_pending_disable() argument
6854 int cpu = READ_ONCE(event->oncpu); in __perf_pending_disable()
6867 if (event->pending_disable) { in __perf_pending_disable()
6868 event->pending_disable = 0; in __perf_pending_disable()
6869 perf_event_disable_local(event); in __perf_pending_disable()
6894 irq_work_queue_on(&event->pending_disable_irq, cpu); in __perf_pending_disable()
6899 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq); in perf_pending_disable() local
6907 __perf_pending_disable(event); in perf_pending_disable()
6914 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6927 if (event->pending_wakeup) { in perf_pending_irq()
6928 event->pending_wakeup = 0; in perf_pending_irq()
6929 perf_event_wakeup(event); in perf_pending_irq()
6938 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
6953 if (event->pending_work) { in perf_pending_task()
6954 event->pending_work = 0; in perf_pending_task()
6955 perf_sigtrap(event); in perf_pending_task()
6956 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task()
6957 rcuwait_wake_up(&event->pending_work_wait); in perf_pending_task()
7003 static bool should_sample_guest(struct perf_event *event) in should_sample_guest() argument
7005 return !event->attr.exclude_guest && perf_guest_state(); in should_sample_guest()
7008 unsigned long perf_misc_flags(struct perf_event *event, in perf_misc_flags() argument
7011 if (should_sample_guest(event)) in perf_misc_flags()
7017 unsigned long perf_instruction_pointer(struct perf_event *event, in perf_instruction_pointer() argument
7020 if (should_sample_guest(event)) in perf_instruction_pointer()
7159 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7163 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7198 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7222 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7231 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7235 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7284 struct perf_event *event, in __perf_event_header__init_id() argument
7287 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7292 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7293 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7297 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7300 data->id = primary_event_id(event); in __perf_event_header__init_id()
7303 data->stream_id = event->id; in __perf_event_header__init_id()
7313 struct perf_event *event) in perf_event_header__init_id() argument
7315 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7316 header->size += event->id_header_size; in perf_event_header__init_id()
7317 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7345 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7349 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7354 struct perf_event *event, in perf_output_read_one() argument
7357 u64 read_format = event->attr.read_format; in perf_output_read_one()
7361 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr)); in perf_output_read_one()
7364 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7368 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7371 values[n++] = primary_event_id(event); in perf_output_read_one()
7373 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7379 struct perf_event *event, in perf_output_read_group() argument
7382 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7383 u64 read_format = event->attr.read_format; in perf_output_read_group()
7387 bool self = has_inherit_and_sample_read(&event->attr); in perf_output_read_group()
7403 if ((leader != event) && in perf_output_read_group()
7418 if ((sub != event) && in perf_output_read_group()
7449 struct perf_event *event) in perf_output_read() argument
7452 u64 read_format = event->attr.read_format; in perf_output_read()
7464 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7466 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7467 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7469 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
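The layout perf_output_read_one() emits is the same one read(2) returns for a non-group event; a sketch, assuming read_format was set to the two time bits plus PERF_FORMAT_ID at open time:

    #include <stdint.h>
    #include <unistd.h>

    struct read_one {
            uint64_t value;
            uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
            uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
            uint64_t id;            /* PERF_FORMAT_ID */
    };

    static int read_counter(int perf_fd, struct read_one *out)
    {
            return read(perf_fd, out, sizeof(*out)) == sizeof(*out) ? 0 : -1;
    }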
7475 struct perf_event *event) in perf_output_sample() argument
7509 perf_output_read(handle, event); in perf_output_sample()
7560 if (branch_sample_hw_index(event)) in perf_output_sample()
7590 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7621 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7645 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7648 if (!event->attr.watermark) { in perf_output_sample()
7649 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7792 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7794 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7795 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7797 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7798 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7815 struct perf_event *event, in perf_prepare_sample() argument
7818 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7836 data->type = event->attr.sample_type; in perf_prepare_sample()
7840 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
7843 data->ip = perf_instruction_pointer(event, regs); in perf_prepare_sample()
7848 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
7875 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7890 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7891 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7937 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7979 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7990 event->attr.aux_sample_size); in perf_prepare_sample()
7992 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
8002 struct perf_event *event, in perf_prepare_header() argument
8006 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
8007 header->misc = perf_misc_flags(event, regs); in perf_prepare_header()
8020 static void __perf_event_aux_pause(struct perf_event *event, bool pause) in __perf_event_aux_pause() argument
8023 if (!event->hw.aux_paused) { in __perf_event_aux_pause()
8024 event->hw.aux_paused = 1; in __perf_event_aux_pause()
8025 event->pmu->stop(event, PERF_EF_PAUSE); in __perf_event_aux_pause()
8028 if (event->hw.aux_paused) { in __perf_event_aux_pause()
8029 event->hw.aux_paused = 0; in __perf_event_aux_pause()
8030 event->pmu->start(event, PERF_EF_RESUME); in __perf_event_aux_pause()
8035 static void perf_event_aux_pause(struct perf_event *event, bool pause) in perf_event_aux_pause() argument
8039 if (WARN_ON_ONCE(!event)) in perf_event_aux_pause()
8042 rb = ring_buffer_get(event); in perf_event_aux_pause()
8056 __perf_event_aux_pause(event, pause); in perf_event_aux_pause()
8064 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
8079 perf_prepare_sample(data, event, regs); in __perf_event_output()
8080 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
8082 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
8086 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
8096 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
8100 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
8104 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
8108 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
8112 perf_event_output(struct perf_event *event, in perf_event_output() argument
8116 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
8131 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
8140 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
8142 .pid = perf_event_pid(event, task), in perf_event_read_event()
8143 .tid = perf_event_tid(event, task), in perf_event_read_event()
8147 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
8148 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
8153 perf_output_read(&handle, event); in perf_event_read_event()
8154 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
8159 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8166 struct perf_event *event; in perf_iterate_ctx() local
8168 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8170 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8172 if (!event_filter_match(event)) in perf_iterate_ctx()
8176 output(event, data); in perf_iterate_ctx()
8183 struct perf_event *event; in perf_iterate_sb_cpu() local
8185 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8191 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8194 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8196 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8198 output(event, data); in perf_iterate_sb_cpu()
8241 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8243 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8248 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8254 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8255 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8263 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8267 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8291 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8293 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8297 .event = event, in __perf_event_output_stop()
8300 if (!has_aux(event)) in __perf_event_output_stop()
8304 parent = event; in __perf_event_output_stop()
8322 struct perf_event *event = info; in __perf_pmu_output_stop() local
8325 .rb = event->rb, in __perf_pmu_output_stop()
8338 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8345 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8359 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8389 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8391 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8392 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8393 event->attr.task; in perf_event_task_match()
8396 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8405 if (!perf_event_task_match(event)) in perf_event_task_output()
8408 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8410 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8415 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8416 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8419 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8421 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8424 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8425 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8428 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8432 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8495 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8497 return event->attr.comm; in perf_event_comm_match()
8500 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8509 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8512 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8513 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8519 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8520 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8526 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8594 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8596 return event->attr.namespaces; in perf_event_namespaces_match()
8599 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8608 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8612 &sample, event); in perf_event_namespaces_output()
8613 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8618 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8620 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8625 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8722 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8724 return event->attr.cgroup; in perf_event_cgroup_match()
8727 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8735 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8739 &sample, event); in perf_event_cgroup_output()
8740 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8748 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8833 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8840 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8841 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8844 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8855 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8858 if (event->attr.mmap2) { in perf_event_mmap_output()
8868 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8869 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8874 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8875 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8877 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8879 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8884 if (event->attr.mmap2) { in perf_event_mmap_output()
8903 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
9064 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
9066 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
9072 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
9081 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
9088 event->addr_filters_gen++; in __perf_addr_filters_adjust()
9092 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
9151 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9173 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9174 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9180 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9188 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9206 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9208 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9214 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9233 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9235 return event->attr.context_switch; in perf_event_switch_match()
9238 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9245 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9249 if (event->ctx->task) { in perf_event_switch_output()
9256 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9258 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9261 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9263 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9267 if (event->ctx->task) in perf_event_switch_output()
9272 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9310 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9327 .time = perf_event_clock(event), in perf_log_throttle()
9328 .id = primary_event_id(event), in perf_log_throttle()
9329 .stream_id = event->id, in perf_log_throttle()
9335 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9337 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9343 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9363 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9365 return event->attr.ksymbol; in perf_event_ksymbol_match()
9368 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9375 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9379 &sample, event); in perf_event_ksymbol_output()
9380 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9387 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9453 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9455 return event->attr.bpf_event; in perf_event_bpf_match()
9458 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9465 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9469 &sample, event); in perf_event_bpf_output()
9470 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9476 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9555 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9557 return event->attr.text_poke; in perf_event_text_poke_match()
9560 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9568 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9571 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9573 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9588 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9625 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9627 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9630 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9641 if (event->parent) in perf_log_itrace_start()
9642 event = event->parent; in perf_log_itrace_start()
9644 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9645 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9651 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9652 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9654 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9655 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9661 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9666 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9676 if (event->parent) in perf_report_aux_output_id()
9677 event = event->parent; in perf_report_aux_output_id()
9684 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9685 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9691 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9698 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9700 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9715 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9720 if (event->attr.freq) { in __perf_event_account_interrupt()
9727 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9733 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9735 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
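The attr.freq branch in __perf_event_account_interrupt() above is what makes frequency-based sampling self-adjust through perf_adjust_period(). A sketch of an attribute setup that exercises that path:

    #include <linux/perf_event.h>
    #include <string.h>

    static void init_freq_attr(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = PERF_TYPE_HARDWARE;
            attr->config = PERF_COUNT_HW_CPU_CYCLES;
            attr->freq = 1;                 /* interpret sample_freq, not a period */
            attr->sample_freq = 4000;       /* target samples per second */
    }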
9738 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9745 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9752 static int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9758 .event = event, in bpf_overflow_handler()
9767 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
9769 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
9779 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9783 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9787 if (event->prog) in perf_event_set_bpf_handler()
9793 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9795 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
9796 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9797 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9810 event->prog = prog; in perf_event_set_bpf_handler()
9811 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
9815 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9817 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9822 event->prog = NULL; in perf_event_free_bpf_handler()
9826 static inline int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9833 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9840 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9849 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9853 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9860 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9863 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9865 if (event->attr.aux_pause) in __perf_event_overflow()
9866 perf_event_aux_pause(event->aux_event, true); in __perf_event_overflow()
9868 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && in __perf_event_overflow()
9869 !bpf_overflow_handler(event, data, regs)) in __perf_event_overflow()
9877 event->pending_kill = POLL_IN; in __perf_event_overflow()
9878 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9880 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9881 perf_event_disable_inatomic(event); in __perf_event_overflow()
9884 if (event->attr.sigtrap) { in __perf_event_overflow()
9891 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9900 if (!event->pending_work && in __perf_event_overflow()
9901 !task_work_add(current, &event->pending_task, notify_mode)) { in __perf_event_overflow()
9902 event->pending_work = pending_id; in __perf_event_overflow()
9903 local_inc(&event->ctx->nr_no_switch_fast); in __perf_event_overflow()
9905 event->pending_addr = 0; in __perf_event_overflow()
9907 event->pending_addr = data->addr; in __perf_event_overflow()
9909 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9922 WARN_ON_ONCE(event->pending_work != pending_id); in __perf_event_overflow()
9926 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9928 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9929 event->pending_wakeup = 1; in __perf_event_overflow()
9930 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9933 if (event->attr.aux_resume) in __perf_event_overflow()
9934 perf_event_aux_pause(event->aux_event, false); in __perf_event_overflow()
9939 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9943 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
9964 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9966 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9987 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9991 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9995 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
10001 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
10013 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
10017 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
10019 local64_add(nr, &event->count); in perf_swevent_event()
10024 if (!is_sampling_event(event)) in perf_swevent_event()
10027 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
10029 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10031 data->period = event->hw.last_period; in perf_swevent_event()
10033 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
10034 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
10039 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
10042 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
10045 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
10049 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
10052 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
10059 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
10065 if (event->attr.type != type) in perf_swevent_match()
10068 if (event->attr.config != event_id) in perf_swevent_match()
10071 if (perf_exclude_event(event, regs)) in perf_swevent_match()
10107 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
10110 u32 event_id = event->attr.config; in find_swevent_head()
10111 u64 type = event->attr.type; in find_swevent_head()
10119 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
10132 struct perf_event *event; in do_perf_sw_event() local
10140 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
10141 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
10142 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
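do_perf_sw_event() above fans a software event out to every matching hlist entry; a sketch of opening such an event, given that perf_event_open(2) has no libc wrapper:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdint.h>
    #include <unistd.h>

    static int open_sw_event(uint64_t config /* e.g. PERF_COUNT_SW_PAGE_FAULTS */)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE; /* routes to perf_swevent_init() */
            attr.config = config;
            attr.disabled = 1;

            /* self, any cpu, no group, no flags */
            return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0UL);
    }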
10188 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
10192 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
10195 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
10198 if (is_sampling_event(event)) { in perf_swevent_add()
10200 perf_swevent_set_period(event); in perf_swevent_add()
10205 head = find_swevent_head(swhash, event); in perf_swevent_add()
10209 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
10210 perf_event_update_userpage(event); in perf_swevent_add()
10215 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
10217 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
10220 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
10222 event->hw.state = 0; in perf_swevent_start()
10225 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
10227 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10319 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10321 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10323 WARN_ON(event->parent); in sw_perf_event_destroy()
10332 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10334 u64 event_id = event->attr.config; in perf_swevent_init()
10336 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10342 if (has_branch_stack(event)) in perf_swevent_init()
10347 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10350 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10360 if (!event->parent) { in perf_swevent_init()
10368 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
10389 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10391 perf_trace_destroy(event); in tp_perf_event_destroy()
10394 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10398 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10404 if (has_branch_stack(event)) in perf_tp_event_init()
10407 err = perf_trace_init(event); in perf_tp_event_init()
10411 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10427 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10433 if (event->parent) in perf_tp_filter_match()
10434 event = event->parent; in perf_tp_filter_match()
10436 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10441 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10445 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10450 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10453 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
10471 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10479 struct perf_event *event) in __perf_tp_event_target_task() argument
10483 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10486 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10488 if (perf_tp_event_match(event, data, regs)) in __perf_tp_event_target_task()
10489 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10499 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10501 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10502 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10503 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10507 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10508 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10509 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10519 struct perf_event *event; in perf_tp_event() local
10533 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10534 if (perf_tp_event_match(event, &data, regs)) { in perf_tp_event()
10535 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10613 static int perf_kprobe_event_init(struct perf_event *event);
10625 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10630 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10639 if (has_branch_stack(event)) in perf_kprobe_event_init()
10642 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10643 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10647 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10672 static int perf_uprobe_event_init(struct perf_event *event);
10684 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10690 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10699 if (has_branch_stack(event)) in perf_uprobe_event_init()
10702 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10703 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10704 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10708 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
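perf_kprobe and perf_uprobe register dynamic PMU types, so userspace must discover attr.type at runtime; the sysfs path below is the conventional location, not something this file itself guarantees:

    #include <stdio.h>

    static int read_pmu_type(const char *pmu /* "kprobe" or "uprobe" */)
    {
            char path[96];
            int type = -1;
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/type", pmu);
            f = fopen(path, "r");
            if (f) {
                    if (fscanf(f, "%d", &type) != 1)
                            type = -1;
                    fclose(f);
            }
            return type;
    }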
10725 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10727 ftrace_profile_free_filter(event); in perf_event_free_filter()
10734 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10736 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10739 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10743 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10749 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10754 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10755 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10757 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10758 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10759 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10760 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10779 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10785 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
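perf_event_set_bpf_prog() above is reached from the PERF_EVENT_IOC_SET_BPF ioctl, which takes the BPF program fd by value; a sketch, assuming the program fd was loaded elsewhere (e.g. with libbpf):

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>

    static int attach_bpf_prog(int perf_fd, int bpf_prog_fd)
    {
            return ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd);
    }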
10788 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10790 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10791 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10794 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10803 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10807 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10813 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10835 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10837 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10864 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10870 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10874 if (event->parent) in perf_addr_filters_splice()
10877 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10879 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10881 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10883 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10913 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10915 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10916 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10944 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10945 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10947 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10949 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10950 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10956 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10966 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
11020 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
11047 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
11133 if (!event->ctx->task) in perf_event_parse_addr_filter()
11148 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11177 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11186 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11188 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11191 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11195 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11200 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11203 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11211 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
11216 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11226 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11227 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11241 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11245 if (has_addr_filter(event)) in perf_event_set_filter()
11246 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
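perf_event_set_filter() above accepts either a tracepoint filter or, for PMUs with address filtering, a string of "{filter|start|stop} <addr>[/<size>][@<object file>]" ranges. A sketch with an illustrative binary path:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>

    static int set_addr_filter(int perf_fd)
    {
            /* Trace only 0x100 bytes at offset 0x1000 of the named object. */
            return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
                         "filter 0x1000/0x100@/usr/bin/example");
    }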
11261 struct perf_event *event; in perf_swevent_hrtimer() local
11264 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11266 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11269 event->pmu->read(event); in perf_swevent_hrtimer()
11271 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11274 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11275 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11276 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11280 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11286 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11288 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11291 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11307 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11309 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11311 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11319 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11321 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11323 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11333 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11334 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11336 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11337 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11340 event->attr.freq = 0; in perf_swevent_init_hrtimer()
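For example, attr.freq = 1 with sample_freq = 1000 is rewritten above to a fixed hrtimer period of NSEC_PER_SEC / 1000 = 1,000,000 ns (1 ms), after which attr.freq is cleared, since a timer-driven software event has no hardware counter to retune.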
11348 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11354 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11355 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11358 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11360 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11361 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11364 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11366 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11367 cpu_clock_event_update(event); in cpu_clock_event_stop()
11370 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11373 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11374 perf_event_update_userpage(event); in cpu_clock_event_add()
11379 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11381 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11384 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11386 cpu_clock_event_update(event); in cpu_clock_event_read()
11389 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11391 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11394 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11400 if (has_branch_stack(event)) in cpu_clock_event_init()
11403 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
11426 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11431 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11433 local64_add(delta, &event->count); in task_clock_event_update()
11436 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11438 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11439 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11442 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11444 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11445 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11448 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11451 task_clock_event_start(event, flags); in task_clock_event_add()
11452 perf_event_update_userpage(event); in task_clock_event_add()
11457 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11459 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11462 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11465 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11466 u64 time = event->ctx->time + delta; in task_clock_event_read()
11468 task_clock_event_update(event, time); in task_clock_event_read()
11471 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11473 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11476 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11482 if (has_branch_stack(event)) in task_clock_event_init()
11485 perf_swevent_init_hrtimer(event); in task_clock_event_init()
11517 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11559 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11910 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11912 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11913 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11916 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11930 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11935 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11940 event->pmu = pmu; in perf_try_init_event()
11941 ret = pmu->event_init(event); in perf_try_init_event()
11944 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11948 has_extended_regs(event)) in perf_try_init_event()
11952 event_has_any_exclude_flag(event)) in perf_try_init_event()
11955 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { in perf_try_init_event()
11956 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); in perf_try_init_event()
11965 event->event_caps |= PERF_EV_CAP_READ_SCOPE; in perf_try_init_event()
11971 if (ret && event->destroy) in perf_try_init_event()
11972 event->destroy(event); in perf_try_init_event()
11981 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11993 event->orig_type = event->attr.type; in perf_init_event()
11996 if (event->parent && event->parent->pmu) { in perf_init_event()
11997 pmu = event->parent->pmu; in perf_init_event()
11998 ret = perf_try_init_event(pmu, event); in perf_init_event()
12007 type = event->attr.type; in perf_init_event()
12009 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
12014 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
12023 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
12027 ret = perf_try_init_event(pmu, event); in perf_init_event()
12028 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
12029 type = event->attr.type; in perf_init_event()
12040 ret = perf_try_init_event(pmu, event); in perf_init_event()
12057 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
12059 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
12062 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
12073 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
12075 if (is_sb_event(event)) in account_pmu_sb_event()
12076 attach_sb_event(event); in account_pmu_sb_event()
12100 static void account_event(struct perf_event *event) in account_event() argument
12104 if (event->parent) in account_event()
12107 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
12109 if (event->attr.mmap || event->attr.mmap_data) in account_event()
12111 if (event->attr.build_id) in account_event()
12113 if (event->attr.comm) in account_event()
12115 if (event->attr.namespaces) in account_event()
12117 if (event->attr.cgroup) in account_event()
12119 if (event->attr.task) in account_event()
12121 if (event->attr.freq) in account_event()
12123 if (event->attr.context_switch) { in account_event()
12127 if (has_branch_stack(event)) in account_event()
12129 if (is_cgroup_event(event)) in account_event()
12131 if (event->attr.ksymbol) in account_event()
12133 if (event->attr.bpf_event) in account_event()
12135 if (event->attr.text_poke) in account_event()
12166 account_pmu_sb_event(event); in account_event()
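account_event() bumps a global counter for each sideband record type the new event requests, so the emit paths can return early when no event is listening. A hedged kernel-side sketch of that pattern, modeled on the real nr_mmap_events counter (the matching decrement lives in the event teardown path):

    static atomic_t nr_mmap_events __read_mostly;

    static void account_mmap_event(void)
    {
            atomic_inc(&nr_mmap_events);
    }

    static void perf_event_mmap_sketch(struct vm_area_struct *vma)
    {
            /* Fast path: no live event asked for mmap records. */
            if (atomic_read(&nr_mmap_events) == 0)
                    return;
            /* ... build and emit a PERF_RECORD_MMAP sideband record ... */
    }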
12181 struct perf_event *event; in perf_event_alloc() local
12196 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
12198 if (!event) in perf_event_alloc()
12206 group_leader = event; in perf_event_alloc()
12208 mutex_init(&event->child_mutex); in perf_event_alloc()
12209 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
12211 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
12212 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
12213 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
12214 init_event_group(event); in perf_event_alloc()
12215 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
12216 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
12217 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
12218 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
12221 init_waitqueue_head(&event->waitq); in perf_event_alloc()
12222 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
12223 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); in perf_event_alloc()
12224 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
12225 rcuwait_init(&event->pending_work_wait); in perf_event_alloc()
12227 mutex_init(&event->mmap_mutex); in perf_event_alloc()
12228 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
12230 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
12231 event->cpu = cpu; in perf_event_alloc()
12232 event->attr = *attr; in perf_event_alloc()
12233 event->group_leader = group_leader; in perf_event_alloc()
12234 event->pmu = NULL; in perf_event_alloc()
12235 event->oncpu = -1; in perf_event_alloc()
12237 event->parent = parent_event; in perf_event_alloc()
12239 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
12240 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
12242 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12245 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12248 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12254 event->hw.target = get_task_struct(task); in perf_event_alloc()
12257 event->clock = &local_clock; in perf_event_alloc()
12259 event->clock = parent_event->clock; in perf_event_alloc()
12269 event->prog = prog; in perf_event_alloc()
12275 event->overflow_handler = overflow_handler; in perf_event_alloc()
12276 event->overflow_handler_context = context; in perf_event_alloc()
12277 } else if (is_write_backward(event)) { in perf_event_alloc()
12278 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12279 event->overflow_handler_context = NULL; in perf_event_alloc()
12281 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12282 event->overflow_handler_context = NULL; in perf_event_alloc()
12285 perf_event__state_init(event); in perf_event_alloc()
12289 hwc = &event->hw; in perf_event_alloc()
12306 if (!has_branch_stack(event)) in perf_event_alloc()
12307 event->attr.branch_sample_type = 0; in perf_event_alloc()
12309 pmu = perf_init_event(event); in perf_event_alloc()
12325 if (event->attr.aux_output && in perf_event_alloc()
12327 event->attr.aux_pause || event->attr.aux_resume)) { in perf_event_alloc()
12332 if (event->attr.aux_pause && event->attr.aux_resume) { in perf_event_alloc()
12337 if (event->attr.aux_start_paused) { in perf_event_alloc()
12342 event->hw.aux_paused = 1; in perf_event_alloc()
12346 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12351 err = exclusive_event_init(event); in perf_event_alloc()
12355 if (has_addr_filter(event)) { in perf_event_alloc()
12356 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12359 if (!event->addr_filter_ranges) { in perf_event_alloc()
12368 if (event->parent) { in perf_event_alloc()
12369 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12372 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12373 event->parent->addr_filter_ranges, in perf_event_alloc()
12379 event->addr_filters_gen = 1; in perf_event_alloc()
12382 if (!event->parent) { in perf_event_alloc()
12383 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12390 err = security_perf_event_alloc(event); in perf_event_alloc()
12395 account_event(event); in perf_event_alloc()
12397 return event; in perf_event_alloc()
12400 if (!event->parent) { in perf_event_alloc()
12401 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
12405 kfree(event->addr_filter_ranges); in perf_event_alloc()
12408 exclusive_event_destroy(event); in perf_event_alloc()
12411 if (is_cgroup_event(event)) in perf_event_alloc()
12412 perf_detach_cgroup(event); in perf_event_alloc()
12413 if (event->destroy) in perf_event_alloc()
12414 event->destroy(event); in perf_event_alloc()
12417 if (event->hw.target) in perf_event_alloc()
12418 put_task_struct(event->hw.target); in perf_event_alloc()
12419 call_rcu(&event->rcu_head, free_event_rcu); in perf_event_alloc()
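The tail of perf_event_alloc() above follows the kernel's goto-ladder unwind idiom: each acquired resource gets an error label, failures release in reverse order, and the final free is deferred through RCU because lockless readers may still hold a reference. A condensed, hedged sketch of the shape using names from the listing:

    event = kmem_cache_alloc_node(perf_event_cache,
                                  GFP_KERNEL | __GFP_ZERO, node);
    if (!event)
            return ERR_PTR(-ENOMEM);

    err = exclusive_event_init(event);
    if (err)
            goto err_free;

    err = security_perf_event_alloc(event);
    if (err)
            goto err_exclusive;

    return event;

    err_exclusive:
            exclusive_event_destroy(event);
    err_free:
            /* RCU readers may still see the event: defer the kfree. */
            call_rcu(&event->rcu_head, free_event_rcu);
            return ERR_PTR(err);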
12560 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12566 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12571 if (event == output_event) in perf_event_set_output()
12577 if (output_event->cpu != event->cpu) in perf_event_set_output()
12583 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12589 if (output_event->clock != event->clock) in perf_event_set_output()
12596 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12602 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12603 event->pmu != output_event->pmu) in perf_event_set_output()
12613 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12616 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12632 ring_buffer_attach(event, rb); in perf_event_set_output()
12636 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
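perf_event_set_output() backs the PERF_EVENT_IOC_SET_OUTPUT ioctl, which lets several events share one ring buffer. A minimal userspace sketch, assuming fd_a and fd_b came from perf_event_open() and satisfy the checks above (same CPU or task, same clock, same write direction):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static int redirect_output(int fd_b, int fd_a)
    {
            if (ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a) < 0) {
                    perror("PERF_EVENT_IOC_SET_OUTPUT");
                    return -1;
            }
            return 0;       /* now only fd_a's buffer needs mmap() */
    }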
12644 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12650 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12655 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12660 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12664 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12668 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12675 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
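The clocks mapped above are selected from userspace via attr.use_clockid; the realtime, boottime, and TAI accessors are not NMI-safe, which is why the final check rejects them unless the PMU advertises PERF_PMU_CAP_NO_NMI. A minimal sketch requesting an always-accepted clock:

    #include <time.h>
    #include <linux/perf_event.h>

    struct perf_event_attr attr = {
            .type        = PERF_TYPE_SOFTWARE,
            .config      = PERF_COUNT_SW_TASK_CLOCK,
            .size        = sizeof(struct perf_event_attr),
            .use_clockid = 1,
            .clockid     = CLOCK_MONOTONIC_RAW,  /* NMI-safe mapping */
    };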
12727 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12831 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12833 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12834 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12838 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12839 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12849 pmu = event->pmu; in SYSCALL_DEFINE5()
12852 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12858 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12879 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
12899 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
12918 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12926 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12941 if (is_software_event(event) && in SYSCALL_DEFINE5()
12955 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
12976 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
12981 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12984 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12989 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12994 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
13003 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
13010 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
13065 perf_event__header_size(event); in SYSCALL_DEFINE5()
13066 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
13068 event->owner = current; in SYSCALL_DEFINE5()
13070 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
13081 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
13094 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
13095 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
13104 free_event(event); in SYSCALL_DEFINE5()
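SYSCALL_DEFINE5 above implements perf_event_open(2). glibc ships no wrapper, so callers go through syscall(2); a minimal self-counting example in the style of the man page:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu,
                           group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_HARDWARE;
            attr.size = sizeof(attr);
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = perf_event_open(&attr, 0, -1, -1, 0); /* this thread, any CPU */
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload to measure ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("instructions: %lld\n", count);
            close(fd);
            return 0;
    }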
13130 struct perf_event *event; in perf_event_create_kernel_counter() local
13141 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
13143 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
13144 err = PTR_ERR(event); in perf_event_create_kernel_counter()
13149 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
13150 pmu = event->pmu; in perf_event_create_kernel_counter()
13153 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
13158 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
13171 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
13176 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
13193 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
13198 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
13202 return event; in perf_event_create_kernel_counter()
13206 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
13212 free_event(event); in perf_event_create_kernel_counter()
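perf_event_create_kernel_counter() is the in-kernel counterpart of the syscall: the event gets owner = TASK_TOMBSTONE (see the listing) and no file descriptor, so teardown is explicit via perf_event_release_kernel(). A hedged usage sketch modeled on callers like the hardlockup detector (names prefixed my_ are illustrative):

    static void my_overflow_handler(struct perf_event *event,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
    {
            /* Runs from the PMU interrupt on counter overflow. */
    }

    static struct perf_event_attr my_attr = {
            .type          = PERF_TYPE_HARDWARE,
            .config        = PERF_COUNT_HW_CPU_CYCLES,
            .size          = sizeof(struct perf_event_attr),
            .pinned        = 1,
            .sample_period = 100000000,
    };

    static struct perf_event *my_evt;

    static int my_counter_start(int cpu)
    {
            my_evt = perf_event_create_kernel_counter(&my_attr, cpu, NULL,
                                                      my_overflow_handler,
                                                      NULL);
            return IS_ERR(my_evt) ? PTR_ERR(my_evt) : 0;
    }

    static void my_counter_stop(void)
    {
            perf_event_release_kernel(my_evt);  /* no fd: explicit teardown */
    }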
13223 struct perf_event *event, *sibling; in __perf_pmu_remove() local
13225 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
13226 perf_remove_from_context(event, 0); in __perf_pmu_remove()
13227 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
13228 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
13230 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
13240 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
13243 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
13247 event->cpu = cpu; in __perf_pmu_install_event()
13248 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
13249 event->pmu_ctx = epc; in __perf_pmu_install_event()
13251 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
13252 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
13253 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13264 struct perf_event *event, *tmp; in __perf_pmu_install() local
13274 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13275 if (event->group_leader == event) in __perf_pmu_install()
13278 list_del(&event->migrate_entry); in __perf_pmu_install()
13279 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13286 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13287 list_del(&event->migrate_entry); in __perf_pmu_install()
13288 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
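These remove/install helpers back the exported perf_pmu_migrate_context(), which moves every event of a PMU from one CPU's context to another's, installing group leaders before their siblings exactly as the two passes above do. A hedged sketch of a typical caller, an uncore-style CPU-offline callback (the my_ names are illustrative):

    static int my_uncore_cpu_offline(unsigned int cpu)
    {
            unsigned int target = cpumask_any_but(cpu_online_mask, cpu);

            /* Hand the events to any surviving CPU, if one exists. */
            if (target < nr_cpu_ids)
                    perf_pmu_migrate_context(&my_uncore_pmu, cpu, target);
            return 0;
    }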
13352 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13354 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13374 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
13377 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
13378 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
13390 free_event(event); in perf_event_exit_event()
13398 perf_event_wakeup(event); in perf_event_exit_event()
13470 struct perf_event *event, *tmp; in perf_event_exit_task() local
13473 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13475 list_del_init(&event->owner_entry); in perf_event_exit_task()
13482 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13497 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13500 struct perf_event *parent = event->parent; in perf_free_event()
13506 list_del_init(&event->child_list); in perf_free_event()
13512 perf_group_detach(event); in perf_free_event()
13513 list_del_event(event, ctx); in perf_free_event()
13515 free_event(event); in perf_free_event()
13528 struct perf_event *event, *tmp; in perf_event_free_task() local
13548 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13549 perf_free_event(event, ctx); in perf_event_free_task()
13598 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13600 if (!event) in perf_event_attrs()
13603 return &event->attr; in perf_event_attrs()
13781 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13789 if (!event->attr.inherit || in inherit_task_group()
13790 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13792 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13812 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
13826 struct perf_event *event; in perf_event_init_context() local
13860 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13861 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13876 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13877 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
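The inheritance checks above key on attribute bits set at creation time: .inherit makes counters follow children across fork(), and .inherit_thread narrows that to CLONE_THREAD children, matching the clone_flags test in inherit_task_group(). A minimal userspace sketch:

    struct perf_event_attr attr = {
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_CACHE_MISSES,
            .size           = sizeof(struct perf_event_attr),
            .inherit        = 1,        /* clone into forked children */
            .inherit_thread = 1,        /* ... but only CLONE_THREAD ones */
    };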
13991 struct perf_event *event; in __perf_event_exit_context() local
13995 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13996 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()