core.c (kernel/events/): diff between e89c6fdf9e0eb1b5a03574d4ca73e83eae8deb91 (old) and 2965faa5e03d1e71e9ff9aa143fff39e0a77543a (new)
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *

--- 3208 unchanged lines hidden ---

static inline u64 perf_event_count(struct perf_event *event)
{
	if (event->pmu->count)
		return event->pmu->count(event);

	return __perf_event_count(event);
}
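perf_event_count() lets a PMU supply its own read-out through the optional pmu::count callback, falling back to the generic accumulated value otherwise. A minimal sketch of what wiring up such an override looks like; the PMU and callback below are hypothetical illustrations, not code from this file:

/* Hypothetical pmu::count override: report only the locally
 * accumulated value. A real PMU might consult hardware or a
 * hypervisor here instead.
 */
static u64 example_pmu_count(struct perf_event *event)
{
	return local64_read(&event->count);
}

static struct pmu example_pmu = {
	/* ...the usual add/del/start/stop/read callbacks elided... */
	.count	= example_pmu_count,
};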
/*
 * NMI-safe method to read a local event, that is, an event that:
 *  - is either for the current task, or for this CPU
 *  - does not have inherit set, because inherited task events
 *    will not be local and we cannot read them atomically
 *  - must not have a pmu::count method
 */
u64 perf_event_read_local(struct perf_event *event)
{
	unsigned long flags;
	u64 val;

	/*
	 * Disabling interrupts avoids all counter scheduling (context
	 * switches, timer based rotation and IPIs).
	 */
	local_irq_save(flags);

	/* If this is a per-task event, it must be for current */
	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
		     event->hw.target != current);

	/* If this is a per-CPU event, it must be for this CPU */
	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
		     event->cpu != smp_processor_id());

	/*
	 * It must not be an event with inherit set; we cannot read
	 * all child counters from atomic context.
	 */
	WARN_ON_ONCE(event->attr.inherit);

	/*
	 * It must not have a pmu::count method; those are not
	 * NMI safe.
	 */
	WARN_ON_ONCE(event->pmu->count);

	/*
	 * If the event is currently on this CPU, it's either a per-task event,
	 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
	 * oncpu == -1).
	 */
	if (event->oncpu == smp_processor_id())
		event->pmu->read(event);

	val = local64_read(&event->count);
	local_irq_restore(flags);

	return val;
}
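perf_event_read_local() is new on the right-hand side of this diff: it lets non-sleepable, lock-free contexts sample a counter they own without the cross-CPU IPI that perf_event_read() below relies on. A minimal usage sketch; the caller and the way the event was created are assumptions, not part of this diff:

/* Hypothetical caller: 'event' is assumed to be a counter created on
 * the current CPU (e.g. via perf_event_create_kernel_counter()) with
 * attr.inherit clear and no pmu::count override, so every precondition
 * checked by the WARN_ON_ONCE()s above holds.
 */
static u64 sample_local_counter(struct perf_event *event)
{
	/* No locks, no IPIs: safe from tracing and NMI-like contexts. */
	return perf_event_read_local(event);
}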
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,

--- 5480 unchanged lines hidden ---

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
struct perf_event *perf_event_get(unsigned int fd)
{
	int err;
	struct fd f;
	struct perf_event *event;

	err = perf_fget_light(fd, &f);
	if (err)
		return ERR_PTR(err);

	event = f.file->private_data;
	atomic_long_inc(&event->refcount);
	fdput(f);

	return event;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}
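perf_event_get() and perf_event_attrs() are also new in this diff: together they let another subsystem turn a user-supplied perf event fd into a referenced struct perf_event and inspect its attributes. A sketch of the intended pattern; the consumer function and the exact type check are hypothetical:

/* Hypothetical consumer: resolve an fd, then reject anything that is
 * not a hardware counter. perf_event_get() took a reference, so the
 * error path must drop it again.
 */
static struct perf_event *get_hw_counter(unsigned int fd)
{
	const struct perf_event_attr *attr;
	struct perf_event *event;

	event = perf_event_get(fd);
	if (IS_ERR(event))
		return event;

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->type != PERF_TYPE_HARDWARE) {
		perf_event_release_kernel(event);	/* drop the reference */
		return ERR_PTR(-EINVAL);
	}

	return event;
}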
/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,

--- 282 unchanged lines hidden ---

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
The only substantive change in this range of the file: the new-side commit (2965faa5e03d, "kexec: split kexec_load syscall from kexec core code") moved the kexec core behind its own Kconfig symbol, so the guard now tests CONFIG_KEXEC_CORE, which both CONFIG_KEXEC and CONFIG_KEXEC_FILE select:

-#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = true };
	struct perf_event_context *ctx = __info;

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);

--- 212 unchanged lines hidden ---
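__perf_event_exit_context() takes a void * and sits behind the hotplug/kexec guard above because it is meant to run on the CPU being torn down. A sketch of how such a callback is typically dispatched; the wrapper below is illustrative and not taken from the hidden lines:

/* Illustrative dispatch: run the teardown callback on 'cpu' itself.
 * smp_call_function_single() executes it there with interrupts off;
 * wait == 1 makes the caller block until the remote call returns.
 */
static void example_exit_context_on(int cpu, struct perf_event_context *ctx)
{
	smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
}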