#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

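/*
 * Per-CPU bookkeeping: which PMU owns each CPU and which IRQ it has
 * requested for it. cpu_armpmu also serves as the dev_id cookie handed
 * to the interrupt handler, and cpu_irq lets armpmu_free_irq() tell
 * when the last user of a shared percpu IRQ has gone away.
 */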
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else
		return GENMASK_ULL(31, 0);
}

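/*
 * PERF_TYPE_HW_CACHE events encode (type, op, result) in the low three
 * bytes of attr.config, one byte each. For example, config 0x10000
 * decodes to type 0 (L1D), op 0 (READ), result 1 (MISS), i.e. L1 data
 * cache read misses.
 */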
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

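/*
 * Translate an event's attr.type/attr.config into a hardware event
 * number. Events opened against this PMU's dynamically allocated type
 * take the raw path, just like PERF_TYPE_RAW events.
 */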
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

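/*
 * Program the counter to overflow after 'left' more events: write
 * -left (masked to the counter width) and remember it in prev_count.
 * With a 32-bit counter and left == 0x1000, for example, the counter
 * starts at 0xfffff000 and the overflow interrupt fires after 0x1000
 * increments.
 */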
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

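/*
 * Fold the current hardware counter value into the event count. The
 * cmpxchg/retry loop copes with a racing update of prev_count (e.g.
 * from the overflow interrupt), and masking the delta with max_period
 * handles the counter wrapping through zero.
 */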
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * The ARM PMU always has to update the counter, so ignore
	 * PERF_EF_UPDATE; see the comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * The ARM PMU always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD; see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/*
	 * An event following a task won't have been rejected at init time
	 * (see armpmu_event_init()), so check that this CPU supports it here.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

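/*
 * Group validation: simulate scheduling the whole group on a fake
 * pmu_hw_events (only used_mask matters) to check that all of the
 * group's hardware events can be given counters at the same time.
 */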
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
			       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

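/*
 * Common interrupt entry point. The time spent in the driver's
 * overflow handler is fed to perf_sample_event_took() so the core can
 * throttle the sampling rate if interrupt handling starts consuming
 * too much CPU time.
 */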
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework
	 * will do any necessary shifting; we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	    |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	if (ret && armpmu->filter_match)
		return armpmu->filter_match(event);

	return ret;
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

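/*
 * Count the CPUs currently using a given IRQ number, so that a shared
 * percpu IRQ is only requested once and freed only by its last user.
 */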
static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	if (!irq_is_percpu_devid(irq))
		free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
	else if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
}

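/*
 * Request the PMU interrupt for one CPU. Two flavours exist: a regular
 * per-CPU-affine interrupt (e.g. a GIC SPI), requested separately for
 * each CPU with its affinity forced, and a percpu_devid interrupt
 * (e.g. a GIC PPI), requested only once and shared by every CPU that
 * uses it.
 */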
int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING |
			    IRQF_NO_THREAD;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);
		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &cpu_armpmu);
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq))
			enable_percpu_irq(irq, IRQ_TYPE_NONE);
		else
			enable_irq(irq);
	}

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq) {
		if (irq_is_percpu_devid(irq))
			disable_percpu_irq(irq);
		else
			disable_irq_nosync(irq);
	}

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

#ifdef CONFIG_CPU_PM
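/*
 * Stop or restart every active event on this CPU around a low-power
 * transition: counter state may be lost while the core is powered
 * down, so events are stopped and saved on CPU_PM_ENTER and
 * reprogrammed on the way back out.
 */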
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * which requires RCU read locking to be functional.
			 * Wrap the call in RCU_NONIDLE() so that the RCU
			 * subsystem knows this CPU is not idle, from an RCU
			 * perspective, for the duration of the
			 * armpmu_start() call.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
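		/* Fall through */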
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

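/*
 * Allocate an arm_pmu and its per-CPU hw_events and wire up the common
 * struct pmu callbacks. The gfp flags are propagated so that callers
 * in atomic context can use the armpmu_alloc_atomic() wrapper below.
 */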
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), flags);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
	return __armpmu_alloc(GFP_KERNEL);
}

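/* Variant for callers that cannot sleep; allocates with GFP_ATOMIC. */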
struct arm_pmu *armpmu_alloc_atomic(void)
{
	return __armpmu_alloc(GFP_ATOMIC);
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

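/*
 * Register the multi-instance CPU hotplug state that every arm_pmu
 * instance attaches to via cpuhp_state_add_instance() in
 * cpu_pmu_init(), so PMUs are reset and their IRQs (re)enabled as CPUs
 * come online.
 */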
static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);