xref: /linux/arch/arm64/kvm/pmu-emul.c (revision a634dda26186cf9a51567020fcce52bcba5e1e59)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

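/* Bit 0 of perf_event_attr.config1 asks the arm_pmu driver for a 64-bit counter */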
#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);

static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

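/*
 * The architected event number space: base PMUv3 defines 10-bit event
 * numbers, while FEAT_PMUv3p1 and later widen the field to 16 bits.
 */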
static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
		   kvm_pmu_event_mask(kvm);

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
		mask |= ARMV8_PMU_INCLUDE_EL2;

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
			ARMV8_PMU_EXCLUDE_NS_EL1 |
			ARMV8_PMU_EXCLUDE_EL3;

	return mask;
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}

static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 val = kvm_vcpu_read_pmcr(vcpu);

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return __vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HLP;

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

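/*
 * Chaining pairs an even-numbered counter with its odd neighbour: the odd
 * counter, programmed with the CHAIN event, counts overflows of the even
 * one. This only applies while the even counter wraps at 32 bits; with a
 * 64-bit overflow there is nothing left for the partner to count.
 */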
static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc)
{
	return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx));
}

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is the value of the counter register plus
	 * the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val  = lower_32_bits(val);
		val |= upper_32_bits(__vcpu_sys_reg(vcpu, reg));
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
{
	unsigned int hpmn, n;

	if (!vcpu_has_nv(vcpu))
		return 0;

	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
	n = vcpu->kvm->arch.pmcr_n;

	/*
	 * Programming HPMN to a value greater than PMCR_EL0.N is
	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
	 */
	if (hpmn >= n)
		return 0;

	/*
	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
	 * implemented. Since KVM's ability to emulate HPMN=0 does not directly
	 * depend on hardware (all PMU registers are trapped), make the
	 * implementation choice that all counters are included in the second
	 * range reserved for EL2/EL3.
	 */
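	/*
	 * Example with hypothetical values: PMCR_EL0.N = 8 and
	 * MDCR_EL2.HPMN = 6 leave counters 6 and 7 to EL2, i.e. a mask of
	 * GENMASK(7, 6).
	 */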
	return GENMASK(n - 1, hpmn);
}

bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
}

u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
		return mask;

	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
}

u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));

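	/*
	 * The cycle counter is always implemented; e.g. PMCR_EL0.N = 6
	 * yields GENMASK(5, 0) | BIT(ARMV8_PMU_CYCLE_IDX).
	 */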
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event) {
		kvm_pmu_create_perf_event(pmc);
		return;
	}

	perf_event_enable(pmc->perf_event);
	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
		kvm_debug("failed to enable perf event\n");
}

static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event)
		perf_event_disable(pmc->perf_event);
}

void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!(val & BIT(i)))
			continue;

		if (kvm_pmu_counter_is_enabled(pmc))
			kvm_pmc_enable_perf_event(pmc);
		else
			kvm_pmc_disable_perf_event(pmc);
	}

	kvm_vcpu_pmu_restore_guest(vcpu);
}

/*
 * Returns the PMU overflow state, which is true if there exists an event
 * counter where the values of the global enable control, PMOVSSET_EL0[n], and
 * PMINTENSET_EL1[n] are all 1.
 */
static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);

	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);

	/*
	 * PMCR_EL0.E is the global enable control for event counters available
	 * to EL0 and EL1.
	 */
	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		reg &= kvm_pmu_hyp_counter_mask(vcpu);

	/*
	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
	 * counters reserved for EL2.
	 */
	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event directly; use this irq_work callback to do
 * it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagating it as a chained
 * event where possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

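		/*
		 * A 32-bit overflow on an even-numbered counter feeds its
		 * odd neighbour through the CHAIN event. The recursion is
		 * bounded: kvm_pmu_counter_can_chain() is false for odd
		 * indices, so a chained increment can never chain again.
		 */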
		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
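/*
 * The period is the distance to the next wrap. For instance, with a
 * 32-bit overflow, a counter value of 0xfffffff0 gives a period of 0x10,
 * so the perf event fires exactly when the emulated counter overflows.
 */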
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* Request a reload of the PMU to enable/disable affected counters */
	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		/*
		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
		 * to the 'guest' range of counters and never the 'hyp' range.
		 */
		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
				     ~kvm_pmu_hyp_counter_mask(vcpu) &
				     ~BIT(ARMV8_PMU_CYCLE_IDX);

		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	unsigned int mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)))
		return false;

	if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx))
		return mdcr & MDCR_EL2_HPME;

	return kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E;
}

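/*
 * PMEVTYPER filtering encodes "count in Non-secure state" relative to the
 * Secure controls: an event is counted at NS-EL0/NS-EL1 only when the
 * exclude bit for the EL and its NS_EL twin hold the same value.
 */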
static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsu = evtreg & ARMV8_PMU_EXCLUDE_NS_EL0;
	bool u = evtreg & ARMV8_PMU_EXCLUDE_EL0;

	return u == nsu;
}

static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc)
{
	u64 evtreg = kvm_pmc_read_evtreg(pmc);
	bool nsk = evtreg & ARMV8_PMU_EXCLUDE_NS_EL1;
	bool p = evtreg & ARMV8_PMU_EXCLUDE_EL1;

	return p == nsk;
}

static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD))
		return false;

	return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, evtreg;

	evtreg = kvm_pmc_read_evtreg(pmc);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = evtreg & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = !kvm_pmc_counts_at_el0(pmc);
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * Filter events at EL1 (i.e. vEL2) when in a hyp context based on the
	 * guest's EL2 filter.
	 */
	if (unlikely(is_hyp_ctxt(vcpu)))
		attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc);
	else
		attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc);

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants a PMC to count an
 * event with the given hardware event number. Call the perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	reg = counter_index_to_evtreg(pmc->idx);
	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu *tmp, *pmu = NULL;
	struct arm_pmu_entry *entry;
	int cpu;

	mutex_lock(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core during vcpu init. A dependent use
	 * case would be a user with disdain of all things big.LITTLE that
	 * affines the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
	 * carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		tmp = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
			pmu = tmp;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return pmu;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

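	/*
	 * Each PMCEID register advertises 32 base events in its low half
	 * and the matching extended events (IDs 0x4000 + base) in its high
	 * half. Fold the event filter into a mask with the same layout,
	 * one byte at a time.
	 */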
	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

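/*
 * Clamp the enable/overflow/interrupt bitmaps to the implemented counters
 * and resync the backing perf events with the current register state.
 */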
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
{
	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;

	kvm_pmu_reprogram_counter_mask(vcpu, mask);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and use an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

/**
 * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
 * @kvm: The kvm pointer
 */
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;

	/*
	 * The arm_pmu->cntr_mask considers the fixed counter(s) as well.
	 * Ignore those and return only the general-purpose counters.
	 */
	return bitmap_weight(arm_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS);
}

static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
{
	lockdep_assert_held(&kvm->arch.config_lock);

	kvm->arch.arm_pmu = arm_pmu;
	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
}

/**
 * kvm_arm_set_default_pmu - No PMU set, get the default one.
 * @kvm: The kvm pointer
 *
 * The observant among you will notice that the supported_cpus
 * mask does not get updated for the default PMU even though it
 * is quite possible the selected instance supports only a
 * subset of cores in the system. This is intentional, and
 * upholds the preexisting behavior on heterogeneous systems
 * where vCPUs can be scheduled on any core but the guest
 * counters could stop working.
 */
int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();

	if (!arm_pmu)
		return -ENODEV;

	kvm_arm_set_pmu(kvm, arm_pmu);
	return 0;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm_arm_set_pmu(kvm, arm_pmu);
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter for the entire
		 * event range supported by the hardware's PMUVer, rather
		 * than the guest's PMUVer, for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	u64 tmp;

	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
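	/*
	 * Cap the advertised PMU version at PMUv3p5, the most recent
	 * version KVM emulates; the helper also squashes the IMP_DEF
	 * encoding (0xf) down to "no PMU".
	 */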
	tmp = cpuid_feature_cap_perfmon_field(tmp,
					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
					      ID_AA64DFR0_EL1_PMUVer_V3P5);
	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
}

/**
 * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
 * @vcpu: The vcpu pointer
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);

	return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
}

void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
{
	bool reprogrammed = false;
	unsigned long mask;
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for_each_set_bit(i, &mask, 32) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		/*
		 * We only need to reconfigure events where the filter is
		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
		 * event filter bit for nested.
		 */
		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
			continue;

		kvm_pmu_create_perf_event(pmc);
		reprogrammed = true;
	}

	if (reprogrammed)
		kvm_vcpu_pmu_restore_guest(vcpu);
}