// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/sched_clock.h>

#include <asm/sbi.h>

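/*
 * An event may be read directly from userspace (perf's "rdpmc"-style
 * fast path) only if it is a hardware, hw-cache or raw event, userspace
 * counter access was requested for it, and it is currently bound to a
 * hardware counter.
 */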
static bool riscv_perf_user_access(struct perf_event *event)
{
	return ((event->attr.type == PERF_TYPE_HARDWARE) ||
		(event->attr.type == PERF_TYPE_HW_CACHE) ||
		(event->attr.type == PERF_TYPE_RAW)) &&
		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
		(event->hw.idx != -1);
}

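/*
 * Export the kernel's sched_clock state to the perf mmap page so that
 * userspace can convert raw counter values and enabled/running times to
 * nanoseconds using the same mult/shift/epoch as the kernel.
 */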
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = riscv_perf_user_access(event);

#ifdef CONFIG_RISCV_PMU
	/*
	 * The counters are 64-bit but the priv spec doesn't mandate all the
	 * bits to be implemented: that's why the counter width can vary
	 * based on the cpu vendor.
	 */
	if (userpg->cap_user_rdpmc)
		userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
#endif

	do {
		rd = sched_clock_read_begin(&seq);

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}

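/*
 * csr_read() expands to a csrr instruction that encodes the CSR number
 * as an immediate, so it only accepts compile-time constants. To read a
 * counter CSR selected at runtime, the nested macros below generate a
 * switch case for each of the 32 counter CSRs (and, from CSR_CYCLEH
 * onwards, their RV32 upper halves).
 */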
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	 \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	 \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	 \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	 \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	 \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR corresponding to a given counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}

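/*
 * Build a mask of the implemented bits of the event's counter.
 * ctr_get_width() reports the most significant implemented bit, so the
 * mask covers bits [cwidth:0]; before the event is bound to a counter
 * (idx == -1), the width of counter 0 is used.
 */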
u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		/* Handle init case where idx is not initialized yet */
		cwidth = rvpmu->ctr_get_width(0);
	else
		cwidth = rvpmu->ctr_get_width(hwc->idx);

	return GENMASK_ULL(cwidth, 0);
}

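/*
 * Read the hardware counter and publish the new value in prev_count via
 * a cmpxchg loop (safe against a racing interrupt handler), then fold
 * the delta, taken modulo the implemented counter width, into the event
 * count and the remaining sampling period.
 */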
u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}

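/*
 * Stop the counter through the backend's ctr_stop() callback and fold
 * in whatever was counted since the last update, leaving the saved
 * count up to date.
 */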
void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

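/*
 * Compute the next sampling period and park its two's complement in
 * prev_count, so that a counter started from that value overflows
 * after "left" events. Returns 1 if at least one period has already
 * elapsed.
 */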
int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	perf_event_update_userpage(event);

	return overflow;
}

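/*
 * (Re)program the sampling period and start the counter from the value
 * prepared in prev_count, truncated to the implemented counter width.
 */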
void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

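/*
 * Bind the event to a free hardware counter chosen by the backend and
 * optionally start it; the counter stays associated with the event
 * through hwc->idx until riscv_pmu_del() releases it.
 */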
static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

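/*
 * Map the generic perf event onto a backend-specific event encoding and
 * set up a default period for non-sampling use.
 */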
static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	/* driver does not support branch stack sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config will contain the information about the counter CSR;
	 * idx will contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (rvpmu->event_init)
		rvpmu->event_init(event);

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period  =  cmask >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

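/*
 * Tell userspace which counter backs the event: the perf mmap ABI has
 * userspace read counter "index - 1", with 0 meaning direct reads are
 * not allowed, hence the +1 bias on the backend's CSR index.
 */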
static int riscv_pmu_event_idx(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return 0;

	if (rvpmu->csr_index)
		return rvpmu->csr_index(event) + 1;

	return 0;
}

static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (rvpmu->event_mapped) {
		rvpmu->event_mapped(event, mm);
		perf_event_update_userpage(event);
	}
}

static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (rvpmu->event_unmapped) {
		rvpmu->event_unmapped(event, mm);
		perf_event_update_userpage(event);
	}
}

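/*
 * Allocate a riscv_pmu along with its per-cpu event state and wire up
 * the generic struct pmu callbacks; the backend (e.g. the SBI PMU
 * driver) fills in the ctr_*() hooks before registering the pmu.
 */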
struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
		cpuc->snapshot_addr = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.event_mapped	= riscv_pmu_event_mapped,
		.event_unmapped	= riscv_pmu_event_unmapped,
		.event_idx	= riscv_pmu_event_idx,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,
	};

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}