xref: /linux/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c (revision 971199ad2a0f1b2fbe14af13369704aff2999988)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * HiSilicon SoC L3C uncore Hardware event counters support
4  *
5  * Copyright (C) 2017 HiSilicon Limited
6  * Author: Anurup M <anurup.m@huawei.com>
7  *         Shaokun Zhang <zhangshaokun@hisilicon.com>
8  *
9  * This code is based on the uncore PMUs like arm-cci and arm-ccn.
10  */
11 #include <linux/acpi.h>
12 #include <linux/bug.h>
13 #include <linux/cpuhotplug.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/list.h>
17 #include <linux/smp.h>
18 
19 #include "hisi_uncore_pmu.h"
20 
/* L3C register definition */
#define L3C_PERF_CTRL		0x0408
#define L3C_INT_MASK		0x0800
#define L3C_INT_STATUS		0x0808
#define L3C_INT_CLEAR		0x080c
#define L3C_CORE_CTRL           0x1b04
#define L3C_TRACETAG_CTRL       0x1b20
#define L3C_DATSRC_TYPE         0x1b48
#define L3C_DATSRC_CTRL         0x1bf0
#define L3C_EVENT_CTRL	        0x1c00
#define L3C_VERSION		0x1cf0
#define L3C_EVENT_TYPE0		0x1d00
/*
 * If the HW version only supports a 48-bit counter, then
 * bits [63:48] are reserved, which are Read-As-Zero and
 * Writes-Ignored.
 */
#define L3C_CNTR0_LOWER		0x1e00

/* L3C has 8-counters */
#define L3C_NR_COUNTERS		0x8
/* Maximum number of extension event regions the driver can handle */
#define L3C_MAX_EXT		2

#define L3C_PERF_CTRL_EN	0x10000
#define L3C_TRACETAG_EN		BIT(31)
#define L3C_TRACETAG_REQ_SHIFT	7
#define L3C_TRACETAG_MARK_EN	BIT(0)
#define L3C_TRACETAG_REQ_EN	(L3C_TRACETAG_MARK_EN | BIT(2))
#define L3C_TRACETAG_CORE_EN	(L3C_TRACETAG_MARK_EN | BIT(3))
#define L3C_CORE_EN		BIT(20)
/* NOTE(review): "COER" looks like a long-standing typo for CORE; kept as-is */
#define L3C_COER_NONE		0x0
#define L3C_DATSRC_MASK		0xFF
#define L3C_DATSRC_SKT_EN	BIT(23)
#define L3C_DATSRC_NONE		0x0
#define L3C_EVTYPE_NONE		0xff
#define L3C_V1_NR_EVENTS	0x59
#define L3C_V2_NR_EVENTS	0xFF

/* Selector of the extension event region (0 = normal region) */
HISI_PMU_EVENT_ATTR_EXTRACTOR(ext, config, 17, 16);
/*
 * Remain the config1:0-7 for backward compatibility if some existing users
 * hardcode the config1:0-7 directly without parsing the sysfs attribute.
 */
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core_deprecated, config1, 7, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config2, 15, 0);
69 
/* L3C PMU instance, embedding the common HiSilicon uncore PMU state. */
struct hisi_l3c_pmu {
	struct hisi_pmu l3c_pmu;

	/* MMIO and IRQ resources for extension events */
	void __iomem *ext_base[L3C_MAX_EXT];
	int ext_irq[L3C_MAX_EXT];
	int ext_num;	/* number of ext regions actually present */
};

#define to_hisi_l3c_pmu(_l3c_pmu) \
	container_of(_l3c_pmu, struct hisi_l3c_pmu, l3c_pmu)

/*
 * The hardware counter idx used in counter enable/disable,
 * interrupt enable/disable and status check, etc.
 */
#define L3C_HW_IDX(_cntr_idx)		((_cntr_idx) % L3C_NR_COUNTERS)

/* Range of ext counters in used mask. */
#define L3C_CNTR_EXT_L(_ext)		(((_ext) + 1) * L3C_NR_COUNTERS)
#define L3C_CNTR_EXT_H(_ext)		(((_ext) + 2) * L3C_NR_COUNTERS)

/* Per-version capability flags, hung off hisi_pmu_dev_info::private. */
struct hisi_l3c_pmu_ext {
	bool support_ext;
};
95 
support_ext(struct hisi_l3c_pmu * pmu)96 static bool support_ext(struct hisi_l3c_pmu *pmu)
97 {
98 	struct hisi_l3c_pmu_ext *l3c_pmu_ext = pmu->l3c_pmu.dev_info->private;
99 
100 	return l3c_pmu_ext->support_ext;
101 }
102 
103 /*
104  * tt_core was extended to cover all the CPUs sharing the L3 and was moved from
105  * config1:0-7 to config2:0-*. Try it first and fallback to tt_core_deprecated
106  * if user's still using the deprecated one.
107  */
hisi_l3c_pmu_get_tt_core(struct perf_event * event)108 static u32 hisi_l3c_pmu_get_tt_core(struct perf_event *event)
109 {
110 	u32 core = hisi_get_tt_core(event);
111 
112 	if (core)
113 		return core;
114 
115 	return hisi_get_tt_core_deprecated(event);
116 }
117 
/*
 * Allocate a free counter index for the event.
 *
 * Returns the index on success, -EOPNOTSUPP when an ext event is
 * requested on hardware without ext support, or -EAGAIN when every
 * counter in the requested range is busy.
 */
static int hisi_l3c_pmu_get_event_idx(struct perf_event *event)
{
	struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
	int ext = hisi_get_ext(event);
	int idx;

	/*
	 * For an L3C PMU that supports extension events, we can monitor
	 * maximum 2 * num_counters to 3 * num_counters events, depending on
	 * the number of ext regions supported by hardware. Thus use bit
	 * [0, num_counters - 1] for normal events and bit
	 * [ext * num_counters, (ext + 1) * num_counters - 1] for extension
	 * events. The idx allocation will keep unchanged for normal events and
	 * we can also use the idx to distinguish whether it's an extension
	 * event or not.
	 *
	 * Since normal events and extension events locates on the different
	 * address space, save the base address to the event->hw.event_base.
	 */
	if (ext && !support_ext(hisi_l3c_pmu))
		return -EOPNOTSUPP;

	if (ext)
		event->hw.event_base = (unsigned long)hisi_l3c_pmu->ext_base[ext - 1];
	else
		event->hw.event_base = (unsigned long)l3c_pmu->base;

	/*
	 * After the decrement, ext == -1 selects bits [0, 7] (normal range)
	 * and ext == N selects bits [(N + 1) * 8, (N + 1) * 8 + 7] through
	 * L3C_CNTR_EXT_L/H().
	 */
	ext -= 1;
	idx = find_next_zero_bit(used_mask, L3C_CNTR_EXT_H(ext), L3C_CNTR_EXT_L(ext));

	if (idx >= L3C_CNTR_EXT_H(ext))
		return -EAGAIN;

	set_bit(idx, used_mask);

	return idx;
}
157 
/* Read a 32-bit PMU register relative to the event's MMIO region. */
static u32 hisi_l3c_pmu_event_readl(struct hw_perf_event *hwc, u32 reg)
{
	void __iomem *base = (void __iomem *)hwc->event_base;

	return readl(base + reg);
}
162 
/* Write a 32-bit PMU register relative to the event's MMIO region. */
static void hisi_l3c_pmu_event_writel(struct hw_perf_event *hwc, u32 reg, u32 val)
{
	void __iomem *base = (void __iomem *)hwc->event_base;

	writel(val, base + reg);
}
167 
hisi_l3c_pmu_event_readq(struct hw_perf_event * hwc,u32 reg)168 static u64 hisi_l3c_pmu_event_readq(struct hw_perf_event *hwc, u32 reg)
169 {
170 	return readq((void __iomem *)hwc->event_base + reg);
171 }
172 
/* Write a 64-bit PMU register relative to the event's MMIO region. */
static void hisi_l3c_pmu_event_writeq(struct hw_perf_event *hwc, u32 reg, u64 val)
{
	void __iomem *base = (void __iomem *)hwc->event_base;

	writeq(val, base + reg);
}
177 
hisi_l3c_pmu_config_req_tracetag(struct perf_event * event)178 static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
179 {
180 	struct hw_perf_event *hwc = &event->hw;
181 	u32 tt_req = hisi_get_tt_req(event);
182 
183 	if (tt_req) {
184 		u32 val;
185 
186 		/* Set request-type for tracetag */
187 		val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
188 		val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
189 		val |= L3C_TRACETAG_REQ_EN;
190 		hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
191 
192 		/* Enable request-tracetag statistics */
193 		val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
194 		val |= L3C_TRACETAG_EN;
195 		hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
196 	}
197 }
198 
hisi_l3c_pmu_clear_req_tracetag(struct perf_event * event)199 static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
200 {
201 	struct hw_perf_event *hwc = &event->hw;
202 	u32 tt_req = hisi_get_tt_req(event);
203 
204 	if (tt_req) {
205 		u32 val;
206 
207 		/* Clear request-type */
208 		val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
209 		val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
210 		val &= ~L3C_TRACETAG_REQ_EN;
211 		hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
212 
213 		/* Disable request-tracetag statistics */
214 		val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
215 		val &= ~L3C_TRACETAG_EN;
216 		hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
217 	}
218 }
219 
/*
 * Write the event's datasource configuration.
 *
 * Two datasource registers (L3C_DATSRC_TYPE0/1) each carry four 8-bit
 * fields, one per hardware counter: counters 0-3 live in TYPE0 and
 * counters 4-7 in TYPE1.
 */
static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
{
	struct hw_perf_event *hwc = &event->hw;
	int hw_idx = L3C_HW_IDX(hwc->idx);
	u32 reg = L3C_DATSRC_TYPE + (hw_idx / 4) * 4;
	u32 shift = (hw_idx % 4) * 8;
	u32 val;

	val = hisi_l3c_pmu_event_readl(hwc, reg);
	val &= ~(L3C_DATSRC_MASK << shift);
	val |= ds_cfg << shift;
	hisi_l3c_pmu_event_writel(hwc, reg, val);
}
242 
hisi_l3c_pmu_config_ds(struct perf_event * event)243 static void hisi_l3c_pmu_config_ds(struct perf_event *event)
244 {
245 	struct hw_perf_event *hwc = &event->hw;
246 	u32 ds_cfg = hisi_get_datasrc_cfg(event);
247 	u32 ds_skt = hisi_get_datasrc_skt(event);
248 
249 	if (ds_cfg)
250 		hisi_l3c_pmu_write_ds(event, ds_cfg);
251 
252 	if (ds_skt) {
253 		u32 val;
254 
255 		val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
256 		val |= L3C_DATSRC_SKT_EN;
257 		hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
258 	}
259 }
260 
hisi_l3c_pmu_clear_ds(struct perf_event * event)261 static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
262 {
263 	struct hw_perf_event *hwc = &event->hw;
264 	u32 ds_cfg = hisi_get_datasrc_cfg(event);
265 	u32 ds_skt = hisi_get_datasrc_skt(event);
266 
267 	if (ds_cfg)
268 		hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE);
269 
270 	if (ds_skt) {
271 		u32 val;
272 
273 		val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
274 		val &= ~L3C_DATSRC_SKT_EN;
275 		hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
276 	}
277 }
278 
hisi_l3c_pmu_config_core_tracetag(struct perf_event * event)279 static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
280 {
281 	struct hw_perf_event *hwc = &event->hw;
282 	u32 core = hisi_l3c_pmu_get_tt_core(event);
283 
284 	if (core) {
285 		u32 val;
286 
287 		/* Config and enable core information */
288 		hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, core);
289 		val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
290 		val |= L3C_CORE_EN;
291 		hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
292 
293 		/* Enable core-tracetag statistics */
294 		val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
295 		val |= L3C_TRACETAG_CORE_EN;
296 		hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
297 	}
298 }
299 
hisi_l3c_pmu_clear_core_tracetag(struct perf_event * event)300 static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
301 {
302 	struct hw_perf_event *hwc = &event->hw;
303 	u32 core = hisi_l3c_pmu_get_tt_core(event);
304 
305 	if (core) {
306 		u32 val;
307 
308 		/* Clear core information */
309 		hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, L3C_COER_NONE);
310 		val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
311 		val &= ~L3C_CORE_EN;
312 		hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
313 
314 		/* Disable core-tracetag statistics */
315 		val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
316 		val &= ~L3C_TRACETAG_CORE_EN;
317 		hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
318 	}
319 }
320 
hisi_l3c_pmu_have_filter(struct perf_event * event)321 static bool hisi_l3c_pmu_have_filter(struct perf_event *event)
322 {
323 	return hisi_get_tt_req(event) || hisi_l3c_pmu_get_tt_core(event) ||
324 	       hisi_get_datasrc_cfg(event) || hisi_get_datasrc_skt(event);
325 }
326 
/* Apply all filters the event requests before it starts counting. */
static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
{
	if (!hisi_l3c_pmu_have_filter(event))
		return;

	hisi_l3c_pmu_config_req_tracetag(event);
	hisi_l3c_pmu_config_core_tracetag(event);
	hisi_l3c_pmu_config_ds(event);
}
335 
/* Tear the filters down in the reverse order they were applied. */
static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
{
	if (!hisi_l3c_pmu_have_filter(event))
		return;

	hisi_l3c_pmu_clear_ds(event);
	hisi_l3c_pmu_clear_core_tracetag(event);
	hisi_l3c_pmu_clear_req_tracetag(event);
}
344 
/*
 * Validate the event encoding: the ext selector must reference an
 * implemented region (0 means the normal range, 1..ext_num an ext
 * region) and the new and deprecated tt_core fields must not be used
 * together.
 */
static int hisi_l3c_pmu_check_filter(struct perf_event *event)
{
	struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	int ext = hisi_get_ext(event);

	if (ext < 0 || ext > hisi_l3c_pmu->ext_num)
		return -EINVAL;

	if (hisi_get_tt_core(event) && hisi_get_tt_core_deprecated(event))
		return -EINVAL;

	return 0;
}
359 
360 /*
361  * Select the counter register offset using the counter index
362  */
hisi_l3c_pmu_get_counter_offset(int cntr_idx)363 static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
364 {
365 	return L3C_CNTR0_LOWER + L3C_HW_IDX(cntr_idx) * 8;
366 }
367 
hisi_l3c_pmu_read_counter(struct hisi_pmu * l3c_pmu,struct hw_perf_event * hwc)368 static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
369 				     struct hw_perf_event *hwc)
370 {
371 	return hisi_l3c_pmu_event_readq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx));
372 }
373 
/* Write a 64-bit value to the event's hardware counter. */
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
				       struct hw_perf_event *hwc, u64 val)
{
	u32 offset = hisi_l3c_pmu_get_counter_offset(hwc->idx);

	hisi_l3c_pmu_event_writeq(hwc, offset, val);
}
379 
/* Program the 8-bit event code for the counter at @idx. */
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
				      u32 type)
{
	struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
	int hw_idx = L3C_HW_IDX(idx);
	u32 reg, shift, val;

	/*
	 * Two event select registers (L3C_EVENT_TYPE0/1) each hold four
	 * 8-bit event codes: counters 0-3 are configured through TYPE0
	 * and counters 4-7 through TYPE1.
	 */
	reg = L3C_EVENT_TYPE0 + (hw_idx / 4) * 4;
	shift = (hw_idx % 4) * 8;

	/* Replace the old event code with the new one */
	val = hisi_l3c_pmu_event_readl(hwc, reg);
	val &= ~(L3C_EVTYPE_NONE << shift);
	val |= type << shift;
	hisi_l3c_pmu_event_writel(hwc, reg, val);
}
405 
hisi_l3c_pmu_start_counters(struct hisi_pmu * l3c_pmu)406 static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
407 {
408 	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
409 	unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
410 	unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
411 	u32 val;
412 	int i;
413 
414 	/*
415 	 * Check if any counter belongs to the normal range (instead of ext
416 	 * range). If so, enable it.
417 	 */
418 	if (used_cntr < L3C_NR_COUNTERS) {
419 		val = readl(l3c_pmu->base + L3C_PERF_CTRL);
420 		val |= L3C_PERF_CTRL_EN;
421 		writel(val, l3c_pmu->base + L3C_PERF_CTRL);
422 	}
423 
424 	/* If not, do enable it on ext ranges. */
425 	for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
426 		/* Find used counter in this ext range, skip the range if not. */
427 		used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
428 		if (used_cntr >= L3C_CNTR_EXT_H(i))
429 			continue;
430 
431 		val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
432 		val |= L3C_PERF_CTRL_EN;
433 		writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
434 	}
435 }
436 
hisi_l3c_pmu_stop_counters(struct hisi_pmu * l3c_pmu)437 static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
438 {
439 	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
440 	unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
441 	unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
442 	u32 val;
443 	int i;
444 
445 	/*
446 	 * Check if any counter belongs to the normal range (instead of ext
447 	 * range). If so, stop it.
448 	 */
449 	if (used_cntr < L3C_NR_COUNTERS) {
450 		val = readl(l3c_pmu->base + L3C_PERF_CTRL);
451 		val &= ~L3C_PERF_CTRL_EN;
452 		writel(val, l3c_pmu->base + L3C_PERF_CTRL);
453 	}
454 
455 	/* If not, do stop it on ext ranges. */
456 	for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
457 		/* Find used counter in this ext range, skip the range if not. */
458 		used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
459 		if (used_cntr >= L3C_CNTR_EXT_H(i))
460 			continue;
461 
462 		val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
463 		val &= ~L3C_PERF_CTRL_EN;
464 		writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
465 	}
466 }
467 
hisi_l3c_pmu_enable_counter(struct hisi_pmu * l3c_pmu,struct hw_perf_event * hwc)468 static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
469 					struct hw_perf_event *hwc)
470 {
471 	u32 val;
472 
473 	/* Enable counter index in L3C_EVENT_CTRL register */
474 	val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
475 	val |= 1 << L3C_HW_IDX(hwc->idx);
476 	hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
477 }
478 
hisi_l3c_pmu_disable_counter(struct hisi_pmu * l3c_pmu,struct hw_perf_event * hwc)479 static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
480 					 struct hw_perf_event *hwc)
481 {
482 	u32 val;
483 
484 	/* Clear counter index in L3C_EVENT_CTRL register */
485 	val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
486 	val &= ~(1 << L3C_HW_IDX(hwc->idx));
487 	hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
488 }
489 
hisi_l3c_pmu_enable_counter_int(struct hisi_pmu * l3c_pmu,struct hw_perf_event * hwc)490 static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
491 					    struct hw_perf_event *hwc)
492 {
493 	u32 val;
494 
495 	val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
496 	/* Write 0 to enable interrupt */
497 	val &= ~(1 << L3C_HW_IDX(hwc->idx));
498 	hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
499 }
500 
hisi_l3c_pmu_disable_counter_int(struct hisi_pmu * l3c_pmu,struct hw_perf_event * hwc)501 static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
502 					     struct hw_perf_event *hwc)
503 {
504 	u32 val;
505 
506 	val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
507 	/* Write 1 to mask interrupt */
508 	val |= 1 << L3C_HW_IDX(hwc->idx);
509 	hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
510 }
511 
/*
 * Collect the overflow status of all counters into a single bitmap:
 * bits [0, 7] for the normal range and bits [(i + 1) * 8, (i + 1) * 8 + 7]
 * for ext range i (with at most L3C_MAX_EXT = 2 ext ranges the result
 * fits in 24 bits).
 */
static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	u32 ext_int, status, status_ext = 0;
	int i;

	status = readl(l3c_pmu->base + L3C_INT_STATUS);

	if (!support_ext(hisi_l3c_pmu))
		return status;

	for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
		ext_int = readl(hisi_l3c_pmu->ext_base[i] + L3C_INT_STATUS);
		status_ext |= ext_int << (L3C_NR_COUNTERS * i);
	}

	/* Ext statuses start right above the normal counter bits. */
	return status | (status_ext << L3C_NR_COUNTERS);
}
530 
hisi_l3c_pmu_clear_int_status(struct hisi_pmu * l3c_pmu,int idx)531 static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
532 {
533 	struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
534 
535 	hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << L3C_HW_IDX(idx));
536 }
537 
hisi_l3c_pmu_init_data(struct platform_device * pdev,struct hisi_pmu * l3c_pmu)538 static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
539 				  struct hisi_pmu *l3c_pmu)
540 {
541 	hisi_uncore_pmu_init_topology(l3c_pmu, &pdev->dev);
542 
543 	/*
544 	 * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
545 	 * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
546 	 */
547 	if (l3c_pmu->topo.sccl_id < 0) {
548 		dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
549 		return -EINVAL;
550 	}
551 
552 	if (l3c_pmu->topo.ccl_id < 0) {
553 		dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
554 		return -EINVAL;
555 	}
556 
557 	l3c_pmu->dev_info = device_get_match_data(&pdev->dev);
558 	if (!l3c_pmu->dev_info)
559 		return -ENODEV;
560 
561 	l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
562 	if (IS_ERR(l3c_pmu->base)) {
563 		dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
564 		return PTR_ERR(l3c_pmu->base);
565 	}
566 
567 	l3c_pmu->identifier = readl(l3c_pmu->base + L3C_VERSION);
568 
569 	return 0;
570 }
571 
hisi_l3c_pmu_init_ext(struct hisi_pmu * l3c_pmu,struct platform_device * pdev)572 static int hisi_l3c_pmu_init_ext(struct hisi_pmu *l3c_pmu, struct platform_device *pdev)
573 {
574 	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
575 	int ret, irq, ext_num, i;
576 	char *irqname;
577 
578 	/* HiSilicon L3C PMU supporting ext should have more than 1 irq resources. */
579 	ext_num = platform_irq_count(pdev);
580 	if (ext_num < L3C_MAX_EXT)
581 		return -ENODEV;
582 
583 	/*
584 	 * The number of ext supported equals the number of irq - 1, since one
585 	 * of the irqs belongs to the normal part of PMU.
586 	 */
587 	hisi_l3c_pmu->ext_num = ext_num - 1;
588 
589 	for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
590 		hisi_l3c_pmu->ext_base[i] = devm_platform_ioremap_resource(pdev, i + 1);
591 		if (IS_ERR(hisi_l3c_pmu->ext_base[i]))
592 			return PTR_ERR(hisi_l3c_pmu->ext_base[i]);
593 
594 		irq = platform_get_irq(pdev, i + 1);
595 		if (irq < 0)
596 			return irq;
597 
598 		irqname = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s ext%d",
599 					 dev_name(&pdev->dev), i + 1);
600 		if (!irqname)
601 			return -ENOMEM;
602 
603 		ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
604 				       IRQF_NOBALANCING | IRQF_NO_THREAD,
605 				       irqname, l3c_pmu);
606 		if (ret < 0)
607 			return dev_err_probe(&pdev->dev, ret,
608 				"Fail to request EXT IRQ: %d.\n", irq);
609 
610 		hisi_l3c_pmu->ext_irq[i] = irq;
611 	}
612 
613 	return 0;
614 }
615 
/* v1 hardware only exposes the 8-bit event code. */
static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL,
};

static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
	.name = "format",
	.attrs = hisi_l3c_pmu_v1_format_attr,
};

/* v2 adds the tracetag and datasource filter fields. */
static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
	HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
	HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
	HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
	HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
	NULL
};

static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_l3c_pmu_v2_format_attr,
};

/* v3 drops the datasource fields and adds the ext region selector. */
static struct attribute *hisi_l3c_pmu_v3_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(ext, "config:16-17"),
	HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
	HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
	HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
	NULL
};

static const struct attribute_group hisi_l3c_pmu_v3_format_group = {
	.name = "format",
	.attrs = hisi_l3c_pmu_v3_format_attr,
};
654 
/* Named events implemented by v1 hardware, keyed by event code. */
static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rd_cpipe,		0x00),
	HISI_PMU_EVENT_ATTR(wr_cpipe,		0x01),
	HISI_PMU_EVENT_ATTR(rd_hit_cpipe,	0x02),
	HISI_PMU_EVENT_ATTR(wr_hit_cpipe,	0x03),
	HISI_PMU_EVENT_ATTR(victim_num,		0x04),
	HISI_PMU_EVENT_ATTR(rd_spipe,		0x20),
	HISI_PMU_EVENT_ATTR(wr_spipe,		0x21),
	HISI_PMU_EVENT_ATTR(rd_hit_spipe,	0x22),
	HISI_PMU_EVENT_ATTR(wr_hit_spipe,	0x23),
	HISI_PMU_EVENT_ATTR(back_invalid,	0x29),
	HISI_PMU_EVENT_ATTR(retry_cpu,		0x40),
	HISI_PMU_EVENT_ATTR(retry_ring,		0x41),
	HISI_PMU_EVENT_ATTR(prefetch_drop,	0x42),
	NULL,
};

static const struct attribute_group hisi_l3c_pmu_v1_events_group = {
	.name = "events",
	.attrs = hisi_l3c_pmu_v1_events_attr,
};

/* Named events implemented by v2 hardware, keyed by event code. */
static struct attribute *hisi_l3c_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(l3c_hit,		0x48),
	HISI_PMU_EVENT_ATTR(cycles,		0x7f),
	HISI_PMU_EVENT_ATTR(l3c_ref,		0xb8),
	HISI_PMU_EVENT_ATTR(dat_access,		0xb9),
	NULL
};

static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_l3c_pmu_v2_events_attr,
};

/* Named events implemented by v3 hardware, keyed by event code. */
static struct attribute *hisi_l3c_pmu_v3_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rd_spipe,		0x18),
	HISI_PMU_EVENT_ATTR(rd_hit_spipe,	0x19),
	HISI_PMU_EVENT_ATTR(wr_spipe,		0x1a),
	HISI_PMU_EVENT_ATTR(wr_hit_spipe,	0x1b),
	HISI_PMU_EVENT_ATTR(io_rd_spipe,	0x1c),
	HISI_PMU_EVENT_ATTR(io_rd_hit_spipe,	0x1d),
	HISI_PMU_EVENT_ATTR(io_wr_spipe,	0x1e),
	HISI_PMU_EVENT_ATTR(io_wr_hit_spipe,	0x1f),
	HISI_PMU_EVENT_ATTR(cycles,		0x7f),
	HISI_PMU_EVENT_ATTR(l3c_ref,		0xbc),
	HISI_PMU_EVENT_ATTR(l3c2ring,		0xbd),
	NULL
};

static const struct attribute_group hisi_l3c_pmu_v3_events_group = {
	.name = "events",
	.attrs = hisi_l3c_pmu_v3_events_attr,
};
709 
/* Per-version sysfs attribute groups exported with the PMU. */
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
	&hisi_l3c_pmu_v1_format_group,
	&hisi_l3c_pmu_v1_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL,
};

static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
	&hisi_l3c_pmu_v2_format_group,
	&hisi_l3c_pmu_v2_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};

static const struct attribute_group *hisi_l3c_pmu_v3_attr_groups[] = {
	&hisi_l3c_pmu_v3_format_group,
	&hisi_l3c_pmu_v3_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};
733 
/* Capability markers referenced via hisi_pmu_dev_info::private. */
static struct hisi_l3c_pmu_ext hisi_l3c_pmu_support_ext = {
	.support_ext = true,
};

static struct hisi_l3c_pmu_ext hisi_l3c_pmu_not_support_ext = {
	.support_ext = false,
};

/* v1: 48-bit counters, event codes up to 0x59, no ext regions. */
static const struct hisi_pmu_dev_info hisi_l3c_pmu_v1 = {
	.attr_groups = hisi_l3c_pmu_v1_attr_groups,
	.counter_bits = 48,
	.check_event = L3C_V1_NR_EVENTS,
	.private = &hisi_l3c_pmu_not_support_ext,
};

/* v2: 64-bit counters, full 8-bit event space, no ext regions. */
static const struct hisi_pmu_dev_info hisi_l3c_pmu_v2 = {
	.attr_groups = hisi_l3c_pmu_v2_attr_groups,
	.counter_bits = 64,
	.check_event = L3C_V2_NR_EVENTS,
	.private = &hisi_l3c_pmu_not_support_ext,
};

/* v3: as v2 but with extension event regions. */
static const struct hisi_pmu_dev_info hisi_l3c_pmu_v3 = {
	.attr_groups = hisi_l3c_pmu_v3_attr_groups,
	.counter_bits = 64,
	.check_event = L3C_V2_NR_EVENTS,
	.private = &hisi_l3c_pmu_support_ext,
};
762 
/* Callback table plugged into the common HiSilicon uncore PMU core. */
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
	.write_evtype		= hisi_l3c_pmu_write_evtype,
	.get_event_idx		= hisi_l3c_pmu_get_event_idx,
	.start_counters		= hisi_l3c_pmu_start_counters,
	.stop_counters		= hisi_l3c_pmu_stop_counters,
	.enable_counter		= hisi_l3c_pmu_enable_counter,
	.disable_counter	= hisi_l3c_pmu_disable_counter,
	.enable_counter_int	= hisi_l3c_pmu_enable_counter_int,
	.disable_counter_int	= hisi_l3c_pmu_disable_counter_int,
	.write_counter		= hisi_l3c_pmu_write_counter,
	.read_counter		= hisi_l3c_pmu_read_counter,
	.get_int_status		= hisi_l3c_pmu_get_int_status,
	.clear_int_status	= hisi_l3c_pmu_clear_int_status,
	.enable_filter		= hisi_l3c_pmu_enable_filter,
	.disable_filter		= hisi_l3c_pmu_disable_filter,
	.check_filter		= hisi_l3c_pmu_check_filter,
};
780 
/*
 * Common probe path shared by all L3C PMU versions: map resources,
 * request the normal-range IRQ and fill in the hisi_pmu fields. For
 * hardware with ext support, also set up the ext regions and extend
 * num_counters accordingly.
 */
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *l3c_pmu)
{
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	struct hisi_l3c_pmu_ext *l3c_pmu_dev_ext;
	int ret;

	ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev);
	if (ret)
		return ret;

	l3c_pmu->pmu_events.attr_groups = l3c_pmu->dev_info->attr_groups;
	l3c_pmu->counter_bits = l3c_pmu->dev_info->counter_bits;
	l3c_pmu->check_event = l3c_pmu->dev_info->check_event;
	l3c_pmu->num_counters = L3C_NR_COUNTERS;
	l3c_pmu->ops = &hisi_uncore_l3c_ops;
	l3c_pmu->dev = &pdev->dev;
	l3c_pmu->on_cpu = -1;	/* no supervising CPU until the hotplug callback runs */

	l3c_pmu_dev_ext = l3c_pmu->dev_info->private;
	if (l3c_pmu_dev_ext->support_ext) {
		ret = hisi_l3c_pmu_init_ext(l3c_pmu, pdev);
		if (ret)
			return ret;
		/*
		 * The extension events have their own counters with the
		 * same number of the normal events counters. So we can
		 * have at maximum num_counters * ext events monitored.
		 */
		l3c_pmu->num_counters += hisi_l3c_pmu->ext_num * L3C_NR_COUNTERS;
	}

	return 0;
}
819 
/* Platform driver probe: allocate, initialise and register the PMU. */
static int hisi_l3c_pmu_probe(struct platform_device *pdev)
{
	struct hisi_l3c_pmu *hisi_l3c_pmu;
	struct hisi_pmu *l3c_pmu;
	char *name;
	int ret;

	hisi_l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*hisi_l3c_pmu), GFP_KERNEL);
	if (!hisi_l3c_pmu)
		return -ENOMEM;

	l3c_pmu = &hisi_l3c_pmu->l3c_pmu;
	platform_set_drvdata(pdev, l3c_pmu);

	ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
	if (ret)
		return ret;

	/* Include sub_id in the name only when the topology provides one. */
	if (l3c_pmu->topo.sub_id >= 0)
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d_%d",
				      l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id,
				      l3c_pmu->topo.sub_id);
	else
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
				      l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
				       &l3c_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	hisi_pmu_init(l3c_pmu, THIS_MODULE);

	ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
	if (ret) {
		dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
		/* Roll back the hotplug registration on failure. */
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node);
	}

	return ret;
}
866 
/* Platform driver remove: unregister from perf before dropping hotplug. */
static void hisi_l3c_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&l3c_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
					    &l3c_pmu->node);
}
875 
/* ACPI HIDs mapping each hardware generation to its dev_info. */
static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
	{ "HISI0213", (kernel_ulong_t)&hisi_l3c_pmu_v1 },
	{ "HISI0214", (kernel_ulong_t)&hisi_l3c_pmu_v2 },
	{ "HISI0215", (kernel_ulong_t)&hisi_l3c_pmu_v3 },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);

static struct platform_driver hisi_l3c_pmu_driver = {
	.driver = {
		.name = "hisi_l3c_pmu",
		.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_l3c_pmu_probe,
	.remove = hisi_l3c_pmu_remove,
};
893 
/*
 * Hotplug online callback: let the common code pick the supervising CPU,
 * then pin any ext IRQs to the same CPU as the normal PMU IRQ.
 */
static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	int ret, i;

	ret = hisi_uncore_pmu_online_cpu(cpu, node);
	if (ret)
		return ret;

	/* Avoid L3C pmu not supporting ext from ext irq migrating. */
	if (!support_ext(hisi_l3c_pmu))
		return 0;

	for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
		WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
					 cpumask_of(l3c_pmu->on_cpu)));

	return 0;
}
914 
/*
 * Hotplug offline callback: let the common code migrate the PMU context,
 * then move any ext IRQs to the newly chosen supervising CPU.
 */
static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
	struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
	int ret, i;

	ret = hisi_uncore_pmu_offline_cpu(cpu, node);
	if (ret)
		return ret;

	/* If failed to find any available CPU, skip irq migration. */
	if (l3c_pmu->on_cpu < 0)
		return 0;

	/* Avoid L3C pmu not supporting ext from ext irq migrating. */
	if (!support_ext(hisi_l3c_pmu))
		return 0;

	for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
		WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
					 cpumask_of(l3c_pmu->on_cpu)));

	return 0;
}
939 
/*
 * Module init: the shared hotplug state must be registered before the
 * platform driver so instances can attach to it during probe.
 */
static int __init hisi_l3c_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
				      "AP_PERF_ARM_HISI_L3_ONLINE",
				      hisi_l3c_pmu_online_cpu,
				      hisi_l3c_pmu_offline_cpu);
	if (ret) {
		pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
		return ret;
	}

	/* Roll the hotplug state back if driver registration fails. */
	ret = platform_driver_register(&hisi_l3c_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);

	return ret;
}
960 
/* Module exit: unregister the driver, then drop the shared hotplug state. */
static void __exit hisi_l3c_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_l3c_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
}
967 
968 MODULE_IMPORT_NS("HISI_PMU");
969 MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
970 MODULE_LICENSE("GPL v2");
971 MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
972 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
973