xref: /linux/drivers/perf/marvell_cn10k_ddr_pmu.c (revision f96a974170b749e3a56844e25b31d46a7233b6f6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
4  *
5  * Copyright (C) 2021-2024 Marvell.
6  */
7 
8 #include <linux/init.h>
9 #include <linux/io.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/perf_event.h>
13 #include <linux/hrtimer.h>
14 #include <linux/acpi.h>
15 #include <linux/platform_device.h>
16 
17 /* Performance Counters Operating Mode Control Registers */
18 #define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL	0x8020
19 #define ODY_DDRC_PERF_CNT_OP_MODE_CTRL		0x20020
20 #define OP_MODE_CTRL_VAL_MANUAL	0x1
21 
22 /* Performance Counters Start Operation Control Registers */
23 #define CN10K_DDRC_PERF_CNT_START_OP_CTRL	0x8028
24 #define ODY_DDRC_PERF_CNT_START_OP_CTRL		0x200A0
25 #define START_OP_CTRL_VAL_START		0x1ULL
26 #define START_OP_CTRL_VAL_ACTIVE	0x2
27 
28 /* Performance Counters End Operation Control Registers */
29 #define CN10K_DDRC_PERF_CNT_END_OP_CTRL	0x8030
30 #define ODY_DDRC_PERF_CNT_END_OP_CTRL	0x200E0
31 #define END_OP_CTRL_VAL_END		0x1ULL
32 
33 /* Performance Counters End Status Registers */
34 #define CN10K_DDRC_PERF_CNT_END_STATUS		0x8038
35 #define ODY_DDRC_PERF_CNT_END_STATUS		0x20120
36 #define END_STATUS_VAL_END_TIMER_MODE_END	0x1
37 
38 /* Performance Counters Configuration Registers */
39 #define CN10K_DDRC_PERF_CFG_BASE		0x8040
40 #define ODY_DDRC_PERF_CFG_BASE			0x20160
41 
/* 8 generic event counters + 2 fixed event counters */
43 #define DDRC_PERF_NUM_GEN_COUNTERS	8
44 #define DDRC_PERF_NUM_FIX_COUNTERS	2
45 #define DDRC_PERF_READ_COUNTER_IDX	DDRC_PERF_NUM_GEN_COUNTERS
46 #define DDRC_PERF_WRITE_COUNTER_IDX	(DDRC_PERF_NUM_GEN_COUNTERS + 1)
47 #define DDRC_PERF_NUM_COUNTERS		(DDRC_PERF_NUM_GEN_COUNTERS + \
48 					 DDRC_PERF_NUM_FIX_COUNTERS)
49 
50 /* Generic event counter registers */
51 #define DDRC_PERF_CFG(base, n)		((base) + 8 * (n))
52 #define EVENT_ENABLE			BIT_ULL(63)
53 
54 /* Two dedicated event counters for DDR reads and writes */
55 #define EVENT_DDR_READS			101
56 #define EVENT_DDR_WRITES		100
57 
58 #define DDRC_PERF_REG(base, n)		((base) + 8 * (n))
/*
 * Programmable event IDs for the generic (programmable) event counters.
 * DO NOT change these event-id numbers; they are used to
 * program the event bitmap in h/w.
 */
64 #define EVENT_DFI_CMD_IS_RETRY			61
65 #define EVENT_RD_UC_ECC_ERROR			60
66 #define EVENT_RD_CRC_ERROR			59
67 #define EVENT_CAPAR_ERROR			58
68 #define EVENT_WR_CRC_ERROR			57
69 #define EVENT_DFI_PARITY_POISON			56
70 #define EVENT_RETRY_FIFO_FULL			46
71 #define EVENT_DFI_CYCLES			45
72 
73 #define EVENT_OP_IS_ZQLATCH			55
74 #define EVENT_OP_IS_ZQSTART			54
75 #define EVENT_OP_IS_TCR_MRR			53
76 #define EVENT_OP_IS_DQSOSC_MRR			52
77 #define EVENT_OP_IS_DQSOSC_MPC			51
78 #define EVENT_VISIBLE_WIN_LIMIT_REACHED_WR	50
79 #define EVENT_VISIBLE_WIN_LIMIT_REACHED_RD	49
80 #define EVENT_BSM_STARVATION			48
81 #define EVENT_BSM_ALLOC				47
82 #define EVENT_LPR_REQ_WITH_NOCREDIT		46
83 #define EVENT_HPR_REQ_WITH_NOCREDIT		45
84 #define EVENT_OP_IS_ZQCS			44
85 #define EVENT_OP_IS_ZQCL			43
86 #define EVENT_OP_IS_LOAD_MODE			42
87 #define EVENT_OP_IS_SPEC_REF			41
88 #define EVENT_OP_IS_CRIT_REF			40
89 #define EVENT_OP_IS_REFRESH			39
90 #define EVENT_OP_IS_ENTER_MPSM			35
91 #define EVENT_OP_IS_ENTER_POWERDOWN		31
92 #define EVENT_OP_IS_ENTER_SELFREF		27
93 #define EVENT_WAW_HAZARD			26
94 #define EVENT_RAW_HAZARD			25
95 #define EVENT_WAR_HAZARD			24
96 #define EVENT_WRITE_COMBINE			23
97 #define EVENT_RDWR_TRANSITIONS			22
98 #define EVENT_PRECHARGE_FOR_OTHER		21
99 #define EVENT_PRECHARGE_FOR_RDWR		20
100 #define EVENT_OP_IS_PRECHARGE			19
101 #define EVENT_OP_IS_MWR				18
102 #define EVENT_OP_IS_WR				17
103 #define EVENT_OP_IS_RD				16
104 #define EVENT_OP_IS_RD_ACTIVATE			15
105 #define EVENT_OP_IS_RD_OR_WR			14
106 #define EVENT_OP_IS_ACTIVATE			13
107 #define EVENT_WR_XACT_WHEN_CRITICAL		12
108 #define EVENT_LPR_XACT_WHEN_CRITICAL		11
109 #define EVENT_HPR_XACT_WHEN_CRITICAL		10
110 #define EVENT_DFI_RD_DATA_CYCLES		9
111 #define EVENT_DFI_WR_DATA_CYCLES		8
112 #define EVENT_ACT_BYPASS			7
113 #define EVENT_READ_BYPASS			6
114 #define EVENT_HIF_HI_PRI_RD			5
115 #define EVENT_HIF_RMW				4
116 #define EVENT_HIF_RD				3
117 #define EVENT_HIF_WR				2
118 #define EVENT_HIF_RD_OR_WR			1
119 
120 /* Event counter value registers */
121 #define CN10K_DDRC_PERF_CNT_VALUE_BASE	0x8080
122 #define ODY_DDRC_PERF_CNT_VALUE_BASE	0x201C0
123 
124 /* Fixed event counter enable/disable register */
125 #define CN10K_DDRC_PERF_CNT_FREERUN_EN		0x80C0
126 #define DDRC_PERF_FREERUN_WRITE_EN	0x1
127 #define DDRC_PERF_FREERUN_READ_EN	0x2
128 
129 /* Fixed event counter control register */
130 #define CN10K_DDRC_PERF_CNT_FREERUN_CTRL	0x80C8
131 #define ODY_DDRC_PERF_CNT_FREERUN_CTRL		0x20240
132 #define DDRC_FREERUN_WRITE_CNT_CLR	0x1
133 #define DDRC_FREERUN_READ_CNT_CLR	0x2
134 
135 /* Fixed event counter clear register, defined only for Odyssey */
136 #define ODY_DDRC_PERF_CNT_FREERUN_CLR  0x20248
137 
138 #define DDRC_PERF_CNT_VALUE_OVERFLOW	BIT_ULL(48)
139 #define DDRC_PERF_CNT_MAX_VALUE		GENMASK_ULL(48, 0)
140 
141 /* Fixed event counter value register */
142 #define CN10K_DDRC_PERF_CNT_VALUE_WR_OP		0x80D0
143 #define CN10K_DDRC_PERF_CNT_VALUE_RD_OP		0x80D8
144 #define ODY_DDRC_PERF_CNT_VALUE_WR_OP		0x20250
145 #define ODY_DDRC_PERF_CNT_VALUE_RD_OP		0x20258
146 
/*
 * Per-instance PMU state: one per DDR controller, embedding the core
 * "struct pmu" registered with the perf framework.
 */
struct cn10k_ddr_pmu {
	struct pmu pmu;
	void __iomem *base;	/* mapped DDRC PMU register window */
	const struct ddr_pmu_platform_data *p_data; /* SoC register layout */
	const struct ddr_pmu_ops *ops;	/* SoC-specific free-run/overflow hooks */
	unsigned int cpu;	/* CPU that owns this PMU's events */
	struct	device *dev;
	int active_events;	/* number of currently-added events */
	struct perf_event *events[DDRC_PERF_NUM_COUNTERS]; /* counter idx -> event */
	struct hrtimer hrtimer;	/* periodic overflow-poll timer */
	struct hlist_node node;	/* CPU-hotplug notifier linkage */
};
159 
/*
 * SoC-specific callbacks: CN10K and Odyssey program their fixed
 * (free-run) read/write counters and recover from generic-counter
 * overflow through different registers/sequences.
 */
struct ddr_pmu_ops {
	void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					    bool enable);
	void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
					     bool enable);
	void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
	void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
};
169 
170 #define to_cn10k_ddr_pmu(p)	container_of(p, struct cn10k_ddr_pmu, pmu)
171 
/*
 * SoC-specific register offsets and counter geometry.  CN10K and
 * Odyssey expose equivalent counters at different offsets.
 */
struct ddr_pmu_platform_data {
	u64 counter_overflow_val;	/* overflow bit for a counter value */
	u64 counter_max_val;		/* counter width mask / maximum value */
	u64 cnt_base;			/* generic counter value base offset */
	u64 cfg_base;			/* generic counter config base offset */
	u64 cnt_op_mode_ctrl;
	u64 cnt_start_op_ctrl;
	u64 cnt_end_op_ctrl;
	u64 cnt_end_status;
	u64 cnt_freerun_en;		/* used by CN10K free-run ops only */
	u64 cnt_freerun_ctrl;
	u64 cnt_freerun_clr;		/* Odyssey only; 0 on CN10K */
	u64 cnt_value_wr_op;		/* fixed write-counter value register */
	u64 cnt_value_rd_op;		/* fixed read-counter value register */
	bool is_cn10k;
	bool is_ody;
};
189 
190 static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
191 					struct device_attribute *attr,
192 					char *page)
193 {
194 	struct perf_pmu_events_attr *pmu_attr;
195 
196 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
197 	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
198 
199 }
200 
201 #define CN10K_DDR_PMU_EVENT_ATTR(_name, _id)				     \
202 	PMU_EVENT_ATTR_ID(_name, cn10k_ddr_pmu_event_show, _id)
203 
/*
 * Event list exposed via sysfs on CN10K.  Names are user-visible ABI;
 * do not rename entries.
 */
static struct attribute *cn10k_ddr_perf_events_attrs[] = {
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
	/*
	 * NOTE(review): "dif" looks like a typo for "dfi" (the Odyssey table
	 * spells these ddr_dfi_*), but it is established sysfs ABI -- keep.
	 */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_wr_data_access, EVENT_DFI_WR_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dif_rd_data_access, EVENT_DFI_RD_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
					EVENT_HPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
					EVENT_LPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
					EVENT_WR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, EVENT_OP_IS_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, EVENT_OP_IS_RD_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, EVENT_PRECHARGE_FOR_RDWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
					EVENT_PRECHARGE_FOR_OTHER),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, EVENT_OP_IS_ENTER_POWERDOWN),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
	/*
	 * NOTE(review): name says cam_wr_access but the id is
	 * EVENT_OP_IS_ZQCS -- looks mismatched, kept as ABI; confirm
	 * against h/w documentation.
	 */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hpr_req_with_nocredit,
					EVENT_HPR_REQ_WITH_NOCREDIT),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_lpr_req_with_nocredit,
					EVENT_LPR_REQ_WITH_NOCREDIT),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
					EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
					EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
	/* Free run event counters */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
	NULL
};
264 
/*
 * Event list exposed via sysfs on Odyssey: the CN10K set plus the
 * RAS/retry events (ids 45..46 and 56..61).  Names are user-visible ABI.
 */
static struct attribute *odyssey_ddr_perf_events_attrs[] = {
	/* Programmable */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access,
				 EVENT_DFI_WR_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access,
				 EVENT_DFI_RD_DATA_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
				 EVENT_HPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
				 EVENT_LPR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
				 EVENT_WR_XACT_WHEN_CRITICAL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access,
				 EVENT_OP_IS_RD_OR_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access,
				 EVENT_OP_IS_RD_ACTIVATE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr,
				 EVENT_PRECHARGE_FOR_RDWR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
				 EVENT_PRECHARGE_FOR_OTHER),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown,
				 EVENT_OP_IS_ENTER_POWERDOWN),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
	/* NOTE(review): name/id mismatch mirrored from the CN10K table (ABI) */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full,
				 EVENT_RETRY_FIFO_FULL),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
				 EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
				 EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison,
				 EVENT_DFI_PARITY_POISON),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY),
	/* Free run event counters */
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
	CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
	NULL
};
338 
/* sysfs "events" group wrappers for the two SoC variants */
static struct attribute_group odyssey_ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = odyssey_ddr_perf_events_attrs,
};

static struct attribute_group cn10k_ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = cn10k_ddr_perf_events_attrs,
};
348 
/* Event id occupies config bits [8:0]; exposed via the "format" group */
PMU_FORMAT_ATTR(event, "config:0-8");

static struct attribute *cn10k_ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group cn10k_ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = cn10k_ddr_perf_format_attrs,
};
360 
361 static ssize_t cn10k_ddr_perf_cpumask_show(struct device *dev,
362 					   struct device_attribute *attr,
363 					   char *buf)
364 {
365 	struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev);
366 
367 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
368 }
369 
/* Read-only "cpumask" attribute plumbing */
static struct device_attribute cn10k_ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, cn10k_ddr_perf_cpumask_show, NULL);

static struct attribute *cn10k_ddr_perf_cpumask_attrs[] = {
	&cn10k_ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group cn10k_ddr_perf_cpumask_attr_group = {
	.attrs = cn10k_ddr_perf_cpumask_attrs,
};
381 
/* Per-variant sysfs group sets; only the "events" group differs */
static const struct attribute_group *cn10k_attr_groups[] = {
	&cn10k_ddr_perf_events_attr_group,
	&cn10k_ddr_perf_format_attr_group,
	&cn10k_ddr_perf_cpumask_attr_group,
	NULL,
};

static const struct attribute_group *odyssey_attr_groups[] = {
	&odyssey_ddr_perf_events_attr_group,
	&cn10k_ddr_perf_format_attr_group,
	&cn10k_ddr_perf_cpumask_attr_group,
	NULL
};
395 
/*
 * Default poll timeout is 100 sec, which is more than sufficient for a
 * 48-bit counter incremented at a maximum rate of 5.6 GT/s, which would
 * take many hours to overflow.
 */
/* Overflow-poll interval in seconds; writable via module parameter */
static unsigned long cn10k_ddr_pmu_poll_period_sec = 100;
module_param_named(poll_period_sec, cn10k_ddr_pmu_poll_period_sec, ulong, 0644);
402 
403 static ktime_t cn10k_ddr_pmu_timer_period(void)
404 {
405 	return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
406 }
407 
/*
 * Translate a perf config event id into the bitmap value programmed into
 * a generic counter's config register.
 *
 * Returns 0 and fills @event_bitmap on success, or -EINVAL for ids that
 * are unknown or not supported on this silicon.
 */
static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
				     struct cn10k_ddr_pmu *ddr_pmu)
{
	int err = 0;

	switch (eventid) {
	/* Ids 56..61 exist only on Odyssey silicon */
	case EVENT_DFI_PARITY_POISON ...EVENT_DFI_CMD_IS_RETRY:
		if (!ddr_pmu->p_data->is_ody) {
			err = -EINVAL;
			break;
		}
		fallthrough;
	case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
	case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
		/* Single-bit events: bit (id - 1) selects the event */
		*event_bitmap = (1ULL << (eventid - 1));
		break;
	case EVENT_OP_IS_ENTER_SELFREF:
	case EVENT_OP_IS_ENTER_POWERDOWN:
	case EVENT_OP_IS_ENTER_MPSM:
		/* Low-power entry events set a 4-bit field at bit (id - 1) */
		*event_bitmap = (0xFULL << (eventid - 1));
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		pr_err("%s Invalid eventid %d\n", __func__, eventid);
	return err;
}
437 
438 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
439 					struct perf_event *event)
440 {
441 	u8 config = event->attr.config;
442 	int i;
443 
444 	/* DDR read free-run counter index */
445 	if (config == EVENT_DDR_READS) {
446 		pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event;
447 		return DDRC_PERF_READ_COUNTER_IDX;
448 	}
449 
450 	/* DDR write free-run counter index */
451 	if (config == EVENT_DDR_WRITES) {
452 		pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event;
453 		return DDRC_PERF_WRITE_COUNTER_IDX;
454 	}
455 
456 	/* Allocate DDR generic counters */
457 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
458 		if (pmu->events[i] == NULL) {
459 			pmu->events[i] = event;
460 			return i;
461 		}
462 	}
463 
464 	return -ENOENT;
465 }
466 
/* Release a slot claimed by cn10k_ddr_perf_alloc_counter() */
static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}
471 
/*
 * perf core ->event_init() callback: validate the event and bind it to
 * this PMU's owning CPU.  Sampling and per-task counting are not
 * supported; groups must not mix PMUs (software events excepted).
 */
static int cn10k_ddr_perf_event_init(struct perf_event *event)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Not ours if the type doesn't match this registered PMU */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event)) {
		dev_info(pmu->dev, "Sampling not supported!\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*  We must NOT create groups containing mixed PMUs */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	/* Set ownership of event to one CPU, same event can not be observed
	 * on multiple cpus at same time.
	 */
	event->cpu = pmu->cpu;
	hwc->idx = -1;	/* no counter assigned until ->add() */
	return 0;
}
502 
/* Start one counter by writing START to its per-counter start-op register */
static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
					 int counter)
{
	const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
	u64 ctrl_reg = p_data->cnt_start_op_ctrl;

	writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
		       DDRC_PERF_REG(ctrl_reg, counter));
}
512 
/* Stop one counter by writing END to its per-counter end-op register */
static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
					int counter)
{
	const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
	u64 ctrl_reg = p_data->cnt_end_op_ctrl;

	writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
		       DDRC_PERF_REG(ctrl_reg, counter));
}
522 
523 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
524 					  int counter, bool enable)
525 {
526 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
527 	u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl;
528 	const struct ddr_pmu_ops *ops = pmu->ops;
529 	bool is_ody = pmu->p_data->is_ody;
530 	u32 reg;
531 	u64 val;
532 
533 	if (counter > DDRC_PERF_NUM_COUNTERS) {
534 		pr_err("Error: unsupported counter %d\n", counter);
535 		return;
536 	}
537 
538 	if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
539 		reg = DDRC_PERF_CFG(p_data->cfg_base, counter);
540 		val = readq_relaxed(pmu->base + reg);
541 
542 		if (enable)
543 			val |= EVENT_ENABLE;
544 		else
545 			val &= ~EVENT_ENABLE;
546 
547 		writeq_relaxed(val, pmu->base + reg);
548 
549 		if (is_ody) {
550 			if (enable) {
551 				/*
552 				 * Setup the PMU counter to work in
553 				 * manual mode
554 				 */
555 				reg = DDRC_PERF_REG(ctrl_reg, counter);
556 				writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL,
557 					       pmu->base + reg);
558 
559 				cn10k_ddr_perf_counter_start(pmu, counter);
560 			} else {
561 				cn10k_ddr_perf_counter_stop(pmu, counter);
562 			}
563 		}
564 	} else {
565 		if (counter == DDRC_PERF_READ_COUNTER_IDX)
566 			ops->enable_read_freerun_counter(pmu, enable);
567 		else
568 			ops->enable_write_freerun_counter(pmu, enable);
569 	}
570 }
571 
572 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
573 {
574 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
575 	u64 val;
576 
577 	if (counter == DDRC_PERF_READ_COUNTER_IDX)
578 		return readq_relaxed(pmu->base +
579 				     p_data->cnt_value_rd_op);
580 
581 	if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
582 		return readq_relaxed(pmu->base +
583 				     p_data->cnt_value_wr_op);
584 
585 	val = readq_relaxed(pmu->base +
586 			    DDRC_PERF_REG(p_data->cnt_base, counter));
587 	return val;
588 }
589 
/*
 * Fold the h/w counter delta since the last snapshot into event->count.
 * The xchg loop retries until the snapshot in hwc->prev_count is
 * advanced without losing a concurrent update.
 */
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_count, new_count, mask;

	do {
		prev_count = local64_read(&hwc->prev_count);
		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
	} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);

	/* Mask to counter width so a wrapped counter still yields the delta */
	mask = p_data->counter_max_val;

	local64_add((new_count - prev_count) & mask, &event->count);
}
606 
/* perf core ->start(): zero the snapshot and enable the h/w counter */
static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	cn10k_ddr_perf_counter_enable(pmu, counter, true);

	hwc->state = 0;
}
619 
620 static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
621 {
622 	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
623 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
624 	const struct ddr_pmu_ops *ops = pmu->ops;
625 	struct hw_perf_event *hwc = &event->hw;
626 	u8 config = event->attr.config;
627 	int counter, ret;
628 	u32 reg_offset;
629 	u64 val;
630 
631 	counter = cn10k_ddr_perf_alloc_counter(pmu, event);
632 	if (counter < 0)
633 		return -EAGAIN;
634 
635 	pmu->active_events++;
636 	hwc->idx = counter;
637 
638 	if (pmu->active_events == 1)
639 		hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(),
640 			      HRTIMER_MODE_REL_PINNED);
641 
642 	if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
643 		/* Generic counters, configure event id */
644 		reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter);
645 		ret = ddr_perf_get_event_bitmap(config, &val, pmu);
646 		if (ret)
647 			return ret;
648 
649 		writeq_relaxed(val, pmu->base + reg_offset);
650 	} else {
651 		/* fixed event counter, clear counter value */
652 		if (counter == DDRC_PERF_READ_COUNTER_IDX)
653 			ops->clear_read_freerun_counter(pmu);
654 		else
655 			ops->clear_write_freerun_counter(pmu);
656 	}
657 
658 	hwc->state |= PERF_HES_STOPPED;
659 
660 	if (flags & PERF_EF_START)
661 		cn10k_ddr_perf_event_start(event, flags);
662 
663 	return 0;
664 }
665 
/* perf core ->stop(): disable the counter, optionally folding in the delta */
static void cn10k_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	cn10k_ddr_perf_counter_enable(pmu, counter, false);

	if (flags & PERF_EF_UPDATE)
		cn10k_ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}
679 
/*
 * perf core ->del(): stop the event (folding in the final delta),
 * release its counter slot, and cancel the poll timer when this was the
 * last active event.
 */
static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	cn10k_ddr_perf_event_stop(event, PERF_EF_UPDATE);

	cn10k_ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;

	/* Cancel timer when no events to capture */
	if (pmu->active_events == 0)
		hrtimer_cancel(&pmu->hrtimer);
}
696 
/* perf core ->pmu_enable(): write START to the instance start-op register */
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
	const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;

	writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
		       p_data->cnt_start_op_ctrl);
}
705 
/* perf core ->pmu_disable(): write END to the instance end-op register */
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
	const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;

	writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
		       p_data->cnt_end_op_ctrl);
}
714 
715 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
716 {
717 	struct hw_perf_event *hwc;
718 	int i;
719 
720 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
721 		if (pmu->events[i] == NULL)
722 			continue;
723 
724 		cn10k_ddr_perf_event_update(pmu->events[i]);
725 	}
726 
727 	/* Reset previous count as h/w counter are reset */
728 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
729 		if (pmu->events[i] == NULL)
730 			continue;
731 
732 		hwc = &pmu->events[i]->hw;
733 		local64_set(&hwc->prev_count, 0);
734 	}
735 }
736 
737 static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
738 {
739 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
740 	u64 val;
741 
742 	val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
743 	if (enable)
744 		val |= DDRC_PERF_FREERUN_READ_EN;
745 	else
746 		val &= ~DDRC_PERF_FREERUN_READ_EN;
747 
748 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
749 }
750 
751 static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
752 {
753 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
754 	u64 val;
755 
756 	val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
757 	if (enable)
758 		val |= DDRC_PERF_FREERUN_WRITE_EN;
759 	else
760 		val &= ~DDRC_PERF_FREERUN_WRITE_EN;
761 
762 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
763 }
764 
765 static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
766 {
767 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
768 	u64 val;
769 
770 	val = DDRC_FREERUN_READ_CNT_CLR;
771 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
772 }
773 
774 static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
775 {
776 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
777 	u64 val;
778 
779 	val = DDRC_FREERUN_WRITE_CNT_CLR;
780 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
781 }
782 
/*
 * CN10K overflow recovery: counters restart when the whole PMU is
 * stopped and started, so fold in all generic counters first.
 * ("hander" is a historical typo; the name is referenced from
 * ddr_pmu_ops, so it is kept.)
 */
static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
	cn10k_ddr_perf_event_update_all(pmu);
	cn10k_ddr_perf_pmu_disable(&pmu->pmu);
	cn10k_ddr_perf_pmu_enable(&pmu->pmu);
}
789 
790 static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
791 					    bool enable)
792 {
793 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
794 	u64 val;
795 
796 	val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
797 	if (enable)
798 		val |= DDRC_PERF_FREERUN_READ_EN;
799 	else
800 		val &= ~DDRC_PERF_FREERUN_READ_EN;
801 
802 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
803 }
804 
805 static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
806 					     bool enable)
807 {
808 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
809 	u64 val;
810 
811 	val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
812 	if (enable)
813 		val |= DDRC_PERF_FREERUN_WRITE_EN;
814 	else
815 		val &= ~DDRC_PERF_FREERUN_WRITE_EN;
816 
817 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
818 }
819 
820 static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
821 {
822 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
823 	u64 val;
824 
825 	val = DDRC_FREERUN_READ_CNT_CLR;
826 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
827 }
828 
829 static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
830 {
831 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
832 	u64 val;
833 
834 	val = DDRC_FREERUN_WRITE_CNT_CLR;
835 	writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
836 }
837 
/* Odyssey overflow recovery: restart just the saturated counter */
static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
{
	/*
	 * On reaching the maximum value of the counter, the counter freezes
	 * there. The particular event is updated and the respective counter
	 * is stopped and started again so that it starts counting from zero
	 */
	cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
	cn10k_ddr_perf_counter_stop(pmu, evt_idx);
	cn10k_ddr_perf_counter_start(pmu, evt_idx);
}
849 
850 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
851 {
852 	const struct ddr_pmu_platform_data *p_data = pmu->p_data;
853 	const struct ddr_pmu_ops *ops = pmu->ops;
854 	struct perf_event *event;
855 	struct hw_perf_event *hwc;
856 	u64 prev_count, new_count;
857 	u64 value;
858 	int i;
859 
860 	event = pmu->events[DDRC_PERF_READ_COUNTER_IDX];
861 	if (event) {
862 		hwc = &event->hw;
863 		prev_count = local64_read(&hwc->prev_count);
864 		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
865 
866 		/* Overflow condition is when new count less than
867 		 * previous count
868 		 */
869 		if (new_count < prev_count)
870 			cn10k_ddr_perf_event_update(event);
871 	}
872 
873 	event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX];
874 	if (event) {
875 		hwc = &event->hw;
876 		prev_count = local64_read(&hwc->prev_count);
877 		new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
878 
879 		/* Overflow condition is when new count less than
880 		 * previous count
881 		 */
882 		if (new_count < prev_count)
883 			cn10k_ddr_perf_event_update(event);
884 	}
885 
886 	for (i = 0; i < DDRC_PERF_NUM_GEN_COUNTERS; i++) {
887 		if (pmu->events[i] == NULL)
888 			continue;
889 
890 		value = cn10k_ddr_perf_read_counter(pmu, i);
891 		if (value == p_data->counter_max_val) {
892 			pr_info("Counter-(%d) reached max value\n", i);
893 			ops->pmu_overflow_handler(pmu, i);
894 		}
895 	}
896 
897 	return IRQ_HANDLED;
898 }
899 
900 static enum hrtimer_restart cn10k_ddr_pmu_timer_handler(struct hrtimer *hrtimer)
901 {
902 	struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu,
903 						 hrtimer);
904 	unsigned long flags;
905 
906 	local_irq_save(flags);
907 	cn10k_ddr_pmu_overflow_handler(pmu);
908 	local_irq_restore(flags);
909 
910 	hrtimer_forward_now(hrtimer, cn10k_ddr_pmu_timer_period());
911 	return HRTIMER_RESTART;
912 }
913 
914 static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
915 {
916 	struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu,
917 						     node);
918 	unsigned int target;
919 
920 	if (cpu != pmu->cpu)
921 		return 0;
922 
923 	target = cpumask_any_but(cpu_online_mask, cpu);
924 	if (target >= nr_cpu_ids)
925 		return 0;
926 
927 	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
928 	pmu->cpu = target;
929 	return 0;
930 }
931 
/* CN10K flavor of the platform-specific counter operations */
static const struct ddr_pmu_ops ddr_pmu_ops = {
	.enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
	.enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
	.clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
	.clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
	.pmu_overflow_handler = ddr_pmu_overflow_hander,
};
939 
#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
/*
 * CN10K register layout and counter properties.  Generic counters are
 * 48 bits wide; unused registers on this flavor (cnt_freerun_clr) are 0.
 */
static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
	.counter_overflow_val =  BIT_ULL(48),
	.counter_max_val = GENMASK_ULL(48, 0),
	.cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
	.cfg_base = CN10K_DDRC_PERF_CFG_BASE,
	.cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
	.cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
	.cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
	.cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
	.cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
	.cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
	.cnt_freerun_clr = 0,
	.cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
	.cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
	.is_cn10k = TRUE,
};
#endif
958 
/* Odyssey flavor of the platform-specific counter operations */
static const struct ddr_pmu_ops ddr_pmu_ody_ops = {
	.enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun,
	.enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun,
	.clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun,
	.clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun,
	.pmu_overflow_handler = ddr_pmu_ody_overflow_hander,
};
966 
#ifdef CONFIG_ACPI
/*
 * Odyssey register layout and counter properties.  Generic counters are
 * full 64 bits wide (no overflow value; they freeze at the max), and the
 * free-running enable register does not exist on this flavor.
 */
static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = {
	.counter_overflow_val = 0,
	.counter_max_val = GENMASK_ULL(63, 0),
	.cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE,
	.cfg_base = ODY_DDRC_PERF_CFG_BASE,
	.cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL,
	.cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL,
	.cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL,
	.cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS,
	.cnt_freerun_en = 0,
	.cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL,
	.cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR,
	.cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP,
	.cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP,
	.is_ody = TRUE,
};
#endif
985 
986 static int cn10k_ddr_perf_probe(struct platform_device *pdev)
987 {
988 	const struct ddr_pmu_platform_data *dev_data;
989 	struct cn10k_ddr_pmu *ddr_pmu;
990 	struct resource *res;
991 	void __iomem *base;
992 	bool is_cn10k;
993 	bool is_ody;
994 	char *name;
995 	int ret;
996 
997 	ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL);
998 	if (!ddr_pmu)
999 		return -ENOMEM;
1000 
1001 	ddr_pmu->dev = &pdev->dev;
1002 	platform_set_drvdata(pdev, ddr_pmu);
1003 
1004 	dev_data = device_get_match_data(&pdev->dev);
1005 	if (!dev_data) {
1006 		dev_err(&pdev->dev, "Error: No device match data found\n");
1007 		return -ENODEV;
1008 	}
1009 
1010 	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1011 	if (IS_ERR(base))
1012 		return PTR_ERR(base);
1013 
1014 	ddr_pmu->base = base;
1015 
1016 	ddr_pmu->p_data = dev_data;
1017 	is_cn10k = ddr_pmu->p_data->is_cn10k;
1018 	is_ody = ddr_pmu->p_data->is_ody;
1019 
1020 	if (is_cn10k) {
1021 		ddr_pmu->ops = &ddr_pmu_ops;
1022 		/* Setup the PMU counter to work in manual mode */
1023 		writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
1024 			       ddr_pmu->p_data->cnt_op_mode_ctrl);
1025 
1026 		ddr_pmu->pmu = (struct pmu) {
1027 			.module	      = THIS_MODULE,
1028 			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1029 			.task_ctx_nr = perf_invalid_context,
1030 			.attr_groups = cn10k_attr_groups,
1031 			.event_init  = cn10k_ddr_perf_event_init,
1032 			.add	     = cn10k_ddr_perf_event_add,
1033 			.del	     = cn10k_ddr_perf_event_del,
1034 			.start	     = cn10k_ddr_perf_event_start,
1035 			.stop	     = cn10k_ddr_perf_event_stop,
1036 			.read	     = cn10k_ddr_perf_event_update,
1037 			.pmu_enable  = cn10k_ddr_perf_pmu_enable,
1038 			.pmu_disable = cn10k_ddr_perf_pmu_disable,
1039 		};
1040 	}
1041 
1042 	if (is_ody) {
1043 		ddr_pmu->ops = &ddr_pmu_ody_ops;
1044 
1045 		ddr_pmu->pmu = (struct pmu) {
1046 			.module       = THIS_MODULE,
1047 			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1048 			.task_ctx_nr = perf_invalid_context,
1049 			.attr_groups = odyssey_attr_groups,
1050 			.event_init  = cn10k_ddr_perf_event_init,
1051 			.add         = cn10k_ddr_perf_event_add,
1052 			.del         = cn10k_ddr_perf_event_del,
1053 			.start       = cn10k_ddr_perf_event_start,
1054 			.stop        = cn10k_ddr_perf_event_stop,
1055 			.read        = cn10k_ddr_perf_event_update,
1056 		};
1057 	}
1058 
1059 	/* Choose this cpu to collect perf data */
1060 	ddr_pmu->cpu = raw_smp_processor_id();
1061 
1062 	name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "mrvl_ddr_pmu_%llx",
1063 			      res->start);
1064 	if (!name)
1065 		return -ENOMEM;
1066 
1067 	hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1068 	ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
1069 
1070 	cpuhp_state_add_instance_nocalls(
1071 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
1072 				&ddr_pmu->node);
1073 
1074 	ret = perf_pmu_register(&ddr_pmu->pmu, name, -1);
1075 	if (ret)
1076 		goto error;
1077 
1078 	pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);
1079 	return 0;
1080 error:
1081 	cpuhp_state_remove_instance_nocalls(
1082 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
1083 				&ddr_pmu->node);
1084 	return ret;
1085 }
1086 
/*
 * Remove callback: detach the CPU-hotplug instance, then unregister the
 * PMU.  Mapped registers and the pmu struct are devm-managed and freed by
 * the driver core.
 */
static void cn10k_ddr_perf_remove(struct platform_device *pdev)
{
	struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(
				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
				&ddr_pmu->node);

	perf_pmu_unregister(&ddr_pmu->pmu);
}
1097 
#ifdef CONFIG_OF
/* Device-tree match: only the CN10K flavor is probed via OF */
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
	{ .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
	{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif
1105 
#ifdef CONFIG_ACPI
/* ACPI match: MRVL000A selects CN10K, MRVL000C selects Odyssey */
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
	{"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata },
	{"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata},
	{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
#endif
1114 
static struct platform_driver cn10k_ddr_pmu_driver = {
	.driver	= {
		.name   = "cn10k-ddr-pmu",
		.of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
		.acpi_match_table  = ACPI_PTR(cn10k_ddr_pmu_acpi_match),
		/* PMU devices must not be unbound via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe		= cn10k_ddr_perf_probe,
	.remove		= cn10k_ddr_perf_remove,
};
1125 
1126 static int __init cn10k_ddr_pmu_init(void)
1127 {
1128 	int ret;
1129 
1130 	ret = cpuhp_setup_state_multi(
1131 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
1132 				"perf/marvell/cn10k/ddr:online", NULL,
1133 				cn10k_ddr_pmu_offline_cpu);
1134 	if (ret)
1135 		return ret;
1136 
1137 	ret = platform_driver_register(&cn10k_ddr_pmu_driver);
1138 	if (ret)
1139 		cpuhp_remove_multi_state(
1140 				CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
1141 	return ret;
1142 }
1143 
/* Module exit: unregister the driver, then drop the hotplug state */
static void __exit cn10k_ddr_pmu_exit(void)
{
	platform_driver_unregister(&cn10k_ddr_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE);
}
1149 
1150 module_init(cn10k_ddr_pmu_init);
1151 module_exit(cn10k_ddr_pmu_exit);
1152 
1153 MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
1154 MODULE_DESCRIPTION("Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver");
1155 MODULE_LICENSE("GPL v2");
1156