xref: /linux/drivers/pci/controller/dwc/pcie-designware-debugfs.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Synopsys DesignWare PCIe controller debugfs driver
4  *
5  * Copyright (C) 2025 Samsung Electronics Co., Ltd.
6  *		 http://www.samsung.com
7  *
8  * Author: Shradha Todi <shradha.t@samsung.com>
9  */
10 
11 #include <linux/debugfs.h>
12 
13 #include "pcie-designware.h"
14 
/* Silicon Debug: lane status/select register and its fields */
#define SD_STATUS_L1LANE_REG		0xb0
#define PIPE_RXVALID			BIT(18)
#define PIPE_DETECT_LANE		BIT(17)
#define LANE_SELECT			GENMASK(3, 0)

/*
 * Error Injection control register for group 0; registers for groups 1-5
 * follow at a 0x4 stride (see ERR_INJ0_OFF + 0x4 * group in err_inj_write()).
 */
#define ERR_INJ0_OFF			0x34
#define EINJ_VAL_DIFF			GENMASK(28, 16)
#define EINJ_VC_NUM			GENMASK(14, 12)
#define EINJ_TYPE_SHIFT			8
/* Width of the error-type field differs per group; indexed via err_inj_type_mask[] */
#define EINJ0_TYPE			GENMASK(11, 8)
#define EINJ1_TYPE			BIT(8)
#define EINJ2_TYPE			GENMASK(9, 8)
#define EINJ3_TYPE			GENMASK(10, 8)
#define EINJ4_TYPE			GENMASK(10, 8)
#define EINJ5_TYPE			BIT(8)
#define EINJ_COUNT			GENMASK(7, 0)

/* One enable bit per error-injection group */
#define ERR_INJ_ENABLE_REG		0x30

/* Data register of the currently selected event counter */
#define RAS_DES_EVENT_COUNTER_DATA_REG	0xc

/* Shared control register used to select and enable event counters */
#define RAS_DES_EVENT_COUNTER_CTRL_REG	0x8
#define EVENT_COUNTER_GROUP_SELECT	GENMASK(27, 24)
#define EVENT_COUNTER_EVENT_SELECT	GENMASK(23, 16)
#define EVENT_COUNTER_LANE_SELECT	GENMASK(11, 8)
#define EVENT_COUNTER_STATUS		BIT(7)
#define EVENT_COUNTER_ENABLE		GENMASK(4, 2)
#define PER_EVENT_ON			0x3
#define PER_EVENT_OFF			0x1

#define DWC_DEBUGFS_BUF_MAX		128
46 
/**
 * struct dwc_pcie_rasdes_info - Stores controller common information
 * @ras_cap_offset: RAS DES vendor specific extended capability offset
 * @reg_event_lock: Mutex used for RAS DES shadow event registers
 *
 * Any parameter constant to all files of the debugfs hierarchy for a single
 * controller will be stored in this struct. It is allocated and assigned to
 * controller specific struct dw_pcie during initialization.
 */
struct dwc_pcie_rasdes_info {
	u32 ras_cap_offset;
	/* Serializes select+read/write of the shared event counter ctrl register */
	struct mutex reg_event_lock;
};
60 
/**
 * struct dwc_pcie_rasdes_priv - Stores file specific private data information
 * @pci: Reference to the dw_pcie structure
 * @idx: Index of specific file related information in array of structs
 *
 * All debugfs files will have this struct as its private data.
 */
struct dwc_pcie_rasdes_priv {
	struct dw_pcie *pci;
	/* Index into err_inj_list[] or event_list[], depending on the file */
	int idx;
};
72 
/**
 * struct dwc_pcie_err_inj - Store details about each error injection
 *			     supported by DWC RAS DES
 * @name: Name of the error that can be injected
 * @err_inj_group: Group number to which the error belongs. The value
 *		   can range from 0 to 5
 * @err_inj_type: Each group can have multiple types of error
 */
struct dwc_pcie_err_inj {
	const char *name;
	u32 err_inj_group;
	u32 err_inj_type;
};
86 
/* Table of injectable errors; entry order defines the debugfs file set. */
static const struct dwc_pcie_err_inj err_inj_list[] = {
	/* Group 0: CRC errors */
	{"tx_lcrc", 0x0, 0x0},
	{"b16_crc_dllp", 0x0, 0x1},
	{"b16_crc_upd_fc", 0x0, 0x2},
	{"tx_ecrc", 0x0, 0x3},
	{"fcrc_tlp", 0x0, 0x4},
	{"parity_tsos", 0x0, 0x5},
	{"parity_skpos", 0x0, 0x6},
	{"rx_lcrc", 0x0, 0x8},
	{"rx_ecrc", 0x0, 0xb},
	/* Group 1: sequence number errors (take a signed value diff argument) */
	{"tlp_err_seq", 0x1, 0x0},
	{"ack_nak_dllp_seq", 0x1, 0x1},
	/* Group 2: DLLP errors */
	{"ack_nak_dllp", 0x2, 0x0},
	{"upd_fc_dllp", 0x2, 0x1},
	{"nak_dllp", 0x2, 0x2},
	/* Group 3: symbol errors */
	{"inv_sync_hdr_sym", 0x3, 0x0},
	{"com_pad_ts1", 0x3, 0x1},
	{"com_pad_ts2", 0x3, 0x2},
	{"com_fts", 0x3, 0x3},
	{"com_idl", 0x3, 0x4},
	{"end_edb", 0x3, 0x5},
	{"stp_sdp", 0x3, 0x6},
	{"com_skp", 0x3, 0x7},
	/* Group 4: FC credit errors (take value diff and VC number arguments) */
	{"posted_tlp_hdr", 0x4, 0x0},
	{"non_post_tlp_hdr", 0x4, 0x1},
	{"cmpl_tlp_hdr", 0x4, 0x2},
	{"posted_tlp_data", 0x4, 0x4},
	{"non_post_tlp_data", 0x4, 0x5},
	{"cmpl_tlp_data", 0x4, 0x6},
	/* Group 5: specific TLP errors */
	{"duplicate_tlp", 0x5, 0x0},
	{"nullified_tlp", 0x5, 0x1},
};
119 
/* Error-type field mask of the ERR_INJn register, indexed by group number. */
static const u32 err_inj_type_mask[] = {
	EINJ0_TYPE,
	EINJ1_TYPE,
	EINJ2_TYPE,
	EINJ3_TYPE,
	EINJ4_TYPE,
	EINJ5_TYPE,
};
128 
/**
 * struct dwc_pcie_event_counter - Store details about each event counter
 *				   supported in DWC RAS DES
 * @name: Name of the error counter
 * @group_no: Group number that the event belongs to. The value can range
 *	      from 0 to 7
 * @event_no: Event number of the particular event. The value ranges are:
 *		Group 0: 0 - 10
 *		Group 1: 5 - 13
 *		Group 2: 0 - 7
 *		Group 3: 0 - 5
 *		Group 4: 0 - 1
 *		Group 5: 0 - 13
 *		Group 6: 0 - 6
 *		Group 7: 0 - 25
 */
struct dwc_pcie_event_counter {
	const char *name;
	u32 group_no;
	u32 event_no;
};
150 
/* Table of countable events; entry order defines the debugfs directory set. */
static const struct dwc_pcie_event_counter event_list[] = {
	/* Group 0 events (per-lane; lane_select file is created for these) */
	{"ebuf_overflow", 0x0, 0x0},
	{"ebuf_underrun", 0x0, 0x1},
	{"decode_err", 0x0, 0x2},
	{"running_disparity_err", 0x0, 0x3},
	{"skp_os_parity_err", 0x0, 0x4},
	{"sync_header_err", 0x0, 0x5},
	{"rx_valid_deassertion", 0x0, 0x6},
	{"ctl_skp_os_parity_err", 0x0, 0x7},
	{"retimer_parity_err_1st", 0x0, 0x8},
	{"retimer_parity_err_2nd", 0x0, 0x9},
	{"margin_crc_parity_err", 0x0, 0xA},
	/* Group 1 events */
	{"detect_ei_infer", 0x1, 0x5},
	{"receiver_err", 0x1, 0x6},
	{"rx_recovery_req", 0x1, 0x7},
	{"n_fts_timeout", 0x1, 0x8},
	{"framing_err", 0x1, 0x9},
	{"deskew_err", 0x1, 0xa},
	{"framing_err_in_l0", 0x1, 0xc},
	{"deskew_uncompleted_err", 0x1, 0xd},
	/* Group 2 events */
	{"bad_tlp", 0x2, 0x0},
	{"lcrc_err", 0x2, 0x1},
	{"bad_dllp", 0x2, 0x2},
	{"replay_num_rollover", 0x2, 0x3},
	{"replay_timeout", 0x2, 0x4},
	{"rx_nak_dllp", 0x2, 0x5},
	{"tx_nak_dllp", 0x2, 0x6},
	{"retry_tlp", 0x2, 0x7},
	/* Group 3 events */
	{"fc_timeout", 0x3, 0x0},
	{"poisoned_tlp", 0x3, 0x1},
	{"ecrc_error", 0x3, 0x2},
	{"unsupported_request", 0x3, 0x3},
	{"completer_abort", 0x3, 0x4},
	{"completion_timeout", 0x3, 0x5},
	/* Group 4 events (per-lane; lane_select file is created for these) */
	{"ebuf_skp_add", 0x4, 0x0},
	{"ebuf_skp_del", 0x4, 0x1},
	/* Group 5 events */
	{"l0_to_recovery_entry", 0x5, 0x0},
	{"l1_to_recovery_entry", 0x5, 0x1},
	{"tx_l0s_entry", 0x5, 0x2},
	{"rx_l0s_entry", 0x5, 0x3},
	{"aspm_l1_reject", 0x5, 0x4},
	{"l1_entry", 0x5, 0x5},
	{"l1_cpm", 0x5, 0x6},
	{"l1.1_entry", 0x5, 0x7},
	{"l1.2_entry", 0x5, 0x8},
	{"l1_short_duration", 0x5, 0x9},
	{"l1.2_abort", 0x5, 0xa},
	{"l2_entry", 0x5, 0xb},
	{"speed_change", 0x5, 0xc},
	{"link_width_change", 0x5, 0xd},
	/* Group 6 events */
	{"tx_ack_dllp", 0x6, 0x0},
	{"tx_update_fc_dllp", 0x6, 0x1},
	{"rx_ack_dllp", 0x6, 0x2},
	{"rx_update_fc_dllp", 0x6, 0x3},
	{"rx_nullified_tlp", 0x6, 0x4},
	{"tx_nullified_tlp", 0x6, 0x5},
	{"rx_duplicate_tlp", 0x6, 0x6},
	/* Group 7 events */
	{"tx_memory_write", 0x7, 0x0},
	{"tx_memory_read", 0x7, 0x1},
	{"tx_configuration_write", 0x7, 0x2},
	{"tx_configuration_read", 0x7, 0x3},
	{"tx_io_write", 0x7, 0x4},
	{"tx_io_read", 0x7, 0x5},
	{"tx_completion_without_data", 0x7, 0x6},
	{"tx_completion_w_data", 0x7, 0x7},
	{"tx_message_tlp_pcie_vc_only", 0x7, 0x8},
	{"tx_atomic", 0x7, 0x9},
	{"tx_tlp_with_prefix", 0x7, 0xa},
	{"rx_memory_write", 0x7, 0xb},
	{"rx_memory_read", 0x7, 0xc},
	{"rx_configuration_write", 0x7, 0xd},
	{"rx_configuration_read", 0x7, 0xe},
	{"rx_io_write", 0x7, 0xf},
	{"rx_io_read", 0x7, 0x10},
	{"rx_completion_without_data", 0x7, 0x11},
	{"rx_completion_w_data", 0x7, 0x12},
	{"rx_message_tlp_pcie_vc_only", 0x7, 0x13},
	{"rx_atomic", 0x7, 0x14},
	{"rx_tlp_with_prefix", 0x7, 0x15},
	{"tx_ccix_tlp", 0x7, 0x16},
	{"rx_ccix_tlp", 0x7, 0x17},
	{"tx_deferrable_memory_write_tlp", 0x7, 0x18},
	{"rx_deferrable_memory_write_tlp", 0x7, 0x19},
};
235 
236 static ssize_t lane_detect_read(struct file *file, char __user *buf,
237 				size_t count, loff_t *ppos)
238 {
239 	struct dw_pcie *pci = file->private_data;
240 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
241 	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
242 	ssize_t pos;
243 	u32 val;
244 
245 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
246 	val = FIELD_GET(PIPE_DETECT_LANE, val);
247 	if (val)
248 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
249 	else
250 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
251 
252 	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
253 }
254 
255 static ssize_t lane_detect_write(struct file *file, const char __user *buf,
256 				 size_t count, loff_t *ppos)
257 {
258 	struct dw_pcie *pci = file->private_data;
259 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
260 	u32 lane, val;
261 	int ret;
262 
263 	ret = kstrtou32_from_user(buf, count, 0, &lane);
264 	if (ret)
265 		return ret;
266 
267 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
268 	val &= ~(LANE_SELECT);
269 	val |= FIELD_PREP(LANE_SELECT, lane);
270 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
271 
272 	return count;
273 }
274 
275 static ssize_t rx_valid_read(struct file *file, char __user *buf,
276 			     size_t count, loff_t *ppos)
277 {
278 	struct dw_pcie *pci = file->private_data;
279 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
280 	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
281 	ssize_t pos;
282 	u32 val;
283 
284 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
285 	val = FIELD_GET(PIPE_RXVALID, val);
286 	if (val)
287 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
288 	else
289 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
290 
291 	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
292 }
293 
/*
 * Writing to rx_valid selects the lane, exactly like lane_detect: both
 * files report fields of the same SD_STATUS_L1LANE_REG, so the lane-select
 * update is shared.
 */
static ssize_t rx_valid_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	return lane_detect_write(file, buf, count, ppos);
}
299 
300 static ssize_t err_inj_write(struct file *file, const char __user *buf,
301 			     size_t count, loff_t *ppos)
302 {
303 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
304 	struct dw_pcie *pci = pdata->pci;
305 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
306 	u32 val, counter, vc_num, err_group, type_mask;
307 	int val_diff = 0;
308 	char *kern_buf;
309 
310 	err_group = err_inj_list[pdata->idx].err_inj_group;
311 	type_mask = err_inj_type_mask[err_group];
312 
313 	kern_buf = memdup_user_nul(buf, count);
314 	if (IS_ERR(kern_buf))
315 		return PTR_ERR(kern_buf);
316 
317 	if (err_group == 4) {
318 		val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
319 		if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
320 			kfree(kern_buf);
321 			return -EINVAL;
322 		}
323 	} else if (err_group == 1) {
324 		val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
325 		if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
326 			kfree(kern_buf);
327 			return -EINVAL;
328 		}
329 	} else {
330 		val = kstrtou32(kern_buf, 0, &counter);
331 		if (val) {
332 			kfree(kern_buf);
333 			return val;
334 		}
335 	}
336 
337 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
338 	val &= ~(type_mask | EINJ_COUNT);
339 	val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
340 	val |= FIELD_PREP(EINJ_COUNT, counter);
341 
342 	if (err_group == 1 || err_group == 4) {
343 		val &= ~(EINJ_VAL_DIFF);
344 		val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
345 	}
346 	if (err_group == 4) {
347 		val &= ~(EINJ_VC_NUM);
348 		val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
349 	}
350 
351 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
352 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
353 
354 	kfree(kern_buf);
355 	return count;
356 }
357 
358 static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
359 			     struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
360 {
361 	u32 val;
362 
363 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
364 	val &= ~EVENT_COUNTER_ENABLE;
365 	val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
366 	val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
367 	val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
368 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
369 }
370 
371 static ssize_t counter_enable_read(struct file *file, char __user *buf,
372 				   size_t count, loff_t *ppos)
373 {
374 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
375 	struct dw_pcie *pci = pdata->pci;
376 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
377 	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
378 	ssize_t pos;
379 	u32 val;
380 
381 	mutex_lock(&rinfo->reg_event_lock);
382 	set_event_number(pdata, pci, rinfo);
383 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
384 	mutex_unlock(&rinfo->reg_event_lock);
385 	val = FIELD_GET(EVENT_COUNTER_STATUS, val);
386 	if (val)
387 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
388 	else
389 		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
390 
391 	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
392 }
393 
394 static ssize_t counter_enable_write(struct file *file, const char __user *buf,
395 				    size_t count, loff_t *ppos)
396 {
397 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
398 	struct dw_pcie *pci = pdata->pci;
399 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
400 	u32 val, enable;
401 	int ret;
402 
403 	ret = kstrtou32_from_user(buf, count, 0, &enable);
404 	if (ret)
405 		return ret;
406 
407 	mutex_lock(&rinfo->reg_event_lock);
408 	set_event_number(pdata, pci, rinfo);
409 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
410 	if (enable)
411 		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
412 	else
413 		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
414 
415 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
416 
417 	/*
418 	 * While enabling the counter, always read the status back to check if
419 	 * it is enabled or not. Return error if it is not enabled to let the
420 	 * users know that the counter is not supported on the platform.
421 	 */
422 	if (enable) {
423 		val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
424 					RAS_DES_EVENT_COUNTER_CTRL_REG);
425 		if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
426 			mutex_unlock(&rinfo->reg_event_lock);
427 			return -EOPNOTSUPP;
428 		}
429 	}
430 
431 	mutex_unlock(&rinfo->reg_event_lock);
432 
433 	return count;
434 }
435 
436 static ssize_t counter_lane_read(struct file *file, char __user *buf,
437 				 size_t count, loff_t *ppos)
438 {
439 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
440 	struct dw_pcie *pci = pdata->pci;
441 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
442 	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
443 	ssize_t pos;
444 	u32 val;
445 
446 	mutex_lock(&rinfo->reg_event_lock);
447 	set_event_number(pdata, pci, rinfo);
448 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
449 	mutex_unlock(&rinfo->reg_event_lock);
450 	val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
451 	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
452 
453 	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
454 }
455 
456 static ssize_t counter_lane_write(struct file *file, const char __user *buf,
457 				  size_t count, loff_t *ppos)
458 {
459 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
460 	struct dw_pcie *pci = pdata->pci;
461 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
462 	u32 val, lane;
463 	int ret;
464 
465 	ret = kstrtou32_from_user(buf, count, 0, &lane);
466 	if (ret)
467 		return ret;
468 
469 	mutex_lock(&rinfo->reg_event_lock);
470 	set_event_number(pdata, pci, rinfo);
471 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
472 	val &= ~(EVENT_COUNTER_LANE_SELECT);
473 	val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
474 	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
475 	mutex_unlock(&rinfo->reg_event_lock);
476 
477 	return count;
478 }
479 
480 static ssize_t counter_value_read(struct file *file, char __user *buf,
481 				  size_t count, loff_t *ppos)
482 {
483 	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
484 	struct dw_pcie *pci = pdata->pci;
485 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
486 	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
487 	ssize_t pos;
488 	u32 val;
489 
490 	mutex_lock(&rinfo->reg_event_lock);
491 	set_event_number(pdata, pci, rinfo);
492 	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
493 	mutex_unlock(&rinfo->reg_event_lock);
494 	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
495 
496 	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
497 }
498 
499 static int ltssm_status_show(struct seq_file *s, void *v)
500 {
501 	struct dw_pcie *pci = s->private;
502 	enum dw_pcie_ltssm val;
503 
504 	val = dw_pcie_get_ltssm(pci);
505 	seq_printf(s, "%s (0x%02x)\n", dw_pcie_ltssm_status_string(val), val);
506 
507 	return 0;
508 }
509 
/* Bind the seq_file show callback; i_private carries the dw_pcie pointer. */
static int ltssm_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, ltssm_status_show, inode->i_private);
}
514 
/* Create a 0644 file in rasdes_debug backed by dbg_<name>_fops, with pci as private data. */
#define dwc_debugfs_create(name)			\
debugfs_create_file(#name, 0644, rasdes_debug, pci,	\
			&dbg_ ## name ## _fops)

/* Declare read/write fops wired to <name>_read and <name>_write. */
#define DWC_DEBUGFS_FOPS(name)					\
static const struct file_operations dbg_ ## name ## _fops = {	\
	.open = simple_open,				\
	.read = name ## _read,				\
	.write = name ## _write				\
}

DWC_DEBUGFS_FOPS(lane_detect);
DWC_DEBUGFS_FOPS(rx_valid);

/* Error injection files are write-only. */
static const struct file_operations dwc_pcie_err_inj_ops = {
	.open = simple_open,
	.write = err_inj_write,
};

static const struct file_operations dwc_pcie_counter_enable_ops = {
	.open = simple_open,
	.read = counter_enable_read,
	.write = counter_enable_write,
};

static const struct file_operations dwc_pcie_counter_lane_ops = {
	.open = simple_open,
	.read = counter_lane_read,
	.write = counter_lane_write,
};

/* Counter value is read-only. */
static const struct file_operations dwc_pcie_counter_value_ops = {
	.open = simple_open,
	.read = counter_value_read,
};

static const struct file_operations dwc_pcie_ltssm_status_ops = {
	.open = ltssm_status_open,
	.read = seq_read,
};
555 
556 static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
557 {
558 	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
559 
560 	mutex_destroy(&rinfo->reg_event_lock);
561 }
562 
/*
 * Build the RAS DES debugfs hierarchy under @dir: a "rasdes_debug"
 * directory (lane detect / rx valid), a "rasdes_err_inj" directory with
 * one write-only file per injectable error, and a "rasdes_event_counter"
 * directory with one subdirectory per countable event.
 *
 * Returns 0 on success or when the controller has no RAS DES capability
 * (in which case no state is allocated), negative errno otherwise.
 */
static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	struct dentry *rasdes_debug, *rasdes_err_inj;
	struct dentry *rasdes_event_counter, *rasdes_events;
	struct dwc_pcie_rasdes_info *rasdes_info;
	struct dwc_pcie_rasdes_priv *priv_tmp;
	struct device *dev = pci->dev;
	int ras_cap, i, ret;

	/*
	 * If a given SoC has no RAS DES capability, the following call is
	 * bound to return an error, breaking some existing platforms. So,
	 * return 0 here, as this is not necessarily an error.
	 */
	ras_cap = dw_pcie_find_rasdes_capability(pci);
	if (!ras_cap) {
		dev_dbg(dev, "no RAS DES capability available\n");
		return 0;
	}

	rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
	if (!rasdes_info)
		return -ENOMEM;

	/* Create subdirectories for Debug, Error Injection, Statistics. */
	rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
	rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
	rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);

	mutex_init(&rasdes_info->reg_event_lock);
	rasdes_info->ras_cap_offset = ras_cap;
	pci->debugfs->rasdes_info = rasdes_info;

	/* Create debugfs files for Debug subdirectory. */
	dwc_debugfs_create(lane_detect);
	dwc_debugfs_create(rx_valid);

	/* Create debugfs files for Error Injection subdirectory. */
	for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
		/* Per-file private data carries the index into err_inj_list[]. */
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
				    &dwc_pcie_err_inj_ops);
	}

	/* Create debugfs files for Statistical Counter subdirectory. */
	for (i = 0; i < ARRAY_SIZE(event_list); i++) {
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
		/* Only groups 0 and 4 count per lane, so only they get lane_select. */
		if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
			debugfs_create_file("lane_select", 0644, rasdes_events,
					    priv_tmp, &dwc_pcie_counter_lane_ops);
		}
		debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_value_ops);
		debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_enable_ops);
	}

	return 0;

err_deinit:
	/*
	 * NOTE(review): the partially-created dentries are left in place here
	 * and are only removed by debugfs_remove_recursive() at deinit time;
	 * rasdes_info also stays assigned after its mutex is destroyed, so the
	 * later deinit path will call mutex_destroy() on it again — verify
	 * that is acceptable (mutex_destroy is a no-op without mutex debugging).
	 */
	dwc_pcie_rasdes_debugfs_deinit(pci);
	return ret;
}
641 
/* Expose the LTSSM state as a read-only file directly under @dir. */
static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	debugfs_create_file("ltssm_status", 0444, dir, pci,
			    &dwc_pcie_ltssm_status_ops);
}
647 
/*
 * Locate and cache the PTM VSEC offset for use by the other PTM
 * callbacks. A zero return indicates the capability was not found.
 */
static int dw_pcie_ptm_check_capability(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);

	return pci->ptm_vsec_offset;
}
656 
657 static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
658 {
659 	struct dw_pcie *pci = drvdata;
660 	u32 val;
661 
662 	if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
663 		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
664 		val |= PTM_REQ_AUTO_UPDATE_ENABLED;
665 		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
666 	} else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
667 		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
668 		val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
669 		val |= PTM_REQ_START_UPDATE;
670 		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
671 	} else {
672 		return -EINVAL;
673 	}
674 
675 	return 0;
676 }
677 
678 static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
679 {
680 	struct dw_pcie *pci = drvdata;
681 	u32 val;
682 
683 	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
684 	if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
685 		*mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
686 	else
687 		/*
688 		 * PTM_REQ_START_UPDATE is a self clearing register bit. So if
689 		 * PTM_REQ_AUTO_UPDATE_ENABLED is not set, then it implies that
690 		 * manual update is used.
691 		 */
692 		*mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
693 
694 	return 0;
695 }
696 
697 static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
698 {
699 	struct dw_pcie *pci = drvdata;
700 	u32 val;
701 
702 	if (valid) {
703 		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
704 		val |= PTM_RES_CCONTEXT_VALID;
705 		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
706 	} else {
707 		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
708 		val &= ~PTM_RES_CCONTEXT_VALID;
709 		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
710 	}
711 
712 	return 0;
713 }
714 
715 static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
716 {
717 	struct dw_pcie *pci = drvdata;
718 	u32 val;
719 
720 	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
721 	*valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
722 
723 	return 0;
724 }
725 
726 static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
727 {
728 	struct dw_pcie *pci = drvdata;
729 	u32 msb, lsb;
730 
731 	do {
732 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
733 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
734 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
735 
736 	*clock = ((u64) msb) << 32 | lsb;
737 
738 	return 0;
739 }
740 
741 static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
742 {
743 	struct dw_pcie *pci = drvdata;
744 	u32 msb, lsb;
745 
746 	do {
747 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
748 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
749 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
750 
751 	*clock = ((u64) msb) << 32 | lsb;
752 
753 	return 0;
754 }
755 
756 static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
757 {
758 	struct dw_pcie *pci = drvdata;
759 	u32 msb, lsb;
760 
761 	do {
762 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
763 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
764 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
765 
766 	*clock = ((u64) msb) << 32 | lsb;
767 
768 	return 0;
769 }
770 
771 static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
772 {
773 	struct dw_pcie *pci = drvdata;
774 	u32 msb, lsb;
775 
776 	do {
777 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
778 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
779 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
780 
781 	*clock = ((u64) msb) << 32 | lsb;
782 
783 	return 0;
784 }
785 
786 static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
787 {
788 	struct dw_pcie *pci = drvdata;
789 	u32 msb, lsb;
790 
791 	do {
792 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
793 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
794 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
795 
796 	*clock = ((u64) msb) << 32 | lsb;
797 
798 	return 0;
799 }
800 
801 static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
802 {
803 	struct dw_pcie *pci = drvdata;
804 	u32 msb, lsb;
805 
806 	do {
807 		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
808 		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
809 	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
810 
811 	*clock = ((u64) msb) << 32 | lsb;
812 
813 	return 0;
814 }
815 
/* Context update control only applies to the PTM requester (EP). */
static bool dw_pcie_ptm_context_update_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

/* Context valid control only applies to the PTM responder (RC). */
static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
{
	/* PTM local clock is always visible */
	return true;
}

/* The master (upstream) clock is only meaningful on the EP side. */
static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

/* t1/t4 timestamps are captured on the EP side, t2/t3 on the RC side. */
static bool dw_pcie_ptm_t1_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_t2_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t3_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t4_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}
870 
/* Callback table handed to the generic PTM debugfs core. */
static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
	.check_capability = dw_pcie_ptm_check_capability,
	.context_update_write = dw_pcie_ptm_context_update_write,
	.context_update_read = dw_pcie_ptm_context_update_read,
	.context_valid_write = dw_pcie_ptm_context_valid_write,
	.context_valid_read = dw_pcie_ptm_context_valid_read,
	.local_clock_read = dw_pcie_ptm_local_clock_read,
	.master_clock_read = dw_pcie_ptm_master_clock_read,
	.t1_read = dw_pcie_ptm_t1_read,
	.t2_read = dw_pcie_ptm_t2_read,
	.t3_read = dw_pcie_ptm_t3_read,
	.t4_read = dw_pcie_ptm_t4_read,
	.context_update_visible = dw_pcie_ptm_context_update_visible,
	.context_valid_visible = dw_pcie_ptm_context_valid_visible,
	.local_clock_visible = dw_pcie_ptm_local_clock_visible,
	.master_clock_visible = dw_pcie_ptm_master_clock_visible,
	.t1_visible = dw_pcie_ptm_t1_visible,
	.t2_visible = dw_pcie_ptm_t2_visible,
	.t3_visible = dw_pcie_ptm_t3_visible,
	.t4_visible = dw_pcie_ptm_t4_visible,
};
892 
/* Tear down everything dwc_pcie_debugfs_init() created. Safe to call if init never ran. */
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
	if (!pci->debugfs)
		return;

	pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
	/*
	 * NOTE(review): rasdes_info is left NULL when the controller has no
	 * RAS DES capability (rasdes init returns 0 early); verify
	 * dwc_pcie_rasdes_debugfs_deinit() tolerates a NULL rasdes_info.
	 */
	dwc_pcie_rasdes_debugfs_deinit(pci);
	debugfs_remove_recursive(pci->debugfs->debug_dir);
}
902 
903 void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
904 {
905 	char dirname[DWC_DEBUGFS_BUF_MAX];
906 	struct device *dev = pci->dev;
907 	struct debugfs_info *debugfs;
908 	struct dentry *dir;
909 	int err;
910 
911 	/* Create main directory for each platform driver. */
912 	snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
913 	dir = debugfs_create_dir(dirname, NULL);
914 	debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
915 	if (!debugfs)
916 		return;
917 
918 	debugfs->debug_dir = dir;
919 	pci->debugfs = debugfs;
920 	err = dwc_pcie_rasdes_debugfs_init(pci, dir);
921 	if (err)
922 		dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
923 			err);
924 
925 	dwc_pcie_ltssm_debugfs_init(pci, dir);
926 
927 	pci->mode = mode;
928 	pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
929 						   &dw_pcie_ptm_ops);
930 }
931