// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe controller debugfs driver
 *
 * Copyright (C) 2025 Samsung Electronics Co., Ltd.
 *		 http://www.samsung.com
 *
 * Author: Shradha Todi <shradha.t@samsung.com>
 */

#include <linux/debugfs.h>

#include "pcie-designware.h"

#define SD_STATUS_L1LANE_REG		0xb0
#define PIPE_RXVALID			BIT(18)
#define PIPE_DETECT_LANE		BIT(17)
#define LANE_SELECT			GENMASK(3, 0)

#define ERR_INJ0_OFF			0x34
#define EINJ_VAL_DIFF			GENMASK(28, 16)
#define EINJ_VC_NUM			GENMASK(14, 12)
#define EINJ_TYPE_SHIFT			8
#define EINJ0_TYPE			GENMASK(11, 8)
#define EINJ1_TYPE			BIT(8)
#define EINJ2_TYPE			GENMASK(9, 8)
#define EINJ3_TYPE			GENMASK(10, 8)
#define EINJ4_TYPE			GENMASK(10, 8)
#define EINJ5_TYPE			BIT(8)
#define EINJ_COUNT			GENMASK(7, 0)

#define ERR_INJ_ENABLE_REG		0x30

#define RAS_DES_EVENT_COUNTER_DATA_REG	0xc

#define RAS_DES_EVENT_COUNTER_CTRL_REG	0x8
#define EVENT_COUNTER_GROUP_SELECT	GENMASK(27, 24)
#define EVENT_COUNTER_EVENT_SELECT	GENMASK(23, 16)
#define EVENT_COUNTER_LANE_SELECT	GENMASK(11, 8)
#define EVENT_COUNTER_STATUS		BIT(7)
#define EVENT_COUNTER_ENABLE		GENMASK(4, 2)
#define PER_EVENT_ON			0x3
#define PER_EVENT_OFF			0x1

#define DWC_DEBUGFS_BUF_MAX		128

/**
 * struct dwc_pcie_rasdes_info - Stores controller common information
 * @ras_cap_offset: RAS DES vendor-specific extended capability offset
 * @reg_event_lock: Mutex used for RAS DES shadow event registers
 *
 * Any parameter that is common to all debugfs files of a single controller
 * is stored in this struct. It is allocated and assigned to the
 * controller-specific struct dw_pcie during initialization.
 */
struct dwc_pcie_rasdes_info {
	u32 ras_cap_offset;
	struct mutex reg_event_lock;
};

/**
 * struct dwc_pcie_rasdes_priv - Stores file-specific private data
 * @pci: Reference to the dw_pcie structure
 * @idx: Index of the file's entry in the err_inj_list or event_list array
 *
 * Error injection and event counter debugfs files have this struct as their
 * private data.
 */
struct dwc_pcie_rasdes_priv {
	struct dw_pcie *pci;
	int idx;
};

/**
 * struct dwc_pcie_err_inj - Store details about each error injection
 *			     supported by DWC RAS DES
 * @name: Name of the error that can be injected
 * @err_inj_group: Group number to which the error belongs. The value
 *		   can range from 0 to 5
 * @err_inj_type: Type of the error within its group. Each group can have
 *		  multiple types of error
 */
struct dwc_pcie_err_inj {
	const char *name;
	u32 err_inj_group;
	u32 err_inj_type;
};

static const struct dwc_pcie_err_inj err_inj_list[] = {
	{"tx_lcrc", 0x0, 0x0},
	{"b16_crc_dllp", 0x0, 0x1},
	{"b16_crc_upd_fc", 0x0, 0x2},
	{"tx_ecrc", 0x0, 0x3},
	{"fcrc_tlp", 0x0, 0x4},
	{"parity_tsos", 0x0, 0x5},
	{"parity_skpos", 0x0, 0x6},
	{"rx_lcrc", 0x0, 0x8},
	{"rx_ecrc", 0x0, 0xb},
	{"tlp_err_seq", 0x1, 0x0},
	{"ack_nak_dllp_seq", 0x1, 0x1},
	{"ack_nak_dllp", 0x2, 0x0},
	{"upd_fc_dllp", 0x2, 0x1},
	{"nak_dllp", 0x2, 0x2},
	{"inv_sync_hdr_sym", 0x3, 0x0},
	{"com_pad_ts1", 0x3, 0x1},
	{"com_pad_ts2", 0x3, 0x2},
	{"com_fts", 0x3, 0x3},
	{"com_idl", 0x3, 0x4},
	{"end_edb", 0x3, 0x5},
	{"stp_sdp", 0x3, 0x6},
	{"com_skp", 0x3, 0x7},
	{"posted_tlp_hdr", 0x4, 0x0},
	{"non_post_tlp_hdr", 0x4, 0x1},
	{"cmpl_tlp_hdr", 0x4, 0x2},
	{"posted_tlp_data", 0x4, 0x4},
	{"non_post_tlp_data", 0x4, 0x5},
	{"cmpl_tlp_data", 0x4, 0x6},
	{"duplicate_tlp", 0x5, 0x0},
	{"nullified_tlp", 0x5, 0x1},
};

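/* Type field mask of each group's error injection register, indexed by group number. */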
static const u32 err_inj_type_mask[] = {
	EINJ0_TYPE,
	EINJ1_TYPE,
	EINJ2_TYPE,
	EINJ3_TYPE,
	EINJ4_TYPE,
	EINJ5_TYPE,
};

/**
 * struct dwc_pcie_event_counter - Store details about each event counter
 *				   supported in DWC RAS DES
 * @name: Name of the error counter
 * @group_no: Group number that the event belongs to. The value can range
 *	      from 0 to 4
 * @event_no: Event number of the particular event. The value ranges are:
 *		Group 0: 0 - 10
 *		Group 1: 5 - 13
 *		Group 2: 0 - 7
 *		Group 3: 0 - 5
 *		Group 4: 0 - 1
 */
struct dwc_pcie_event_counter {
	const char *name;
	u32 group_no;
	u32 event_no;
};

static const struct dwc_pcie_event_counter event_list[] = {
	{"ebuf_overflow", 0x0, 0x0},
	{"ebuf_underrun", 0x0, 0x1},
	{"decode_err", 0x0, 0x2},
	{"running_disparity_err", 0x0, 0x3},
	{"skp_os_parity_err", 0x0, 0x4},
	{"sync_header_err", 0x0, 0x5},
	{"rx_valid_deassertion", 0x0, 0x6},
	{"ctl_skp_os_parity_err", 0x0, 0x7},
	{"retimer_parity_err_1st", 0x0, 0x8},
	{"retimer_parity_err_2nd", 0x0, 0x9},
	{"margin_crc_parity_err", 0x0, 0xA},
	{"detect_ei_infer", 0x1, 0x5},
	{"receiver_err", 0x1, 0x6},
	{"rx_recovery_req", 0x1, 0x7},
	{"n_fts_timeout", 0x1, 0x8},
	{"framing_err", 0x1, 0x9},
	{"deskew_err", 0x1, 0xa},
	{"framing_err_in_l0", 0x1, 0xc},
	{"deskew_uncompleted_err", 0x1, 0xd},
	{"bad_tlp", 0x2, 0x0},
	{"lcrc_err", 0x2, 0x1},
	{"bad_dllp", 0x2, 0x2},
	{"replay_num_rollover", 0x2, 0x3},
	{"replay_timeout", 0x2, 0x4},
	{"rx_nak_dllp", 0x2, 0x5},
	{"tx_nak_dllp", 0x2, 0x6},
	{"retry_tlp", 0x2, 0x7},
	{"fc_timeout", 0x3, 0x0},
	{"poisoned_tlp", 0x3, 0x1},
	{"ecrc_error", 0x3, 0x2},
	{"unsupported_request", 0x3, 0x3},
	{"completer_abort", 0x3, 0x4},
	{"completion_timeout", 0x3, 0x5},
	{"ebuf_skp_add", 0x4, 0x0},
	{"ebuf_skp_del", 0x4, 0x1},
};

static ssize_t lane_detect_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val = FIELD_GET(PIPE_DETECT_LANE, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t lane_detect_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 lane, val;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &lane);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val &= ~(LANE_SELECT);
	val |= FIELD_PREP(LANE_SELECT, lane);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);

	return count;
}

static ssize_t rx_valid_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val = FIELD_GET(PIPE_RXVALID, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t rx_valid_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	return lane_detect_write(file, buf, count, ppos);
}

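/*
 * Expected write format, following the parsing below:
 *   groups 0, 2, 3 and 5:  "<count>"
 *   group 1:               "<count> <val_diff>"
 *   group 4:               "<count> <val_diff> <vc_num>"
 * where <val_diff> must be within [-4095, 4095].
 */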
static ssize_t err_inj_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, counter, vc_num, err_group, type_mask;
	int val_diff = 0;
	char *kern_buf;
	int ret;

	err_group = err_inj_list[pdata->idx].err_inj_group;
	type_mask = err_inj_type_mask[err_group];

	kern_buf = memdup_user_nul(buf, count);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	if (err_group == 4) {
		ret = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
		if ((ret != 3) || (val_diff < -4095 || val_diff > 4095)) {
			kfree(kern_buf);
			return -EINVAL;
		}
	} else if (err_group == 1) {
		ret = sscanf(kern_buf, "%u %d", &counter, &val_diff);
		if ((ret != 2) || (val_diff < -4095 || val_diff > 4095)) {
			kfree(kern_buf);
			return -EINVAL;
		}
	} else {
		ret = kstrtou32(kern_buf, 0, &counter);
		if (ret) {
			kfree(kern_buf);
			return ret;
		}
	}

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
	val &= ~(type_mask | EINJ_COUNT);
	val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
	val |= FIELD_PREP(EINJ_COUNT, counter);

	if (err_group == 1 || err_group == 4) {
		val &= ~(EINJ_VAL_DIFF);
		val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
	}
	if (err_group == 4) {
		val &= ~(EINJ_VC_NUM);
		val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
	}

	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));

	kfree(kern_buf);
	return count;
}

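/*
 * Select the event group and event number of this debugfs file in the
 * shadowed event counter control register. Callers must hold
 * rinfo->reg_event_lock.
 */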
static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
			     struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	val &= ~EVENT_COUNTER_ENABLE;
	val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
	val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
	val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
}

static ssize_t counter_enable_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	val = FIELD_GET(EVENT_COUNTER_STATUS, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t counter_enable_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, enable;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &enable);
	if (ret)
		return ret;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	if (enable)
		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
	else
		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);

	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);

	/*
	 * While enabling the counter, always read the status back to check if
	 * it is enabled or not. Return error if it is not enabled to let the
	 * users know that the counter is not supported on the platform.
	 */
	if (enable) {
		val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
					RAS_DES_EVENT_COUNTER_CTRL_REG);
		if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
			mutex_unlock(&rinfo->reg_event_lock);
			return -EOPNOTSUPP;
		}
	}

	mutex_unlock(&rinfo->reg_event_lock);

	return count;
}

static ssize_t counter_lane_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t counter_lane_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, lane;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &lane);
	if (ret)
		return ret;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	val &= ~(EVENT_COUNTER_LANE_SELECT);
	val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
	mutex_unlock(&rinfo->reg_event_lock);

	return count;
}

static ssize_t counter_value_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static int ltssm_status_show(struct seq_file *s, void *v)
{
	struct dw_pcie *pci = s->private;
	enum dw_pcie_ltssm val;

	val = dw_pcie_get_ltssm(pci);
	seq_printf(s, "%s (0x%02x)\n", dw_pcie_ltssm_status_string(val), val);

	return 0;
}

static int ltssm_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, ltssm_status_show, inode->i_private);
}

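/*
 * Helpers for the simple debug files: DWC_DEBUGFS_FOPS() declares a
 * read/write file_operations named after its handlers, and
 * dwc_debugfs_create() creates the matching file in the rasdes_debug
 * directory with the dw_pcie instance as private data.
 */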
#define dwc_debugfs_create(name)			\
debugfs_create_file(#name, 0644, rasdes_debug, pci,	\
			&dbg_ ## name ## _fops)

#define DWC_DEBUGFS_FOPS(name)					\
static const struct file_operations dbg_ ## name ## _fops = {	\
	.open = simple_open,				\
	.read = name ## _read,				\
	.write = name ## _write				\
}

DWC_DEBUGFS_FOPS(lane_detect);
DWC_DEBUGFS_FOPS(rx_valid);

static const struct file_operations dwc_pcie_err_inj_ops = {
	.open = simple_open,
	.write = err_inj_write,
};

static const struct file_operations dwc_pcie_counter_enable_ops = {
	.open = simple_open,
	.read = counter_enable_read,
	.write = counter_enable_write,
};

static const struct file_operations dwc_pcie_counter_lane_ops = {
	.open = simple_open,
	.read = counter_lane_read,
	.write = counter_lane_write,
};

static const struct file_operations dwc_pcie_counter_value_ops = {
	.open = simple_open,
	.read = counter_value_read,
};

static const struct file_operations dwc_pcie_ltssm_status_ops = {
	.open = ltssm_status_open,
	.read = seq_read,
};

static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
{
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;

	mutex_destroy(&rinfo->reg_event_lock);
}

static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	struct dentry *rasdes_debug, *rasdes_err_inj;
	struct dentry *rasdes_event_counter, *rasdes_events;
	struct dwc_pcie_rasdes_info *rasdes_info;
	struct dwc_pcie_rasdes_priv *priv_tmp;
	struct device *dev = pci->dev;
	int ras_cap, i, ret;

	/*
	 * A given SoC may not implement the RAS DES capability at all, in
	 * which case the lookup below finds nothing. Treating that as a
	 * failure would break some existing platforms, so return 0 here,
	 * as this is not necessarily an error.
	 */
	ras_cap = dw_pcie_find_rasdes_capability(pci);
	if (!ras_cap) {
		dev_dbg(dev, "no RAS DES capability available\n");
		return 0;
	}

	rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
	if (!rasdes_info)
		return -ENOMEM;

	/* Create subdirectories for Debug, Error Injection, Statistics. */
	rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
	rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
	rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);

	mutex_init(&rasdes_info->reg_event_lock);
	rasdes_info->ras_cap_offset = ras_cap;
	pci->debugfs->rasdes_info = rasdes_info;

	/* Create debugfs files for Debug subdirectory. */
	dwc_debugfs_create(lane_detect);
	dwc_debugfs_create(rx_valid);

	/* Create debugfs files for Error Injection subdirectory. */
	for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
				    &dwc_pcie_err_inj_ops);
	}

	/* Create debugfs files for Statistical Counter subdirectory. */
	for (i = 0; i < ARRAY_SIZE(event_list); i++) {
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
		if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
			debugfs_create_file("lane_select", 0644, rasdes_events,
					    priv_tmp, &dwc_pcie_counter_lane_ops);
		}
		debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_value_ops);
		debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_enable_ops);
	}

	return 0;

err_deinit:
	dwc_pcie_rasdes_debugfs_deinit(pci);
	return ret;
}

static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	debugfs_create_file("ltssm_status", 0444, dir, pci,
			    &dwc_pcie_ltssm_status_ops);
}

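/*
 * Callbacks handed to pcie_ptm_create_debugfs() below. The register
 * accessors all work on the PTM registers at pci->ptm_vsec_offset in the
 * DBI space.
 */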
static int dw_pcie_ptm_check_capability(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);

	return pci->ptm_vsec_offset;
}

static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val |= PTM_REQ_AUTO_UPDATE_ENABLED;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	} else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
		val |= PTM_REQ_START_UPDATE;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
	if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
		*mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
	else
		/*
		 * PTM_REQ_START_UPDATE is a self clearing register bit. So if
		 * PTM_REQ_AUTO_UPDATE_ENABLED is not set, then it implies that
		 * manual update is used.
		 */
		*mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;

	return 0;
}

static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	if (valid) {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val |= PTM_RES_CCONTEXT_VALID;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	} else {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val &= ~PTM_RES_CCONTEXT_VALID;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	}

	return 0;
}

static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
	*valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);

	return 0;
}

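/*
 * The PTM timestamps are read as two 32-bit halves. Re-read the MSB after
 * the LSB and retry if it changed, so that a low-word rollover between the
 * two reads cannot produce a torn 64-bit value.
 */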
static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

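/*
 * Visibility callbacks: the local clock file is always exposed, the context
 * update, master clock, T1 and T4 files only in Endpoint mode, and the
 * context valid, T2 and T3 files only in Root Complex mode.
 */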
static bool dw_pcie_ptm_context_update_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
{
	/* PTM local clock is always visible */
	return true;
}

static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_t1_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_t2_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t3_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t4_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
	.check_capability = dw_pcie_ptm_check_capability,
	.context_update_write = dw_pcie_ptm_context_update_write,
	.context_update_read = dw_pcie_ptm_context_update_read,
	.context_valid_write = dw_pcie_ptm_context_valid_write,
	.context_valid_read = dw_pcie_ptm_context_valid_read,
	.local_clock_read = dw_pcie_ptm_local_clock_read,
	.master_clock_read = dw_pcie_ptm_master_clock_read,
	.t1_read = dw_pcie_ptm_t1_read,
	.t2_read = dw_pcie_ptm_t2_read,
	.t3_read = dw_pcie_ptm_t3_read,
	.t4_read = dw_pcie_ptm_t4_read,
	.context_update_visible = dw_pcie_ptm_context_update_visible,
	.context_valid_visible = dw_pcie_ptm_context_valid_visible,
	.local_clock_visible = dw_pcie_ptm_local_clock_visible,
	.master_clock_visible = dw_pcie_ptm_master_clock_visible,
	.t1_visible = dw_pcie_ptm_t1_visible,
	.t2_visible = dw_pcie_ptm_t2_visible,
	.t3_visible = dw_pcie_ptm_t3_visible,
	.t4_visible = dw_pcie_ptm_t4_visible,
};

void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
	if (!pci->debugfs)
		return;

	pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
	dwc_pcie_rasdes_debugfs_deinit(pci);
	debugfs_remove_recursive(pci->debugfs->debug_dir);
}

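/*
 * Create the debugfs hierarchy for a controller under a per-device
 * "dwc_pcie_<dev>" directory: the RAS DES debug, error injection and event
 * counter files, the LTSSM status file and the PTM debugfs files.
 */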
void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
{
	char dirname[DWC_DEBUGFS_BUF_MAX];
	struct device *dev = pci->dev;
	struct debugfs_info *debugfs;
	struct dentry *dir;
	int err;

	/* Create main directory for each platform driver. */
	snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
	dir = debugfs_create_dir(dirname, NULL);
	debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
	if (!debugfs)
		return;

	debugfs->debug_dir = dir;
	pci->debugfs = debugfs;
	err = dwc_pcie_rasdes_debugfs_init(pci, dir);
	if (err)
		dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
			err);

	dwc_pcie_ltssm_debugfs_init(pci, dir);

	pci->mode = mode;
	pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
						   &dw_pcie_ptm_ops);
}
878