// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe controller debugfs driver
 *
 * Copyright (C) 2025 Samsung Electronics Co., Ltd.
 *		 http://www.samsung.com
 *
 * Author: Shradha Todi <shradha.t@samsung.com>
 */

#include <linux/debugfs.h>

#include "pcie-designware.h"

#define SD_STATUS_L1LANE_REG		0xb0
#define PIPE_RXVALID			BIT(18)
#define PIPE_DETECT_LANE		BIT(17)
#define LANE_SELECT			GENMASK(3, 0)

#define ERR_INJ0_OFF			0x34
#define EINJ_VAL_DIFF			GENMASK(28, 16)
#define EINJ_VC_NUM			GENMASK(14, 12)
#define EINJ_TYPE_SHIFT			8
#define EINJ0_TYPE			GENMASK(11, 8)
#define EINJ1_TYPE			BIT(8)
#define EINJ2_TYPE			GENMASK(9, 8)
#define EINJ3_TYPE			GENMASK(10, 8)
#define EINJ4_TYPE			GENMASK(10, 8)
#define EINJ5_TYPE			BIT(8)
#define EINJ_COUNT			GENMASK(7, 0)

#define ERR_INJ_ENABLE_REG		0x30

#define RAS_DES_EVENT_COUNTER_DATA_REG	0xc

#define RAS_DES_EVENT_COUNTER_CTRL_REG	0x8
#define EVENT_COUNTER_GROUP_SELECT	GENMASK(27, 24)
#define EVENT_COUNTER_EVENT_SELECT	GENMASK(23, 16)
#define EVENT_COUNTER_LANE_SELECT	GENMASK(11, 8)
#define EVENT_COUNTER_STATUS		BIT(7)
#define EVENT_COUNTER_ENABLE		GENMASK(4, 2)
#define PER_EVENT_ON			0x3
#define PER_EVENT_OFF			0x1

#define DWC_DEBUGFS_BUF_MAX		128
/**
 * struct dwc_pcie_rasdes_info - Stores controller common information
 * @ras_cap_offset: RAS DES vendor specific extended capability offset
 * @reg_event_lock: Mutex used for RAS DES shadow event registers
 *
 * Any parameter that is constant across all files of the debugfs hierarchy
 * for a single controller is stored in this struct. It is allocated and
 * assigned to the controller specific struct dw_pcie during initialization.
 */
struct dwc_pcie_rasdes_info {
	u32 ras_cap_offset;
	struct mutex reg_event_lock;
};

/**
 * struct dwc_pcie_rasdes_priv - Stores file specific private data information
 * @pci: Reference to the dw_pcie structure
 * @idx: Index of specific file related information in array of structs
 *
 * All debugfs files have this struct as their private data.
 */
struct dwc_pcie_rasdes_priv {
	struct dw_pcie *pci;
	int idx;
};

/**
 * struct dwc_pcie_err_inj - Store details about each error injection
 *			     supported by DWC RAS DES
 * @name: Name of the error that can be injected
 * @err_inj_group: Group number to which the error belongs. The value
 *		   can range from 0 to 5
 * @err_inj_type: Each group can have multiple types of error
 */
struct dwc_pcie_err_inj {
	const char *name;
	u32 err_inj_group;
	u32 err_inj_type;
};
static const struct dwc_pcie_err_inj err_inj_list[] = {
	{"tx_lcrc", 0x0, 0x0},
	{"b16_crc_dllp", 0x0, 0x1},
	{"b16_crc_upd_fc", 0x0, 0x2},
	{"tx_ecrc", 0x0, 0x3},
	{"fcrc_tlp", 0x0, 0x4},
	{"parity_tsos", 0x0, 0x5},
	{"parity_skpos", 0x0, 0x6},
	{"rx_lcrc", 0x0, 0x8},
	{"rx_ecrc", 0x0, 0xb},
	{"tlp_err_seq", 0x1, 0x0},
	{"ack_nak_dllp_seq", 0x1, 0x1},
	{"ack_nak_dllp", 0x2, 0x0},
	{"upd_fc_dllp", 0x2, 0x1},
	{"nak_dllp", 0x2, 0x2},
	{"inv_sync_hdr_sym", 0x3, 0x0},
	{"com_pad_ts1", 0x3, 0x1},
	{"com_pad_ts2", 0x3, 0x2},
	{"com_fts", 0x3, 0x3},
	{"com_idl", 0x3, 0x4},
	{"end_edb", 0x3, 0x5},
	{"stp_sdp", 0x3, 0x6},
	{"com_skp", 0x3, 0x7},
	{"posted_tlp_hdr", 0x4, 0x0},
	{"non_post_tlp_hdr", 0x4, 0x1},
	{"cmpl_tlp_hdr", 0x4, 0x2},
	{"posted_tlp_data", 0x4, 0x4},
	{"non_post_tlp_data", 0x4, 0x5},
	{"cmpl_tlp_data", 0x4, 0x6},
	{"duplicate_tlp", 0x5, 0x0},
	{"nullified_tlp", 0x5, 0x1},
};

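/*
 * TYPE field mask for each error group. The field width differs per group,
 * matching the EINJn_TYPE definitions above.
 */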
static const u32 err_inj_type_mask[] = {
	EINJ0_TYPE,
	EINJ1_TYPE,
	EINJ2_TYPE,
	EINJ3_TYPE,
	EINJ4_TYPE,
	EINJ5_TYPE,
};

/**
 * struct dwc_pcie_event_counter - Store details about each event counter
 *				   supported in DWC RAS DES
 * @name: Name of the error counter
 * @group_no: Group number that the event belongs to. The value can range
 *	      from 0 to 4
 * @event_no: Event number of the particular event. The value ranges are:
 *		Group 0: 0 - 10
 *		Group 1: 5 - 13
 *		Group 2: 0 - 7
 *		Group 3: 0 - 5
 *		Group 4: 0 - 1
 */
struct dwc_pcie_event_counter {
	const char *name;
	u32 group_no;
	u32 event_no;
};

static const struct dwc_pcie_event_counter event_list[] = {
	{"ebuf_overflow", 0x0, 0x0},
	{"ebuf_underrun", 0x0, 0x1},
	{"decode_err", 0x0, 0x2},
	{"running_disparity_err", 0x0, 0x3},
	{"skp_os_parity_err", 0x0, 0x4},
	{"sync_header_err", 0x0, 0x5},
	{"rx_valid_deassertion", 0x0, 0x6},
	{"ctl_skp_os_parity_err", 0x0, 0x7},
	{"retimer_parity_err_1st", 0x0, 0x8},
	{"retimer_parity_err_2nd", 0x0, 0x9},
	{"margin_crc_parity_err", 0x0, 0xA},
	{"detect_ei_infer", 0x1, 0x5},
	{"receiver_err", 0x1, 0x6},
	{"rx_recovery_req", 0x1, 0x7},
	{"n_fts_timeout", 0x1, 0x8},
	{"framing_err", 0x1, 0x9},
	{"deskew_err", 0x1, 0xa},
	{"framing_err_in_l0", 0x1, 0xc},
	{"deskew_uncompleted_err", 0x1, 0xd},
	{"bad_tlp", 0x2, 0x0},
	{"lcrc_err", 0x2, 0x1},
	{"bad_dllp", 0x2, 0x2},
	{"replay_num_rollover", 0x2, 0x3},
	{"replay_timeout", 0x2, 0x4},
	{"rx_nak_dllp", 0x2, 0x5},
	{"tx_nak_dllp", 0x2, 0x6},
	{"retry_tlp", 0x2, 0x7},
	{"fc_timeout", 0x3, 0x0},
	{"poisoned_tlp", 0x3, 0x1},
	{"ecrc_error", 0x3, 0x2},
	{"unsupported_request", 0x3, 0x3},
	{"completer_abort", 0x3, 0x4},
	{"completion_timeout", 0x3, 0x5},
	{"ebuf_skp_add", 0x4, 0x0},
	{"ebuf_skp_del", 0x4, 0x1},
};

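/*
 * Usage sketch for the rasdes_debug files (assuming debugfs is mounted at
 * /sys/kernel/debug; the directory name comes from dwc_pcie_debugfs_init()):
 *   echo 1 > /sys/kernel/debug/dwc_pcie_<dev>/rasdes_debug/lane_detect
 *   cat /sys/kernel/debug/dwc_pcie_<dev>/rasdes_debug/lane_detect
 * A write selects the lane (lane_detect and rx_valid share LANE_SELECT);
 * a read reports the PIPE detect/rxvalid status of the selected lane.
 */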
static ssize_t lane_detect_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val = FIELD_GET(PIPE_DETECT_LANE, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t lane_detect_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 lane, val;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &lane);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val &= ~(LANE_SELECT);
	val |= FIELD_PREP(LANE_SELECT, lane);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);

	return count;
}

static ssize_t rx_valid_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dw_pcie *pci = file->private_data;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
	val = FIELD_GET(PIPE_RXVALID, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t rx_valid_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	return lane_detect_write(file, buf, count, ppos);
}

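/*
 * Write format per error group, e.g. (debugfs mount point assumed):
 *   echo "1" > .../rasdes_err_inj/tx_lcrc             count only (most groups)
 *   echo "1 -2" > .../rasdes_err_inj/tlp_err_seq      group 1: count, val_diff
 *   echo "1 8 0" > .../rasdes_err_inj/posted_tlp_hdr  group 4: count, val_diff,
 *						       VC number
 * val_diff must be within [-4095, 4095].
 */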
static ssize_t err_inj_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, counter, vc_num, err_group, type_mask;
	int val_diff = 0;
	char *kern_buf;
	int ret;

	err_group = err_inj_list[pdata->idx].err_inj_group;
	type_mask = err_inj_type_mask[err_group];

	kern_buf = memdup_user_nul(buf, count);
	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);

	if (err_group == 4) {
		ret = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
		if ((ret != 3) || (val_diff < -4095 || val_diff > 4095)) {
			kfree(kern_buf);
			return -EINVAL;
		}
	} else if (err_group == 1) {
		ret = sscanf(kern_buf, "%u %d", &counter, &val_diff);
		if ((ret != 2) || (val_diff < -4095 || val_diff > 4095)) {
			kfree(kern_buf);
			return -EINVAL;
		}
	} else {
		ret = kstrtou32(kern_buf, 0, &counter);
		if (ret) {
			kfree(kern_buf);
			return ret;
		}
	}

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
	val &= ~(type_mask | EINJ_COUNT);
	val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
	val |= FIELD_PREP(EINJ_COUNT, counter);

	if (err_group == 1 || err_group == 4) {
		val &= ~(EINJ_VAL_DIFF);
		val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
	}
	if (err_group == 4) {
		val &= ~(EINJ_VC_NUM);
		val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
	}

	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));

	kfree(kern_buf);
	return count;
}

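/*
 * The RAS DES event counters are shadow registers: the control register
 * selects which group/event the data register exposes, so every accessor
 * programs the selection first while holding reg_event_lock.
 */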
static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
			     struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	val &= ~EVENT_COUNTER_ENABLE;
	val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
	val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
	val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
}

static ssize_t counter_enable_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	val = FIELD_GET(EVENT_COUNTER_STATUS, val);
	if (val)
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
	else
		pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t counter_enable_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, enable;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &enable);
	if (ret)
		return ret;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	if (enable)
		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
	else
		val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);

	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);

	/*
	 * When enabling the counter, read the status back to verify that it
	 * is actually enabled. If it is not, return an error so users know
	 * that the counter is not supported on this platform.
	 */
	if (enable) {
		val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
					RAS_DES_EVENT_COUNTER_CTRL_REG);
		if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
			mutex_unlock(&rinfo->reg_event_lock);
			return -EOPNOTSUPP;
		}
	}

	mutex_unlock(&rinfo->reg_event_lock);

	return count;
}

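/*
 * lane_select is created only for the per-lane event groups (0 and 4, see
 * dwc_pcie_rasdes_debugfs_init()); it picks which lane's counter the
 * counter_value file reports.
 */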
static ssize_t counter_lane_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %u\n", val);

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

static ssize_t counter_lane_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	u32 val, lane;
	int ret;

	ret = kstrtou32_from_user(buf, count, 0, &lane);
	if (ret)
		return ret;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
	val &= ~(EVENT_COUNTER_LANE_SELECT);
	val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
	dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
	mutex_unlock(&rinfo->reg_event_lock);

	return count;
}

static ssize_t counter_value_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct dwc_pcie_rasdes_priv *pdata = file->private_data;
	struct dw_pcie *pci = pdata->pci;
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
	char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
	ssize_t pos;
	u32 val;

	mutex_lock(&rinfo->reg_event_lock);
	set_event_number(pdata, pci, rinfo);
	val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
	mutex_unlock(&rinfo->reg_event_lock);
	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %u\n", val);

	return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
}

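/*
 * Example: enable one event counter and read it back (paths assumed):
 *   echo 1 > .../rasdes_event_counter/lcrc_err/counter_enable
 *   cat .../rasdes_event_counter/lcrc_err/counter_value
 * An enable that does not stick fails with -EOPNOTSUPP, see
 * counter_enable_write().
 */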
static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
{
	const char *str;

	switch (ltssm) {
#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
	default:
		str = "DW_PCIE_LTSSM_UNKNOWN";
		break;
	}

	return str + strlen("DW_PCIE_LTSSM_");
}

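/*
 * ltssm_status reports the state name with the "DW_PCIE_LTSSM_" prefix
 * stripped, followed by the raw LTSSM value in hex, e.g. DW_PCIE_LTSSM_L0
 * is shown as "L0".
 */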
static int ltssm_status_show(struct seq_file *s, void *v)
{
	struct dw_pcie *pci = s->private;
	enum dw_pcie_ltssm val;

	val = dw_pcie_get_ltssm(pci);
	seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);

	return 0;
}

static int ltssm_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, ltssm_status_show, inode->i_private);
}

#define dwc_debugfs_create(name)			\
debugfs_create_file(#name, 0644, rasdes_debug, pci,	\
			&dbg_ ## name ## _fops)

#define DWC_DEBUGFS_FOPS(name)					\
static const struct file_operations dbg_ ## name ## _fops = {	\
	.open = simple_open,				\
	.read = name ## _read,				\
	.write = name ## _write				\
}

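/*
 * DWC_DEBUGFS_FOPS(lane_detect) below defines dbg_lane_detect_fops bound to
 * lane_detect_read()/lane_detect_write(); dwc_debugfs_create(lane_detect)
 * then creates the "lane_detect" file using those fops.
 */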
DWC_DEBUGFS_FOPS(lane_detect);
DWC_DEBUGFS_FOPS(rx_valid);

static const struct file_operations dwc_pcie_err_inj_ops = {
	.open = simple_open,
	.write = err_inj_write,
};

static const struct file_operations dwc_pcie_counter_enable_ops = {
	.open = simple_open,
	.read = counter_enable_read,
	.write = counter_enable_write,
};

static const struct file_operations dwc_pcie_counter_lane_ops = {
	.open = simple_open,
	.read = counter_lane_read,
	.write = counter_lane_write,
};

static const struct file_operations dwc_pcie_counter_value_ops = {
	.open = simple_open,
	.read = counter_value_read,
};

static const struct file_operations dwc_pcie_ltssm_status_ops = {
	.open = ltssm_status_open,
	.read = seq_read,
};

static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
{
	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;

	/* Nothing to tear down if the RAS DES capability was not found. */
	if (!rinfo)
		return;

	mutex_destroy(&rinfo->reg_event_lock);
}

static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	struct dentry *rasdes_debug, *rasdes_err_inj;
	struct dentry *rasdes_event_counter, *rasdes_events;
	struct dwc_pcie_rasdes_info *rasdes_info;
	struct dwc_pcie_rasdes_priv *priv_tmp;
	struct device *dev = pci->dev;
	int ras_cap, i, ret;

	/*
	 * Not every SoC has the RAS DES capability, so treat a failed lookup
	 * as non-fatal and return 0 rather than breaking such platforms.
	 */
	ras_cap = dw_pcie_find_rasdes_capability(pci);
	if (!ras_cap) {
		dev_dbg(dev, "no RAS DES capability available\n");
		return 0;
	}

	rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
	if (!rasdes_info)
		return -ENOMEM;

	/* Create subdirectories for Debug, Error Injection, Statistics. */
	rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
	rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
	rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);

	mutex_init(&rasdes_info->reg_event_lock);
	rasdes_info->ras_cap_offset = ras_cap;
	pci->debugfs->rasdes_info = rasdes_info;

	/* Create debugfs files for Debug subdirectory. */
	dwc_debugfs_create(lane_detect);
	dwc_debugfs_create(rx_valid);

	/* Create debugfs files for Error Injection subdirectory. */
	for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
				    &dwc_pcie_err_inj_ops);
	}

	/* Create debugfs files for Statistical Counter subdirectory. */
	for (i = 0; i < ARRAY_SIZE(event_list); i++) {
		priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
		if (!priv_tmp) {
			ret = -ENOMEM;
			goto err_deinit;
		}

		priv_tmp->idx = i;
		priv_tmp->pci = pci;
		rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
		if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
			debugfs_create_file("lane_select", 0644, rasdes_events,
					    priv_tmp, &dwc_pcie_counter_lane_ops);
		}
		debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_value_ops);
		debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
				    &dwc_pcie_counter_enable_ops);
	}

	return 0;

err_deinit:
	dwc_pcie_rasdes_debugfs_deinit(pci);
	return ret;
}

static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
{
	debugfs_create_file("ltssm_status", 0444, dir, pci,
			    &dwc_pcie_ltssm_status_ops);
}

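/*
 * PTM (Precision Time Measurement) debugfs support. The callbacks below
 * back the files created by pcie_ptm_create_debugfs(); the *_visible()
 * hooks decide per file whether it applies to the current RC/EP mode.
 */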
static int dw_pcie_ptm_check_capability(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);

	return pci->ptm_vsec_offset;
}

static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val |= PTM_REQ_AUTO_UPDATE_ENABLED;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	} else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
		val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
		val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
		val |= PTM_REQ_START_UPDATE;
		dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
	if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
		*mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
	else
		/*
		 * PTM_REQ_START_UPDATE is a self-clearing register bit, so if
		 * PTM_REQ_AUTO_UPDATE_ENABLED is not set, manual update is in
		 * use.
		 */
		*mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;

	return 0;
}

static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
	if (valid)
		val |= PTM_RES_CCONTEXT_VALID;
	else
		val &= ~PTM_RES_CCONTEXT_VALID;
	dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);

	return 0;
}

static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
{
	struct dw_pcie *pci = drvdata;
	u32 val;

	val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
	*valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);

	return 0;
}

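/*
 * The 64-bit PTM timestamps below are split across MSB/LSB registers;
 * re-read the MSB until it is stable so a carry between the two 32-bit
 * reads cannot produce a torn value.
 */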
static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
{
	struct dw_pcie *pci = drvdata;
	u32 msb, lsb;

	do {
		msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
		lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
	} while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));

	*clock = ((u64) msb) << 32 | lsb;

	return 0;
}

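/*
 * Visibility matrix: context_update, master_clock, t1 and t4 apply to EP
 * mode; context_valid, t2 and t3 apply to RC mode; the local clock is
 * always visible.
 */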
static bool dw_pcie_ptm_context_update_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
{
	/* PTM local clock is always visible */
	return true;
}

static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_t1_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

static bool dw_pcie_ptm_t2_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t3_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_RC_TYPE;
}

static bool dw_pcie_ptm_t4_visible(void *drvdata)
{
	struct dw_pcie *pci = drvdata;

	return pci->mode == DW_PCIE_EP_TYPE;
}

const struct pcie_ptm_ops dw_pcie_ptm_ops = {
	.check_capability = dw_pcie_ptm_check_capability,
	.context_update_write = dw_pcie_ptm_context_update_write,
	.context_update_read = dw_pcie_ptm_context_update_read,
	.context_valid_write = dw_pcie_ptm_context_valid_write,
	.context_valid_read = dw_pcie_ptm_context_valid_read,
	.local_clock_read = dw_pcie_ptm_local_clock_read,
	.master_clock_read = dw_pcie_ptm_master_clock_read,
	.t1_read = dw_pcie_ptm_t1_read,
	.t2_read = dw_pcie_ptm_t2_read,
	.t3_read = dw_pcie_ptm_t3_read,
	.t4_read = dw_pcie_ptm_t4_read,
	.context_update_visible = dw_pcie_ptm_context_update_visible,
	.context_valid_visible = dw_pcie_ptm_context_valid_visible,
	.local_clock_visible = dw_pcie_ptm_local_clock_visible,
	.master_clock_visible = dw_pcie_ptm_master_clock_visible,
	.t1_visible = dw_pcie_ptm_t1_visible,
	.t2_visible = dw_pcie_ptm_t2_visible,
	.t3_visible = dw_pcie_ptm_t3_visible,
	.t4_visible = dw_pcie_ptm_t4_visible,
};

void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
	if (!pci->debugfs)
		return;

	pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
	dwc_pcie_rasdes_debugfs_deinit(pci);
	debugfs_remove_recursive(pci->debugfs->debug_dir);
}

void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
{
	char dirname[DWC_DEBUGFS_BUF_MAX];
	struct device *dev = pci->dev;
	struct debugfs_info *debugfs;
	struct dentry *dir;
	int err;

	debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
	if (!debugfs)
		return;

	/* Create main directory for each platform driver. */
	snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
	dir = debugfs_create_dir(dirname, NULL);

	debugfs->debug_dir = dir;
	pci->debugfs = debugfs;
	err = dwc_pcie_rasdes_debugfs_init(pci, dir);
	if (err)
		dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
			err);

	dwc_pcie_ltssm_debugfs_init(pci, dir);

	pci->mode = mode;
	pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
						   &dw_pcie_ptm_ops);
}