xref: /linux/drivers/pci/pcie/aer.c (revision 791d3ef2e11100449837dc0b6fe884e60ca3a484)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implement the AER root port service driver. The driver registers an IRQ
4  * handler. When a root port triggers an AER interrupt, the IRQ handler
5  * collects root port status and schedules work.
6  *
7  * Copyright (C) 2006 Intel Corp.
8  *	Tom Long Nguyen (tom.l.nguyen@intel.com)
9  *	Zhang Yanmin (yanmin.zhang@intel.com)
10  *
11  * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
12  *    Andrew Patterson <andrew.patterson@hp.com>
13  */
14 
15 #include <linux/cper.h>
16 #include <linux/pci.h>
17 #include <linux/pci-acpi.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/errno.h>
21 #include <linux/pm.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <linux/kfifo.h>
26 #include <linux/slab.h>
27 #include <acpi/apei.h>
28 #include <ras/ras_event.h>
29 
30 #include "../pci.h"
31 #include "portdrv.h"
32 
33 #define AER_ERROR_SOURCES_MAX		100
34 #define AER_MAX_MULTI_ERR_DEVICES	5	/* Not likely to have more */
35 
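/*
 * Per-error bookkeeping filled in while one reported error is processed:
 * the Requester ID from the Root Port, the decoded severity, and the
 * devices found to have matching (unmasked) status bits set.
 */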
36 struct aer_err_info {
37 	struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
38 	int error_dev_num;
39 
40 	unsigned int id:16;
41 
42 	unsigned int severity:2;	/* 0:NONFATAL | 1:FATAL | 2:COR */
43 	unsigned int __pad1:5;
44 	unsigned int multi_error_valid:1;
45 
46 	unsigned int first_error:5;
47 	unsigned int __pad2:2;
48 	unsigned int tlp_header_valid:1;
49 
50 	unsigned int status;		/* COR/UNCOR Error Status */
51 	unsigned int mask;		/* COR/UNCOR Error Mask */
52 	struct aer_header_log_regs tlp;	/* TLP Header */
53 };
54 
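/*
 * One snapshot of the Root Port's Root Error Status and Error Source
 * Identification registers, captured by the IRQ handler.
 */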
55 struct aer_err_source {
56 	unsigned int status;
57 	unsigned int id;
58 };
59 
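/*
 * Per-Root-Port state for the AER service: a small ring of error sources
 * filled by aer_irq() and drained by the aer_isr() work item.
 */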
60 struct aer_rpc {
61 	struct pci_dev *rpd;		/* Root Port device */
62 	struct work_struct dpc_handler;
63 	struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
64 	struct aer_err_info e_info;
65 	unsigned short prod_idx;	/* Error Producer Index */
66 	unsigned short cons_idx;	/* Error Consumer Index */
67 	int isr;
68 	spinlock_t e_lock;		/*
69 					 * Lock access to Error Status/ID Regs
70 					 * and error producer/consumer index
71 					 */
72 	struct mutex rpc_mutex;		/*
73 					 * Ensure that only one thread
74 					 * performs recovery on the same
75 					 * root port hierarchy at a time
76 					 */
77 };
78 
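/* Uncorrectable errors for which the TLP Header Log is captured */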
79 #define AER_LOG_TLP_MASKS		(PCI_ERR_UNC_POISON_TLP|	\
80 					PCI_ERR_UNC_ECRC|		\
81 					PCI_ERR_UNC_UNSUP|		\
82 					PCI_ERR_UNC_COMP_ABORT|		\
83 					PCI_ERR_UNC_UNX_COMP|		\
84 					PCI_ERR_UNC_MALF_TLP)
85 
86 #define SYSTEM_ERROR_INTR_ON_MESG_MASK	(PCI_EXP_RTCTL_SECEE|	\
87 					PCI_EXP_RTCTL_SENFEE|	\
88 					PCI_EXP_RTCTL_SEFEE)
89 #define ROOT_PORT_INTR_ON_MESG_MASK	(PCI_ERR_ROOT_CMD_COR_EN|	\
90 					PCI_ERR_ROOT_CMD_NONFATAL_EN|	\
91 					PCI_ERR_ROOT_CMD_FATAL_EN)
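/*
 * The Error Source Identification register holds the correctable error
 * source ID in its low 16 bits and the uncorrectable (ERR_FATAL/NONFATAL)
 * source ID in its high 16 bits.
 */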
92 #define ERR_COR_ID(d)			(d & 0xffff)
93 #define ERR_UNCOR_ID(d)			(d >> 16)
94 
95 static int pcie_aer_disable;
96 
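/* Called early (e.g. for the "pci=noaer" kernel parameter) to disable AER */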
97 void pci_no_aer(void)
98 {
99 	pcie_aer_disable = 1;
100 }
101 
102 bool pci_aer_available(void)
103 {
104 	return !pcie_aer_disable && pci_msi_enabled();
105 }
106 
107 #ifdef CONFIG_PCIE_ECRC
108 
109 #define ECRC_POLICY_DEFAULT 0		/* ECRC set by BIOS */
110 #define ECRC_POLICY_OFF     1		/* ECRC off for performance */
111 #define ECRC_POLICY_ON      2		/* ECRC on for data integrity */
112 
113 static int ecrc_policy = ECRC_POLICY_DEFAULT;
114 
115 static const char *ecrc_policy_str[] = {
116 	[ECRC_POLICY_DEFAULT] = "bios",
117 	[ECRC_POLICY_OFF] = "off",
118 	[ECRC_POLICY_ON] = "on"
119 };
120 
121 /**
122  * enable_ecrc_checking - enable PCIe ECRC checking for a device
123  * @dev: the PCI device
124  *
125  * Returns 0 on success, or negative on failure.
126  */
127 static int enable_ecrc_checking(struct pci_dev *dev)
128 {
129 	int pos;
130 	u32 reg32;
131 
132 	if (!pci_is_pcie(dev))
133 		return -ENODEV;
134 
135 	pos = dev->aer_cap;
136 	if (!pos)
137 		return -ENODEV;
138 
139 	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
140 	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
141 		reg32 |= PCI_ERR_CAP_ECRC_GENE;
142 	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
143 		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
144 	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
145 
146 	return 0;
147 }
148 
149 /**
150  * disable_ecrc_checking - disable PCIe ECRC checking for a device
151  * @dev: the PCI device
152  *
153  * Returns 0 on success, or negative on failure.
154  */
155 static int disable_ecrc_checking(struct pci_dev *dev)
156 {
157 	int pos;
158 	u32 reg32;
159 
160 	if (!pci_is_pcie(dev))
161 		return -ENODEV;
162 
163 	pos = dev->aer_cap;
164 	if (!pos)
165 		return -ENODEV;
166 
167 	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
168 	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
169 	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
170 
171 	return 0;
172 }
173 
174 /**
175  * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
176  * @dev: the PCI device
177  */
178 void pcie_set_ecrc_checking(struct pci_dev *dev)
179 {
180 	switch (ecrc_policy) {
181 	case ECRC_POLICY_DEFAULT:
182 		return;
183 	case ECRC_POLICY_OFF:
184 		disable_ecrc_checking(dev);
185 		break;
186 	case ECRC_POLICY_ON:
187 		enable_ecrc_checking(dev);
188 		break;
189 	default:
190 		return;
191 	}
192 }
193 
194 /**
195  * pcie_ecrc_get_policy - parse kernel command-line ecrc option
196  */
197 void pcie_ecrc_get_policy(char *str)
198 {
199 	int i;
200 
201 	for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
202 		if (!strncmp(str, ecrc_policy_str[i],
203 			     strlen(ecrc_policy_str[i])))
204 			break;
205 	if (i >= ARRAY_SIZE(ecrc_policy_str))
206 		return;
207 
208 	ecrc_policy = i;
209 }
210 #endif	/* CONFIG_PCIE_ECRC */
211 
212 #ifdef CONFIG_ACPI_APEI
213 static inline int hest_match_pci(struct acpi_hest_aer_common *p,
214 				 struct pci_dev *pci)
215 {
216 	return   ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
217 		 ACPI_HEST_BUS(p->bus)     == pci->bus->number &&
218 		 p->device                 == PCI_SLOT(pci->devfn) &&
219 		 p->function               == PCI_FUNC(pci->devfn);
220 }
221 
222 static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
223 				struct pci_dev *dev)
224 {
225 	u16 hest_type = hest_hdr->type;
226 	u8 pcie_type = pci_pcie_type(dev);
227 
228 	if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
229 		pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
230 	    (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
231 		pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
232 	    (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
233 		(dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
234 		return true;
235 	return false;
236 }
237 
238 struct aer_hest_parse_info {
239 	struct pci_dev *pci_dev;
240 	int firmware_first;
241 };
242 
243 static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
244 {
245 	if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
246 	    hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
247 	    hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
248 		return 1;
249 	return 0;
250 }
251 
252 static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
253 {
254 	struct aer_hest_parse_info *info = data;
255 	struct acpi_hest_aer_common *p;
256 	int ff;
257 
258 	if (!hest_source_is_pcie_aer(hest_hdr))
259 		return 0;
260 
261 	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
262 	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
263 
264 	/*
265 	 * If no specific device is supplied, determine whether
266 	 * FIRMWARE_FIRST is set for *any* PCIe device.
267 	 */
268 	if (!info->pci_dev) {
269 		info->firmware_first |= ff;
270 		return 0;
271 	}
272 
273 	/* Otherwise, check the specific device */
274 	if (p->flags & ACPI_HEST_GLOBAL) {
275 		if (hest_match_type(hest_hdr, info->pci_dev))
276 			info->firmware_first = ff;
277 	} else
278 		if (hest_match_pci(p, info->pci_dev))
279 			info->firmware_first = ff;
280 
281 	return 0;
282 }
283 
284 static void aer_set_firmware_first(struct pci_dev *pci_dev)
285 {
286 	int rc;
287 	struct aer_hest_parse_info info = {
288 		.pci_dev	= pci_dev,
289 		.firmware_first	= 0,
290 	};
291 
292 	rc = apei_hest_parse(aer_hest_parse, &info);
293 
294 	if (rc)
295 		pci_dev->__aer_firmware_first = 0;
296 	else
297 		pci_dev->__aer_firmware_first = info.firmware_first;
298 	pci_dev->__aer_firmware_first_valid = 1;
299 }
300 
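/*
 * Return non-zero if ACPI HEST marks this device "firmware first", i.e.
 * platform firmware owns its AER registers; the result is cached in the
 * pci_dev so HEST is parsed at most once per device.
 */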
301 int pcie_aer_get_firmware_first(struct pci_dev *dev)
302 {
303 	if (!pci_is_pcie(dev))
304 		return 0;
305 
306 	if (!dev->__aer_firmware_first_valid)
307 		aer_set_firmware_first(dev);
308 	return dev->__aer_firmware_first;
309 }
312 
313 static bool aer_firmware_first;
314 
315 /**
316  * aer_acpi_firmware_first - Check if APEI should control AER.
317  */
318 bool aer_acpi_firmware_first(void)
319 {
320 	static bool parsed = false;
321 	struct aer_hest_parse_info info = {
322 		.pci_dev	= NULL,	/* Check all PCIe devices */
323 		.firmware_first	= 0,
324 	};
325 
326 	if (!parsed) {
327 		apei_hest_parse(aer_hest_parse, &info);
328 		aer_firmware_first = info.firmware_first;
329 		parsed = true;
330 	}
331 	return aer_firmware_first;
332 }
333 #endif
334 
335 #define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
336 				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
337 
338 int pci_enable_pcie_error_reporting(struct pci_dev *dev)
339 {
340 	if (pcie_aer_get_firmware_first(dev))
341 		return -EIO;
342 
343 	if (!dev->aer_cap)
344 		return -EIO;
345 
346 	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
347 }
348 EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
349 
350 int pci_disable_pcie_error_reporting(struct pci_dev *dev)
351 {
352 	if (pcie_aer_get_firmware_first(dev))
353 		return -EIO;
354 
355 	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
356 					  PCI_EXP_AER_FLAGS);
357 }
358 EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
359 
360 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
361 {
362 	int pos;
363 	u32 status;
364 
365 	pos = dev->aer_cap;
366 	if (!pos)
367 		return -EIO;
368 
369 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
370 	if (status)
371 		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
372 
373 	return 0;
374 }
375 EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
376 
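/*
 * Clear the AER status registers (Root Error Status on Root Ports plus
 * the Correctable and Uncorrectable Error Status) by writing back the
 * bits that are set; these status bits are RW1C.
 */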
377 int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
378 {
379 	int pos;
380 	u32 status;
381 	int port_type;
382 
383 	if (!pci_is_pcie(dev))
384 		return -ENODEV;
385 
386 	pos = dev->aer_cap;
387 	if (!pos)
388 		return -EIO;
389 
390 	port_type = pci_pcie_type(dev);
391 	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
392 		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
393 		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
394 	}
395 
396 	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
397 	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
398 
399 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
400 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
401 
402 	return 0;
403 }
404 
405 int pci_aer_init(struct pci_dev *dev)
406 {
407 	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
408 	return pci_cleanup_aer_error_status_regs(dev);
409 }
410 
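/*
 * Helpers that decode, from the error type (correctable vs. uncorrectable)
 * and the status bits, which agent (receiver, requester, completer,
 * transmitter) and which layer (physical, data link, transaction) an
 * error is attributed to.
 */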
411 #define AER_AGENT_RECEIVER		0
412 #define AER_AGENT_REQUESTER		1
413 #define AER_AGENT_COMPLETER		2
414 #define AER_AGENT_TRANSMITTER		3
415 
416 #define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
417 	0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
418 #define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
419 	0 : PCI_ERR_UNC_COMP_ABORT)
420 #define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
421 	(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
422 
423 #define AER_GET_AGENT(t, e)						\
424 	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
425 	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
426 	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
427 	AER_AGENT_RECEIVER)
428 
429 #define AER_PHYSICAL_LAYER_ERROR	0
430 #define AER_DATA_LINK_LAYER_ERROR	1
431 #define AER_TRANSACTION_LAYER_ERROR	2
432 
433 #define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
434 	PCI_ERR_COR_RCVR : 0)
435 #define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
436 	(PCI_ERR_COR_BAD_TLP|						\
437 	PCI_ERR_COR_BAD_DLLP|						\
438 	PCI_ERR_COR_REP_ROLL|						\
439 	PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
440 
441 #define AER_GET_LAYER_ERROR(t, e)					\
442 	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
443 	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
444 	AER_TRANSACTION_LAYER_ERROR)
445 
446 /*
447  * AER error strings
448  */
449 static const char *aer_error_severity_string[] = {
450 	"Uncorrected (Non-Fatal)",
451 	"Uncorrected (Fatal)",
452 	"Corrected"
453 };
454 
455 static const char *aer_error_layer[] = {
456 	"Physical Layer",
457 	"Data Link Layer",
458 	"Transaction Layer"
459 };
460 
461 static const char *aer_correctable_error_string[] = {
462 	"Receiver Error",		/* Bit Position 0	*/
463 	NULL,
464 	NULL,
465 	NULL,
466 	NULL,
467 	NULL,
468 	"Bad TLP",			/* Bit Position 6	*/
469 	"Bad DLLP",			/* Bit Position 7	*/
470 	"RELAY_NUM Rollover",		/* Bit Position 8	*/
471 	NULL,
472 	NULL,
473 	NULL,
474 	"Replay Timer Timeout",		/* Bit Position 12	*/
475 	"Advisory Non-Fatal",		/* Bit Position 13	*/
476 	"Corrected Internal Error",	/* Bit Position 14	*/
477 	"Header Log Overflow",		/* Bit Position 15	*/
478 };
479 
480 static const char *aer_uncorrectable_error_string[] = {
481 	"Undefined",			/* Bit Position 0	*/
482 	NULL,
483 	NULL,
484 	NULL,
485 	"Data Link Protocol",		/* Bit Position 4	*/
486 	"Surprise Down Error",		/* Bit Position 5	*/
487 	NULL,
488 	NULL,
489 	NULL,
490 	NULL,
491 	NULL,
492 	NULL,
493 	"Poisoned TLP",			/* Bit Position 12	*/
494 	"Flow Control Protocol",	/* Bit Position 13	*/
495 	"Completion Timeout",		/* Bit Position 14	*/
496 	"Completer Abort",		/* Bit Position 15	*/
497 	"Unexpected Completion",	/* Bit Position 16	*/
498 	"Receiver Overflow",		/* Bit Position 17	*/
499 	"Malformed TLP",		/* Bit Position 18	*/
500 	"ECRC",				/* Bit Position 19	*/
501 	"Unsupported Request",		/* Bit Position 20	*/
502 	"ACS Violation",		/* Bit Position 21	*/
503 	"Uncorrectable Internal Error",	/* Bit Position 22	*/
504 	"MC Blocked TLP",		/* Bit Position 23	*/
505 	"AtomicOp Egress Blocked",	/* Bit Position 24	*/
506 	"TLP Prefix Blocked Error",	/* Bit Position 25	*/
507 };
508 
509 static const char *aer_agent_string[] = {
510 	"Receiver ID",
511 	"Requester ID",
512 	"Completer ID",
513 	"Transmitter ID"
514 };
515 
516 static void __print_tlp_header(struct pci_dev *dev,
517 			       struct aer_header_log_regs *t)
518 {
519 	pci_err(dev, "  TLP Header: %08x %08x %08x %08x\n",
520 		t->dw0, t->dw1, t->dw2, t->dw3);
521 }
522 
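/*
 * Print one line per unmasked status bit, using the string tables above
 * where a name is known and marking the bit indicated by the First Error
 * Pointer.
 */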
523 static void __aer_print_error(struct pci_dev *dev,
524 			      struct aer_err_info *info)
525 {
526 	int i, status;
527 	const char *errmsg = NULL;
528 	status = (info->status & ~info->mask);
529 
530 	for (i = 0; i < 32; i++) {
531 		if (!(status & (1 << i)))
532 			continue;
533 
534 		if (info->severity == AER_CORRECTABLE)
535 			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
536 				aer_correctable_error_string[i] : NULL;
537 		else
538 			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
539 				aer_uncorrectable_error_string[i] : NULL;
540 
541 		if (errmsg)
542 			pci_err(dev, "   [%2d] %-22s%s\n", i, errmsg,
543 				info->first_error == i ? " (First)" : "");
544 		else
545 			pci_err(dev, "   [%2d] Unknown Error Bit%s\n",
546 				i, info->first_error == i ? " (First)" : "");
547 	}
548 }
549 
550 static void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
551 {
552 	int layer, agent;
553 	int id = ((dev->bus->number << 8) | dev->devfn);
554 
555 	if (!info->status) {
556 		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
557 			aer_error_severity_string[info->severity]);
558 		goto out;
559 	}
560 
561 	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
562 	agent = AER_GET_AGENT(info->severity, info->status);
563 
564 	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
565 		aer_error_severity_string[info->severity],
566 		aer_error_layer[layer], aer_agent_string[agent]);
567 
568 	pci_err(dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
569 		dev->vendor, dev->device,
570 		info->status, info->mask);
571 
572 	__aer_print_error(dev, info);
573 
574 	if (info->tlp_header_valid)
575 		__print_tlp_header(dev, &info->tlp);
576 
577 out:
578 	if (info->id && info->error_dev_num > 1 && info->id == id)
579 		pci_err(dev, "  Error of this Agent is reported first\n");
580 
581 	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
582 			info->severity, info->tlp_header_valid, &info->tlp);
583 }
584 
585 static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
586 {
587 	u8 bus = info->id >> 8;
588 	u8 devfn = info->id & 0xff;
589 
590 	pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
591 		info->multi_error_valid ? "Multiple " : "",
592 		aer_error_severity_string[info->severity],
593 		pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
594 }
595 
596 #ifdef CONFIG_ACPI_APEI_PCIEAER
597 int cper_severity_to_aer(int cper_severity)
598 {
599 	switch (cper_severity) {
600 	case CPER_SEV_RECOVERABLE:
601 		return AER_NONFATAL;
602 	case CPER_SEV_FATAL:
603 		return AER_FATAL;
604 	default:
605 		return AER_CORRECTABLE;
606 	}
607 }
608 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
609 
610 void cper_print_aer(struct pci_dev *dev, int aer_severity,
611 		    struct aer_capability_regs *aer)
612 {
613 	int layer, agent, tlp_header_valid = 0;
614 	u32 status, mask;
615 	struct aer_err_info info;
616 
617 	if (aer_severity == AER_CORRECTABLE) {
618 		status = aer->cor_status;
619 		mask = aer->cor_mask;
620 	} else {
621 		status = aer->uncor_status;
622 		mask = aer->uncor_mask;
623 		tlp_header_valid = status & AER_LOG_TLP_MASKS;
624 	}
625 
626 	layer = AER_GET_LAYER_ERROR(aer_severity, status);
627 	agent = AER_GET_AGENT(aer_severity, status);
628 
629 	memset(&info, 0, sizeof(info));
630 	info.severity = aer_severity;
631 	info.status = status;
632 	info.mask = mask;
633 	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
634 
635 	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
636 	__aer_print_error(dev, &info);
637 	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
638 		aer_error_layer[layer], aer_agent_string[agent]);
639 
640 	if (aer_severity != AER_CORRECTABLE)
641 		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
642 			aer->uncor_severity);
643 
644 	if (tlp_header_valid)
645 		__print_tlp_header(dev, &aer->header_log);
646 
647 	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
648 			aer_severity, tlp_header_valid, &aer->header_log);
649 }
650 #endif
651 
652 /**
653  * add_error_device - add a device to the list of devices to be handled
654  * @e_info: pointer to error info
655  * @dev: pointer to pci_dev to be added
656  */
657 static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
658 {
659 	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
660 		e_info->dev[e_info->error_dev_num] = dev;
661 		e_info->error_dev_num++;
662 		return 0;
663 	}
664 	return -ENOSPC;
665 }
666 
667 /**
668  * is_error_source - check whether the device is the source of the reported error
669  * @dev: pointer to pci_dev to be checked
670  * @e_info: pointer to reported error info
671  */
672 static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
673 {
674 	int pos;
675 	u32 status, mask;
676 	u16 reg16;
677 
678 	/*
679 	 * A bus ID of 0 might be an invalid ID
680 	 * reported by the root port.
681 	 */
682 	if ((PCI_BUS_NUM(e_info->id) != 0) &&
683 	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
684 		/* Device ID match? */
685 		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
686 			return true;
687 
688 		/* Stop here if only a single error was reported; the ID mismatch is decisive */
689 		if (!e_info->multi_error_valid)
690 			return false;
691 	}
692 
693 	/*
694 	 * When any of the following holds:
695 	 *      1) the bus ID is 0 (some ports lose the bus part of the
696 	 *              error source ID);
697 	 *      2) the bus flag PCI_BUS_FLAGS_NO_AERSID is set;
698 	 *      3) multiple errors were reported and the ID comparison failed;
699 	 * check the AER status registers to find a possible reporter.
700 	 */
701 	if (atomic_read(&dev->enable_cnt) == 0)
702 		return false;
703 
704 	/* Check if AER is enabled */
705 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
706 	if (!(reg16 & PCI_EXP_AER_FLAGS))
707 		return false;
708 
709 	pos = dev->aer_cap;
710 	if (!pos)
711 		return false;
712 
713 	/* Check if error is recorded */
714 	if (e_info->severity == AER_CORRECTABLE) {
715 		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
716 		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
717 	} else {
718 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
719 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
720 	}
721 	if (status & ~mask)
722 		return true;
723 
724 	return false;
725 }
726 
727 static int find_device_iter(struct pci_dev *dev, void *data)
728 {
729 	struct aer_err_info *e_info = (struct aer_err_info *)data;
730 
731 	if (is_error_source(dev, e_info)) {
732 		/* List this device */
733 		if (add_error_device(e_info, dev)) {
734 			/* We cannot handle more... Stop iteration */
735 			/* TODO: Should print error message here? */
736 			return 1;
737 		}
738 
739 		/* If there is only a single error, stop iteration */
740 		if (!e_info->multi_error_valid)
741 			return 1;
742 	}
743 	return 0;
744 }
745 
746 /**
747  * find_source_device - search through device hierarchy for source device
748  * @parent: pointer to Root Port pci_dev data structure
749  * @e_info: detailed error information, including the error source ID
750  *
751  * Return true if found.
752  *
753  * Invoked by the deferred error-handling work when an error is detected at
754  * the Root Port.  The caller must properly set the id, severity, and
755  * multi_error_valid fields of the struct aer_err_info pointed to by @e_info;
756  * this function fills in e_info->error_dev_num and e_info->dev[] accordingly.
757  */
758 static bool find_source_device(struct pci_dev *parent,
759 		struct aer_err_info *e_info)
760 {
761 	struct pci_dev *dev = parent;
762 	int result;
763 
764 	/* Must reset in this function */
765 	e_info->error_dev_num = 0;
766 
767 	/* Is Root Port an agent that sends error message? */
768 	result = find_device_iter(dev, e_info);
769 	if (result)
770 		return true;
771 
772 	pci_walk_bus(parent->subordinate, find_device_iter, e_info);
773 
774 	if (!e_info->error_dev_num) {
775 		pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
776 			   e_info->id);
777 		return false;
778 	}
779 	return true;
780 }
781 
782 /**
783  * handle_error_source - clear status or start recovery for one error source
784  * @dev: pointer to pci_dev data structure of error source device
785  * @info: comprehensive error information
786  *
787  * Invoked when an error is detected by the Root Port.
788  */
789 static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
790 {
791 	int pos;
792 
793 	if (info->severity == AER_CORRECTABLE) {
794 		/*
795 		 * Correctable error does not need software intervention.
796 		 * No need to go through error recovery process.
797 		 */
798 		pos = dev->aer_cap;
799 		if (pos)
800 			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
801 					info->status);
802 	} else if (info->severity == AER_NONFATAL)
803 		pcie_do_nonfatal_recovery(dev);
804 	else if (info->severity == AER_FATAL)
805 		pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
806 }
807 
808 #ifdef CONFIG_ACPI_APEI_PCIEAER
809 
810 #define AER_RECOVER_RING_ORDER		4
811 #define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)
812 
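/*
 * Errors reported through APEI (e.g. by the GHES code) are queued here by
 * aer_recover_queue() and drained in process context by
 * aer_recover_work_func(), which looks up the pci_dev and runs recovery.
 */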
813 struct aer_recover_entry {
814 	u8	bus;
815 	u8	devfn;
816 	u16	domain;
817 	int	severity;
818 	struct aer_capability_regs *regs;
819 };
820 
821 static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
822 		    AER_RECOVER_RING_SIZE);
823 
824 static void aer_recover_work_func(struct work_struct *work)
825 {
826 	struct aer_recover_entry entry;
827 	struct pci_dev *pdev;
828 
829 	while (kfifo_get(&aer_recover_ring, &entry)) {
830 		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
831 						   entry.devfn);
832 		if (!pdev) {
833 			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
834 			       entry.domain, entry.bus,
835 			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
836 			continue;
837 		}
838 		cper_print_aer(pdev, entry.severity, entry.regs);
839 		if (entry.severity == AER_NONFATAL)
840 			pcie_do_nonfatal_recovery(pdev);
841 		else if (entry.severity == AER_FATAL)
842 			pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
843 		pci_dev_put(pdev);
844 	}
845 }
846 
847 /*
848  * Mutual exclusion for writers of aer_recover_ring; the reader side needs
849  * no lock because there is only one reader and no lock is required between
850  * a single reader and a single writer.
851  */
852 static DEFINE_SPINLOCK(aer_recover_ring_lock);
853 static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
854 
855 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
856 		       int severity, struct aer_capability_regs *aer_regs)
857 {
858 	unsigned long flags;
859 	struct aer_recover_entry entry = {
860 		.bus		= bus,
861 		.devfn		= devfn,
862 		.domain		= domain,
863 		.severity	= severity,
864 		.regs		= aer_regs,
865 	};
866 
867 	spin_lock_irqsave(&aer_recover_ring_lock, flags);
868 	if (kfifo_put(&aer_recover_ring, entry))
869 		schedule_work(&aer_recover_work);
870 	else
871 		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
872 		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
873 	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
874 }
875 EXPORT_SYMBOL_GPL(aer_recover_queue);
876 #endif
877 
878 /**
879  * get_device_error_info - read error status from dev and store it in info
880  * @dev: pointer to the device expected to have an error record
881  * @info: pointer to structure to store the error record
882  *
883  * Return 1 if an error record was read, 0 otherwise.
884  *
885  * Note that @info is reused among all error devices. Clear fields properly.
886  */
887 static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
888 {
889 	int pos, temp;
890 
891 	/* Must reset in this function */
892 	info->status = 0;
893 	info->tlp_header_valid = 0;
894 
895 	pos = dev->aer_cap;
896 
897 	/* The device might not support AER */
898 	if (!pos)
899 		return 0;
900 
901 	if (info->severity == AER_CORRECTABLE) {
902 		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
903 			&info->status);
904 		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
905 			&info->mask);
906 		if (!(info->status & ~info->mask))
907 			return 0;
908 	} else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
909 		info->severity == AER_NONFATAL) {
910 
911 		/* Link is still healthy for IO reads */
912 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
913 			&info->status);
914 		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
915 			&info->mask);
916 		if (!(info->status & ~info->mask))
917 			return 0;
918 
919 		/* Get First Error Pointer */
920 		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
921 		info->first_error = PCI_ERR_CAP_FEP(temp);
922 
923 		if (info->status & AER_LOG_TLP_MASKS) {
924 			info->tlp_header_valid = 1;
925 			pci_read_config_dword(dev,
926 				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
927 			pci_read_config_dword(dev,
928 				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
929 			pci_read_config_dword(dev,
930 				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
931 			pci_read_config_dword(dev,
932 				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
933 		}
934 	}
935 
936 	return 1;
937 }
938 
939 static inline void aer_process_err_devices(struct aer_err_info *e_info)
940 {
941 	int i;
942 
943 	/* Report all errors before handling them, so records are not lost to resets etc. */
944 	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
945 		if (get_device_error_info(e_info->dev[i], e_info))
946 			aer_print_error(e_info->dev[i], e_info);
947 	}
948 	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
949 		if (get_device_error_info(e_info->dev[i], e_info))
950 			handle_error_source(e_info->dev[i], e_info);
951 	}
952 }
953 
954 /**
955  * aer_isr_one_error - consume an error detected by root port
956  * @rpc: pointer to the root port which holds an error
957  * @e_src: pointer to an error source
958  */
959 static void aer_isr_one_error(struct aer_rpc *rpc,
960 		struct aer_err_source *e_src)
961 {
962 	struct pci_dev *pdev = rpc->rpd;
963 	struct aer_err_info *e_info = &rpc->e_info;
964 
965 	/*
966 	 * Both a correctable error and an uncorrectable error may have been
967 	 * logged. Report the correctable error first.
968 	 */
969 	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
970 		e_info->id = ERR_COR_ID(e_src->id);
971 		e_info->severity = AER_CORRECTABLE;
972 
973 		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
974 			e_info->multi_error_valid = 1;
975 		else
976 			e_info->multi_error_valid = 0;
977 		aer_print_port_info(pdev, e_info);
978 
979 		if (find_source_device(pdev, e_info))
980 			aer_process_err_devices(e_info);
981 	}
982 
983 	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
984 		e_info->id = ERR_UNCOR_ID(e_src->id);
985 
986 		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
987 			e_info->severity = AER_FATAL;
988 		else
989 			e_info->severity = AER_NONFATAL;
990 
991 		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
992 			e_info->multi_error_valid = 1;
993 		else
994 			e_info->multi_error_valid = 0;
995 
996 		aer_print_port_info(pdev, e_info);
997 
998 		if (find_source_device(pdev, e_info))
999 			aer_process_err_devices(e_info);
1000 	}
1001 }
1002 
1003 /**
1004  * get_e_source - retrieve an error source
1005  * @rpc: pointer to the root port which holds an error
1006  * @e_src: pointer to store retrieved error source
1007  *
1008  * Return 1 if an error source is retrieved, otherwise 0.
1009  *
1010  * Invoked by DPC handler to consume an error.
1011  */
1012 static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
1013 {
1014 	unsigned long flags;
1015 
1016 	/* Lock access to Root error producer/consumer index */
1017 	spin_lock_irqsave(&rpc->e_lock, flags);
1018 	if (rpc->prod_idx == rpc->cons_idx) {
1019 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1020 		return 0;
1021 	}
1022 
1023 	*e_src = rpc->e_sources[rpc->cons_idx];
1024 	rpc->cons_idx++;
1025 	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
1026 		rpc->cons_idx = 0;
1027 	spin_unlock_irqrestore(&rpc->e_lock, flags);
1028 
1029 	return 1;
1030 }
1031 
1032 /**
1033  * aer_isr - consume errors detected by root port
1034  * @work: definition of this work item
1035  *
1036  * Invoked, as DPC, when root port records new detected error
1037  */
1038 static void aer_isr(struct work_struct *work)
1039 {
1040 	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
1041 	struct aer_err_source uninitialized_var(e_src);
1042 
1043 	mutex_lock(&rpc->rpc_mutex);
1044 	while (get_e_source(rpc, &e_src))
1045 		aer_isr_one_error(rpc, &e_src);
1046 	mutex_unlock(&rpc->rpc_mutex);
1047 }
1048 
1049 /**
1050  * aer_irq - Root Port's ISR
1051  * @irq: IRQ assigned to Root Port
1052  * @context: pointer to Root Port data structure
1053  *
1054  * Invoked when Root Port detects AER messages.
1055  */
1056 irqreturn_t aer_irq(int irq, void *context)
1057 {
1058 	unsigned int status, id;
1059 	struct pcie_device *pdev = (struct pcie_device *)context;
1060 	struct aer_rpc *rpc = get_service_data(pdev);
1061 	int next_prod_idx;
1062 	unsigned long flags;
1063 	int pos;
1064 
1065 	pos = pdev->port->aer_cap;
1066 	/*
1067 	 * Must lock access to Root Error Status Reg, Root Error ID Reg,
1068 	 * and Root error producer/consumer index
1069 	 */
1070 	spin_lock_irqsave(&rpc->e_lock, flags);
1071 
1072 	/* Read error status */
1073 	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
1074 	if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
1075 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1076 		return IRQ_NONE;
1077 	}
1078 
1079 	/* Read error source and clear error status */
1080 	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
1081 	pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
1082 
1083 	/* Store error source for later DPC handler */
1084 	next_prod_idx = rpc->prod_idx + 1;
1085 	if (next_prod_idx == AER_ERROR_SOURCES_MAX)
1086 		next_prod_idx = 0;
1087 	if (next_prod_idx == rpc->cons_idx) {
1088 		/*
1089 		 * Error storm condition: the ring is full, possibly because
1090 		 * the same error keeps recurring. Drop this error.
1091 		 */
1092 		spin_unlock_irqrestore(&rpc->e_lock, flags);
1093 		return IRQ_HANDLED;
1094 	}
1095 	rpc->e_sources[rpc->prod_idx].status =  status;
1096 	rpc->e_sources[rpc->prod_idx].id = id;
1097 	rpc->prod_idx = next_prod_idx;
1098 	spin_unlock_irqrestore(&rpc->e_lock, flags);
1099 
1100 	/*  Invoke DPC handler */
1101 	schedule_work(&rpc->dpc_handler);
1102 
1103 	return IRQ_HANDLED;
1104 }
1105 EXPORT_SYMBOL_GPL(aer_irq);
1106 
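/*
 * Applied to the Root Port directly and to its subordinates via
 * pci_walk_bus(): enable or disable error reporting on PCIe ports
 * (Root, Upstream, Downstream) and apply the ECRC policy.
 */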
1107 static int set_device_error_reporting(struct pci_dev *dev, void *data)
1108 {
1109 	bool enable = *((bool *)data);
1110 	int type = pci_pcie_type(dev);
1111 
1112 	if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
1113 	    (type == PCI_EXP_TYPE_UPSTREAM) ||
1114 	    (type == PCI_EXP_TYPE_DOWNSTREAM)) {
1115 		if (enable)
1116 			pci_enable_pcie_error_reporting(dev);
1117 		else
1118 			pci_disable_pcie_error_reporting(dev);
1119 	}
1120 
1121 	if (enable)
1122 		pcie_set_ecrc_checking(dev);
1123 
1124 	return 0;
1125 }
1126 
1127 /**
1128  * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
1129  * @dev: pointer to root port's pci_dev data structure
1130  * @enable: true = enable error reporting, false = disable error reporting.
1131  */
1132 static void set_downstream_devices_error_reporting(struct pci_dev *dev,
1133 						   bool enable)
1134 {
1135 	set_device_error_reporting(dev, &enable);
1136 
1137 	if (!dev->subordinate)
1138 		return;
1139 	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
1140 }
1141 
1142 /**
1143  * aer_enable_rootport - enable Root Port's interrupts when receiving messages
1144  * @rpc: pointer to a Root Port data structure
1145  *
1146  * Invoked when PCIe bus loads AER service driver.
1147  */
1148 static void aer_enable_rootport(struct aer_rpc *rpc)
1149 {
1150 	struct pci_dev *pdev = rpc->rpd;
1151 	int aer_pos;
1152 	u16 reg16;
1153 	u32 reg32;
1154 
1155 	/* Clear PCIe Capability's Device Status */
1156 	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
1157 	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
1158 
1159 	/* Disable system error generation in response to error messages */
1160 	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
1161 				   SYSTEM_ERROR_INTR_ON_MESG_MASK);
1162 
1163 	aer_pos = pdev->aer_cap;
1164 	/* Clear error status */
1165 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
1166 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
1167 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
1168 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
1169 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
1170 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
1171 
1172 	/*
1173 	 * Enable error reporting for the root port device and downstream port
1174 	 * devices.
1175 	 */
1176 	set_downstream_devices_error_reporting(pdev, true);
1177 
1178 	/* Enable Root Port's interrupt in response to error messages */
1179 	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
1180 	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
1181 	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
1182 }
1183 
1184 /**
1185  * aer_disable_rootport - disable Root Port's interrupts when receiving messages
1186  * @rpc: pointer to a Root Port data structure
1187  *
1188  * Invoked when PCIe bus unloads AER service driver.
1189  */
1190 static void aer_disable_rootport(struct aer_rpc *rpc)
1191 {
1192 	struct pci_dev *pdev = rpc->rpd;
1193 	u32 reg32;
1194 	int pos;
1195 
1196 	/*
1197 	 * Disable error reporting for the root port device and downstream port
1198 	 * devices.
1199 	 */
1200 	set_downstream_devices_error_reporting(pdev, false);
1201 
1202 	pos = pdev->aer_cap;
1203 	/* Disable Root's interrupt in response to error messages */
1204 	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1205 	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
1206 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1207 
1208 	/* Clear Root's error status reg */
1209 	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
1210 	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
1211 }
1212 
1213 /**
1214  * aer_alloc_rpc - allocate Root Port data structure
1215  * @dev: pointer to the pcie_dev data structure
1216  *
1217  * Invoked when Root Port's AER service is loaded.
1218  */
1219 static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
1220 {
1221 	struct aer_rpc *rpc;
1222 
1223 	rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
1224 	if (!rpc)
1225 		return NULL;
1226 
1227 	/* Initialize e_lock, which serializes access to the Root Error Status regs */
1228 	spin_lock_init(&rpc->e_lock);
1229 
1230 	rpc->rpd = dev->port;
1231 	INIT_WORK(&rpc->dpc_handler, aer_isr);
1232 	mutex_init(&rpc->rpc_mutex);
1233 
1234 	/* Store rpc in the PCIe service device via the port driver framework */
1235 	set_service_data(dev, rpc);
1236 
1237 	return rpc;
1238 }
1239 
1240 /**
1241  * aer_remove - clean up resources
1242  * @dev: pointer to the pcie_dev data structure
1243  *
1244  * Invoked when PCI Express bus unloads or AER probe fails.
1245  */
1246 static void aer_remove(struct pcie_device *dev)
1247 {
1248 	struct aer_rpc *rpc = get_service_data(dev);
1249 
1250 	if (rpc) {
1251 		/* If the interrupt service was registered, it must be freed */
1252 		if (rpc->isr)
1253 			free_irq(dev->irq, dev);
1254 
1255 		flush_work(&rpc->dpc_handler);
1256 		aer_disable_rootport(rpc);
1257 		kfree(rpc);
1258 		set_service_data(dev, NULL);
1259 	}
1260 }
1261 
1262 /**
1263  * aer_probe - initialize resources
1264  * @dev: pointer to the pcie_dev data structure
1265  *
1266  * Invoked when PCI Express bus loads AER service driver.
1267  */
1268 static int aer_probe(struct pcie_device *dev)
1269 {
1270 	int status;
1271 	struct aer_rpc *rpc;
1272 	struct device *device = &dev->port->dev;
1273 
1274 	/* Alloc rpc data structure */
1275 	rpc = aer_alloc_rpc(dev);
1276 	if (!rpc) {
1277 		dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
1278 		aer_remove(dev);
1279 		return -ENOMEM;
1280 	}
1281 
1282 	/* Request IRQ ISR */
1283 	status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
1284 	if (status) {
1285 		dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
1286 			   dev->irq);
1287 		aer_remove(dev);
1288 		return status;
1289 	}
1290 
1291 	rpc->isr = 1;
1292 
1293 	aer_enable_rootport(rpc);
1294 	dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
1295 	return 0;
1296 }
1297 
1298 /**
1299  * aer_root_reset - reset link on Root Port
1300  * @dev: pointer to Root Port's pci_dev data structure
1301  *
1302  * Invoked by Port Bus driver when performing link reset at Root Port.
1303  */
1304 static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
1305 {
1306 	u32 reg32;
1307 	int pos;
1308 
1309 	pos = dev->aer_cap;
1310 
1311 	/* Disable Root's interrupt in response to error messages */
1312 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1313 	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
1314 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1315 
1316 	pci_reset_bridge_secondary_bus(dev);
1317 	pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
1318 
1319 	/* Clear Root Error Status */
1320 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
1321 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
1322 
1323 	/* Enable Root Port's interrupt in response to error messages */
1324 	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
1325 	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
1326 	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
1327 
1328 	return PCI_ERS_RESULT_RECOVERED;
1329 }
1330 
1331 /**
1332  * aer_error_resume - clean up corresponding error status bits
1333  * @dev: pointer to Root Port's pci_dev data structure
1334  *
1335  * Invoked by Port Bus driver during nonfatal recovery.
1336  */
1337 static void aer_error_resume(struct pci_dev *dev)
1338 {
1339 	int pos;
1340 	u32 status, mask;
1341 	u16 reg16;
1342 
1343 	/* Clean up Root device status */
1344 	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
1345 	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
1346 
1347 	/* Clean AER Root Error Status */
1348 	pos = dev->aer_cap;
1349 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
1350 	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
1351 	status &= ~mask; /* Clear corresponding nonfatal bits */
1352 	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
1353 }
1354 
1355 static struct pcie_port_service_driver aerdriver = {
1356 	.name		= "aer",
1357 	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
1358 	.service	= PCIE_PORT_SERVICE_AER,
1359 
1360 	.probe		= aer_probe,
1361 	.remove		= aer_remove,
1362 	.error_resume	= aer_error_resume,
1363 	.reset_link	= aer_root_reset,
1364 };
1365 
1366 /**
1367  * aer_service_init - register AER root service driver
1368  *
1369  * Invoked when AER root service driver is loaded.
1370  */
1371 static int __init aer_service_init(void)
1372 {
1373 	if (!pci_aer_available() || aer_acpi_firmware_first())
1374 		return -ENXIO;
1375 	return pcie_port_service_register(&aerdriver);
1376 }
1377 device_initcall(aer_service_init);
1378