xref: /titanic_41/usr/src/uts/sun4u/io/pci/pci_fm.c (revision 49218d4f8e4d84d1c08aeb267bcf6e451f2056dc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunddi.h>
31 #include <sys/sunndi.h>
32 #include <sys/ddi_impldefs.h>
33 #include <sys/async.h>
34 #include <sys/membar.h>
35 #include <sys/spl.h>
36 #include <sys/iommu.h>
37 #include <sys/pci/pci_obj.h>
38 #include <sys/fm/util.h>
39 #include <sys/fm/io/pci.h>
40 #include <sys/fm/io/ddi.h>
41 #include <sys/fm/io/sun4upci.h>
42 #include <sys/fm/protocol.h>
43 #include <sys/intr.h>
44 
45 /*LINTLIBRARY*/
46 
47 /*
48  * The routines below are generic sun4u PCI interfaces to support
49  * Fault Management.
50  *
51  * pci_dma_check, pci_acc_check, pci_handle_lookup are functions used
52  * to associate a captured PCI address to a particular dma/acc handle.
53  *
54  * pci_fm_acc_setup, pci_fm_init_child, pci_fm_create,
55  * pci_fm_destroy are constructors/destructors used to setup and teardown
56  * necessary resources.
57  *
58  * pci_bus_enter, pci_bus_exit are registered via busops and are used to
59  * provide exclusive access to the PCI bus.
60  *
61  * pci_err_callback is the registered callback for PCI which is called
62  * by the CPU code when it detects a UE/TO/BERR.
63  *
64  * pbm_ereport_post is used by the PBM code to generically report all
65  * PBM errors.
66  *
67  */
68 
69 /*
70  * Function called after a dma fault occurred to find out whether the
71  * fault address is associated with a driver that is able to handle faults
72  * and recover from faults.
73  */
74 /* ARGSUSED */
75 static int
76 pci_dma_check(dev_info_t *dip, const void *handle, const void *comp_addr,
77     const void *not_used)
78 {
79 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
80 	pfn_t fault_pfn = mmu_btop(*(uint64_t *)comp_addr);
81 	pfn_t comp_pfn;
82 	int page;
83 
84 	/*
85 	 * The driver has to set DDI_DMA_FLAGERR to recover from dma faults.
86 	 */
87 	ASSERT(mp);
88 
89 	for (page = 0; page < mp->dmai_ndvmapages; page++) {
90 		comp_pfn = PCI_GET_MP_PFN(mp, page);
91 		if (fault_pfn == comp_pfn)
92 			return (DDI_FM_NONFATAL);
93 	}
94 
95 	return (DDI_FM_UNKNOWN);
96 }
97 
98 /*
99  * Function used to check if a given access handle owns the failing address.
100  * Called by ndi_fmc_error, when we detect a PIO error.
101  */
102 /* ARGSUSED */
103 static int
104 pci_acc_check(dev_info_t *dip, const void *handle, const void *comp_addr,
105     const void *not_used)
106 {
107 	pfn_t pfn, fault_pfn;
108 	ddi_acc_hdl_t *hp;
109 
110 	hp = impl_acc_hdl_get((ddi_acc_handle_t)handle);
111 
112 	ASSERT(hp);
113 
114 	pfn = hp->ah_pfn;
115 	fault_pfn = mmu_btop(*(uint64_t *)comp_addr);
116 	if (fault_pfn >= pfn && fault_pfn < (pfn + hp->ah_pnum))
117 		return (DDI_FM_NONFATAL);
118 
119 	return (DDI_FM_UNKNOWN);
120 }
121 
122 /*
123  * Function used by PCI error handlers to check if captured address is stored
124  * in the DMA or ACC handle caches.
125  */
126 int
127 pci_handle_lookup(dev_info_t *dip, int type, uint64_t fme_ena, void *afar)
128 {
129 	int status = DDI_FM_UNKNOWN;
130 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
131 
132 	if (type == DMA_HANDLE && DDI_FM_DMA_ERR_CAP(pci_p->pci_fm_cap))
133 		status = ndi_fmc_error(dip, NULL, type, pci_dma_check,
134 		    fme_ena, afar);
135 	else if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap))
136 		status = ndi_fmc_error(dip, NULL, type, pci_acc_check,
137 		    fme_ena, afar);
138 
139 	return (status);
140 }
141 
142 /*
143  * Function used to setup access functions depending on level of desired
144  * protection.
145  */
/*
 * Install the access routines matching the protection level requested in
 * the handle's devacc_attr_access, and track the handle in the FM access
 * cache while the mapping is locked (so pci_acc_check() can later match a
 * captured fault address against it).
 */
void
pci_fm_acc_setup(ddi_map_req_t *mp, dev_info_t *rdip)
{
	uchar_t fflag;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;

	hp = mp->map_handlep;
	ap = (ddi_acc_impl_t *)hp->ah_platform_private;
	fflag = ap->ahi_common.ah_acc.devacc_attr_access;

	if (mp->map_op == DDI_MO_MAP_LOCKED) {
		/* Register the handle in the ACC handle cache. */
		ndi_fmc_insert(rdip, ACC_HANDLE, (void *)hp, NULL);
		switch (fflag) {
		case DDI_FLAGERR_ACC:
			/* Error-protected access routines. */
			ap->ahi_get8 = i_ddi_prot_get8;
			ap->ahi_get16 = i_ddi_prot_get16;
			ap->ahi_get32 = i_ddi_prot_get32;
			ap->ahi_get64 = i_ddi_prot_get64;
			ap->ahi_put8 = i_ddi_prot_put8;
			ap->ahi_put16 = i_ddi_prot_put16;
			ap->ahi_put32 = i_ddi_prot_put32;
			ap->ahi_put64 = i_ddi_prot_put64;
			ap->ahi_rep_get8 = i_ddi_prot_rep_get8;
			ap->ahi_rep_get16 = i_ddi_prot_rep_get16;
			ap->ahi_rep_get32 = i_ddi_prot_rep_get32;
			ap->ahi_rep_get64 = i_ddi_prot_rep_get64;
			ap->ahi_rep_put8 = i_ddi_prot_rep_put8;
			ap->ahi_rep_put16 = i_ddi_prot_rep_put16;
			ap->ahi_rep_put32 = i_ddi_prot_rep_put32;
			ap->ahi_rep_put64 = i_ddi_prot_rep_put64;
			break;
		case DDI_CAUTIOUS_ACC :
			/* Cautious (exclusive, checked) access routines. */
			ap->ahi_get8 = i_ddi_caut_get8;
			ap->ahi_get16 = i_ddi_caut_get16;
			ap->ahi_get32 = i_ddi_caut_get32;
			ap->ahi_get64 = i_ddi_caut_get64;
			ap->ahi_put8 = i_ddi_caut_put8;
			ap->ahi_put16 = i_ddi_caut_put16;
			ap->ahi_put32 = i_ddi_caut_put32;
			ap->ahi_put64 = i_ddi_caut_put64;
			ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
			ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
			ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
			ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
			ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
			ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
			ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
			ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
			break;
		default:
			/* Default protection: leave access routines as-is. */
			break;
		}
	} else if (mp->map_op == DDI_MO_UNMAP) {
		/* Unmapping: drop the handle from the ACC handle cache. */
		ndi_fmc_remove(rdip, ACC_HANDLE, (void *)hp);
	}
}
203 
204 /*
205  * Function used to initialize FMA for our children nodes. Called
206  * through pci busops when child node calls ddi_fm_init.
207  */
208 /* ARGSUSED */
209 int
210 pci_fm_init_child(dev_info_t *dip, dev_info_t *tdip, int cap,
211     ddi_iblock_cookie_t *ibc)
212 {
213 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
214 
215 	ASSERT(ibc != NULL);
216 	*ibc = pci_p->pci_pbm_p->pbm_iblock_cookie;
217 
218 	return (pci_p->pci_fm_cap);
219 }
220 
221 /*
222  * Lock accesses to the pci bus, to be able to protect against bus errors.
223  */
void
pci_bus_enter(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;

	/* Drain prior accesses before taking exclusive ownership. */
	membar_sync();

	/* Held until pci_bus_exit(); serializes pokefault handling. */
	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_excl_handle = handle;
}
235 
236 /*
237  * Unlock access to bus and clear errors before exiting.
238  */
/* ARGSUSED */
void
pci_bus_exit(dev_info_t *dip, ddi_acc_handle_t handle)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ddi_fm_error_t derr;

	/* Caller must still own the bus from pci_bus_enter(). */
	ASSERT(MUTEX_HELD(&pbm_p->pbm_pokefault_mutex));

	/* Drain outstanding accesses before checking for errors. */
	membar_sync();

	mutex_enter(&pci_p->pci_common_p->pci_fm_mutex);
	ddi_fm_acc_err_get(pbm_p->pbm_excl_handle, &derr, DDI_FME_VERSION);

	/*
	 * Only run the PBM error handler if the exclusive handle itself
	 * saw no error; otherwise the access-level error stands.
	 */
	if (derr.fme_status == DDI_FM_OK) {
		if (pci_check_error(pci_p) != 0) {
			(void) pci_pbm_err_handler(pci_p->pci_dip, &derr,
					(const void *)pci_p, PCI_BUS_EXIT_CALL);
		}
	}
	mutex_exit(&pci_p->pci_common_p->pci_fm_mutex);

	/* Release exclusive ownership of the bus. */
	pbm_p->pbm_excl_handle = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);
}
265 
266 /*
267  * PCI error callback which is registered with our parent to call
268  * for PCI logging when the CPU traps due to BERR/TO/UE.
269  */
int
pci_err_callback(dev_info_t *dip, ddi_fm_error_t *derr,
    const void *impl_data)
{
	pci_t *pci_p = (pci_t *)impl_data;
	pci_common_t *cmn_p = pci_p->pci_common_p;
	ecc_t *ecc_p = cmn_p->pci_common_ecc_p;
	ecc_errstate_t ecc_err;
	int fatal = 0;		/* counts of each severity seen */
	int nonfatal = 0;
	int unknown = 0;
	int ret = DDI_FM_OK;

	bzero(&ecc_err, sizeof (ecc_err));
	mutex_enter(&cmn_p->pci_fm_mutex);
	/*
	 * Check and log ecc and pbm errors
	 */
	ecc_err.ecc_ii_p = ecc_p->ecc_ue;
	ecc_err.ecc_ena = derr->fme_ena;
	ecc_err.ecc_caller = PCI_TRAP_CALL;

	if ((ret = ecc_err_handler(&ecc_err)) == DDI_FM_FATAL)
		fatal++;
	else if (ret == DDI_FM_NONFATAL)
		nonfatal++;
	else if (ret == DDI_FM_UNKNOWN)
		unknown++;

	/* PBM errors are checked after the ECC pass, under the same lock. */
	if (pci_check_error(pci_p) != 0) {
		int err = pci_pbm_err_handler(pci_p->pci_dip, derr,
				(const void *)pci_p, PCI_TRAP_CALL);
		if (err == DDI_FM_FATAL)
			fatal++;
		else if (err == DDI_FM_NONFATAL)
			nonfatal++;
		else if (err == DDI_FM_UNKNOWN)
			unknown++;
	}

	mutex_exit(&cmn_p->pci_fm_mutex);

	/* Report the worst severity observed: fatal > nonfatal > unknown. */
	if (fatal)
		return (DDI_FM_FATAL);
	else if (nonfatal)
		return (DDI_FM_NONFATAL);
	else if (unknown)
		return (DDI_FM_UNKNOWN);
	else
		return (DDI_FM_OK);
}
321 
322 /*
323  * private version of walk_devs() that can be used during panic. No
324  * sleeping or locking required.
325  */
326 static int
327 pci_tgt_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
328 {
329 	while (dip) {
330 		switch ((*f)(dip, arg)) {
331 		case DDI_WALK_TERMINATE:
332 			return (DDI_WALK_TERMINATE);
333 		case DDI_WALK_CONTINUE:
334 			if (pci_tgt_walk_devs(ddi_get_child(dip), f,
335 			    arg) == DDI_WALK_TERMINATE)
336 				return (DDI_WALK_TERMINATE);
337 			break;
338 		case DDI_WALK_PRUNECHILD:
339 			break;
340 		}
341 		dip = ddi_get_next_sibling(dip);
342 	}
343 	return (DDI_WALK_CONTINUE);
344 }
345 
346 static int
347 pci_check_regs(dev_info_t *dip, void *arg)
348 {
349 	int reglen;
350 	int rn;
351 	int totreg;
352 	pci_regspec_t *drv_regp;
353 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
354 
355 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
356 	    "assigned-addresses", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
357 		return (DDI_WALK_CONTINUE);
358 
359 	totreg = reglen / sizeof (pci_regspec_t);
360 	for (rn = 0; rn < totreg; rn++) {
361 		if (tgt_err->tgt_pci_space ==
362 		    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
363 		    (tgt_err->tgt_pci_addr >=
364 		    (uint64_t)drv_regp[rn].pci_phys_low +
365 		    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
366 		    (tgt_err->tgt_pci_addr <
367 		    (uint64_t)drv_regp[rn].pci_phys_low +
368 		    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
369 		    (uint64_t)drv_regp[rn].pci_size_low +
370 		    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
371 			tgt_err->tgt_dip = dip;
372 			kmem_free(drv_regp, reglen);
373 			return (DDI_WALK_TERMINATE);
374 		}
375 	}
376 	kmem_free(drv_regp, reglen);
377 	return (DDI_WALK_CONTINUE);
378 }
379 
/*
 * Walk callback: decide whether this pci nexus' "ranges" property covers
 * tgt_err->tgt_err_addr.  On a covering range, translate the system
 * address to a pci address and walk this nexus' children with
 * pci_check_regs() to pin the owning device.  Terminates the walk once
 * tgt_err->tgt_dip has been found.
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;

	/* Only pci nexus nodes are of interest. */
	if (strcmp(ddi_node_name(dip), "pci") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
		(caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		return (DDI_WALK_CONTINUE);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;
	pci_fix_ranges(pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		/* Parent-side (system) window of this range entry. */
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		/* Panic path must use the lock-free walker. */
		if (panicstr)
			pci_tgt_walk_devs(dip, pci_check_regs, (void *)tgt_err);
		else
			ddi_walk_devs(dip, pci_check_regs, (void *)tgt_err);
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	/* Not under this nexus: no need to descend further. */
	return (DDI_WALK_PRUNECHILD);
}
463 
464 /*
465  * need special version of ddi_fm_ereport_post() as the leaf driver may
466  * not be hardened.
467  */
void
pci_tgt_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
    uint8_t version, ...)
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	/*
	 * During panic no allocation is possible; build the nvlists out of
	 * pre-reserved errorq element storage instead.
	 */
	if (panicstr) {
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* Detector FMRI is the device path of the target dip. */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* Remaining varargs form the (name, type, value, ...) payload. */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		/* Synchronous commit; errorq owns the nvlist storage. */
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}
512 
/*
 * Function used to drain pci_target_queue, either during panic or after softint
 * is generated, to generate target device ereports based on captured physical
 * address.
 */
static void
pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
{
	char buf[FM_MAX_CLASS];

	/*
	 * The following assumes that all pci_pci bridge devices
	 * are configured as transparent. Find the top-level pci
	 * nexus which has tgt_err_addr in one of its ranges, converting this
	 * to a pci address in the process. Then starting at this node do
	 * another tree walk to find a device with the pci address we've
	 * found within range of one of its assigned-addresses properties.
	 */
	tgt_err->tgt_dip = NULL;
	/* Panic path must use the lock-free walker. */
	if (panicstr)
		pci_tgt_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	else
		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	if (tgt_err->tgt_dip == NULL)
		return;

	/* Ereport class is "<bridge type>.<error class>". */
	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
	    tgt_err->tgt_err_class);
	pci_tgt_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
}
546 
void
pci_fm_create(pci_t *pci_p)
{
	pci_common_t *cmn_p = pci_p->pci_common_p;

	/*
	 * PCI detected ECC errorq, to schedule async handling
	 * of ECC errors and logging.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.
	 */
	if (pci_ecc_queue == NULL) {
		pci_ecc_queue = errorq_create("pci_ecc_queue",
				(errorq_func_t)ecc_err_drain,
				(void *)NULL,
				ECC_MAX_ERRS, sizeof (ecc_errstate_t),
				PIL_2, ERRORQ_VITAL);
		if (pci_ecc_queue == NULL)
			panic("failed to create required system error queue");
	}

	/*
	 * PCI target errorq, to schedule async handling of generation of
	 * target device ereports based on captured physical address.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.
	 */
	if (pci_target_queue == NULL) {
		pci_target_queue = errorq_create("pci_target_queue",
				(errorq_func_t)pci_target_drain,
				(void *)NULL,
				TARGET_MAX_ERRS, sizeof (pci_target_err_t),
				PIL_2, ERRORQ_VITAL);
		if (pci_target_queue == NULL)
			panic("failed to create required system error queue");
	}

	/*
	 * Initialize FMA support
	 * The axq workaround prevents fault management of access errors
	 */
	if (pci_p->pci_pbm_p->pbm_pio_limit == 0)
		pci_p->pci_fm_cap = DDI_FM_EREPORT_CAPABLE |
			DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
			DDI_FM_ERRCB_CAPABLE;
	else
		pci_p->pci_fm_cap = DDI_FM_EREPORT_CAPABLE |
			DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
	/*
	 * Call parent to get its capability
	 */
	ddi_fm_init(pci_p->pci_dip, &pci_p->pci_fm_cap,
			&pci_p->pci_fm_ibc);
	/*
	 * Need to be ereport and error handler capable
	 */
	ASSERT((pci_p->pci_fm_cap & DDI_FM_ERRCB_CAPABLE) &&
	    (pci_p->pci_fm_cap & DDI_FM_EREPORT_CAPABLE));
	/*
	 * Initialize error handling mutex.  Shared with sibling instances;
	 * only the first reference creates it.
	 */
	if (cmn_p->pci_common_refcnt == 0) {
		mutex_init(&cmn_p->pci_fm_mutex, NULL, MUTEX_DRIVER,
				(void *)pci_p->pci_fm_ibc);
	}

	/*
	 * Register error callback with our parent.
	 */
	ddi_fm_handler_register(pci_p->pci_dip, pci_err_callback,
			pci_p);

}
620 
621 void
622 pci_fm_destroy(pci_t *pci_p)
623 {
624 	pci_common_t *cmn_p = pci_p->pci_common_p;
625 
626 	/* schizo non-shared objects */
627 	ddi_fm_handler_unregister(pci_p->pci_dip);
628 	ddi_fm_fini(pci_p->pci_dip);
629 
630 	if (cmn_p->pci_common_refcnt != 0)
631 		return;
632 
633 	mutex_destroy(&cmn_p->pci_fm_mutex);
634 }
635 
636 /*
637  * Function used to post PCI block module specific ereports.
638  */
639 void
640 pbm_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
641 {
642 	char buf[FM_MAX_CLASS];
643 
644 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
645 	    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
646 
647 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
648 
649 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
650 	    FM_VERSION, DATA_TYPE_UINT8, 0,
651 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
652 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
653 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
654 	    PCI_PBM_AFSR, DATA_TYPE_UINT64, pbm_err->pbm_afsr,
655 	    PCI_PBM_AFAR, DATA_TYPE_UINT64, pbm_err->pbm_afar,
656 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
657 	    PCI_PBM_VALOG, DATA_TYPE_UINT64, pbm_err->pbm_va_log,
658 	    NULL);
659 }
660