xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pcisch.c (revision 4fceebdf03eeac0d7c58a4f70cc19b00a8c40a73)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Schizo specifics implementation:
30  *	interrupt mapping register
31  *	PBM configuration
32  *	ECC and PBM error handling
33  *	Iommu mapping handling
34  *	Streaming Cache flushing
35  */
36 
37 #include <sys/types.h>
38 #include <sys/kmem.h>
39 #include <sys/sysmacros.h>
40 #include <sys/async.h>
41 #include <sys/systm.h>
42 #include <sys/ivintr.h>
43 #include <sys/machsystm.h>	/* lddphys, intr_dist_add */
45 #include <sys/iommutsb.h>
46 #include <sys/promif.h>		/* prom_printf */
47 #include <sys/map.h>
48 #include <sys/ddi.h>
49 #include <sys/sunddi.h>
50 #include <sys/sunndi.h>
51 #include <sys/spl.h>
52 #include <sys/fm/util.h>
53 #include <sys/ddi_impldefs.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/io/sun4upci.h>
56 #include <sys/fm/io/ddi.h>
57 #include <sys/fm/io/pci.h>
58 #include <sys/pci/pci_obj.h>
59 #include <sys/pci/pcisch.h>
60 #include <sys/pci/pcisch_mi.h>
61 #include <sys/pci/pcisch_asm.h>
62 #include <sys/x_call.h>		/* XCALL_PIL */
63 
64 /*LINTLIBRARY*/
65 
66 extern uint8_t ldstub(uint8_t *);
67 
68 #define	IOMMU_CTX_BITMAP_SIZE	(1 << (12 - 3))
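/*
 * Note: 1 << (12 - 3) is 512 bytes, i.e. 4096 context bits arranged as
 * 64 8-byte buckets, each combining a lock byte with a context bitmap
 * (see the DCB_LOCK_BITS/DCB_BMAP_BITS layout comment further below).
 */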
69 static void iommu_ctx_free(iommu_t *);
70 static int iommu_tlb_scrub(iommu_t *, int);
71 static uint32_t pci_identity_init(pci_t *);
72 
73 static void pci_cb_clear_error(cb_t *, cb_errstate_t *);
74 static void pci_clear_error(pci_t *, pbm_errstate_t *);
76 static int pci_intr_setup(pci_t *pci_p);
77 static void iommu_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
78 static void cb_ereport_post(dev_info_t *, uint64_t, cb_errstate_t *);
79 static void pcix_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
80 static void pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar,
81 		ecc_region_t region);
82 static void pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p);
83 static void tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p,
84 		dvma_addr_t dvma_pg, int npages);
85 
86 static int pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p);
87 
88 static pci_ksinfo_t	*pci_name_kstat;
89 static pci_ksinfo_t	*saf_name_kstat;
90 
91 extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
92 
93 /* called by pci_attach() DDI_ATTACH to initialize pci objects */
94 int
95 pci_obj_setup(pci_t *pci_p)
96 {
97 	pci_common_t *cmn_p;
98 	uint32_t chip_id = pci_identity_init(pci_p);
99 	uint32_t cmn_id = PCI_CMN_ID(ID_CHIP_TYPE(chip_id), pci_p->pci_id);
100 	int ret;
101 
102 	/* Perform allocations first to avoid delicate unwinding. */
103 	if (pci_alloc_tsb(pci_p) != DDI_SUCCESS)
104 		return (DDI_FAILURE);
105 
106 	mutex_enter(&pci_global_mutex);
107 	cmn_p = get_pci_common_soft_state(cmn_id);
108 	if (cmn_p == NULL) {
109 		if (alloc_pci_common_soft_state(cmn_id) != DDI_SUCCESS) {
110 			mutex_exit(&pci_global_mutex);
111 			pci_free_tsb(pci_p);
112 			return (DDI_FAILURE);
113 		}
114 		cmn_p = get_pci_common_soft_state(cmn_id);
115 		cmn_p->pci_common_id = cmn_id;
116 		cmn_p->pci_common_tsb_cookie = IOMMU_TSB_COOKIE_NONE;
117 	}
118 
119 	ASSERT((pci_p->pci_side == 0) || (pci_p->pci_side == 1));
120 	if (cmn_p->pci_p[pci_p->pci_side]) {
121 		/* second side attach */
122 		pci_p->pci_side = PCI_OTHER_SIDE(pci_p->pci_side);
123 		ASSERT(cmn_p->pci_p[pci_p->pci_side] == NULL);
124 	}
125 
126 	cmn_p->pci_p[pci_p->pci_side] = pci_p;
127 	pci_p->pci_common_p = cmn_p;
128 
129 	if (cmn_p->pci_common_refcnt == 0)
130 		cmn_p->pci_chip_id = chip_id;
131 
132 	ib_create(pci_p);
133 
134 	/*
135 	 * The initialization of cb internal interrupts depends on ib
136 	 */
137 	if (cmn_p->pci_common_refcnt == 0) {
138 		cb_create(pci_p);
139 		cmn_p->pci_common_cb_p = pci_p->pci_cb_p;
140 	} else
141 		pci_p->pci_cb_p = cmn_p->pci_common_cb_p;
142 
143 	iommu_create(pci_p);
144 
145 	if (cmn_p->pci_common_refcnt == 0) {
146 		ecc_create(pci_p);
147 		cmn_p->pci_common_ecc_p = pci_p->pci_ecc_p;
148 	} else
149 		pci_p->pci_ecc_p = cmn_p->pci_common_ecc_p;
150 
151 	pbm_create(pci_p);
152 	sc_create(pci_p);
153 
154 	pci_fm_create(pci_p);
155 
156 	if ((ret = pci_intr_setup(pci_p)) != DDI_SUCCESS)
157 		goto done;
158 
159 	pci_kstat_create(pci_p);
160 
161 	cmn_p->pci_common_attachcnt++;
162 	cmn_p->pci_common_refcnt++;
163 done:
164 	mutex_exit(&pci_global_mutex);
165 	if (ret != DDI_SUCCESS)
166 		cmn_err(CE_WARN, "pci_obj_setup failed %x", ret);
167 	return (ret);
168 }
169 
170 /* called by pci_detach() DDI_DETACH to destroy pci objects */
171 void
172 pci_obj_destroy(pci_t *pci_p)
173 {
174 	pci_common_t *cmn_p;
175 	mutex_enter(&pci_global_mutex);
176 
177 	cmn_p = pci_p->pci_common_p;
178 	cmn_p->pci_common_refcnt--;
179 	cmn_p->pci_common_attachcnt--;
180 
181 	pci_kstat_destroy(pci_p);
182 
183 	/* schizo non-shared objects */
184 	pci_fm_destroy(pci_p);
185 
186 	sc_destroy(pci_p);
187 	pbm_destroy(pci_p);
188 	iommu_destroy(pci_p);
189 	pci_mi_destroy(pci_p);
190 	ib_destroy(pci_p);
191 
192 	if (cmn_p->pci_common_refcnt != 0) {
193 		pci_intr_teardown(pci_p);
194 		cmn_p->pci_p[pci_p->pci_side] = NULL;
195 		mutex_exit(&pci_global_mutex);
196 		return;
197 	}
198 
199 	/* schizo shared objects - uses cmn_p, must be destroyed before cmn */
200 	ecc_destroy(pci_p);
201 	cb_destroy(pci_p);
202 
203 	free_pci_common_soft_state(cmn_p->pci_common_id);
204 	pci_intr_teardown(pci_p);
205 	mutex_exit(&pci_global_mutex);
206 }
207 
208 /* called by pci_attach() DDI_RESUME to (re)initialize pci objects */
209 void
210 pci_obj_resume(pci_t *pci_p)
211 {
212 	pci_common_t *cmn_p = pci_p->pci_common_p;
213 
214 	mutex_enter(&pci_global_mutex);
215 
216 	ib_configure(pci_p->pci_ib_p);
217 	iommu_configure(pci_p->pci_iommu_p);
218 
219 	if (cmn_p->pci_common_attachcnt == 0)
220 		ecc_configure(pci_p);
221 
222 	ib_resume(pci_p->pci_ib_p);
223 
224 	pbm_configure(pci_p->pci_pbm_p);
225 	sc_configure(pci_p->pci_sc_p);
226 
227 	if (cmn_p->pci_common_attachcnt == 0)
228 		cb_resume(pci_p->pci_cb_p);
229 
230 	pbm_resume(pci_p->pci_pbm_p);
231 
232 	cmn_p->pci_common_attachcnt++;
233 	mutex_exit(&pci_global_mutex);
234 }
235 
236 /* called by pci_detach() DDI_SUSPEND to suspend pci objects */
237 void
238 pci_obj_suspend(pci_t *pci_p)
239 {
240 	mutex_enter(&pci_global_mutex);
241 
242 	pbm_suspend(pci_p->pci_pbm_p);
243 	ib_suspend(pci_p->pci_ib_p);
244 
245 	if (!--pci_p->pci_common_p->pci_common_attachcnt)
246 		cb_suspend(pci_p->pci_cb_p);
247 
248 	mutex_exit(&pci_global_mutex);
249 }
250 
251 /*
252  * add an additional 0x35 or 0x36 ino interrupt on platforms that don't
253  * have them. This routine assumes in multiple places that each interrupt
254  * takes one cell and that the cell size is the same as the integer size.
255  */
256 static int
257 pci_intr_setup(pci_t *pci_p)
258 {
259 	dev_info_t *dip = pci_p->pci_dip;
260 	pbm_t *pbm_p = pci_p->pci_pbm_p;
261 	cb_t *cb_p = pci_p->pci_cb_p;
262 	uint32_t *intr_buf, *new_intr_buf, *ino_buf;
263 	int intr_len, intr_cnt, ret, ino_buf_len;
264 
265 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
266 		"interrupts", (caddr_t)&intr_buf, &intr_len) != DDI_SUCCESS)
267 		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
268 			ddi_driver_name(dip), ddi_get_instance(dip));
269 
270 	intr_cnt = BYTES_TO_1275_CELLS(intr_len);
271 	if (intr_cnt < CBNINTR_CDMA)	/* CBNINTR_CDMA is 0 based */
272 		cmn_err(CE_PANIC, "%s%d: <%d interrupts", ddi_driver_name(dip),
273 			ddi_get_instance(dip), CBNINTR_CDMA);
274 
275 	if (intr_cnt == CBNINTR_CDMA)
276 		intr_cnt++;
277 
278 	new_intr_buf = kmem_alloc(CELLS_1275_TO_BYTES(intr_cnt), KM_SLEEP);
279 	bcopy(intr_buf, new_intr_buf, intr_len);
280 	kmem_free(intr_buf, intr_len);
281 
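	/*
	 * Derive the added CDMA ino from the PBM ino: the PBM ino's low
	 * bit appears to distinguish the two leaves, and flipping it
	 * selects PBM_CDMA_INO_BASE + 0 or + 1, i.e. the 0x35/0x36 inos
	 * named in the block comment above.
	 */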
282 	new_intr_buf[CBNINTR_CDMA] = PBM_CDMA_INO_BASE +
283 	    ((new_intr_buf[CBNINTR_PBM] & 0x1) ^ 0x1);
284 
285 	pci_p->pci_inos = new_intr_buf;
286 	pci_p->pci_inos_len = CELLS_1275_TO_BYTES(intr_cnt);
287 
288 	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
289 		(int *)new_intr_buf, intr_cnt))
290 		cmn_err(CE_PANIC, "%s%d: cannot update interrupts property\n",
291 			ddi_driver_name(dip), ddi_get_instance(dip));
292 
293 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
294 	    "ino-bitmap", (caddr_t)&ino_buf, &ino_buf_len) == DDI_SUCCESS) {
295 
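		/*
		 * "ino-bitmap" is a pair of 32-bit words; word [1] covers
		 * inos 0x20-0x3f, hence the -0x20 offset when setting the
		 * bit for the new CDMA ino.
		 */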
296 		ino_buf[1] |= (1 << (new_intr_buf[CBNINTR_CDMA] - 0x20));
297 
298 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
299 		    "ino-bitmap", (int *)ino_buf, 2);
300 
301 		kmem_free(ino_buf, ino_buf_len);
302 	}
303 
304 	pci_mi_setup(pci_p);
305 
306 	if (pci_p->pci_common_p->pci_common_refcnt == 0) {
307 		cb_p->cb_no_of_inos = intr_cnt;
308 		if (ret = cb_register_intr(pci_p))
309 			goto teardown;
310 		if (ret = ecc_register_intr(pci_p))
311 			goto teardown;
312 
313 		intr_dist_add(cb_intr_dist, cb_p);
314 		cb_enable_intr(pci_p);
315 		ecc_enable_intr(pci_p);
316 	}
317 
318 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
319 		pbm_p->pbm_sync_ino = pci_p->pci_inos[CBNINTR_PBM];
320 	if (ret = pbm_register_intr(pbm_p)) {
321 		if (pci_p->pci_common_p->pci_common_refcnt == 0)
322 			intr_dist_rem(cb_intr_dist, cb_p);
323 		goto teardown;
324 	}
325 	intr_dist_add(pbm_intr_dist, pbm_p);
326 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_PBM]);
327 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_CDMA]);
328 
329 	intr_dist_add_weighted(ib_intr_dist_all, pci_p->pci_ib_p);
330 	return (DDI_SUCCESS);
331 teardown:
332 	pci_mi_destroy(pci_p);
333 	pci_intr_teardown(pci_p);
334 	return (ret);
335 }
336 
337 uint64_t
338 pci_sc_configure(pci_t *pci_p)
339 {
340 	int instance;
341 	dev_info_t *dip = pci_p->pci_dip;
342 
343 	instance = ddi_get_instance(dip);
344 	if ((pci_xmits_sc_max_prf & (1 << instance)) &&
345 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS))
346 		return (XMITS_SC_MAX_PRF);
347 	else
348 		return (0);
349 }
350 
351 static void
352 pci_schizo_cdma_sync(pbm_t *pbm_p)
353 {
354 	pci_t *pci_p = pbm_p->pbm_pci_p;
355 	hrtime_t start_time;
356 	ib_ino_t cdma_ino = pci_p->pci_inos[CBNINTR_CDMA];
357 	volatile uint64_t *clr_p =
358 	    ib_clear_intr_reg_addr(pci_p->pci_ib_p, cdma_ino);
359 	volatile uint64_t *state_reg_p =
360 	    IB_INO_INTR_STATE_REG(pci_p->pci_ib_p, cdma_ino);
361 	uint32_t fail_cnt = pci_cdma_intr_count;
362 
363 	mutex_enter(&pbm_p->pbm_sync_mutex);
364 	pbm_p->pbm_cdma_flag = PBM_CDMA_PEND;
365 	IB_INO_INTR_TRIG(clr_p);
366 wait:
367 	start_time = gethrtime();
368 	while (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE && !panicstr) {
369 		if (gethrtime() - start_time <= pci_cdma_intr_timeout)
370 			continue;
371 		/*
372 		 * if master-interrupt is supported, check whether OBP reset CDMA's
373 		 * state machine to IDLE, meaning OBP received a CDMA interrupt during the
374 		 * L1-A sequence; otherwise, CDMA's state machine will remain at
375 		 * state PENDING and _never_ reset to another value--even when
376 		 * pbm_cdma_flag == DONE.
377 		 */
378 		if (pci_mi_check() && IB_INO_INTR_IDLE(state_reg_p, cdma_ino))
379 			break;
380 		if (--fail_cnt > 0)
381 			goto wait;
382 		if (pbm_p->pbm_cdma_flag == PBM_CDMA_DONE)
383 			break;
384 		cmn_err(CE_PANIC, "%s (%s): consistent dma sync timeout"
385 		    " (cdma ino 0x%x, st=%lu)\n",
386 		    pbm_p->pbm_nameinst_str, pbm_p->pbm_nameaddr_str,
387 		    cdma_ino, PRINT_STATE(state_reg_p, cdma_ino));
388 	}
389 	mutex_exit(&pbm_p->pbm_sync_mutex);
390 }
391 
392 #if !defined(lint)
393 #include <sys/cpuvar.h>
394 #endif
395 
396 #define	SYNC_HW_BUSY(pa, mask)	(lddphysio(pa) & (mask))
397 
398 /*
399  * Consistent DMA Sync/Flush
400  *
401  * XMITS and Tomatillo use multi-threaded sync/flush register.
402  * Called from interrupt wrapper: the associated ino is used to index
403  *	the distinctive register bit.
404  * Called from pci_dma_sync(): the bit belonging to the PBM is shared
405  *	for all calls from pci_dma_sync(). Xmits requires serialization
406  *	while Tomatillo does not.
407  */
408 void
409 pci_pbm_dma_sync(pbm_t *pbm_p, ib_ino_t ino)
410 {
411 	pci_t *pci_p = pbm_p->pbm_pci_p;
412 	hrtime_t start_time;
413 	uint64_t ino_mask, sync_reg_pa;
414 	volatile uint64_t flag_val;
415 	uint32_t locked, chip_type = CHIP_TYPE(pci_p);
416 	int	i;
417 
418 	if (chip_type == PCI_CHIP_SCHIZO) {
419 		pci_schizo_cdma_sync(pbm_p);
420 		return;
421 	}
422 
423 	sync_reg_pa = pbm_p->pbm_sync_reg_pa;
424 
425 	locked = 0;
426 	if (((chip_type == PCI_CHIP_XMITS) && (ino == pbm_p->pbm_sync_ino)) ||
427 	    pci_sync_lock) {
428 		locked = 1;
429 		mutex_enter(&pbm_p->pbm_sync_mutex);
430 	}
431 	ino_mask = 1ull << ino;
432 	stdphysio(sync_reg_pa, ino_mask);
433 
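	/* Poll a few times cheaply before falling into the timed loop. */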
434 	for (i = 0; i < 5; i++) {
435 		if ((flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) == 0)
436 			goto done;
437 	}
438 
439 	start_time = gethrtime();
440 	for (; (flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) != 0; i++) {
441 		if (gethrtime() - start_time > pci_sync_buf_timeout)
442 			break;
443 	}
444 
445 	if (flag_val && SYNC_HW_BUSY(sync_reg_pa, ino_mask) && !panicstr)
446 		cmn_err(CE_PANIC, "%s: pbm dma sync %lx,%lx timeout!",
447 			pbm_p->pbm_nameaddr_str, sync_reg_pa, flag_val);
448 done:
449 	/* optional: stdphysio(sync_reg_pa - 8, ino_mask); */
450 	if (locked)
451 		mutex_exit(&pbm_p->pbm_sync_mutex);
452 
453 	if (tomatillo_store_store_wrka) {
454 #if !defined(lint)
455 		kpreempt_disable();
456 #endif
457 		tomatillo_store_store_order();
458 #if !defined(lint)
459 		kpreempt_enable();
460 #endif
461 	}
462 
463 }
464 
465 /*ARGSUSED*/
466 void
467 pci_fix_ranges(pci_ranges_t *rng_p, int rng_entries)
468 {
469 }
470 
471 /*
472  * map_pci_registers
473  *
474  * This function is called from the attach routine to map the registers
475  * accessed by this driver.
476  *
477  * used by: pci_attach()
478  *
479  * return value: DDI_FAILURE on failure
480  */
481 int
482 map_pci_registers(pci_t *pci_p, dev_info_t *dip)
483 {
484 	ddi_device_acc_attr_t attr;
485 	int len;
486 
487 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
488 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
489 
490 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
491 
492 	/*
493 	 * Register set 0 is PCI CSR Base
494 	 */
495 	if (ddi_regs_map_setup(dip, 0, &pci_p->pci_address[0], 0, 0,
496 	    &attr, &pci_p->pci_ac[0]) != DDI_SUCCESS) {
497 		len = 0;
498 		goto fail;
499 	}
500 	/*
501 	 * Register set 1 is Schizo CSR Base
502 	 */
503 	if (ddi_regs_map_setup(dip, 1, &pci_p->pci_address[1], 0, 0,
504 	    &attr, &pci_p->pci_ac[1]) != DDI_SUCCESS) {
505 		len = 1;
506 		goto fail;
507 	}
508 
509 	/*
510 	 * The third register set contains the bridge's configuration
511 	 * header.  This header is at the very beginning of the bridge's
512 	 * configuration space.  This space has little-endian byte order.
513 	 */
514 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
515 	if (ddi_regs_map_setup(dip, 2, &pci_p->pci_address[2], 0,
516 	    PCI_CONF_HDR_SIZE, &attr, &pci_p->pci_ac[2]) != DDI_SUCCESS) {
517 		len = 2;
518 		goto fail;
519 	}
520 
521 	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
522 	    "reg", &len) || (len / sizeof (pci_nexus_regspec_t) < 4))
523 		goto done;
524 
525 	/*
526 	 * The optional fourth register bank points to the
527 	 * interrupt concentrator registers.
528 	 */
529 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
530 	if (ddi_regs_map_setup(dip, 3, &pci_p->pci_address[3], 0,
531 	    0, &attr, &pci_p->pci_ac[3]) != DDI_SUCCESS) {
532 		len = 3;
533 		goto fail;
534 	}
535 
536 done:
537 	DEBUG4(DBG_ATTACH, dip, "address (%p,%p,%p,%p)\n",
538 	    pci_p->pci_address[0], pci_p->pci_address[1],
539 	    pci_p->pci_address[2], pci_p->pci_address[3]);
540 
541 	return (DDI_SUCCESS);
542 
543 
544 fail:
545 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
546 		ddi_driver_name(dip), ddi_get_instance(dip), len);
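	/* len holds the reg set that failed; unmap sets len-1 down to 0. */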
547 	for (; len--; ddi_regs_map_free(&pci_p->pci_ac[len]));
548 	return (DDI_FAILURE);
549 }
550 
551 /*
552  * unmap_pci_registers:
553  *
554  * This routine unmaps the registers mapped by map_pci_registers.
555  *
556  * used by: pci_detach()
557  *
558  * return value: none
559  */
560 void
561 unmap_pci_registers(pci_t *pci_p)
562 {
563 	int i;
564 
565 	for (i = 0; i < 4; i++) {
566 		if (pci_p->pci_ac[i])
567 			ddi_regs_map_free(&pci_p->pci_ac[i]);
568 	}
569 }
570 
571 uint64_t
572 ib_get_map_reg(ib_mondo_t mondo, uint32_t cpu_id)
573 {
574 	uint32_t agent_id;
575 	uint32_t node_id;
576 
577 	/* ensure that cpu_id is only 10 bits. */
578 	ASSERT((cpu_id & ~0x3ff) == 0);
579 
580 	agent_id = cpu_id & 0x1f;
581 	node_id = (cpu_id >> 5) & 0x1f;
582 
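	/*
	 * Example: cpu_id 0x2a3 splits into agent_id 0x3 (bits 4:0) and
	 * node_id 0x15 (bits 9:5), each placed into its own field of the
	 * interrupt mapping register below.
	 */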
583 	return ((mondo) | (agent_id << COMMON_INTR_MAP_REG_TID_SHIFT) |
584 	    (node_id << SCHIZO_INTR_MAP_REG_NID_SHIFT) |
585 	    COMMON_INTR_MAP_REG_VALID);
586 }
587 
588 uint32_t
589 ib_map_reg_get_cpu(volatile uint64_t reg)
590 {
591 	return (((reg & COMMON_INTR_MAP_REG_TID) >>
592 		COMMON_INTR_MAP_REG_TID_SHIFT) |
593 			((reg & SCHIZO_INTR_MAP_REG_NID) >>
594 			(SCHIZO_INTR_MAP_REG_NID_SHIFT-5)));
595 }
596 
597 uint64_t *
598 ib_intr_map_reg_addr(ib_t *ib_p, ib_ino_t ino)
599 {
600 	/*
601 	 * Schizo maps all interrupts in one contiguous area.
602 	 * (PCI_CSRBase + 0x00.1000 + INO * 8).
603 	 */
604 	return ((uint64_t *)(ib_p->ib_intr_map_regs) + (ino & 0x3f));
605 }
606 
607 uint64_t *
608 ib_clear_intr_reg_addr(ib_t *ib_p, ib_ino_t ino)	/* XXX - needs work */
609 {
610 	/*
611 	 * Schizo maps clear intr. registers in contiguous area.
612 	 * (PCI_CSRBase + 0x00.1400 + INO * 8).
613 	 */
614 	return ((uint64_t *)(ib_p->ib_slot_clear_intr_regs) + (ino & 0x3f));
615 }
616 
617  * schizo does not have a mapping register per slot, so no sharing
618  * schizo does not have mapping register per slot, so no sharing
619  * is done.
620  */
621 /*ARGSUSED*/
622 void
623 ib_ino_map_reg_share(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
624 {
625 }
626 
627 /*
628  * return true if there are interrupts using this mapping register
629  */
630 /*ARGSUSED*/
631 int
632 ib_ino_map_reg_unshare(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
633 {
634 	return (ino_p->ino_ipil_size);
635 }
636 
637 void
638 pci_pbm_intr_dist(pbm_t *pbm_p)
639 {
640 	pci_t *pci_p = pbm_p->pbm_pci_p;
641 	ib_t *ib_p = pci_p->pci_ib_p;
642 	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[CBNINTR_CDMA]);
643 
644 	mutex_enter(&pbm_p->pbm_sync_mutex);
645 	ib_intr_dist_nintr(ib_p, ino, ib_intr_map_reg_addr(ib_p, ino));
646 	mutex_exit(&pbm_p->pbm_sync_mutex);
647 }
648 
649 uint32_t
650 pci_xlate_intr(dev_info_t *dip, dev_info_t *rdip, ib_t *ib_p, uint32_t intr)
651 {
652 	return (IB_INO_TO_MONDO(ib_p, intr));
653 }
654 
655 
656 /*
657  * Return the cpuid to be used for an ino.  We have no special cpu
658  * assignment constraints for this nexus, so just call intr_dist_cpuid().
659  */
660 /* ARGSUSED */
661 uint32_t
662 pci_intr_dist_cpuid(ib_t *ib_p, ib_ino_info_t *ino_p)
663 {
664 	return (intr_dist_cpuid());
665 }
666 
667 void
668 pci_cb_teardown(pci_t *pci_p)
669 {
670 	cb_t 	*cb_p = pci_p->pci_cb_p;
671 	uint32_t mondo;
672 
673 	if (!pci_buserr_interrupt)
674 		return;
675 
676 	mondo = ((pci_p->pci_cb_p->cb_ign  << PCI_INO_BITS) |
677 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
678 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
679 
680 	cb_disable_nintr(cb_p, CBNINTR_BUS_ERROR, IB_INTR_WAIT);
681 	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR]) == 0);
682 }
683 
684 int
685 cb_register_intr(pci_t *pci_p)
686 {
687 	uint32_t mondo;
688 
689 	if (!pci_buserr_interrupt)
690 		return (DDI_SUCCESS);
691 
692 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
693 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
694 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
695 
696 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR],
697 	    (intrfunc)cb_buserr_intr, (caddr_t)pci_p->pci_cb_p,
698 	    NULL, NULL) == 0);
699 
700 	return (PCI_ATTACH_RETCODE(PCI_CB_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
701 }
702 
703 void
704 cb_enable_intr(pci_t *pci_p)
705 {
706 	if (pci_buserr_interrupt)
707 		cb_enable_nintr(pci_p, CBNINTR_BUS_ERROR);
708 }
709 
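/*
 * The interrupt mapping and clear registers are 8 bytes apart per ino,
 * so the PA for an ino is the respective block base plus (ino << 3).
 */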
710 uint64_t
711 cb_ino_to_map_pa(cb_t *cb_p, ib_ino_t ino)
712 {
713 	return (cb_p->cb_map_pa + (ino << 3));
714 }
715 
716 uint64_t
717 cb_ino_to_clr_pa(cb_t *cb_p, ib_ino_t ino)
718 {
719 	return (cb_p->cb_clr_pa + (ino << 3));
720 }
721 
722 /*
723  * Useful on psycho only.
724  */
725 int
726 cb_remove_xintr(pci_t *pci_p, dev_info_t *dip, dev_info_t *rdip, ib_ino_t ino,
727 ib_mondo_t mondo)
728 {
729 	return (DDI_FAILURE);
730 }
731 
732 void
733 pbm_configure(pbm_t *pbm_p)
734 {
735 	pci_t *pci_p = pbm_p->pbm_pci_p;
736 	dev_info_t *dip = pbm_p->pbm_pci_p->pci_dip;
737 	int instance = ddi_get_instance(dip);
738 	uint64_t l;
739 	uint64_t mask = 1ll << instance;
740 	ushort_t s = 0;
741 
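	/*
	 * Most of the pci_ and xmits_ tunables consulted below are
	 * bitmasks indexed by driver instance (via "mask"), so each PBM
	 * leaf can be tuned independently from /etc/system.
	 */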
742 	l = *pbm_p->pbm_ctrl_reg;	/* save control register state */
743 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
744 
745 	/*
746 	 * See if any SERR# signals are asserted.  We'll clear them later.
747 	 */
748 	if (l & COMMON_PCI_CTRL_SERR)
749 		cmn_err(CE_WARN, "%s%d: SERR asserted on pci bus\n",
750 		    ddi_driver_name(dip), instance);
751 
752 	/*
753 	 * Determine if PCI bus is running at 33 or 66 MHz.
754 	 */
755 	if (l & COMMON_PCI_CTRL_SPEED)
756 		pbm_p->pbm_speed = PBM_SPEED_66MHZ;
757 	else
758 		pbm_p->pbm_speed = PBM_SPEED_33MHZ;
759 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: %d mhz\n",
760 	    pbm_p->pbm_speed  == PBM_SPEED_66MHZ ? 66 : 33);
761 
762 	if (pci_set_dto_value & mask) {
763 		l &= ~(3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
764 		l |= pci_dto_value << SCHIZO_PCI_CTRL_PTO_SHIFT;
765 	} else if (PCI_CHIP_ID(pci_p) >= TOMATILLO_VER_21) {
766 		l |= (3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
767 	}
768 
769 	/*
770 	 * Enable error interrupts.
771 	 */
772 	if (pci_error_intr_enable & mask)
773 		l |= SCHIZO_PCI_CTRL_ERR_INT_EN;
774 	else
775 		l &= ~SCHIZO_PCI_CTRL_ERR_INT_EN;
776 
777 	/*
778 	 * Enable pci streaming byte errors and error interrupts.
779 	 */
780 	if (pci_sbh_error_intr_enable & mask)
781 		l |= SCHIZO_PCI_CTRL_SBH_INT_EN;
782 	else
783 		l &= ~SCHIZO_PCI_CTRL_SBH_INT_EN;
784 
785 	/*
786 	 * Enable pci discard timeout error interrupt.
787 	 */
788 	if (pci_mmu_error_intr_enable & mask)
789 		l |= SCHIZO_PCI_CTRL_MMU_INT_EN;
790 	else
791 		l &= ~SCHIZO_PCI_CTRL_MMU_INT_EN;
792 
793 	/*
794 	 * Enable PCI-X error interrupts.
795 	 */
796 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
797 
798 		if (xmits_error_intr_enable & mask)
799 			l |= XMITS_PCI_CTRL_X_ERRINT_EN;
800 		else
801 			l &= ~XMITS_PCI_CTRL_X_ERRINT_EN;
802 		/*
803 		 * Panic if older XMITS hardware is found.
804 		 */
805 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)
806 			if (PCI_CHIP_ID(pci_p) <= XMITS_VER_10)
807 				cmn_err(CE_PANIC, "%s (%s): PCIX mode "
808 				"unsupported on XMITS version %d\n",
809 				    pbm_p->pbm_nameinst_str,
810 				    pbm_p->pbm_nameaddr_str, CHIP_VER(pci_p));
811 
812 		if (xmits_perr_recov_int_enable) {
813 			if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
814 				uint64_t pcix_err;
815 				/*
816 				 * Enable interrupt on PERR
817 				 */
818 				pcix_err = *pbm_p->pbm_pcix_err_stat_reg;
819 				pcix_err |= XMITS_PCIX_STAT_PERR_RECOV_INT_EN;
820 				pcix_err &= ~XMITS_PCIX_STAT_SERR_ON_PERR;
821 				*pbm_p->pbm_pcix_err_stat_reg = pcix_err;
822 			}
823 		}
824 
825 		/*
826 		 * Enable parity error detection on internal memories
827 		 */
828 		*pbm_p->pbm_pci_ped_ctrl = 0x3fff;
829 	}
830 
831 	/*
832 	 * Enable/disable bus parking.
833 	 */
834 	if ((pci_bus_parking_enable & mask) &&
835 	    !ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
836 	    "no-bus-parking"))
837 		l |= SCHIZO_PCI_CTRL_ARB_PARK;
838 	else
839 		l &= ~SCHIZO_PCI_CTRL_ARB_PARK;
840 
841 	/*
842 	 * Enable arbitration.
843 	 */
844 	l |= PCI_CHIP_ID(pci_p) == XMITS_VER_10 ? XMITS10_PCI_CTRL_ARB_EN_MASK :
845 		SCHIZO_PCI_CTRL_ARB_EN_MASK;
846 
847 	/*
848 	 * Make sure SERR is clear
849 	 */
850 	l |= COMMON_PCI_CTRL_SERR;
851 
852 
853 	/*
854 	 * Enable DTO interrupt, if desired.
855 	 */
856 
857 	if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_20 || (pci_dto_intr_enable &
858 	    mask))
859 		l |=	 (TOMATILLO_PCI_CTRL_DTO_INT_EN);
860 	else
861 		l &=	 ~(TOMATILLO_PCI_CTRL_DTO_INT_EN);
862 
863 	l |= TOMATILLO_PCI_CTRL_PEN_RD_MLTPL |
864 		TOMATILLO_PCI_CTRL_PEN_RD_ONE |
865 		TOMATILLO_PCI_CTRL_PEN_RD_LINE;
866 
867 	/*
868 	 * Now finally write the control register with the appropriate value.
869 	 */
870 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
871 	*pbm_p->pbm_ctrl_reg = l;
872 
873 	/*
874 	 * Enable IO Prefetch on Tomatillo
875 	 */
876 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
877 		volatile uint64_t *ioc_csr_p = pbm_p->pbm_ctrl_reg +
878 			((TOMATILLO_IOC_CSR_OFF -
879 			SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
880 		*ioc_csr_p = TOMATILLO_WRT_PEN |
881 			(1 << TOMATILLO_POFFSET_SHIFT) |
882 			TOMATILLO_C_PEN_RD_MLTPL |
883 			TOMATILLO_C_PEN_RD_ONE |
884 			TOMATILLO_C_PEN_RD_LINE;
885 	}
886 
887 	/*
888 	 * Allow DMA write parity errors to generate an interrupt.
889 	 * This is implemented on Schizo 2.5 and greater and XMITS 3.0
890 	 * and greater.  Setting this on earlier versions of XMITS
891 	 * has no effect.
892 	 */
893 	if (((CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) &&
894 	    PCI_CHIP_ID(pci_p) >= SCHIZO_VER_25) ||
895 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)) {
896 		volatile uint64_t *pbm_icd = pbm_p->pbm_ctrl_reg +
897 		    ((SCHIZO_PERF_PCI_ICD_OFFSET -
898 		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
899 
900 		*pbm_icd |= SCHIZO_PERF_PCI_ICD_DMAW_PARITY_INT_ENABLE;
901 	}
902 
903 	/*
904 	 * Clear any PBM errors.
905 	 */
906 	l = (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_PE_SHIFT) |
907 		(SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_SE_SHIFT);
908 	*pbm_p->pbm_async_flt_status_reg = l;
909 
910 	/*
911 	 * Allow the diag register to be set based upon variable that
912 	 * can be configured via /etc/system.
913 	 */
914 	l = *pbm_p->pbm_diag_reg;
915 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
916 
917 	/*
918 	 * Enable/disable retry limit.
919 	 */
920 	if (pci_retry_disable & mask)
921 		l |= COMMON_PCI_DIAG_DIS_RETRY;
922 	else
923 		l &= ~COMMON_PCI_DIAG_DIS_RETRY;
924 
925 	/*
926 	 * Enable/disable DMA write/interrupt synchronization.
927 	 */
928 	if (pci_intsync_disable & mask)
929 		l |= COMMON_PCI_DIAG_DIS_INTSYNC;
930 	else
931 		l &= ~COMMON_PCI_DIAG_DIS_INTSYNC;
932 
933 	/*
934 	 * Enable/disable retry arbitration priority.
935 	 */
936 	if (pci_enable_retry_arb & mask)
937 		l &= ~SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
938 	else
939 		l |= SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
940 
941 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
942 	*pbm_p->pbm_diag_reg = l;
943 
944 	/*
945 	 * Enable SERR# and parity reporting via command register.
946 	 */
947 	s = pci_perr_enable & mask ? PCI_COMM_PARITY_DETECT : 0;
948 	s |= pci_serr_enable & mask ? PCI_COMM_SERR_ENABLE : 0;
949 
950 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg=%x\n", s);
951 	pbm_p->pbm_config_header->ch_command_reg = s;
952 
953 	/*
954 	 * Clear error bits in configuration status register.
955 	 */
956 	s = PCI_STAT_PERROR | PCI_STAT_S_PERROR |
957 		PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |
958 		PCI_STAT_S_TARG_AB;
959 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg=%x\n", s);
960 	pbm_p->pbm_config_header->ch_status_reg = s;
961 
962 	/*
963 	 * The current versions of the obp are supposed to set the latency
964 	 * timer register but do not.  Bug 1234181 is open against this
965 	 * problem.  Until this bug is fixed we check to see if the obp
966 	 * has attempted to set the latency timer register by checking
967 	 * for the existence of a "latency-timer" property.
968 	 */
969 	if (pci_set_latency_timer_register) {
970 		DEBUG1(DBG_ATTACH, dip,
971 		    "pbm_configure: set schizo latency timer to %x\n",
972 			pci_latency_timer);
973 		pbm_p->pbm_config_header->ch_latency_timer_reg =
974 			pci_latency_timer;
975 	}
976 
977 	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
978 		(int)pbm_p->pbm_config_header->ch_latency_timer_reg);
979 
980 	/*
981 	 * Adjust xmits_upper_retry_counter if set in /etc/system
982 	 *
983 	 * NOTE: current implementation resets UPPR_RTRY counter for
984 	 * _all_ XMITS' PBMs and does not support tuning per PBM.
985 	 */
986 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
987 		uint_t xurc = xmits_upper_retry_counter &
988 		    XMITS_UPPER_RETRY_MASK;
989 
990 		if (xurc) {
991 			*pbm_p->pbm_upper_retry_counter_reg = (uint64_t)xurc;
992 			DEBUG1(DBG_ATTACH, dip, "pbm_configure: Setting XMITS"
993 			    " uppr_rtry counter = 0x%lx\n",
994 			    *pbm_p->pbm_upper_retry_counter_reg);
995 		}
996 	}
997 }
998 
999 uint_t
1000 pbm_disable_pci_errors(pbm_t *pbm_p)
1001 {
1002 	pci_t *pci_p = pbm_p->pbm_pci_p;
1003 	ib_t *ib_p = pci_p->pci_ib_p;
1004 
1005 	/*
1006 	 * Disable error and streaming byte hole interrupts via the
1007 	 * PBM control register.
1008 	 */
1009 	*pbm_p->pbm_ctrl_reg &=
1010 		~(SCHIZO_PCI_CTRL_ERR_INT_EN | SCHIZO_PCI_CTRL_SBH_INT_EN |
1011 		SCHIZO_PCI_CTRL_MMU_INT_EN);
1012 
1013 	/*
1014 	 * Disable error interrupts via the interrupt mapping register.
1015 	 */
1016 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_NOWAIT);
1017 	return (BF_NONE);
1018 }
1019 
1020 /*
1021  * Layout of the dvma context bucket bitmap entry:
1022  *
1023  *	63 - 56		55 - 0
1024  *	8-bit lock	56 bits, each representing one context
1025  *	DCB_LOCK_BITS	DCB_BMAP_BITS
1026  */
1027 #define	DCB_LOCK_BITS	8
1028 #define	DCB_BMAP_BITS	(64 - DCB_LOCK_BITS)
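/*
 * A context handle thus encodes its bucket in the upper bits and its
 * bit position in the low six, e.g. ctx 0x8a lives in bucket 2
 * (0x8a >> 6) at bit 10 (0x8a & 0x3f).
 */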
1029 
1030 dvma_context_t
1031 pci_iommu_get_dvma_context(iommu_t *iommu_p, dvma_addr_t dvma_pg_index)
1032 {
1033 	dvma_context_t ctx;
1034 	int i = (dvma_pg_index >> 6) & 0x1f;	/* 5 bit index within bucket */
1035 	uint64_t ctx_mask, test = 1ull << i;
1036 	uint32_t bucket_no = dvma_pg_index & 0x3f;
1037 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1038 
1039 	uint32_t spl = ddi_enter_critical();	/* block interrupts */
1040 	if (ldstub((uint8_t *)bucket_ptr)) {	/* try lock */
1041 		ddi_exit_critical(spl);		/* unblock interrupt */
1042 		pci_iommu_ctx_lock_failure++;
1043 		return (0);
1044 	}
1045 
1046 	/* clear lock bits */
1047 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1048 	ASSERT(*bucket_ptr >> DCB_BMAP_BITS == 0xff);
1049 	ASSERT(ctx_mask >> DCB_BMAP_BITS == 0);
1050 
1051 	if (ctx_mask & test)			/* quick check i bit */
1052 		for (i = 0, test = 1ull; test & ctx_mask; test <<= 1, i++);
1053 	if (i < DCB_BMAP_BITS)
1054 		ctx_mask |= test;
1055 	*bucket_ptr = ctx_mask;			/* unlock */
1056 	ddi_exit_critical(spl);			/* unblock interrupts */
1057 
1058 	ctx = i < DCB_BMAP_BITS ? (bucket_no << 6) | i : 0;
1059 	DEBUG3(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1060 		"get_dvma_context: ctx_mask=0x%x.%x ctx=0x%x\n",
1061 		(uint32_t)(ctx_mask >> 32), (uint32_t)ctx_mask, ctx);
1062 	return (ctx);
1063 }
1064 
1065 void
1066 pci_iommu_free_dvma_context(iommu_t *iommu_p, dvma_context_t ctx)
1067 {
1068 	uint64_t ctx_mask;
1069 	uint32_t spl, bucket_no = ctx >> 6;
1070 	int bit_no = ctx & 0x3f;
1071 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1072 
1073 	DEBUG1(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1074 		"free_dvma_context: ctx=0x%x\n", ctx);
1075 
1076 	spl = ddi_enter_critical();			/* block interrupts */
1077 	while (ldstub((uint8_t *)bucket_ptr));		/* spin lock */
1078 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1079 							/* clear lock bits */
1080 	ASSERT(ctx_mask & (1ull << bit_no));
1081 	*bucket_ptr = ctx_mask ^ (1ull << bit_no);	/* clear & unlock */
1082 	ddi_exit_critical(spl);				/* unblock interrupt */
1083 }
1084 
1085 int
1086 pci_sc_ctx_inv(dev_info_t *dip, sc_t *sc_p, ddi_dma_impl_t *mp)
1087 {
1088 	dvma_context_t ctx = MP2CTX(mp);
1089 	volatile uint64_t *reg_addr = sc_p->sc_ctx_match_reg + ctx;
1090 	uint64_t matchreg;
1091 
1092 	if (!*reg_addr) {
1093 		DEBUG1(DBG_SC, dip, "ctx=%x no match\n", ctx);
1094 		return (DDI_SUCCESS);
1095 	}
1096 
1097 	*sc_p->sc_ctx_invl_reg = ctx;	/* 1st flush write */
1098 	matchreg = *reg_addr;		/* re-fetch after 1st flush */
1099 	if (!matchreg)
1100 		return (DDI_SUCCESS);
1101 
1102 	matchreg = (matchreg << SC_ENT_SHIFT) >> SC_ENT_SHIFT;	/* low 16-bit */
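	/* issue one additional flush write for each match bit still set */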
1103 	do {
1104 		if (matchreg & 1)
1105 			*sc_p->sc_ctx_invl_reg = ctx;
1106 		matchreg >>= 1;
1107 	} while (matchreg);
1108 
1109 	if (pci_ctx_no_compat || !*reg_addr)	/* compat: active ctx flush */
1110 		return (DDI_SUCCESS);
1111 
1112 	pci_ctx_unsuccess_count++;
1113 	if (pci_ctx_flush_warn)
1114 		cmn_err(pci_ctx_flush_warn, "%s%d: ctx flush unsuccessful\n",
1115 			NAMEINST(dip));
1116 	return (DDI_FAILURE);
1117 }
1118 
1119 void
1120 pci_cb_setup(pci_t *pci_p)
1121 {
1122 	dev_info_t *dip = pci_p->pci_dip;
1123 	cb_t *cb_p = pci_p->pci_cb_p;
1124 	uint64_t pa;
1125 	uint32_t chip_id = PCI_CHIP_ID(pci_p);
1126 	DEBUG1(DBG_ATTACH, dip, "cb_create: chip id %d\n", chip_id);
1127 
1128 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1129 		if ((!tm_mtlb_gc_manual) &&
1130 		    (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_24))
1131 			tm_mtlb_gc = 1;
1132 
1133 		if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_23) {
1134 			tomatillo_store_store_wrka = 1;
1135 			tomatillo_disallow_bypass = 1;
1136 			if (pci_spurintr_msgs == PCI_SPURINTR_MSG_DEFAULT)
1137 				pci_spurintr_msgs = 0;
1138 		}
1139 	}
1140 
1141 	if (chip_id == TOMATILLO_VER_20 || chip_id == TOMATILLO_VER_21)
1142 		cmn_err(CE_WARN, "Unsupported Tomatillo rev (%x)", chip_id);
1143 
1144 	if (chip_id < SCHIZO_VER_23)
1145 		pci_ctx_no_active_flush = 1;
1146 
1147 	cb_p->cb_node_id = PCI_ID_TO_NODEID(pci_p->pci_id);
1148 	cb_p->cb_ign	 = PCI_ID_TO_IGN(pci_p->pci_id);
1149 
1150 	/*
1151 	 * schizo control status reg bank is on the 2nd "reg" property entry
1152 	 * interrupt mapping/clear/state regs are on the 1st "reg" entry.
1153 	 *
1154 	 * ALL internal interrupts except pbm interrupts are shared by both
1155 	 * sides, 1st-side-attached is used as *the* owner.
1156 	 */
1157 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[1]);
1158 	cb_p->cb_base_pa = pa << MMU_PAGESHIFT;
1159 
1160 	pa = pci_p->pci_address[3] ?
1161 		(uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[3]) : 0;
1162 	cb_p->cb_icbase_pa = (pa == PFN_INVALID) ? 0 : pa << MMU_PAGESHIFT;
1163 
1164 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[0])
1165 		<< MMU_PAGESHIFT;
1166 	cb_p->cb_map_pa = pa + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1167 	cb_p->cb_clr_pa = pa + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1168 	cb_p->cb_obsta_pa = pa + COMMON_IB_OBIO_INTR_STATE_DIAG_REG;
1169 }
1170 
1171 void
1172 pci_ecc_setup(ecc_t *ecc_p)
1173 {
1174 	ecc_p->ecc_ue.ecc_errpndg_mask = SCHIZO_ECC_UE_AFSR_ERRPNDG;
1175 	ecc_p->ecc_ue.ecc_offset_mask = SCHIZO_ECC_UE_AFSR_QW_OFFSET;
1176 	ecc_p->ecc_ue.ecc_offset_shift = SCHIZO_ECC_UE_AFSR_QW_OFFSET_SHIFT;
1177 	ecc_p->ecc_ue.ecc_size_log2 = 4;
1178 
1179 	ecc_p->ecc_ce.ecc_errpndg_mask = SCHIZO_ECC_CE_AFSR_ERRPNDG;
1180 	ecc_p->ecc_ce.ecc_offset_mask = SCHIZO_ECC_CE_AFSR_QW_OFFSET;
1181 	ecc_p->ecc_ce.ecc_offset_shift = SCHIZO_ECC_CE_AFSR_QW_OFFSET_SHIFT;
1182 	ecc_p->ecc_ce.ecc_size_log2 = 4;
1183 }
1184 
1185 ushort_t
1186 pci_ecc_get_synd(uint64_t afsr)
1187 {
1188 	return ((ushort_t)((afsr & SCHIZO_ECC_CE_AFSR_SYND) >>
1189 	    SCHIZO_ECC_CE_AFSR_SYND_SHIFT));
1190 }
1191 
1192 /*
1193  * overwrite dvma end address (only on virtual-dma systems)
1194  * initialize tsb size
1195  * reset context bits
1196  * return: IOMMU CSR bank base address (VA)
1197  */
1198 
1199 uintptr_t
1200 pci_iommu_setup(iommu_t *iommu_p)
1201 {
1202 	pci_dvma_range_prop_t *dvma_prop;
1203 	int dvma_prop_len;
1204 
1205 	uintptr_t a;
1206 	pci_t *pci_p = iommu_p->iommu_pci_p;
1207 	dev_info_t *dip = pci_p->pci_dip;
1208 	uint_t tsb_size = iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie);
1209 	uint_t tsb_size_prop;
1210 
1211 	/*
1212 	 * Initializations for Tomatillo's micro TLB bug (errata #82).
1213 	 */
1214 	if (tm_mtlb_gc) {
1215 		iommu_p->iommu_mtlb_nreq = 0;
1216 		iommu_p->iommu_mtlb_npgs = 0;
1217 		iommu_p->iommu_mtlb_maxpgs = tm_mtlb_maxpgs;
1218 		iommu_p->iommu_mtlb_req_p = (dvma_unbind_req_t *)
1219 		    kmem_zalloc(sizeof (dvma_unbind_req_t) *
1220 		    (tm_mtlb_maxpgs + 1), KM_SLEEP);
1221 		mutex_init(&iommu_p->iommu_mtlb_lock, NULL, MUTEX_DRIVER, NULL);
1222 	}
1223 
1224 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1225 		"virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
1226 		DDI_PROP_SUCCESS)
1227 		goto tsb_done;
1228 
1229 	if (dvma_prop_len != sizeof (pci_dvma_range_prop_t)) {
1230 		cmn_err(CE_WARN, "%s%d: invalid virtual-dma property",
1231 			ddi_driver_name(dip), ddi_get_instance(dip));
1232 		goto tsb_end;
1233 	}
1234 	iommu_p->iommu_dvma_end = dvma_prop->dvma_base +
1235 		(dvma_prop->dvma_len - 1);
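	/*
	 * One 8-byte TSB entry maps one DVMA page, so the TSB can be
	 * clamped to what the "virtual-dma" range actually needs.
	 */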
1236 	tsb_size_prop = IOMMU_BTOP(dvma_prop->dvma_len) * sizeof (uint64_t);
1237 	tsb_size = MIN(tsb_size_prop, tsb_size);
1238 tsb_end:
1239 	kmem_free(dvma_prop, dvma_prop_len);
1240 tsb_done:
1241 	iommu_p->iommu_tsb_size = iommu_tsb_size_encode(tsb_size);
1242 	iommu_p->iommu_ctx_bitmap =
1243 		kmem_zalloc(IOMMU_CTX_BITMAP_SIZE, KM_SLEEP);
1244 	*iommu_p->iommu_ctx_bitmap = 1ull;	/* reserve context 0 */
1245 
1246 	/*
1247 	 * Determine the virtual address of the register block
1248 	 * containing the iommu control registers and determine
1249 	 * the virtual address of schizo specific iommu registers.
1250 	 */
1251 	a = (uintptr_t)pci_p->pci_address[0];
1252 	iommu_p->iommu_flush_ctx_reg =
1253 		(uint64_t *)(a + SCHIZO_IOMMU_FLUSH_CTX_REG_OFFSET);
1254 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
1255 		iommu_p->iommu_tfar_reg =
1256 			(uint64_t *)(a + TOMATILLO_IOMMU_ERR_TFAR_OFFSET);
1257 	return (a);	/* PCICSRBase */
1258 }
1259 
1260 void
1261 pci_iommu_teardown(iommu_t *iommu_p)
1262 {
1263 	if (pci_use_contexts)
1264 		iommu_ctx_free(iommu_p);
1265 	if (iommu_p->iommu_mtlb_req_p) {
1266 		kmem_free(iommu_p->iommu_mtlb_req_p,
1267 		    sizeof (dvma_unbind_req_t) * (tm_mtlb_maxpgs + 1));
1268 		mutex_destroy(&iommu_p->iommu_mtlb_lock);
1269 		iommu_p->iommu_mtlb_req_p = NULL;
1270 		iommu_p->iommu_mtlb_nreq = 0;
1271 		iommu_p->iommu_mtlb_npgs = iommu_p->iommu_mtlb_maxpgs = 0;
1272 	}
1273 }
1274 
1275 uintptr_t
1276 get_pbm_reg_base(pci_t *pci_p)
1277 {
1278 	return ((uintptr_t)
1279 		(pci_p->pci_address[0] + SCHIZO_PCI_CTRL_REG_OFFSET));
1280 }
1281 
1282 /* ARGSUSED */
1283 static boolean_t
1284 pci_pbm_panic_callb(void *arg, int code)
1285 {
1286 	pbm_t *pbm_p = (pbm_t *)arg;
1287 	volatile uint64_t *ctrl_reg_p;
1288 
1289 	if (pbm_p->pbm_quiesce_count > 0) {
1290 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1291 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1292 	}
1293 
1294 	return (B_TRUE);
1295 }
1296 
1297 static boolean_t
1298 pci_pbm_debug_callb(void *arg, int code)
1299 {
1300 	pbm_t *pbm_p = (pbm_t *)arg;
1301 	volatile uint64_t *ctrl_reg_p;
1302 	uint64_t ctrl_reg;
1303 
1304 	if (pbm_p->pbm_quiesce_count > 0) {
1305 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1306 		if (code == 0) {
1307 			*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1308 		} else {
1309 			ctrl_reg = pbm_p->pbm_saved_ctrl_reg;
1310 			ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
1311 			    SCHIZO_PCI_CTRL_ARB_PARK);
1312 			*ctrl_reg_p = ctrl_reg;
1313 		}
1314 	}
1315 
1316 	return (B_TRUE);
1317 }
1318 
1319 void
1320 pci_pbm_setup(pbm_t *pbm_p)
1321 {
1322 	pci_t *pci_p = pbm_p->pbm_pci_p;
1323 	caddr_t a = pci_p->pci_address[0]; /* PBM block base VA */
1324 	uint64_t pa = va_to_pa(a);
1325 	extern int segkmem_reloc;
1326 
1327 	mutex_init(&pbm_p->pbm_sync_mutex, NULL, MUTEX_DRIVER,
1328 	    (void *)ipltospl(XCALL_PIL));
1329 
1330 	pbm_p->pbm_config_header = (config_header_t *)pci_p->pci_address[2];
1331 	pbm_p->pbm_ctrl_reg = (uint64_t *)(a + SCHIZO_PCI_CTRL_REG_OFFSET);
1332 	pbm_p->pbm_diag_reg = (uint64_t *)(a + SCHIZO_PCI_DIAG_REG_OFFSET);
1333 	pbm_p->pbm_async_flt_status_reg =
1334 		(uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1335 	pbm_p->pbm_async_flt_addr_reg =
1336 		(uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1337 	pbm_p->pbm_estar_reg = (uint64_t *)(a + SCHIZO_PCI_ESTAR_REG_OFFSET);
1338 	pbm_p->pbm_pcix_err_stat_reg = (uint64_t *)(a +
1339 	    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
1340 	pbm_p->pbm_pci_ped_ctrl = (uint64_t *)(a +
1341 	    XMITS_PARITY_DETECT_REG_OFFSET);
1342 
1343 	/*
1344 	 * Create a property to indicate that this node supports DVMA
1345 	 * page relocation.
1346 	 */
1347 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO && segkmem_reloc != 0) {
1348 		pci_dvma_remap_enabled = 1;
1349 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1350 		    pci_p->pci_dip, "dvma-remap-supported");
1351 	}
1352 
1353 	/*
1354 	 * Register a panic callback so we can unquiesce this bus
1355 	 * if it has been placed in the quiesced state.
1356 	 */
1357 	pbm_p->pbm_panic_cb_id = callb_add(pci_pbm_panic_callb,
1358 	    (void *)pbm_p, CB_CL_PANIC, "pci_panic");
1359 	pbm_p->pbm_debug_cb_id = callb_add(pci_pbm_debug_callb,
1360 	    (void *)pbm_p, CB_CL_ENTER_DEBUGGER, "pci_debug_enter");
1361 
1362 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
1363 		goto non_schizo;
1364 
1365 	if (PCI_CHIP_ID(pci_p) >= SCHIZO_VER_23) {
1366 
1367 		pbm_p->pbm_sync_reg_pa = pa + SCHIZO_PBM_DMA_SYNC_REG_OFFSET;
1368 
1369 		/*
1370 		 * This is a software workaround for a schizo hardware bug.
1371 		 * Create a boolean property whose existence means consistent
1372 		 * dma sync should not be done while in prom. The usb polled
1373 		 * code (OHCI, EHCI) will check for this property and will not
1374 		 * do dma sync if this property exists.
1375 		 */
1376 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1377 		    pci_p->pci_dip, "no-prom-cdma-sync");
1378 	}
1379 	return;
1380 non_schizo:
1381 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1382 		pci_dvma_sync_before_unmap = 1;
1383 		pa = pci_p->pci_cb_p->cb_icbase_pa;
1384 	}
1385 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)
1386 		pbm_p->pbm_upper_retry_counter_reg =
1387 		    (uint64_t *)(a + XMITS_UPPER_RETRY_COUNTER_REG_OFFSET);
1388 
1389 	pbm_p->pbm_sync_reg_pa = pa + PBM_DMA_SYNC_PEND_REG_OFFSET;
1390 }
1391 
1392 void
1393 pci_pbm_teardown(pbm_t *pbm_p)
1394 {
1395 	(void) callb_delete(pbm_p->pbm_panic_cb_id);
1396 	(void) callb_delete(pbm_p->pbm_debug_cb_id);
1397 }
1398 
1399 uintptr_t
1400 pci_ib_setup(ib_t *ib_p)
1401 {
1402 	/*
1403 	 * Determine virtual addresses of bridge specific registers.
1404 	 */
1405 	pci_t *pci_p = ib_p->ib_pci_p;
1406 	uintptr_t a = (uintptr_t)pci_p->pci_address[0];
1407 
1408 	ib_p->ib_ign = PCI_ID_TO_IGN(pci_p->pci_id);
1409 	ib_p->ib_max_ino = SCHIZO_MAX_INO;
1410 	ib_p->ib_slot_intr_map_regs = a + SCHIZO_IB_SLOT_INTR_MAP_REG_OFFSET;
1411 	ib_p->ib_intr_map_regs = a + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1412 	ib_p->ib_slot_clear_intr_regs =
1413 		a + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1414 	return (a);
1415 }
1416 
1417 void
1418 pci_sc_setup(sc_t *sc_p)
1419 {
1420 	pci_t *pci_p = sc_p->sc_pci_p;
1421 	uintptr_t a;
1422 
1423 	/*
1424 	 * Determine the virtual addresses of the stream cache
1425 	 * control/status and flush registers.
1426 	 */
1427 	a = (uintptr_t)pci_p->pci_address[0];	/* PCICSRBase */
1428 	sc_p->sc_ctrl_reg = (uint64_t *)(a + SCHIZO_SC_CTRL_REG_OFFSET);
1429 	sc_p->sc_invl_reg = (uint64_t *)(a + SCHIZO_SC_INVL_REG_OFFSET);
1430 	sc_p->sc_sync_reg = (uint64_t *)(a + SCHIZO_SC_SYNC_REG_OFFSET);
1431 	sc_p->sc_ctx_invl_reg = (uint64_t *)(a + SCHIZO_SC_CTX_INVL_REG_OFFSET);
1432 	sc_p->sc_ctx_match_reg =
1433 		(uint64_t *)(a + SCHIZO_SC_CTX_MATCH_REG_OFFSET);
1434 
1435 	/*
1436 	 * Determine the virtual addresses of the streaming cache
1437 	 * diagnostic access registers.
1438 	 */
1439 	sc_p->sc_data_diag_acc = (uint64_t *)(a + SCHIZO_SC_DATA_DIAG_OFFSET);
1440 	sc_p->sc_tag_diag_acc = (uint64_t *)(a + SCHIZO_SC_TAG_DIAG_OFFSET);
1441 	sc_p->sc_ltag_diag_acc = (uint64_t *)(a + SCHIZO_SC_LTAG_DIAG_OFFSET);
1442 }
1443 
1444 /*ARGSUSED*/
1445 int
1446 pci_get_numproxy(dev_info_t *dip)
1447 {
1448 	/*
1449 	 * Schizo does not support interrupt proxies.
1450 	 */
1451 	return (0);
1452 }
1453 
1454 /*
1455  * pcisch error handling 101:
1456  *
1457  * The various functions below are responsible for error handling. Given
1458  * a particular error, they must gather the appropriate state, report all
1459  * errors with correct payload, and attempt recovery wherever possible.
1460  *
1461  * Recovery in the context of this driver is being able to notify a leaf device
1462  * of the failed transaction. This leaf device may either be the master or
1463  * target for this transaction and may have already received an error
1464  * notification via a PCI interrupt. Notification is done via DMA and access
1465  * handles. If we capture an address for the transaction then we can map it
1466  * to a handle(if the leaf device is fma-compliant) and fault the handle as
1467  * to a handle (if the leaf device is fma-compliant) and fault the handle as
1468  *
1469  * The hardware can either interrupt or trap upon detection of an error, in
1470  * some rare cases it also causes a fatal reset.
1471  *
1472  * cb_buserr_intr() is responsible for handling control block
1473  * errors(errors which stem from the host bus side of the bridge). Since
1474  * errors (errors which stem from the host bus side of the bridge). Since
1475  * call a bus specific error handler to report and handle the detected
1476  * error. Since this error can affect or originate from either of the
1477  * two PCI busses which are connected to the bridge, we need to call
1478  * pci_pbm_err_handler() for each bus as well to report their errors. We
1479  * also need to gather possible errors which have been detected by their
1480  * compliant children (via ndi_fm_handler_dispatch()).
1481  *
1482  * pbm_error_intr() and ecc_intr() are responsible for PCI Block Module
1483  * errors (generic PCI + bridge specific) and ECC errors, respectively. They
1484  * are common between pcisch and pcipsy and therefore exist in pci_pbm.c and
1485  * pci_ecc.c. To support error handling certain chip specific handlers
1486  * must exist and they are defined below.
1487  *
1488  * cpu_deferred_error() and cpu_async_error(), handle the traps that may
1489  * cpu_deferred_error() and cpu_async_error() handle the traps that may
1490  * to report and handle errors that may have caused the trap.
1491  *
1492  * pci_pbm_err_handler() is called by pbm_error_intr() or pci_err_callback()
1493  * (generic fma callback for pcipsy/pcisch, pci_fm.c). pci_err_callback() is
1494  * called when the CPU has trapped because of a possible IO error (TO/BERR/UE).
1495  * It will call pci_pbm_err_handler() to report and handle all PCI/PBM/IOMMU
1496  * related errors which are detected by the chip.
1497  *
1498  * pci_pbm_err_handler() calls a generic interface pbm_afsr_report()(pci_pbm.c)
1499  * to report the pbm specific errors and attempt to map the failed address
1500  * (if captured) to a device instance. pbm_afsr_report() calls a chip specific
1501  * interface to interpret the afsr bits pci_pbm_classify()(pcisch.c/pcipsy.c).
1502  * interface to interpret the afsr bits, pci_pbm_classify() (pcisch.c/pcipsy.c).
1503  * errors.
1504  *
1505  * iommu_err_handler() can recover from most errors, as long as the requesting
1506  * device is notified and the iommu can be flushed. If an IOMMU error occurs
1507  * due to a UE then it will be passed on to the ecc_err_handler() for
1508  * subsequent handling.
1509  *
1510  * ecc_err_handler()(pci_ecc.c) also calls a chip specific interface to
1511  * interpret the afsr, pci_ecc_classify(). ecc_err_handler() also calls
1512  * pci_pbm_err_handler() to report any pbm errors detected.
1513  *
1514  * To make sure that the trap code and the interrupt code are not going
1515  * to step on each other's toes we have a per chip pci_fm_mutex. This also
1516  * makes it necessary for us to be cautious while we are at a high PIL, so
1517  * that we do not cause a subsequent trap that causes us to hang.
1518  *
1519  * The attempt to commonize code was meant to keep in line with the current
1520  * pci driver implementation and it was not meant to confuse. If you are
1521  * confused then don't worry, I was too.
1522  *
1523  */
1524 static void
1525 pci_cb_errstate_get(cb_t *cb_p, cb_errstate_t *cb_err_p)
1526 {
1527 	uint64_t pa = cb_p->cb_base_pa;
1528 	int	i;
1529 
1530 	bzero(cb_err_p, sizeof (cb_errstate_t));
1531 
1532 	ASSERT(MUTEX_HELD(&cb_p->cb_pci_cmn_p->pci_fm_mutex));
1533 
1534 	cb_err_p->cb_bridge_type = PCI_BRIDGE_TYPE(cb_p->cb_pci_cmn_p);
1535 
1536 	cb_err_p->cb_csr = lddphysio(pa + SCHIZO_CB_CSR_OFFSET);
1537 	cb_err_p->cb_err = lddphysio(pa + SCHIZO_CB_ERRCTRL_OFFSET);
1538 	cb_err_p->cb_intr = lddphysio(pa + SCHIZO_CB_INTCTRL_OFFSET);
1539 	cb_err_p->cb_elog = lddphysio(pa + SCHIZO_CB_ERRLOG_OFFSET);
1540 	cb_err_p->cb_ecc = lddphysio(pa + SCHIZO_CB_ECCCTRL_OFFSET);
1541 	cb_err_p->cb_ue_afsr = lddphysio(pa + SCHIZO_CB_UEAFSR_OFFSET);
1542 	cb_err_p->cb_ue_afar = lddphysio(pa + SCHIZO_CB_UEAFAR_OFFSET);
1543 	cb_err_p->cb_ce_afsr = lddphysio(pa + SCHIZO_CB_CEAFSR_OFFSET);
1544 	cb_err_p->cb_ce_afar = lddphysio(pa + SCHIZO_CB_CEAFAR_OFFSET);
1545 
1546 	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS) {
1547 		cb_err_p->cb_first_elog = lddphysio(pa +
1548 				XMITS_CB_FIRST_ERROR_LOG);
1549 		cb_err_p->cb_first_eaddr = lddphysio(pa +
1550 				XMITS_CB_FIRST_ERROR_ADDR);
1551 		cb_err_p->cb_leaf_status = lddphysio(pa +
1552 				XMITS_CB_FIRST_ERROR_ADDR);
1553 	}
1554 
1555 	/* Gather PBM state information for both sides of this chip */
1556 	for (i = 0; i < 2; i++) {
1557 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1558 			continue;
1559 		pci_pbm_errstate_get(cb_p->cb_pci_cmn_p->pci_p[i],
1560 		    &cb_err_p->cb_pbm[i]);
1561 	}
1562 }
1563 
1564 static void
1565 pci_cb_clear_error(cb_t *cb_p, cb_errstate_t *cb_err_p)
1566 {
1567 	uint64_t pa = cb_p->cb_base_pa;
1568 
1569 	stdphysio(pa + SCHIZO_CB_ERRLOG_OFFSET, cb_err_p->cb_elog);
1570 }
1571 
1572 static cb_fm_err_t safari_err_tbl[] = {
1573 	SAFARI_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1574 	SAFARI_SSM_DIS,		SCHIZO_CB_ELOG_SSM_DIS,		CB_FATAL,
1575 	SAFARI_BAD_CMD_PCIA, 	SCHIZO_CB_ELOG_BAD_CMD_PCIA,	CB_FATAL,
1576 	SAFARI_BAD_CMD_PCIB, 	SCHIZO_CB_ELOG_BAD_CMD_PCIB,	CB_FATAL,
1577 	SAFARI_PAR_ERR_INT_PCIB, XMITS_CB_ELOG_PAR_ERR_INT_PCIB, CB_FATAL,
1578 	SAFARI_PAR_ERR_INT_PCIA, XMITS_CB_ELOG_PAR_ERR_INT_PCIA, CB_FATAL,
1579 	SAFARI_PAR_ERR_INT_SAF,	XMITS_CB_ELOG_PAR_ERR_INT_SAF,	CB_FATAL,
1580 	SAFARI_PLL_ERR_PCIB,	XMITS_CB_ELOG_PLL_ERR_PCIB,	CB_FATAL,
1581 	SAFARI_PLL_ERR_PCIA,	XMITS_CB_ELOG_PLL_ERR_PCIA,	CB_FATAL,
1582 	SAFARI_PLL_ERR_SAF,	XMITS_CB_ELOG_PLL_ERR_SAF,	CB_FATAL,
1583 	SAFARI_SAF_CIQ_TO,	SCHIZO_CB_ELOG_SAF_CIQ_TO,	CB_FATAL,
1584 	SAFARI_SAF_LPQ_TO,	SCHIZO_CB_ELOG_SAF_LPQ_TO,	CB_FATAL,
1585 	SAFARI_SAF_SFPQ_TO,	SCHIZO_CB_ELOG_SAF_SFPQ_TO,	CB_FATAL,
1586 	SAFARI_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1587 	SAFARI_UNMAP_ERR,	SCHIZO_CB_ELOG_UNMAP_ERR,	CB_FATAL,
1588 	SAFARI_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_FATAL,
1589 	SAFARI_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_FATAL,
1590 	SAFARI_DSTAT_ERR,	SCHIZO_CB_ELOG_DSTAT_ERR,	CB_FATAL,
1591 	SAFARI_SAF_UFPQ_TO,	SCHIZO_CB_ELOG_SAF_UFPQ_TO,	CB_FATAL,
1592 	SAFARI_CPU0_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU0_PAR_SINGLE,	CB_FATAL,
1593 	SAFARI_CPU0_PAR_BIDI,	SCHIZO_CB_ELOG_CPU0_PAR_BIDI,	CB_FATAL,
1594 	SAFARI_CPU1_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU1_PAR_SINGLE,	CB_FATAL,
1595 	SAFARI_CPU1_PAR_BIDI,	SCHIZO_CB_ELOG_CPU1_PAR_BIDI,	CB_FATAL,
1596 	NULL,			NULL,				NULL,
1597 };
1598 
1599 /*
1600  * Function used to handle and log Safari bus errors.
1601  */
1602 static int
1603 safari_err_handler(dev_info_t *dip, uint64_t fme_ena,
1604 		cb_errstate_t *cb_err_p)
1605 {
1606 	int	i;
1607 	int	fatal = 0;
1608 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
1609 	pci_common_t *cmn_p = pci_p->pci_common_p;
1610 
1611 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1612 
1613 	for (i = 0; safari_err_tbl[i].cb_err_class != NULL; i++) {
1614 		if (cb_err_p->cb_elog & safari_err_tbl[i].cb_reg_bit) {
1615 			cb_err_p->cb_err_class = safari_err_tbl[i].cb_err_class;
1616 			cb_ereport_post(dip, fme_ena, cb_err_p);
1617 			fatal += safari_err_tbl[i].cb_fatal;
1618 		}
1619 	}
1620 
1621 	if (fatal)
1622 		return (DDI_FM_FATAL);
1623 	return (DDI_FM_OK);
1624 
1625 }
1626 
1627 /*
1628  * Check the pbm va log register for a captured errant address, and fail
1629  * the handle if it is found in the per-device cache.
1630  * Called from jbus_err_handler.
1631  */
1632 static int
1633 jbus_check_va_log(cb_t *cb_p, uint64_t fme_ena,
1634     cb_errstate_t *cb_err_p)
1635 {
1636 	int i;
1637 	int ret = DDI_FM_FATAL;
1638 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1639 
1640 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1641 	/*
1642 	 * Check the VA log register for an address associated with the error;
1643 	 * if no address is registered then return failure.
1644 	 */
1645 	for (i = 0; i < 2; i++) {
1646 
1647 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1648 			continue;
1649 		/*
1650 		 * Look up and fault handle associated with
1651 		 * logged DMA address
1652 		 */
1653 		if (cb_err_p->cb_pbm[i].pbm_va_log) {
1654 			void *addr = (void *)&cb_err_p->cb_pbm[i].pbm_va_log;
1655 			ret = ndi_fmc_error(cb_p->cb_pci_cmn_p->pci_p[i]->
1656 					pci_dip, NULL, DMA_HANDLE, fme_ena,
1657 					(void *)addr);
1658 			if (ret == DDI_FM_NONFATAL)
1659 				break;
1660 		}
1661 	}
1662 	return (ret);
1663 }
1664 
1665 static cb_fm_err_t jbus_err_tbl[] = {
1666 	JBUS_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1667 	JBUS_PWR_DATA_PERR,	TOMATILLO_CB_ELOG_WR_DATA_PAR_ERR, CB_FATAL,
1668 	JBUS_DRD_DATA_PERR,	TOMATILLO_CB_ELOG_RD_DATA_PAR_ERR, CB_NONFATAL,
1669 	JBUS_CTL_PERR,		TOMATILLO_CB_ELOG_CTL_PAR_ERR,	CB_FATAL,
1670 	JBUS_ILL_BYTE_EN,	TOMATILLO_CB_ELOG_ILL_BYTE_EN,	CB_FATAL,
1671 	JBUS_ILL_COH_IN,	TOMATILLO_CB_ELOG_ILL_COH_IN,	CB_FATAL,
1672 	JBUS_SNOOP_ERR_RD,	TOMATILLO_CB_ELOG_SNOOP_ERR_RD,	CB_FATAL,
1673 	JBUS_SNOOP_ERR_RDS,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDS, CB_FATAL,
1674 	JBUS_SNOOP_ERR_RDSA,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDSA, CB_FATAL,
1675 	JBUS_SNOOP_ERR_OWN,	TOMATILLO_CB_ELOG_SNOOP_ERR_OWN, CB_FATAL,
1676 	JBUS_SNOOP_ERR_RDO,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDO, CB_FATAL,
1677 	JBUS_SNOOP_ERR_PCI,	TOMATILLO_CB_ELOG_SNOOP_ERR_PCI, CB_FATAL,
1678 	JBUS_SNOOP_ERR_GR,	TOMATILLO_CB_ELOG_SNOOP_ERR_GR,	CB_FATAL,
1679 	JBUS_SNOOP_ERR,		TOMATILLO_CB_ELOG_SNOOP_ERR,	CB_FATAL,
1680 	JBUS_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1681 	JBUS_UNMAP_ERR,		SCHIZO_CB_ELOG_UNMAP_ERR,	CB_NONFATAL,
1682 	JBUS_TO_EXP_ERR,	TOMATILLO_CB_ELOG_TO_EXP_ERR,	CB_NONFATAL,
1683 	JBUS_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_NONFATAL,
1684 	JBUS_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_NONFATAL,
1685 	NULL,			NULL,				NULL,
1686 };
1687 
1688 /*
1689  * Function used to handle and log Jbus errors.
1690  */
1691 static int
1692 jbus_err_handler(dev_info_t *dip, uint64_t fme_ena,
1693     cb_errstate_t *cb_err_p)
1694 {
1695 	int	fatal = 0;
1696 	int	nonfatal = 0;
1697 	int	i;
1698 	pci_t	*pci_p = get_pci_soft_state(ddi_get_instance(dip));
1699 	cb_t	*cb_p = pci_p->pci_cb_p;
1700 
1701 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1702 
1703 	for (i = 0; jbus_err_tbl[i].cb_err_class != NULL; i++) {
1704 		if (!(cb_err_p->cb_elog & jbus_err_tbl[i].cb_reg_bit))
1705 			continue;
1706 		cb_err_p->cb_err_class = jbus_err_tbl[i].cb_err_class;
1707 		if (jbus_err_tbl[i].cb_fatal) {
1708 			fatal += jbus_err_tbl[i].cb_fatal;
1709 			continue;
1710 		}
1711 		if (jbus_check_va_log(cb_p, fme_ena, cb_err_p)
1712 				!= DDI_FM_NONFATAL)
1713 			fatal++;
1714 		else
1714 			nonfatal++;
1715 		cb_ereport_post(dip, fme_ena, cb_err_p);
1716 	}
1717 
1718 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1719 				DDI_FM_OK));
1720 }
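
/*
 * Note on the two handlers above: both are table-driven, so adding a new
 * error class is a one-row change.  A hypothetical new Jbus entry (the
 * names below are illustrative, not real bits) would supply the ereport
 * class, the error-log register bit and the severity:
 *
 *	JBUS_NEW_ERR,		TOMATILLO_CB_ELOG_NEW_ERR,	CB_NONFATAL,
 *
 * inserted ahead of the NULL terminator row; the walk loops themselves
 * need no modification.
 */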
1721 
1722 /*
1723  * Control Block error interrupt handler.
1724  */
1725 uint_t
1726 cb_buserr_intr(caddr_t a)
1727 {
1728 	cb_t *cb_p = (cb_t *)a;
1729 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1730 	pci_t *pci_p = cmn_p->pci_p[0];
1731 	cb_errstate_t cb_err;
1732 	ddi_fm_error_t derr;
1733 	int ret = DDI_FM_FATAL;
1734 	int i;
1735 
1736 	if (pci_p == NULL)
1737 		pci_p = cmn_p->pci_p[1];
1738 
1739 	bzero(&derr, sizeof (ddi_fm_error_t));
1740 	derr.fme_version = DDI_FME_VERSION;
1741 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1742 
1743 	mutex_enter(&cmn_p->pci_fm_mutex);
1744 
1745 	pci_cb_errstate_get(cb_p, &cb_err);
1746 
1747 	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_TOMATILLO)
1748 		ret = jbus_err_handler(pci_p->pci_dip, derr.fme_ena, &cb_err);
1749 	else if ((CB_CHIP_TYPE(cb_p) == PCI_CHIP_SCHIZO) ||
1750 			(CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS))
1751 		ret = safari_err_handler(pci_p->pci_dip, derr.fme_ena,
1752 		    &cb_err);
1753 
1754 	/*
1755 	 * Check for related errors in PBM and IOMMU. The IOMMU could cause
1756 	 * a timeout on the jbus due to an IOMMU miss, so we need to check and
1757 	 * log the IOMMU error registers.
1758 	 */
1759 	for (i = 0; i < 2; i++) {
1760 		if (cmn_p->pci_p[i] == NULL)
1761 			continue;
1762 		if (pci_pbm_err_handler(cmn_p->pci_p[i]->pci_dip, &derr,
1763 		    (void *)cmn_p->pci_p[i], PCI_CB_CALL) == DDI_FM_FATAL)
1764 			ret = DDI_FM_FATAL;
1765 	}
1766 
1767 	/* Cleanup and reset error bits */
1768 	pci_cb_clear_error(cb_p, &cb_err);
1769 	mutex_exit(&cmn_p->pci_fm_mutex);
1770 
1771 	if (ret == DDI_FM_FATAL) {
1772 		fm_panic("Fatal System Bus Error has occurred\n");
1773 	}
1774 
1775 	return (DDI_INTR_CLAIMED);
1776 }
1777 
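/*
 * Each ecc_err_tbl record below carries seven fields, consumed by
 * pci_ecc_classify(): ereport class, AFSR error bit, interrupt type
 * (CBNINTR_UE/CBNINTR_CE), primary/secondary, PIO region match bits,
 * ECC region, and the handle type (ACC_HANDLE/DMA_HANDLE) used for the
 * errant-handle lookup.
 */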
1778 static ecc_fm_err_t ecc_err_tbl[] = {
1779 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1780 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_UPA64S, SCH_REG_UPA,
1781 	ACC_HANDLE,
1782 
1783 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1784 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_REG, SCH_REG_PCIA_REG,
1785 	ACC_HANDLE,
1786 
1787 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1788 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_MEM, SCH_REG_PCIA_MEM,
1789 	ACC_HANDLE,
1790 
1791 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1792 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_CFGIO, SCH_REG_PCIA_CFGIO,
1793 	ACC_HANDLE,
1794 
1795 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1796 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_REG, SCH_REG_PCIB_REG,
1797 	ACC_HANDLE,
1798 
1799 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1800 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_MEM, SCH_REG_PCIB_MEM,
1801 	ACC_HANDLE,
1802 
1803 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1804 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_CFGIO, SCH_REG_PCIB_CFGIO,
1805 	ACC_HANDLE,
1806 
1807 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1808 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_SAFARI_REGS, SCH_REG_SAFARI_REGS,
1809 	ACC_HANDLE,
1810 
1811 	PCI_ECC_SEC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_UE,
1812 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1813 
1814 	PCI_ECC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1815 	PBM_PRIMARY, NULL, NULL, ACC_HANDLE,
1816 
1817 	PCI_ECC_SEC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1818 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1819 
1820 	PCI_ECC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1821 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1822 
1823 	PCI_ECC_SEC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1824 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1825 
1826 	PCI_ECC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1827 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1828 
1829 	PCI_ECC_SEC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1830 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1831 
1832 	PCI_ECC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1833 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1834 
1835 	PCI_ECC_SEC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1836 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1837 
1838 	PCI_ECC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1839 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1840 
1841 	PCI_ECC_SEC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1842 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1843 
1844 	NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1845 };
1846 
1847 /*
1848  * pci_ecc_classify, called by ecc_handler to classify ecc errors
1849  * and determine if we should panic or not.
1850  */
1851 void
1852 pci_ecc_classify(uint64_t err, ecc_errstate_t *ecc_err_p)
1853 {
1854 	struct async_flt *ecc_p = &ecc_err_p->ecc_aflt;
1855 	uint64_t region, afar = ecc_p->flt_addr;
1856 	int i, j, ret = 0;
1857 	int flag = 0, fatal = 0;
1858 	pci_common_t *cmn_p = ecc_err_p->ecc_ii_p.ecc_p->ecc_pci_cmn_p;
1859 	pci_t *pci_p = cmn_p->pci_p[0];
1860 
1861 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1862 
1863 	ecc_err_p->ecc_bridge_type = PCI_BRIDGE_TYPE(cmn_p);
1864 
1865 	if (pci_p == NULL)
1866 		pci_p = cmn_p->pci_p[1];
1867 
1868 	ecc_err_p->ecc_ctrl = lddphysio(ecc_err_p->ecc_ii_p.ecc_p->ecc_csr_pa);
1869 	ecc_err_p->ecc_err_addr = afar;
1870 	region = afar & SCHIZO_ECC_AFAR_PIOW_MASK;
1871 
1872 	for (i = 0; ecc_err_tbl[i].ecc_err_class != NULL; i++) {
1873 		if (!(err & ecc_err_tbl[i].ecc_reg_bit) ||
1874 			(ecc_err_p->ecc_ii_p.ecc_type !=
1875 			    ecc_err_tbl[i].ecc_type) ||
1876 			(ecc_err_p->ecc_pri != ecc_err_tbl[i].ecc_pri))
1877 			continue;
1878 
1879 		ecc_p->flt_erpt_class = ecc_err_tbl[i].ecc_err_class;
1880 		flag = ecc_err_tbl[i].ecc_flag;
1881 
1882 		if (!ecc_err_tbl[i].ecc_pri ||
1883 				(ecc_err_tbl[i].ecc_type == CBNINTR_CE)) {
1884 			fatal += (ecc_err_tbl[i].ecc_type == CBNINTR_UE) ?
1885 				1 : 0;
1886 			break;
1887 		}
1888 
1889 		if (flag == ACC_HANDLE &&
1890 			(region & ecc_err_tbl[i].ecc_region_bits)) {
1891 			ecc_err_p->ecc_region = ecc_err_tbl[i].ecc_region;
1892 			pci_format_ecc_addr(pci_p->pci_dip,
1893 					&ecc_err_p->ecc_err_addr,
1894 					ecc_err_p->ecc_region);
1895 		}
1896 
1897 		/*
1898 		 * Lookup and fault errant handle
1899 		 */
1900 		for (j = 0; j < 2; ++j) {
1901 			ret = DDI_FM_UNKNOWN;
1902 			if (cmn_p->pci_p[j] == NULL)
1903 				continue;
1904 			ret = ndi_fmc_error(cmn_p->pci_p[j]->pci_dip, NULL,
1905 					flag, ecc_err_p->ecc_ena,
1906 					(void *)&ecc_err_p->ecc_err_addr);
1907 			if (ret == DDI_FM_NONFATAL) {
1908 				fatal = 0;
1909 				break;
1910 			} else
1911 				fatal++;
1912 		}
1913 		break;
1914 	}
1915 
1916 	if (fatal)
1917 		ecc_p->flt_panic = 1;
1918 	else if (flag != ACC_HANDLE)
1919 		ecc_err_p->ecc_pg_ret = 1;
1920 }
1921 
1922 /*
1923  * Tables to define PCI-X Split Completion errors
1924  */
1925 
1926 pcix_err_msg_rec_t pcix_completer_errs[] = {
1927 	{PCIX_CPLT_OUT_OF_RANGE,	"pcix", "oor"	},
1928 };
1929 
1930 pcix_err_tbl_t pcix_split_errs_tbl[] = {
1931 	{PCIX_CLASS_CPLT,
1932 		sizeof (pcix_completer_errs)/sizeof (pcix_err_msg_rec_t),
1933 		pcix_completer_errs		},
1934 };
1935 
1936 /*
1937  * Tables for the PCI-X error status messages
1938  */
1939 pcix_err_msg_rec_t pcix_stat_errs[] = {
1940 	{XMITS_PCIX_STAT_SC_DSCRD,	"pcix", "discard"  	},
1941 	{XMITS_PCIX_STAT_SC_TTO,	"xmits.pbmx", "tato" 	},
1942 	{XMITS_PCIX_STAT_SMMU,		"xmits.pbmx", "stmmu"	},
1943 	{XMITS_PCIX_STAT_SDSTAT,	"xmits.pbmx", "stdst"	},
1944 	{XMITS_PCIX_STAT_CMMU,		"xmits.pbmx", "cnmmu"	},
1945 	{XMITS_PCIX_STAT_CDSTAT,	"xmits.pbmx", "cndst"	}
1946 };
1947 
1948 pcix_err_tbl_t pcix_stat_errs_tbl =
1949 	{PCIX_NO_CLASS,
1950 		sizeof (pcix_stat_errs)/sizeof (pcix_err_msg_rec_t),
1951 		pcix_stat_errs		};
1952 
1953 
1954 /*
1955  * Walk through a table of error messages, posting an ereport for each match.
1956  *
1957  * t - the table of messages to parse
1958  * err - the error to match against
1959  * multi - flag; set when multiple error bits may be set at once
1960  */
1961 static int
1962 pcix_lookup_err_msgs(dev_info_t *dip, uint64_t ena, pcix_err_tbl_t t,
1963 		pbm_errstate_t *pbm_err_p)
1964 {
1965 	uint32_t err_bits  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_INDEX_MASK;
1966 	int nerr = 0;
1967 	int j;
1968 	char buf[FM_MAX_CLASS];
1969 
1970 	for (j = 0; j < t.err_rec_num; j++)  {
1971 		uint32_t msg_key = t.err_msg_tbl[j].msg_key;
1972 		if (pbm_err_p->pbm_multi ? !(err_bits & msg_key) :
1973 		    (err_bits != msg_key))
1974 			continue;
1975 
1976 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1977 		    t.err_msg_tbl[j].msg_class,
1978 		    pbm_err_p->pbm_pri ? "" : PCIX_SECONDARY,
1979 		    t.err_msg_tbl[j].msg_str);
1980 
1981 		pbm_err_p->pbm_err_class = buf;
1982 		pcix_ereport_post(dip, ena, pbm_err_p);
1983 		nerr++;
1984 	}
1985 	return (nerr ? DDI_FM_FATAL : DDI_FM_OK);
1986 }
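
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the continue-test in pcix_lookup_err_msgs() reads more naturally as a
 * positive match predicate.  In multi-bit mode any overlap with the
 * message key is a match; in single-error mode the captured bits must
 * equal the key exactly.
 */
static int
pcix_msg_matches(uint32_t err_bits, uint32_t msg_key, int multi)
{
	return (multi ? ((err_bits & msg_key) != 0) : (err_bits == msg_key));
}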
1987 
1988 /*
1989  * Decodes the primary (bits 27:24) or secondary (bits 15:12) PCI-X split
1990  * completion error message class and index in the PBM AFSR.
1991  */
1992 static void
1993 pcix_log_split_err(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
1994 {
1995 	uint32_t class  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_CLASS_MASK;
1996 	uint32_t num_classes = sizeof (pcix_split_errs_tbl) /
1997 	    sizeof (struct pcix_err_tbl);
1998 	int i;
1999 
2000 	for (i = 0; i < num_classes; i++) {
2001 		if (class == pcix_split_errs_tbl[i].err_class) {
2002 			pbm_err_p->pbm_multi = PCIX_SINGLE_ERR;
2003 			(void) pcix_lookup_err_msgs(dip, ena,
2004 			    pcix_split_errs_tbl[i], pbm_err_p);
2005 			break;
2006 		}
2007 	}
2008 }
2009 
2010 /*
2011  * Report PBM PCI-X Error Status Register if in PCI-X mode
2012  *
2013  * Once a PCI-X fault tree is constructed, the code below may need to
2014  * change.
2015  */
2016 static int
2017 pcix_log_pbm(pci_t *pci_p, uint64_t ena, pbm_errstate_t *pbm_err_p)
2018 {
2019 	int fatal = 0;
2020 	int nonfatal = 0;
2021 	uint32_t e;
2022 
2023 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2024 
2025 	DEBUG3(DBG_ERR_INTR, pci_p->pci_dip, "pcix_log_pbm: chip_type=%d "
2026 	    "ctl_stat=%lx afsr = 0x%lx", CHIP_TYPE(pci_p),
2027 	    pbm_err_p->pbm_ctl_stat, pbm_err_p->pbm_afsr);
2028 
2029 	if ((CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) ||
2030 	    !(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE))
2031 		return (DDI_FM_OK);
2032 
2033 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2034 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2035 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2036 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2037 		nonfatal++;
2038 	}
2039 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR) {
2040 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2041 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2042 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2043 		nonfatal++;
2044 	}
2045 
2046 	e = PBM_PCIX_TO_PRIERR(pbm_err_p->pbm_pcix_stat);
2047 	if (e) {
2048 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2049 		pbm_err_p->pbm_err = e;
2050 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2051 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2052 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2053 			fatal++;
2054 		else
2055 			nonfatal++;
2056 	}
2057 
2058 	e = PBM_PCIX_TO_SECERR(pbm_err_p->pbm_pcix_stat);
2059 	if (e) {
2060 		pbm_err_p->pbm_pri = PBM_SECONDARY;
2061 		pbm_err_p->pbm_err = e;
2062 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2063 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2064 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2065 			fatal++;
2066 		else
2067 			nonfatal++;
2068 	}
2069 
2070 	if (!fatal && !nonfatal)
2071 		return (DDI_FM_OK);
2072 	else if (fatal)
2073 		return (DDI_FM_FATAL);
2074 	return (DDI_FM_NONFATAL);
2075 }
2076 
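/*
 * Each pbm_err_tbl record below carries five fields, consumed by
 * pci_pbm_classify(): ereport class, AFSR error bit, primary/secondary,
 * log routing flag (FM_LOG_PCI or FM_LOG_PBM), and the target ereport
 * class later handed to pci_target_enqueue() by pci_pbm_err_handler().
 */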
2077 static pbm_fm_err_t pbm_err_tbl[] = {
2078 	PCI_MA,			SCHIZO_PCI_AFSR_E_MA,	PBM_PRIMARY,
2079 	FM_LOG_PCI,	PCI_TARG_MA,
2080 
2081 	PCI_SEC_MA,		SCHIZO_PCI_AFSR_E_MA,	PBM_SECONDARY,
2082 	FM_LOG_PBM,	NULL,
2083 
2084 	PCI_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_PRIMARY,
2085 	FM_LOG_PCI,	PCI_TARG_REC_TA,
2086 
2087 	PCI_SEC_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_SECONDARY,
2088 	FM_LOG_PBM,	NULL,
2089 
2090 	PCI_PBM_RETRY,		SCHIZO_PCI_AFSR_E_RTRY,	PBM_PRIMARY,
2091 	FM_LOG_PBM,	PCI_PBM_TARG_RETRY,
2092 
2093 	PCI_SEC_PBM_RETRY,	SCHIZO_PCI_AFSR_E_RTRY,	PBM_SECONDARY,
2094 	FM_LOG_PBM,	NULL,
2095 
2096 	PCI_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_PRIMARY,
2097 	FM_LOG_PCI,	PCI_TARG_MDPE,
2098 
2099 	PCI_SEC_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_SECONDARY,
2100 	FM_LOG_PBM,	NULL,
2101 
2102 	PCI_PBM_TTO,		SCHIZO_PCI_AFSR_E_TTO,	PBM_PRIMARY,
2103 	FM_LOG_PBM,	PCI_PBM_TARG_TTO,
2104 
2105 	PCI_SEC_PBM_TTO,	SCHIZO_PCI_AFSR_E_TTO,	PBM_SECONDARY,
2106 	FM_LOG_PBM,	NULL,
2107 
2108 	PCI_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_PRIMARY,
2109 	FM_LOG_PBM,	NULL,
2110 
2111 	PCI_SEC_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_SECONDARY,
2112 	FM_LOG_PBM,	NULL,
2113 
2114 	NULL,			NULL,			NULL,
2115 	NULL,		NULL,
2116 };
2117 
2118 
2119 /*
2120  * pci_pbm_classify, called by pbm_afsr_report to classify piow afsr.
2121  */
2122 int
2123 pci_pbm_classify(pbm_errstate_t *pbm_err_p)
2124 {
2125 	uint32_t err;
2126 	int nerr = 0;
2127 	int i;
2128 
2129 	err = pbm_err_p->pbm_pri ? PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr) :
2130 		PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
2131 
2132 	for (i = 0; pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2133 		if ((err & pbm_err_tbl[i].pbm_reg_bit) &&
2134 		    (pbm_err_p->pbm_pri == pbm_err_tbl[i].pbm_pri)) {
2135 			if (pbm_err_tbl[i].pbm_flag == FM_LOG_PCI)
2136 				pbm_err_p->pbm_pci.pci_err_class =
2137 					pbm_err_tbl[i].pbm_err_class;
2138 			else
2139 				pbm_err_p->pbm_err_class =
2140 				    pbm_err_tbl[i].pbm_err_class;
2141 
2142 			pbm_err_p->pbm_terr_class =
2143 			    pbm_err_tbl[i].pbm_terr_class;
2144 			pbm_err_p->pbm_log = pbm_err_tbl[i].pbm_flag;
2145 			nerr++;
2146 			break;
2147 		}
2148 	}
2149 
2150 	return (nerr);
2151 }
2152 
2153 /*
2154  * Function used to handle and log IOMMU errors. Called by pci_pbm_err_handler,
2155  * with pci_fm_mutex held.
2156  */
2157 static int
2158 iommu_err_handler(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
2159 {
2160 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2161 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2162 	ecc_t *ecc_p = pci_p->pci_ecc_p;
2163 	uint64_t stat;
2164 	ushort_t ta_signalled;
2165 	int err = 0;
2166 	int fatal = 0;
2167 	int nonfatal = 0;
2168 	int ret;
2169 
2170 	ASSERT(MUTEX_HELD(&ecc_p->ecc_pci_cmn_p->pci_fm_mutex));
2171 	if (!((stat = *iommu_p->iommu_ctrl_reg) & TOMATILLO_IOMMU_ERR)) {
2172 		pbm_err_p->pbm_err_class = PCI_SCH_MMU_ERR;
2173 		iommu_ereport_post(dip, ena, pbm_err_p);
2174 		return (DDI_FM_NONFATAL);
2175 	}
2176 
2177 	/*
2178 	 * Need to make sure a Target Abort was signalled to the device if
2179 	 * we have any hope of recovering. Tomatillo does not send a TA for
2180 	 * DMA Writes that result in a Translation Error, thus fooling the
2181 	 * device into believing everything is as it expects. Ignorance
2182 	 * is bliss, but knowledge is power.
2183 	 */
2184 	ta_signalled = pbm_err_p->pbm_pci.pci_cfg_stat &
2185 		PCI_STAT_S_TARG_AB;
2186 
2187 	if (stat & TOMATILLO_IOMMU_ERR_ILLTSBTBW) {
2188 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_TSBTBW;
2189 		err = 1;
2190 		iommu_ereport_post(dip, ena, pbm_err_p);
2191 		if (!ta_signalled)
2192 			fatal++;
2193 		else
2194 			nonfatal++;
2195 	}
2196 
2197 	if (stat & TOMATILLO_IOMMU_ERR_BAD_VA) {
2198 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_VA;
2199 		err = 1;
2200 		iommu_ereport_post(dip, ena, pbm_err_p);
2201 		if (!ta_signalled)
2202 			fatal++;
2203 		else
2204 			nonfatal++;
2205 	}
2206 
2207 	if (!err) {
2208 		stat = ((stat & TOMATILLO_IOMMU_ERRSTS) >>
2209 		    TOMATILLO_IOMMU_ERRSTS_SHIFT);
2210 		switch (stat) {
2211 		case TOMATILLO_IOMMU_PROTECTION_ERR:
2212 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_PROT_ERR;
2213 			iommu_ereport_post(dip, ena, pbm_err_p);
2214 			fatal++;
2215 			break;
2216 		case TOMATILLO_IOMMU_INVALID_ERR:
2217 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_INVAL_ERR;
2218 			/*
2219 			 * Fault the address in iommu_tfar
2220 			 * register to inform target driver of error
2221 			 */
2222 			ret = ndi_fmc_error(pci_p->pci_dip, NULL, DMA_HANDLE,
2223 				ena, (void *)&pbm_err_p->pbm_iommu.iommu_tfar);
2224 
2225 			if (ret != DDI_FM_NONFATAL) {
2226 				if (ta_signalled)
2227 					nonfatal++;
2228 				else
2229 					fatal++;
2230 			} else
2231 				nonfatal++;
2232 
2233 			iommu_ereport_post(dip, ena, pbm_err_p);
2234 			break;
2235 		case TOMATILLO_IOMMU_TIMEOUT_ERR:
2236 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_TO_ERR;
2237 			fatal++;
2238 			iommu_ereport_post(dip, ena, pbm_err_p);
2239 			break;
2240 		case TOMATILLO_IOMMU_ECC_ERR:
2241 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_UE;
2242 			iommu_ereport_post(dip, ena, pbm_err_p);
2243 			break;
2244 		}
2245 	}
2246 
2247 	if (fatal)
2248 		return (DDI_FM_FATAL);
2249 	else if (nonfatal)
2250 		return (DDI_FM_NONFATAL);
2251 
2252 	return (DDI_FM_OK);
2253 }
2254 
2255 int
2256 pci_check_error(pci_t *pci_p)
2257 {
2258 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2259 	uint16_t pci_cfg_stat;
2260 	uint64_t pbm_ctl_stat, pbm_afsr, pbm_pcix_stat;
2261 	caddr_t a = pci_p->pci_address[0];
2262 	uint64_t *pbm_pcix_stat_reg;
2263 
2264 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2265 
2266 	pci_cfg_stat = pbm_p->pbm_config_header->ch_status_reg;
2267 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2268 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2269 
2270 	if ((pci_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2271 				PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2272 				PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2273 			(pbm_ctl_stat & (SCHIZO_PCI_CTRL_BUS_UNUSABLE |
2274 				TOMATILLO_PCI_CTRL_PCI_DTO_ERR |
2275 				SCHIZO_PCI_CTRL_PCI_TTO_ERR |
2276 				SCHIZO_PCI_CTRL_PCI_RTRY_ERR |
2277 				SCHIZO_PCI_CTRL_PCI_MMU_ERR |
2278 				COMMON_PCI_CTRL_SBH_ERR |
2279 				COMMON_PCI_CTRL_SERR)) ||
2280 			(PBM_AFSR_TO_PRIERR(pbm_afsr)))
2281 		return (1);
2282 
2283 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2284 			(pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2285 
2286 		pbm_pcix_stat_reg = (uint64_t *)(a +
2287 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2288 
2289 		pbm_pcix_stat = *pbm_pcix_stat_reg;
2290 
2291 		if (PBM_PCIX_TO_PRIERR(pbm_pcix_stat))
2292 			return (1);
2293 
2294 		if (pbm_pcix_stat & XMITS_PCIX_STAT_PERR_RECOV_INT)
2295 			return (1);
2296 	}
2297 
2298 	return (0);
2300 }
2301 
2302 static pbm_fm_err_t pci_pbm_err_tbl[] = {
2303 	PCI_PBM_RETRY,			SCHIZO_PCI_CTRL_PCI_RTRY_ERR,
2304 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_RETRY,
2305 
2306 	PCI_PBM_TTO,			SCHIZO_PCI_CTRL_PCI_TTO_ERR,
2307 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_TTO,
2308 
2309 	PCI_SCH_BUS_UNUSABLE_ERR,	SCHIZO_PCI_CTRL_BUS_UNUSABLE,
2310 	NULL,	PBM_NONFATAL,	NULL,
2311 
2312 	NULL,				NULL,
2313 	NULL,	NULL,		NULL
2314 };
2315 
2316 /*
2317  * Function used to log all PCI/PBM/IOMMU errors found in the system.
2318  * It is called by the pbm_error_intr as well as the pci_err_callback(trap
2319  * callback). To protect access we hold the pci_fm_mutex when calling
2320  * this function.
2321  */
2322 int
2323 pci_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
2324 		const void *impl_data, int caller)
2325 {
2326 	int fatal = 0;
2327 	int nonfatal = 0;
2328 	int unknown = 0;
2329 	uint32_t prierr, secerr;
2330 	pbm_errstate_t pbm_err;
2331 	char buf[FM_MAX_CLASS];
2332 	pci_t *pci_p = (pci_t *)impl_data;
2333 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2334 	int i, ret = 0;
2335 
2336 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2337 	pci_pbm_errstate_get(pci_p, &pbm_err);
2338 
2339 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
2340 	    fm_ena_generate(0, FM_ENA_FMT1);
2341 
2342 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
2343 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
2344 
2345 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
2346 		if (caller == PCI_TRAP_CALL) {
2347 			/*
2348 			 * For ddi_caut_get treat all events as nonfatal.
2349 			 * The trampoline will set err_ena = 0, err_status =
2350 			 * NONFATAL. We only really call this function so that
2351 			 * pci_clear_error() and ndi_fm_handler_dispatch() will
2352 			 * get called.
2353 			 */
2354 			derr->fme_status = DDI_FM_NONFATAL;
2355 			nonfatal++;
2356 			goto done;
2357 		} else {
2358 			/*
2359 			 * For ddi_caut_put treat all events as nonfatal. Here
2360 			 * we have the handle and can call ndi_fm_acc_err_set().
2361 			 */
2362 			derr->fme_status = DDI_FM_NONFATAL;
2363 			ndi_fm_acc_err_set(pbm_p->pbm_excl_handle, derr);
2364 			nonfatal++;
2365 			goto done;
2366 		}
2367 	} else if (derr->fme_flag == DDI_FM_ERR_PEEK) {
2368 		/*
2369 		 * For ddi_peek treat all events as nonfatal. We only
2370 		 * really call this function so that pci_clear_error()
2371 		 * and ndi_fm_handler_dispatch() will get called.
2372 		 */
2373 		nonfatal++;
2374 		goto done;
2375 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
2376 		/*
2377 		 * For ddi_poke we can treat as nonfatal if the
2378 		 * following conditions are met :
2379 		 * 1. Make sure only primary error is MA/TA
2380 		 * 2. Make sure no secondary error bits set
2381 		 * 3. check pci config header stat reg to see MA/TA is
2382 		 *    logged. We cannot verify only MA/TA is recorded
2383 		 *    since it gets much more complicated when a
2384 		 *    PCI-to-PCI bridge is present.
2385 		 */
2386 		if ((prierr == SCHIZO_PCI_AFSR_E_MA) && !secerr &&
2387 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_MAST_AB)) {
2388 			nonfatal++;
2389 			goto done;
2390 		} else if ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) &&
2391 		    pcix_ma_behind_bridge(&pbm_err)) {
2392 			/*
2393 			 * MAs behind a PCI-X bridge get sent back to
2394 			 * the host as a Split Completion Error Message.
2395 			 * We handle this the same as the above check.
2396 			 */
2397 			nonfatal++;
2398 			goto done;
2399 		}
2400 		if ((prierr == SCHIZO_PCI_AFSR_E_TA) && !secerr &&
2401 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_TARG_AB)) {
2402 			nonfatal++;
2403 			goto done;
2404 		}
2405 	}
2406 
2407 	DEBUG2(DBG_ERR_INTR, dip, "pci_pbm_err_handler: prierr=0x%x "
2408 	    "secerr=0x%x", prierr, secerr);
2409 
2410 	if (prierr || secerr) {
2411 		ret = pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
2412 		if (ret == DDI_FM_FATAL)
2413 			fatal++;
2414 		else
2415 			nonfatal++;
2416 	}
2417 	if ((ret = pcix_log_pbm(pci_p, derr->fme_ena, &pbm_err))
2418 			== DDI_FM_FATAL)
2419 		fatal++;
2420 	else if (ret == DDI_FM_NONFATAL)
2421 		nonfatal++;
2422 
2423 	if ((ret = pci_cfg_report(dip, derr, &pbm_err.pbm_pci, caller, prierr))
2424 			== DDI_FM_FATAL)
2425 		fatal++;
2426 	else if (ret == DDI_FM_NONFATAL)
2427 		nonfatal++;
2428 
2429 	for (i = 0; pci_pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2430 		if ((pbm_err.pbm_ctl_stat & pci_pbm_err_tbl[i].pbm_reg_bit) &&
2431 		    !prierr) {
2432 			pbm_err.pbm_err_class =
2433 				pci_pbm_err_tbl[i].pbm_err_class;
2434 			pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2435 			if (pci_pbm_err_tbl[i].pbm_flag)
2436 				fatal++;
2437 			else
2438 				nonfatal++;
2439 			if (caller == PCI_TRAP_CALL &&
2440 			    pci_pbm_err_tbl[i].pbm_terr_class)
2441 				pci_target_enqueue(derr->fme_ena,
2442 				    pci_pbm_err_tbl[i].pbm_terr_class,
2443 				    pbm_err.pbm_bridge_type,
2444 				    (uint64_t)derr->fme_bus_specific);
2445 		}
2446 	}
2447 
2448 	if ((pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SBH_ERR) &&
2449 	    (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)) {
2450 		pbm_err.pbm_err_class = PCI_SCH_SBH;
2451 		pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2452 		if (pci_panic_on_sbh_errors)
2453 			fatal++;
2454 		else
2455 			nonfatal++;
2456 	}
2457 
2458 	/*
2459 	 * PBM Received System Error - During any transaction, or
2460 	 * at any point on the bus, some device may detect a critical
2461 	 * error and signal a system error to the system.
2462 	 */
2463 	if (pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SERR) {
2464 		/*
2465 		 * may be expected (master abort from pci-pci bridge during
2466 		 * poke will generate SERR)
2467 		 */
2468 		if (derr->fme_flag != DDI_FM_ERR_POKE) {
2469 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2470 			    PCI_ERROR_SUBCLASS, PCI_REC_SERR);
2471 			DEBUG1(DBG_ERR_INTR, dip, "pci_pbm_err_handler: "
2472 			    "ereport_post: %s", buf);
2473 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2474 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2475 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2476 			    pbm_err.pbm_pci.pci_cfg_stat, PCI_CONFIG_COMMAND,
2477 			    DATA_TYPE_UINT16, pbm_err.pbm_pci.pci_cfg_comm,
2478 			    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2479 		}
2480 		unknown++;
2481 	}
2482 
2483 	/*
2484 	 * PCI Retry Timeout - Device fails to retry deferred
2485 	 * transaction within timeout. Only Tomatillo
2486 	 */
2487 	if (pbm_err.pbm_ctl_stat & TOMATILLO_PCI_CTRL_PCI_DTO_ERR) {
2488 		if (pci_dto_fault_warn == CE_PANIC)
2489 			fatal++;
2490 		else
2491 			nonfatal++;
2492 
2493 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2494 			PCI_ERROR_SUBCLASS, PCI_DTO);
2495 		ddi_fm_ereport_post(dip, buf, derr->fme_ena, DDI_NOSLEEP,
2496 		    FM_VERSION, DATA_TYPE_UINT8, 0,
2497 		    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2498 		    pbm_err.pbm_pci.pci_cfg_stat,
2499 		    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2500 		    pbm_err.pbm_pci.pci_cfg_comm,
2501 		    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2502 	}
2503 
2504 	/*
2505 	 * PBM Detected Data Parity Error - DPE detected during a DMA Write
2506 	 * or PIO Read.  The latter case is taken care of by cpu_deferred_error
2507 	 * and sent here to be logged.
2508 	 */
2509 	if ((pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_PERROR) &&
2510 			!(pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_S_SYSERR)) {
2511 		/*
2512 		 * If we have an address then fault
2513 		 * it, if not probe for errant device
2514 		 */
2515 		ret = DDI_FM_FATAL;
2516 		if (caller != PCI_TRAP_CALL) {
2517 			if (pbm_err.pbm_va_log) {
2518 				ret = ndi_fmc_error(dip, NULL, DMA_HANDLE,
2519 				    derr->fme_ena, (void *)&pbm_err.pbm_va_log);
2520 			}
2521 			if (ret == DDI_FM_NONFATAL)
2522 				nonfatal++;
2523 			else
2524 				fatal++;
2525 		} else
2526 			nonfatal++;
2528 	}
2529 
2530 	/* PBM Detected IOMMU Error */
2531 	if (pbm_err.pbm_ctl_stat & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2532 		if (iommu_err_handler(dip, derr->fme_ena, &pbm_err)
2533 				== DDI_FM_FATAL)
2534 			fatal++;
2535 		else
2536 			nonfatal++;
2537 	}
2538 
2539 done:
2540 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
2541 	if (ret == DDI_FM_FATAL) {
2542 		fatal++;
2543 	} else if (ret == DDI_FM_NONFATAL) {
2544 		nonfatal++;
2545 	} else if (ret == DDI_FM_UNKNOWN) {
2546 		unknown++;
2547 	}
2548 
2549 	/*
2550 	 * RSERR not claimed as nonfatal by a child is considered fatal
2551 	 */
2552 	if (unknown && !fatal && !nonfatal)
2553 		fatal++;
2554 
2555 	/* Cleanup and reset error bits */
2556 	pci_clear_error(pci_p, &pbm_err);
2557 
2558 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2559 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2560 }
2561 
2562 /*
2563  * Returns TRUE if the primary error is a Split Completion Error
2564  * indicating that a Master Abort occurred behind a PCI-X bridge.
2565  * This function should only be called for buses running in PCI-X mode.
2566  */
2567 static int
2568 pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p)
2569 {
2570 	uint64_t msg;
2571 
2572 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR)
2573 		return (0);
2574 
2575 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2576 		msg = (pbm_err_p->pbm_afsr >> XMITS_PCI_X_P_MSG_SHIFT) &
2577 		    XMITS_PCIX_MSG_MASK;
2578 		if ((msg & PCIX_CLASS_BRIDGE) &&
2579 		    (msg & PCIX_BRIDGE_MASTER_ABORT))
2580 			return (1);
2582 	}
2583 
2584 	return (0);
2585 }
2586 
2587 /*
2588  * Function used to gather PBM/PCI/IOMMU error state for the
2589  * pci_pbm_err_handler and the cb_buserr_intr. This function must be
2590  * called while pci_fm_mutex is held.
2591  */
2592 static void
2593 pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2594 {
2595 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2596 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2597 	caddr_t a = pci_p->pci_address[0];
2598 	uint64_t *pbm_pcix_stat_reg;
2599 
2600 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2601 	bzero(pbm_err_p, sizeof (pbm_errstate_t));
2602 
2603 	/*
2604 	 * Capture all pbm error state for later logging
2605 	 */
2606 	pbm_err_p->pbm_bridge_type = PCI_BRIDGE_TYPE(pci_p->pci_common_p);
2607 
2608 	pbm_err_p->pbm_pci.pci_cfg_stat =
2609 		pbm_p->pbm_config_header->ch_status_reg;
2610 	pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2611 	pbm_err_p->pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2612 	pbm_err_p->pbm_afar = *pbm_p->pbm_async_flt_addr_reg;
2613 	pbm_err_p->pbm_iommu.iommu_stat = *iommu_p->iommu_ctrl_reg;
2614 	pbm_err_p->pbm_pci.pci_cfg_comm =
2615 		pbm_p->pbm_config_header->ch_command_reg;
2616 	pbm_err_p->pbm_pci.pci_pa = *pbm_p->pbm_async_flt_addr_reg;
2617 
2618 	/*
2619 	 * Record errant slot for Xmits and Schizo
2620 	 * Not stored in Tomatillo
2621 	 */
2622 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS ||
2623 			CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) {
2624 		pbm_err_p->pbm_err_sl = (pbm_err_p->pbm_ctl_stat &
2625 				SCHIZO_PCI_CTRL_ERR_SLOT) >>
2626 			SCHIZO_PCI_CTRL_ERR_SLOT_SHIFT;
2627 
2628 		/*
2629 		 * Bit 51 on XMITS rev 1.0 is the same as
2630 		 * SCHIZO_PCI_CTRL_ERR_SLOT_LOCK on Schizo 2.3, and it
2631 		 * must be cleared in order to latch the slot info on
2632 		 * the next fault.  On XMITS rev 2.0, however, this bit
2633 		 * indicates a DMA Write Parity error.
2635 		 */
2636 		if (pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_DMA_WR_PERR) {
2637 			if ((PCI_CHIP_ID(pci_p) == XMITS_VER_10) ||
2638 				(PCI_CHIP_ID(pci_p) <= SCHIZO_VER_23)) {
2639 				/*
2640 				 * top 32 bits are W1C and we just want to
2641 				 * clear SLOT_LOCK. Leave bottom 32 bits
2642 				 * unchanged
2643 				 */
2644 				*pbm_p->pbm_ctrl_reg =
2645 					pbm_err_p->pbm_ctl_stat &
2646 					(SCHIZO_PCI_CTRL_ERR_SLOT_LOCK |
2647 					0xffffffff);
2648 				pbm_err_p->pbm_ctl_stat =
2649 					*pbm_p->pbm_ctrl_reg;
2650 			}
2651 		}
2652 	}
2653 
2654 	/*
2655 	 * Tomatillo specific registers
2656 	 */
2657 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2658 		pbm_err_p->pbm_va_log = (uint64_t)va_to_pa((void *)(uintptr_t)
2659 		    (*(uint64_t *)(a + TOMATILLO_TGT_ERR_VALOG_OFFSET)));
2660 		pbm_err_p->pbm_iommu.iommu_tfar = *iommu_p->iommu_tfar_reg;
2661 	}
2662 
2663 	/*
2664 	 * Xmits PCI-X register
2665 	 */
2666 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2667 			(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2668 
2669 		pbm_pcix_stat_reg = (uint64_t *)(a +
2670 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2671 
2672 		pbm_err_p->pbm_pcix_stat = *pbm_pcix_stat_reg;
2673 		pbm_err_p->pbm_pcix_pfar = pbm_err_p->pbm_pcix_stat &
2674 				XMITS_PCI_X_STATUS_PFAR_MASK;
2675 	}
2676 }
2677 
2678 /*
2679  * Function used to clear PBM/PCI/IOMMU error state after error handling
2680  * is complete. Only clearing error bits which have been logged. Called by
2681  * pci_pbm_err_handler and pci_bus_exit.
2682  */
2683 static void
2684 pci_clear_error(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2685 {
2686 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2687 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2688 
2689 	ASSERT(MUTEX_HELD(&pbm_p->pbm_pci_p->pci_common_p->pci_fm_mutex));
2690 
2691 	if (*pbm_p->pbm_ctrl_reg & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2692 		iommu_tlb_scrub(pci_p->pci_iommu_p, 1);
2693 	}
2694 	pbm_p->pbm_config_header->ch_status_reg =
2695 		pbm_err_p->pbm_pci.pci_cfg_stat;
2696 	*pbm_p->pbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
2697 	*pbm_p->pbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
2698 	*iommu_p->iommu_ctrl_reg = pbm_err_p->pbm_iommu.iommu_stat;
2699 }
2700 
2701 void
2702 pbm_clear_error(pbm_t *pbm_p)
2703 {
2704 	uint64_t pbm_afsr, pbm_ctl_stat;
2705 
2706 	/*
2707 	 * for poke() support - called from POKE_FLUSH. Spin waiting
2708 	 * for MA, TA or SERR to be cleared by a pbm_error_intr().
2709 	 * We have to wait for SERR too in case the device is beyond
2710 	 * a pci-pci bridge.
2711 	 */
2712 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2713 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2714 	while (((pbm_afsr >> SCHIZO_PCI_AFSR_PE_SHIFT) &
2715 	    (SCHIZO_PCI_AFSR_E_MA | SCHIZO_PCI_AFSR_E_TA)) ||
2716 	    (pbm_ctl_stat & COMMON_PCI_CTRL_SERR)) {
2717 		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2718 		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2719 	}
2720 }
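
/*
 * Hypothetical bounded variant of the spin in pbm_clear_error() above
 * (illustration only; the driver deliberately spins without bound, since
 * POKE_FLUSH must not proceed until pbm_error_intr() has cleared the
 * bits):
 */
static int
pbm_clear_error_bounded(pbm_t *pbm_p, int max_spins)
{
	uint64_t pbm_afsr, pbm_ctl_stat;
	int i;

	for (i = 0; i < max_spins; i++) {
		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
		if (!((pbm_afsr >> SCHIZO_PCI_AFSR_PE_SHIFT) &
		    (SCHIZO_PCI_AFSR_E_MA | SCHIZO_PCI_AFSR_E_TA)) &&
		    !(pbm_ctl_stat & COMMON_PCI_CTRL_SERR))
			return (DDI_SUCCESS);	/* error bits cleared */
	}
	return (DDI_FAILURE);	/* still pending after max_spins reads */
}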
2721 
2722 /*
2723  * Function used to convert the 32-bit captured PCI error address
2724  * to the full Safari or Jbus address, so that the address can be
2725  * looked up in our handle caches.
2726  */
2727 void
2728 pci_format_addr(dev_info_t *dip, uint64_t *afar, uint64_t afsr)
2729 {
2730 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2731 	pci_ranges_t *io_range, *mem_range;
2732 	uint64_t err_pa = 0;
2733 
2734 	if (afsr & SCHIZO_PCI_AFSR_CONF_SPACE) {
2735 		err_pa |= pci_p->pci_ranges->parent_high;
2736 		err_pa = err_pa << 32;
2737 		err_pa |= pci_p->pci_ranges->parent_low;
2738 	} else if (afsr & SCHIZO_PCI_AFSR_IO_SPACE) {
2739 		io_range = pci_p->pci_ranges + 1;
2740 		err_pa |= io_range->parent_high;
2741 		err_pa = err_pa << 32;
2742 		err_pa |= io_range->parent_low;
2743 	} else if (afsr & SCHIZO_PCI_AFSR_MEM_SPACE) {
2744 		mem_range = pci_p->pci_ranges + 2;
2745 		err_pa |= mem_range->parent_high;
2746 		err_pa = err_pa << 32;
2747 		err_pa |= mem_range->parent_low;
2748 	}
2749 	*afar |= err_pa;
2750 }
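
/*
 * Usage sketch for pci_format_addr() (the afar value below is invented
 * for illustration; the call itself is the real interface):
 */
static void
pci_format_addr_example(pci_t *pci_p)
{
	uint64_t afar = 0x00400000;	/* 32-bit address captured in AFAR */

	/* OR in parent_high/parent_low of the PCI memory range */
	pci_format_addr(pci_p->pci_dip, &afar, SCHIZO_PCI_AFSR_MEM_SPACE);
	/* afar now holds the full Safari/Jbus PA for handle cache lookup */
}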
2751 
2752 static ecc_format_t ecc_format_tbl[] = {
2753 	SCH_REG_UPA,		NULL,				NULL,
2754 	SCH_REG_PCIA_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEA,
2755 	SCH_REG_PCIA_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEA,
2756 	SCH_REG_PCIA_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEA,
2757 	SCH_REG_PCIB_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEB,
2758 	SCH_REG_PCIB_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEB,
2759 	SCH_REG_PCIB_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEB,
2760 	SCH_REG_SAFARI_REGS,	NULL,				NULL,
2761 	NULL,			NULL,				NULL,
2762 };
2763 
2764 /*
2765  * Function used to convert the 32-bit PIO address captured for a
2766  * Safari Bus UE (during a PIO read or write) to a full Safari Bus address.
2767  */
2768 static void
2769 pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar, ecc_region_t region)
2770 {
2771 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2772 	pci_common_t *cmn_p;
2773 	cb_t *cb_p;
2774 	int i, pci_side = 0;
2775 	int swap = 0;
2776 	uint64_t pa;
2777 	uint64_t flag = 0, schizo_base, pci_csr_base;
2778 
2779 	if (pci_p == NULL)
2780 		return;
2781 
	cmn_p = pci_p->pci_common_p;
	cb_p = pci_p->pci_cb_p;
	pa = cb_p->cb_base_pa;
2782 	pci_csr_base = va_to_pa(pci_p->pci_address[0]);
2783 
2784 	/*
2785 	 * Using the csr_base address to determine which side
2786 	 * we are on.
2787 	 */
2788 	if (pci_csr_base & PCI_SIDE_ADDR_MASK)
2789 		pci_side = 1;
2790 	else
2791 		pci_side = 0;
2792 
2793 	schizo_base = pa - PBM_CTRL_OFFSET;
2794 
2795 	for (i = 0; ecc_format_tbl[i].ecc_region != NULL; i++) {
2796 		if (region == ecc_format_tbl[i].ecc_region) {
2797 			flag = ecc_format_tbl[i].ecc_space;
2798 			if (ecc_format_tbl[i].ecc_side != pci_side)
2799 				swap = 1;
2800 			if (region == SCH_REG_SAFARI_REGS)
2801 				*afar |= schizo_base;
2802 			break;
2803 		}
2804 	}
2805 
2806 	if (swap) {
2807 		pci_p = cmn_p->pci_p[PCI_OTHER_SIDE(pci_p->pci_side)];
2808 
2809 		if (pci_p == NULL)
2810 			return;
2811 	}
2812 	pci_format_addr(pci_p->pci_dip, afar, flag);
2813 }
2814 
2815 /*
2816  * Function used to post control block specific ereports.
2817  */
2818 static void
2819 cb_ereport_post(dev_info_t *dip, uint64_t ena, cb_errstate_t *cb_err)
2820 {
2821 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2822 	char buf[FM_MAX_CLASS], dev_path[MAXPATHLEN], *ptr;
2823 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2824 	nvlist_t *ereport, *detector;
2825 	errorq_elem_t *eqep;
2826 	nv_alloc_t *nva;
2827 
2828 	DEBUG1(DBG_ATTACH, dip, "cb_ereport_post: elog 0x%lx",
2829 	    cb_err->cb_elog);
2830 
2831 	/*
2832 	 * We do not use ddi_fm_ereport_post because we need to set a
2833 	 * special detector here. Since we do not have a device path for
2834  * the bridge chip, we use what we think it should be to aid in
2835 	 * diagnosis.
2836 	 */
2837 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", DDI_IO_CLASS,
2838 	    cb_err->cb_bridge_type, cb_err->cb_err_class);
2839 
2840 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2841 
2842 	eqep = errorq_reserve(fmhdl->fh_errorq);
2843 	if (eqep == NULL)
2844 		return;
2845 
2846 	ereport = errorq_elem_nvl(fmhdl->fh_errorq, eqep);
2847 	nva = errorq_elem_nva(fmhdl->fh_errorq, eqep);
2848 	detector = fm_nvlist_create(nva);
2849 
2850 	ASSERT(ereport);
2851 	ASSERT(nva);
2852 	ASSERT(detector);
2853 
2854 	ddi_pathname(dip, dev_path);
2855 	ptr = strrchr(dev_path, (int)',');
2856 
2857 	if (ptr)
2858 		*ptr = '\0';
2859 
2860 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL, dev_path, NULL);
2861 
2862 	DEBUG1(DBG_ERR_INTR, dip, "cb_ereport_post: ereport_set: %s", buf);
2863 
2864 	if (CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO ||
2865 	    CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
2866 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2867 		    SAFARI_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2868 		    SAFARI_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2869 		    SAFARI_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2870 		    SAFARI_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2871 		    SAFARI_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2872 		    NULL);
2873 	} else if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2874 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2875 		    JBUS_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2876 		    JBUS_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2877 		    JBUS_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2878 		    JBUS_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2879 		    JBUS_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2880 		    NULL);
2881 	}
2882 	errorq_commit(fmhdl->fh_errorq, eqep, ERRORQ_ASYNC);
2883 }
2884 
2885 /*
2886  * Function used to post IOMMU specific ereports.
2887  */
2888 static void
2889 iommu_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2890 {
2891 	char buf[FM_MAX_CLASS];
2892 
2893 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2894 		    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2895 
2896 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2897 
2898 	DEBUG1(DBG_ERR_INTR, dip, "iommu_ereport_post: ereport_set: %s", buf);
2899 
2900 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2901 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2902 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2903 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2904 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2905 	    PCI_PBM_IOMMU_CTRL, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_stat,
2906 	    PCI_PBM_IOMMU_TFAR, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_tfar,
2907 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2908 	    PCI_PBM_VALOG, DATA_TYPE_UINT64, pbm_err->pbm_va_log,
2909 	    NULL);
2910 }
2911 
2912 /*
2913  * Function used to post PCI-X generic ereports.
2914  * This function needs to be fixed once the Fault Boundary Analysis
2915  * for PCI-X is conducted. The payload should be made more generic.
2916  */
2917 static void
2918 pcix_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2919 {
2920 	char buf[FM_MAX_CLASS];
2921 
2922 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2923 		    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2924 
2925 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2926 
2927 	DEBUG1(DBG_ERR_INTR, dip, "pcix_ereport_post: ereport_post: %s", buf);
2928 
2929 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2930 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2931 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2932 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2933 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2934 	    PCI_PBM_AFSR, DATA_TYPE_UINT64, pbm_err->pbm_afsr,
2935 	    PCI_PBM_AFAR, DATA_TYPE_UINT64, pbm_err->pbm_afar,
2936 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2937 	    PCIX_STAT, DATA_TYPE_UINT64, pbm_err->pbm_pcix_stat,
2938 	    PCIX_PFAR, DATA_TYPE_UINT32, pbm_err->pbm_pcix_pfar,
2939 	    NULL);
2940 }
2941 
2942 static void
2943 iommu_ctx_free(iommu_t *iommu_p)
2944 {
2945 	kmem_free(iommu_p->iommu_ctx_bitmap, IOMMU_CTX_BITMAP_SIZE);
2946 }
2947 
2948 /*
2949  * iommu_tlb_scrub():
2950  *	Examine TLB entries through the TLB diagnostic registers for errors.
2951  *	scrub = 1 : clean up all error bits in the TLB; FAULT_RESET case
2952  *	scrub = 0 : log all error conditions to the console; FAULT_LOG case
2953  *	In both cases, return the number of errors found in the TLB entries.
2954  */
2955 static int
2956 iommu_tlb_scrub(iommu_t *iommu_p, int scrub)
2957 {
2958 	int i, nerr = 0;
2959 	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
2960 	char *neg = "not ";
2961 
2962 	uint64_t base = (uint64_t)iommu_p->iommu_ctrl_reg -
2963 		COMMON_IOMMU_CTRL_REG_OFFSET;
2964 
2965 	volatile uint64_t *tlb_tag = (volatile uint64_t *)
2966 		(base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
2967 	volatile uint64_t *tlb_data = (volatile uint64_t *)
2968 		(base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
2969 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
2970 		uint64_t tag = tlb_tag[i];
2971 		uint64_t data = tlb_data[i];
2972 		uint32_t errstat;
2973 		iopfn_t pfn;
2974 
2975 		if (!(tag & TLBTAG_ERR_BIT))
2976 			continue;
2977 
2978 		pfn = (iopfn_t)(data & TLBDATA_MEMPA_BITS);
2979 		errstat = (uint32_t)
2980 			((tag & TLBTAG_ERRSTAT_BITS) >> TLBTAG_ERRSTAT_SHIFT);
2981 		if (errstat == TLBTAG_ERRSTAT_INVALID) {
2982 			if (scrub)
2983 				tlb_tag[i] = tlb_data[i] = 0ull;
2984 		} else
2985 			nerr++;
2986 
2987 		if (scrub)
2988 			continue;
2989 
2990 		cmn_err(CE_CONT, "%s%d: Error %x on IOMMU TLB entry %x:\n"
2991 		"\tContext=%lx %sWritable %sStreamable\n"
2992 		"\tPCI Page Size=%sk Address in page %lx\n",
2993 			ddi_driver_name(dip), ddi_get_instance(dip), errstat, i,
2994 			(tag & TLBTAG_CONTEXT_BITS) >> TLBTAG_CONTEXT_SHIFT,
2995 			(tag & TLBTAG_WRITABLE_BIT) ? "" : neg,
2996 			(tag & TLBTAG_STREAM_BIT) ? "" : neg,
2997 			(tag & TLBTAG_PGSIZE_BIT) ? "64" : "8",
2998 			(tag & TLBTAG_PCIVPN_BITS) << 13);
2999 		cmn_err(CE_CONT, "Memory: %sValid %sCacheable Page Frame=%lx\n",
3000 			(data & TLBDATA_VALID_BIT) ? "" : neg,
3001 			(data & TLBDATA_CACHE_BIT) ? "" : neg, pfn);
3002 	}
3003 	return (nerr);
3004 }
3005 
3006 /*
3007  * pci_iommu_disp: calculates the displacement needed in Tomatillo's
3008  *	iommu control register and modifies the control value template
3009  *	from the caller. It also clears any error status bits that are
3010  *	new in Tomatillo.
3011  * return value: an 8-bit mask to enable corresponding 512 MB segments
3012  *	suitable for tomatillo's target address register.
3013  *	0x00: no programming is needed, use existing value from prom
3014  *	0x60: use segment 5 and 6 to form a 1GB dvma range
3015  */
3016 static uint64_t
3017 pci_iommu_disp(iommu_t *iommu_p, uint64_t *ctl_p)
3018 {
3019 	uint64_t ctl_old;
3020 	if (CHIP_TYPE(iommu_p->iommu_pci_p) != PCI_CHIP_TOMATILLO)
3021 		return (0);
3022 
3023 	ctl_old = *iommu_p->iommu_ctrl_reg;
3024 	/* iommu ctrl reg error bits are W1C */
3025 	if (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT) {
3026 		cmn_err(CE_WARN, "Tomatillo iommu err: %lx", ctl_old);
3027 		*ctl_p |= (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT)
3028 		    << TOMATIILO_IOMMU_ERR_REG_SHIFT;
3029 	}
3030 
3031 	if (iommu_p->iommu_tsb_size != TOMATILLO_IOMMU_TSB_MAX)
3032 		return (0);
3033 
3034 	/* Tomatillo 2.0 and later, and 1GB DVMA range */
3035 	*ctl_p |= 1 << TOMATILLO_IOMMU_SEG_DISP_SHIFT;
3036 	return (3 << (iommu_p->iommu_dvma_base >> (32 - 3)));
3037 }
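
/*
 * Worked example of the return value above (base address hypothetical):
 * each Tomatillo target segment spans 512MB (2^29 bytes), so for a 1GB
 * DVMA range based at 0xA0000000, iommu_dvma_base >> 29 == 5 and the
 * function returns 3 << 5 == 0x60, i.e. segments 5 and 6 enabled --
 * the 0x60 case described in the block comment above.
 */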
3038 
3039 void
3040 pci_iommu_config(iommu_t *iommu_p, uint64_t iommu_ctl, uint64_t cfgpa)
3041 {
3042 	uintptr_t pbm_regbase = get_pbm_reg_base(iommu_p->iommu_pci_p);
3043 	volatile uint64_t *pbm_csr_p = (volatile uint64_t *)pbm_regbase;
3044 	volatile uint64_t *tgt_space_p = (volatile uint64_t *)(pbm_regbase |
3045 		(TOMATILLO_TGT_ADDR_SPACE_OFFSET - SCHIZO_PCI_CTRL_REG_OFFSET));
3046 	volatile uint64_t pbm_ctl = *pbm_csr_p;
3047 
3048 	volatile uint64_t *iommu_ctl_p = iommu_p->iommu_ctrl_reg;
3049 	volatile uint64_t tsb_bar_val = iommu_p->iommu_tsb_paddr;
3050 	volatile uint64_t *tsb_bar_p = iommu_p->iommu_tsb_base_addr_reg;
3051 	uint64_t mask = pci_iommu_disp(iommu_p, &iommu_ctl);
3052 
3053 	DEBUG2(DBG_ATTACH, iommu_p->iommu_pci_p->pci_dip,
3054 		"\npci_iommu_config: pbm_csr_p=%llx pbm_ctl=%llx",
3055 		pbm_csr_p, pbm_ctl);
3056 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3057 		"\n\tiommu_ctl_p=%llx iommu_ctl=%llx",
3058 		iommu_ctl_p, iommu_ctl);
3059 	DEBUG4(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3060 		"\n\tcfgpa=%llx tgt_space_p=%llx mask=%x tsb=%llx\n",
3061 		cfgpa, tgt_space_p, mask, tsb_bar_val);
3062 
3063 	if (!cfgpa)
3064 		goto reprog;
3065 
3066 	/* disable PBM arbiters - turn off bits 0-7 */
3067 	*pbm_csr_p = (pbm_ctl >> 8) << 8;
3068 
3069 	/*
3070 	 * For non-XMITS, flush any previous writes. This is only
3071 	 * necessary for host bridges that may have a USB keyboard
3072 	 * attached.  XMITS does not require this.
3073 	 */
3074 	if (CHIP_TYPE(iommu_p->iommu_pci_p) != PCI_CHIP_XMITS)
3075 		(void) ldphysio(cfgpa);
3076 
3077 reprog:
3078 	if (mask)
3079 		*tgt_space_p = mask;
3080 
3081 	*tsb_bar_p = tsb_bar_val;
3082 	*iommu_ctl_p = iommu_ctl;
3083 
3084 	*pbm_csr_p = pbm_ctl;	/* re-enable bus arbitration */
3085 	pbm_ctl = *pbm_csr_p;	/* flush all prev writes */
3086 }
3087 
3088 
3089 int
3090 pci_get_portid(dev_info_t *dip)
3091 {
3092 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3093 	    "portid", -1));
3094 }
3095 
3096 /*
3097  * Schizo Safari Performance Events.
3098  */
3099 pci_kev_mask_t
3100 schizo_saf_events[] = {
3101 	{"saf_bus_cycles", 0x1},	{"saf_pause_asserted_cycles", 0x2},
3102 	{"saf_frn_coherent_cmds", 0x3},	{"saf_frn_coherent_hits", 0x4},
3103 	{"saf_my_coherent_cmds", 0x5},	{"saf_my_coherent_hits", 0x6},
3104 	{"saf_frn_io_cmds", 0x7}, 	{"saf_frn_io_hits", 0x8},
3105 	{"merge_buffer", 0x9}, 		{"interrupts", 0xa},
3106 	{"csr_pios", 0xc}, 		{"upa_pios", 0xd},
3107 	{"pcia_pios", 0xe}, 		{"pcib_pios", 0xf},
3108 	{"saf_pause_seen_cycles", 0x11}, 	{"dvma_reads", 0x12},
3109 	{"dvma_writes", 0x13},		{"saf_orq_full_cycles", 0x14},
3110 	{"saf_data_in_cycles", 0x15},	{"saf_data_out_cycles", 0x16},
3111 	{"clear_pic", 0x1f}
3112 };
3113 
3114 
3115 /*
3116  * Schizo PCI Performance Events.
3117  */
3118 pci_kev_mask_t
3119 schizo_pci_events[] = {
3120 	{"dvma_stream_rd", 0x0}, 	{"dvma_stream_wr", 0x1},
3121 	{"dvma_const_rd", 0x2},		{"dvma_const_wr", 0x3},
3122 	{"dvma_stream_buf_mis", 0x4},	{"dvma_cycles", 0x5},
3123 	{"dvma_wd_xfr", 0x6},		{"pio_cycles", 0x7},
3124 	{"dvma_tlb_misses", 0x10},	{"interrupts", 0x11},
3125 	{"saf_inter_nack", 0x12},	{"pio_reads", 0x13},
3126 	{"pio_writes", 0x14},		{"dvma_rd_buf_timeout", 0x15},
3127 	{"dvma_rd_rtry_stc", 0x16},	{"dvma_wr_rtry_stc", 0x17},
3128 	{"dvma_rd_rtry_nonstc", 0x18},	{"dvma_wr_rtry_nonstc", 0x19},
3129 	{"E*_slow_transitions", 0x1a},	{"E*_slow_cycles_per_64", 0x1b},
3130 	{"clear_pic", 0x1f}
3131 };
3132 
3133 
3134 /*
3135  * Create the picN kstats for the pci
3136  * and safari events.
3137  */
3138 void
3139 pci_kstat_init()
3140 {
3141 	pci_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3142 		KM_NOSLEEP);
3143 
3144 	if (pci_name_kstat == NULL) {
3145 		cmn_err(CE_WARN, "pcisch: no space for kstat");
3146 	} else {
3147 		pci_name_kstat->pic_no_evs =
3148 			sizeof (schizo_pci_events) / sizeof (pci_kev_mask_t);
3149 		pci_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3150 		pci_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3151 		pci_create_name_kstat("pcis",
3152 			pci_name_kstat, schizo_pci_events);
3153 	}
3154 
3155 	saf_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3156 		KM_NOSLEEP);
3157 	if (saf_name_kstat == NULL) {
3158 		cmn_err(CE_WARN, "pcisch: no space for kstat");
3159 	} else {
3160 		saf_name_kstat->pic_no_evs =
3161 			sizeof (schizo_saf_events) / sizeof (pci_kev_mask_t);
3162 		saf_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3163 		saf_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3164 		pci_create_name_kstat("saf", saf_name_kstat, schizo_saf_events);
3165 	}
3166 }
3167 
3168 void
3169 pci_kstat_fini()
3170 {
3171 	if (pci_name_kstat != NULL) {
3172 		pci_delete_name_kstat(pci_name_kstat);
3173 		kmem_free(pci_name_kstat, sizeof (pci_ksinfo_t));
3174 		pci_name_kstat = NULL;
3175 	}
3176 
3177 	if (saf_name_kstat != NULL) {
3178 		pci_delete_name_kstat(saf_name_kstat);
3179 		kmem_free(saf_name_kstat, sizeof (pci_ksinfo_t));
3180 		saf_name_kstat = NULL;
3181 	}
3182 }
3183 
3184 /*
3185  * Create 'counters' kstat for pci events.
3186  */
3187 void
3188 pci_add_pci_kstat(pci_t *pci_p)
3189 {
3190 	pci_cntr_addr_t *cntr_addr_p = &pci_p->pci_ks_addr;
3191 	uintptr_t regbase = (uintptr_t)pci_p->pci_address[0];
3192 
3193 	cntr_addr_p->pcr_addr = (uint64_t *)
3194 		(regbase + SCHIZO_PERF_PCI_PCR_OFFSET);
3195 	cntr_addr_p->pic_addr = (uint64_t *)
3196 		(regbase + SCHIZO_PERF_PCI_PIC_OFFSET);
3197 
3198 	pci_p->pci_ksp = pci_create_cntr_kstat(pci_p, "pcis",
3199 		NUM_OF_PICS, pci_cntr_kstat_update, cntr_addr_p);
3200 
3201 	if (pci_p->pci_ksp == NULL) {
3202 		cmn_err(CE_WARN, "pcisch : cannot create counter kstat");
3203 	}
3204 }
3205 
3206 void
3207 pci_rem_pci_kstat(pci_t *pci_p)
3208 {
3209 	if (pci_p->pci_ksp != NULL)
3210 		kstat_delete(pci_p->pci_ksp);
3211 	pci_p->pci_ksp = NULL;
3212 }
3213 
3214 void
3215 pci_add_upstream_kstat(pci_t *pci_p)
3216 {
3217 	pci_common_t	*cmn_p = pci_p->pci_common_p;
3218 	pci_cntr_pa_t	*cntr_pa_p = &cmn_p->pci_cmn_uks_pa;
3219 	uint64_t regbase = va_to_pa(pci_p->pci_address[1]);
3220 
3221 	cntr_pa_p->pcr_pa =
3222 		regbase + SCHIZO_PERF_SAF_PCR_OFFSET;
3223 	cntr_pa_p->pic_pa =
3224 		regbase + SCHIZO_PERF_SAF_PIC_OFFSET;
3225 
3226 	cmn_p->pci_common_uksp = pci_create_cntr_kstat(pci_p, "saf",
3227 		NUM_OF_PICS, pci_cntr_kstat_pa_update, cntr_pa_p);
3228 }
3229 
3230 /*
3231  * Extract the driver's binding name to identify which chip
3232  * we're binding to.  Whenever a new bus bridge is created, the driver alias
3233  * entry should be added here to identify the device if needed.  If a device
3234  * isn't added, the identity defaults to PCI_CHIP_UNIDENTIFIED.
3235  */
3236 static uint32_t
3237 pci_identity_init(pci_t *pci_p)
3238 {
3239 	dev_info_t *dip = pci_p->pci_dip;
3240 	char *name = ddi_binding_name(dip);
3241 	uint32_t ver = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3242 		"version#", 0);
3243 
3244 	if (strcmp(name, "pci108e,a801") == 0)
3245 		return (CHIP_ID(PCI_CHIP_TOMATILLO, ver, 0x00));
3246 
3247 	if (strcmp(name, "pci108e,8001") == 0)
3248 		return (CHIP_ID(PCI_CHIP_SCHIZO, ver, 0x00));
3249 
3250 	if (strcmp(name, "pci108e,8002") == 0) {
3251 		uint32_t mod_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3252 			DDI_PROP_DONTPASS, "module-revision#", 0);
3253 		return (CHIP_ID(PCI_CHIP_XMITS, ver, mod_rev));
3254 	}
3255 
3256 	cmn_err(CE_WARN, "%s%d: Unknown PCI Host bridge %s %x\n",
3257 		ddi_driver_name(dip), ddi_get_instance(dip), name, ver);
3258 
3259 	return (PCI_CHIP_UNIDENTIFIED);
3260 }
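
/*
 * Usage note: the CHIP_ID() value built here is consumed throughout this
 * file via CHIP_TYPE() (chip model) and PCI_CHIP_ID() (version), e.g.
 * the "PCI_CHIP_ID(pci_p) >= XMITS_VER_30" revision gate in
 * pci_post_init_child() and the XMITS_VER_10/SCHIZO_VER_23 checks in
 * pci_pbm_errstate_get().
 */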
3261 
3262 /*
3263  * Setup a physical pointer to one leaf config space area. This
3264  * is used in several places in order to do a dummy read which
3265  * guarantees the nexus (and not a bus master) has gained control
3266  * of the bus.
3267  */
3268 static void
3269 pci_setup_cfgpa(pci_t *pci_p)
3270 {
3271 	dev_info_t *dip = pci_p->pci_dip;
3272 	dev_info_t *cdip;
3273 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3274 	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
3275 	uint32_t *reg_p;
3276 	int reg_len;
3277 
3278 	for (cdip = ddi_get_child(dip); cdip != NULL;
3279 	    cdip = ddi_get_next_sibling(cdip)) {
3280 		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
3281 		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
3282 			continue;
3283 		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
3284 		kmem_free(reg_p, reg_len);
3285 		break;
3286 	}
3287 	pbm_p->pbm_anychild_cfgpa = cfgpa;
3288 }
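
/*
 * Note on the masking above (assuming the standard PCI binding's phys.hi
 * encoding in reg[0]): PCI_CONF_ADDR_MASK covers the bus/device/function/
 * register bits, so XORing away PCI_REG_REG_M keeps just the bus/device/
 * function offset, which is added to the leaf's config space base to
 * address that child's config header.
 */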
3289 
3290 void
3291 pci_post_init_child(pci_t *pci_p, dev_info_t *child)
3292 {
3293 	volatile uint64_t *ctrl_reg_p;
3294 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3295 
3296 	pci_setup_cfgpa(pci_p);
3297 
3298 	/*
3299 	 * This is a hack for the skyhawk/casinni combination to address
3300 	 * hardware problems between the request and grant signals which
3301 	 * cause a bus hang.  One workaround, which is applied here,
3302 	 * is to disable bus parking if the child contains the property
3303 	 * pci-req-removal.  Note that if the bus is quiesced we must mask
3304 	 * off the parking bit in the saved control registers, since the
3305 	 * quiesce operation temporarily turns off PCI bus parking.
3306 	 */
3307 	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
3308 		"pci-req-removal") == 1) {
3309 
3310 		if (pbm_p->pbm_quiesce_count > 0) {
3311 			pbm_p->pbm_saved_ctrl_reg &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3312 		} else {
3313 			ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3314 			*ctrl_reg_p &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3315 		}
3316 	}
3317 
3318 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
3319 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) {
3320 			int value;
3321 
3322 			/*
3323 			 * Due to an XMITS bug, we need to set the outstanding
3324 			 * split transactions to 1 for all PCI-X functions
3325 			 * behind the leaf.
3326 			 */
3327 			value = (xmits_max_transactions << 4) |
3328 			    (xmits_max_read_bytes << 2);
3329 
3330 			DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
3331 			    "Workaround: value = %x\n", value);
3332 
3333 			pcix_set_cmd_reg(child, value);
3334 
3335 			(void) ndi_prop_update_int(DDI_DEV_T_NONE,
3336 			    child, "pcix-update-cmd-reg", value);
3337 		}
3338 
3339 		if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
3340 			uint64_t *pbm_pcix_diag_reg =
3341 			    (uint64_t *)(pci_p->pci_address[0] +
3342 			    XMITS_PCI_X_DIAG_REG_OFFSET);
3343 			uint64_t bugcntl = (*pbm_pcix_diag_reg >>
3344 			    XMITS_PCI_X_DIAG_BUGCNTL_SHIFT) &
3345 			    XMITS_PCI_X_DIAG_BUGCNTL_MASK;
3346 			uint64_t tunable = (*pbm_p->pbm_ctrl_reg &
3347 			    XMITS_PCI_CTRL_X_MODE ?
3348 			    xmits_pcix_diag_bugcntl_pcix :
3349 			    xmits_pcix_diag_bugcntl_pci)
3350 			    & XMITS_PCI_X_DIAG_BUGCNTL_MASK;
3351 
3352 			DEBUG4(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS "
3353 			    "pcix diag bugcntl=0x%lx, tunable=0x%lx, mode=%s\n",
3354 			    ddi_driver_name(child), bugcntl, tunable,
3355 			    ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)?
3356 			    "PCI-X":"PCI"));
3357 
3358 			DEBUG2(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS "
3359 			    "pcix diag reg=0x%lx (CUR)\n",
3360 			    ddi_driver_name(child), *pbm_pcix_diag_reg);
3361 
3362 			/*
3363 			 * Due to an XMITS 3.x hw bug, we need to
3364 			 * read PBM's xmits pci ctrl status register to
3365 			 * determine mode (PCI or PCI-X) and then update
3366 			 * PBM's pcix diag register with new BUG_FIX_CNTL
3367 			 * bits (47:32) _if_ different from tunable's mode
3368 			 * based value. This update is performed only once
3369 			 * during the PBM's first child init.
3370 			 *
3371 			 * Per instructions from xmits hw engineering,
3372 			 * non-BUG_FIX_CNTL bits should not be preserved
3373 			 * when updating the pcix diag register. Such bits
3374 			 * should be written as 0s.
3375 			 */
3376 
3377 			if (bugcntl != tunable) {
3378 				*pbm_pcix_diag_reg = tunable <<
3379 				    XMITS_PCI_X_DIAG_BUGCNTL_SHIFT;
3380 
3381 				DEBUG2(DBG_INIT_CLD, pci_p->pci_dip, "%s: XMITS"
3382 				    " pcix diag reg=0x%lx (NEW)\n",
3383 				    ddi_driver_name(child), *pbm_pcix_diag_reg);
3384 			}
3385 		}
3386 	}
3387 }
3388 
3389 void
3390 pci_post_uninit_child(pci_t *pci_p)
3391 {
3392 	pci_setup_cfgpa(pci_p);
3393 }
3394 
3395 static int
3396 pci_tom_nbintr_op(pci_t *pci_p, uint32_t inum, intrfunc f, caddr_t arg,
3397     int flag)
3398 {
3399 	uint32_t ino = pci_p->pci_inos[inum];
3400 	uint32_t mondo = IB_INO_TO_NBMONDO(pci_p->pci_ib_p, ino);
3401 	int ret = DDI_SUCCESS;
3402 
3403 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo); /* no op on tom */
3404 
3405 	switch (flag) {
3406 	case PCI_OBJ_INTR_ADD:
3407 		VERIFY(add_ivintr(mondo, pci_pil[inum], f,
3408 		    arg, NULL, NULL) == 0);
3409 		break;
3410 	case PCI_OBJ_INTR_REMOVE:
3411 		VERIFY(rem_ivintr(mondo, pci_pil[inum]) == 0);
3412 		break;
3413 	default:
3414 		ret = DDI_FAILURE;
3415 		break;
3416 	}
3417 
3418 	return (ret);
3419 }
3420 
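/*
 * Register the ECC interrupt handler for the given inum.  On
 * Tomatillo the handler is additionally registered for the
 * corresponding NB mondo via pci_tom_nbintr_op().
 */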
3421 int
3422 pci_ecc_add_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3423 {
3424 	uint32_t mondo;
3425 	int	r;
3426 
3427 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3428 	    pci_p->pci_inos[inum]);
3429 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3430 
3431 	VERIFY(add_ivintr(mondo, pci_pil[inum], (intrfunc)ecc_intr,
3432 	    (caddr_t)eii_p, NULL, NULL) == 0);
3433 
3434 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)
3435 		return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD,
3436 		    DDI_SUCCESS));
3437 
3438 	r = pci_tom_nbintr_op(pci_p, inum, (intrfunc)ecc_intr,
3439 	    (caddr_t)eii_p, PCI_OBJ_INTR_ADD);
3440 	return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD, r));
3441 }
3442 
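/*
 * Remove the ECC interrupt handler for the given inum, including
 * the NB mondo registration on Tomatillo.
 */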
3443 void
3444 pci_ecc_rem_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3445 {
3446 	uint32_t mondo;
3447 
3448 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3449 	    pci_p->pci_inos[inum]);
3450 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3451 
3452 	VERIFY(rem_ivintr(mondo, pci_pil[inum]) == 0);
3453 
3454 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
3455 		(void) pci_tom_nbintr_op(pci_p, inum, (intrfunc)ecc_intr,
3456 		    (caddr_t)eii_p, PCI_OBJ_INTR_REMOVE);
3457 }
3458 
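/*
 * Interrupt handler for consistent DMA flush/sync (CDMA)
 * completion; it simply marks the pending flush operation done.
 */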
3459 static uint_t
3460 pci_pbm_cdma_intr(caddr_t a)
3461 {
3462 	pbm_t *pbm_p = (pbm_t *)a;
3463 	pbm_p->pbm_cdma_flag = PBM_CDMA_DONE;
3464 	return (DDI_INTR_CLAIMED);
3465 }
3466 
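/*
 * Register the CDMA flush/sync completion interrupt handler.
 */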
3467 int
3468 pci_pbm_add_intr(pci_t *pci_p)
3469 {
3470 	uint32_t mondo;
3471 
3472 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3473 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3474 
3475 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_CDMA],
3476 	    (intrfunc)pci_pbm_cdma_intr, (caddr_t)pci_p->pci_pbm_p,
3477 	    NULL, NULL) == 0);
3478 
3479 	return (DDI_SUCCESS);
3480 }
3481 
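/*
 * Disable and remove the CDMA flush/sync completion interrupt.
 */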
3482 void
3483 pci_pbm_rem_intr(pci_t *pci_p)
3484 {
3485 	ib_t		*ib_p = pci_p->pci_ib_p;
3486 	uint32_t	mondo;
3487 
3488 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3489 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3490 
3491 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_CDMA], IB_INTR_NOWAIT);
3492 	VERIFY(rem_ivintr(mondo, pci_pil[CBNINTR_CDMA]) == 0);
3493 }
3494 
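/*
 * Save the CDMA interrupt mapping register across a suspend;
 * pci_pbm_resume() restores it.
 */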
3495 void
3496 pci_pbm_suspend(pci_t *pci_p)
3497 {
3498 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3499 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3500 
3501 	/* Save CDMA interrupt state */
3502 	pbm_p->pbm_cdma_imr_save = *ib_intr_map_reg_addr(pci_p->pci_ib_p, ino);
3503 }
3504 
3505 void
3506 pci_pbm_resume(pci_t *pci_p)
3507 {
3508 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3509 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3510 
3511 	/* Restore CDMA interrupt state */
3512 	*ib_intr_map_reg_addr(pci_p->pci_ib_p, ino) = pbm_p->pbm_cdma_imr_save;
3513 }
3514 
3515 /*
3516  * pci_bus_quiesce
3517  *
3518  * This function is called as the corresponding control ops routine
3519  * to a DDI_CTLOPS_QUIESCE command.  Its mission is to halt all DMA
3520  * activity on the bus by disabling arbitration/parking.
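 *
 * Illustrative only: callers typically reach this routine through
 * the generic control ops interface, e.g.
 *
 *	(void) ddi_ctlops(dip, rdip, DDI_CTLOPS_QUIESCE, NULL, result);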
3521  */
3522 int
3523 pci_bus_quiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3524 {
3525 	volatile uint64_t *ctrl_reg_p;
3526 	volatile uint64_t ctrl_reg;
3527 	pbm_t *pbm_p;
3528 
3529 	pbm_p = pci_p->pci_pbm_p;
3530 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3531 
3532 	if (pbm_p->pbm_quiesce_count++ == 0) {
3533 
3534 		DEBUG0(DBG_PWR, dip, "quiescing bus\n");
3535 
3536 		ctrl_reg = *ctrl_reg_p;
3537 		pbm_p->pbm_saved_ctrl_reg = ctrl_reg;
3538 		ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
3539 		    SCHIZO_PCI_CTRL_ARB_PARK);
3540 		*ctrl_reg_p = ctrl_reg;
3541 #ifdef	DEBUG
3542 		ctrl_reg = *ctrl_reg_p;
3543 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3544 		    SCHIZO_PCI_CTRL_ARB_PARK)) != 0)
3545 			panic("ctrl_reg didn't quiesce: 0x%lx\n", ctrl_reg);
3546 #endif
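		/*
		 * A child config space read flushes the control
		 * register write above, ensuring the arbitration
		 * disable has taken effect before we return.
		 */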
3547 		if (pbm_p->pbm_anychild_cfgpa)
3548 			(void) ldphysio(pbm_p->pbm_anychild_cfgpa);
3549 	}
3550 
3551 	return (DDI_SUCCESS);
3552 }
3553 
3554 /*
3555  * pci_bus_unquiesce
3556  *
3557  * This function is called as the corresponding control ops routine
3558  * to a DDI_CTLOPS_UNQUIESCE command.  Its mission is to resume paused
3559  * DMA activity on the bus by restoring the saved arbitration/parking state.
3560  */
3561 int
3562 pci_bus_unquiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3563 {
3564 	volatile uint64_t *ctrl_reg_p;
3565 	pbm_t *pbm_p;
3566 #ifdef	DEBUG
3567 	volatile uint64_t ctrl_reg;
3568 #endif
3569 
3570 	pbm_p = pci_p->pci_pbm_p;
3571 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3572 
3573 	ASSERT(pbm_p->pbm_quiesce_count > 0);
3574 	if (--pbm_p->pbm_quiesce_count == 0) {
3575 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
3576 #ifdef	DEBUG
3577 		ctrl_reg = *ctrl_reg_p;
3578 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3579 		    SCHIZO_PCI_CTRL_ARB_PARK)) == 0)
3580 			panic("ctrl_reg didn't unquiesce: 0x%lx\n", ctrl_reg);
3581 #endif
3582 	}
3583 
3584 	return (DDI_SUCCESS);
3585 }
3586 
3587 int
3588 pci_reloc_getkey(void)
3589 {
3590 	return (0x200);
3591 }
3592 
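/*
 * tm_vmem_free
 *
 * Deferred DVMA free used when the Tomatillo mtlb workaround
 * (tm_mtlb_gc) is enabled: queue the unbind request, and once the
 * number of queued pages exceeds iommu_mtlb_maxpgs, read the IOMMU
 * TLB tags through the diagnostic access registers and free every
 * queued range whose pages no longer appear in the TLB.  Ranges
 * still present in the TLB remain queued for a later pass.
 */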
3593 static void
3594 tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p, dvma_addr_t dvma_pg,
3595     int npages)
3596 {
3597 	uint32_t dur_max, dur_base;
3598 	dvma_unbind_req_t *req_p, *req_max_p;
3599 	dvma_unbind_req_t *req_base_p = iommu_p->iommu_mtlb_req_p;
3600 	uint32_t tlb_vpn[IOMMU_TLB_ENTRIES];
3601 	caddr_t reg_base;
3602 	volatile uint64_t *tag_p;
3603 	int i, preserv_count = 0;
3604 
3605 	mutex_enter(&iommu_p->iommu_mtlb_lock);
3606 
3607 	iommu_p->iommu_mtlb_npgs += npages;
3608 	req_max_p = req_base_p + iommu_p->iommu_mtlb_nreq++;
3609 	req_max_p->dur_npg = npages;
3610 	req_max_p->dur_base = dvma_pg;
3611 	req_max_p->dur_flags = mp->dmai_flags & DMAI_FLAGS_VMEMCACHE;
3612 
3614 	if (iommu_p->iommu_mtlb_npgs <= iommu_p->iommu_mtlb_maxpgs)
3615 		goto done;
3616 
3617 	/* read TLB */
3618 	reg_base = iommu_p->iommu_pci_p->pci_address[0];
3619 	tag_p = (volatile uint64_t *)
3620 	    (reg_base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
3621 
3622 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
3623 		tlb_vpn[i] = tag_p[i] & SCHIZO_VPN_MASK;
3624 
3625 	/* for each request search the TLB for a matching address */
3626 	for (req_p = req_base_p; req_p <= req_max_p; req_p++) {
3627 		dur_base = req_p->dur_base;
3628 		dur_max = req_p->dur_base + req_p->dur_npg;
3629 
3630 		for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
3631 			uint_t vpn = tlb_vpn[i];
3632 			if (vpn >= dur_base && vpn < dur_max)
3633 				break;
3634 		}
3635 		if (i >= IOMMU_TLB_ENTRIES) {
3636 			pci_vmem_do_free(iommu_p,
3637 			    (void *)IOMMU_PTOB(req_p->dur_base),
3638 			    req_p->dur_npg, req_p->dur_flags);
3639 			iommu_p->iommu_mtlb_npgs -= req_p->dur_npg;
3640 			continue;
3641 		}
3642 		/* still in the TLB: keep the request, compacting the queue */
3643 		if ((req_p - req_base_p) != preserv_count)
3644 			*(req_base_p + preserv_count) = *req_p;
3645 		preserv_count++;
3646 	}
3647 
3648 	iommu_p->iommu_mtlb_nreq = preserv_count;
3649 done:
3650 	mutex_exit(&iommu_p->iommu_mtlb_lock);
3651 }
3652 
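/*
 * Free DVMA space, either immediately or, when tm_mtlb_gc is set,
 * through the deferred tm_vmem_free() path above.
 */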
3653 void
3654 pci_vmem_free(iommu_t *iommu_p, ddi_dma_impl_t *mp, void *dvma_addr,
3655     size_t npages)
3656 {
3657 	if (tm_mtlb_gc)
3658 		tm_vmem_free(mp, iommu_p,
3659 		    (dvma_addr_t)IOMMU_BTOP((dvma_addr_t)dvma_addr), npages);
3660 	else
3661 		pci_vmem_do_free(iommu_p, dvma_addr, npages,
3662 		    (mp->dmai_flags & DMAI_FLAGS_VMEMCACHE));
3663 }
3664 
3665 /*
3666  * pci_iommu_bypass_end_configure
3667  *
3668  * Support for 42-bit bus width to SAFARI and JBUS in DVMA and
3669  * iommu bypass transfers:
3670  */
3671 
3672 dma_bypass_addr_t
3673 pci_iommu_bypass_end_configure(void)
3674 {
3676 	return ((dma_bypass_addr_t)SAFARI_JBUS_IOMMU_BYPASS_END);
3677 }
3678