xref: /titanic_51/usr/src/uts/sun4u/io/pci/pcisch.c (revision bd335c6465ddbafe543900df4b03247bfa288eff)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Schizo specifics implementation:
31  *	interrupt mapping register
32  *	PBM configuration
33  *	ECC and PBM error handling
34  *	Iommu mapping handling
35  *	Streaming Cache flushing
36  */
37 
38 #include <sys/types.h>
39 #include <sys/kmem.h>
40 #include <sys/sysmacros.h>
41 #include <sys/async.h>
42 #include <sys/ivintr.h>
43 #include <sys/systm.h>
44 #include <sys/intr.h>
45 #include <sys/machsystm.h>	/* lddphys() */
46 #include <sys/machsystm.h>	/* lddphys, intr_dist_add */
47 #include <sys/iommutsb.h>
48 #include <sys/promif.h>		/* prom_printf */
49 #include <sys/map.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sunndi.h>
53 #include <sys/spl.h>
54 #include <sys/fm/util.h>
55 #include <sys/ddi_impldefs.h>
56 #include <sys/fm/protocol.h>
57 #include <sys/fm/io/sun4upci.h>
58 #include <sys/fm/io/ddi.h>
59 #include <sys/fm/io/pci.h>
60 #include <sys/pci/pci_obj.h>
61 #include <sys/pci/pcisch.h>
62 #include <sys/pci/pcisch_asm.h>
63 #include <sys/x_call.h>		/* XCALL_PIL */
64 
65 /*LINTLIBRARY*/
66 
67 extern uint8_t ldstub(uint8_t *);
68 
69 #define	IOMMU_CTX_BITMAP_SIZE	(1 << (12 - 3))
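/*
 * Note: 1 << (12 - 3) is 512 bytes, i.e. 64 eight-byte buckets.  With the
 * DCB_LOCK_BITS/DCB_BMAP_BITS layout defined further below, each bucket
 * holds 56 usable context bits, so the bitmap covers roughly 64 * 56 = 3584
 * DVMA contexts (sizes worked out from the defines below, for illustration).
 */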
70 static void iommu_ctx_free(iommu_t *);
71 static int iommu_tlb_scrub(iommu_t *, int);
72 static uint32_t pci_identity_init(pci_t *);
73 
74 static void pci_cb_clear_error(cb_t *, cb_errstate_t *);
75 static void pci_clear_error(pci_t *, pbm_errstate_t *);
76 static uint32_t pci_identity_init(pci_t *pci_p);
77 static int pci_intr_setup(pci_t *pci_p);
78 static void iommu_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
79 static void cb_ereport_post(dev_info_t *, uint64_t, cb_errstate_t *);
80 static void pcix_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
81 static void pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar,
82 		ecc_region_t region);
83 static void pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p);
84 static void tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p,
85 		dvma_addr_t dvma_pg, int npages);
86 
87 static int pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p);
88 
89 static pci_ksinfo_t	*pci_name_kstat;
90 static pci_ksinfo_t	*saf_name_kstat;
91 
92 extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
93 
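/*
 * Sketch of the sharing model visible in the code below: the two PBM sides
 * (leaves) of one chip attach separately but share a single pci_common_t,
 * looked up via PCI_CMN_ID().  The first side to attach allocates the common
 * soft state and the shared cb/ecc objects; pci_common_refcnt counts the
 * attached sides so that only the last pci_obj_destroy() tears the shared
 * objects down.
 */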
94 /* called by pci_attach() DDI_ATTACH to initialize pci objects */
95 int
96 pci_obj_setup(pci_t *pci_p)
97 {
98 	pci_common_t *cmn_p;
99 	uint32_t chip_id = pci_identity_init(pci_p);
100 	uint32_t cmn_id = PCI_CMN_ID(ID_CHIP_TYPE(chip_id), pci_p->pci_id);
101 	int ret;
102 
103 	/* Perform allocations first to avoid delicate unwinding. */
104 	if (pci_alloc_tsb(pci_p) != DDI_SUCCESS)
105 		return (DDI_FAILURE);
106 
107 	mutex_enter(&pci_global_mutex);
108 	cmn_p = get_pci_common_soft_state(cmn_id);
109 	if (cmn_p == NULL) {
110 		if (alloc_pci_common_soft_state(cmn_id) != DDI_SUCCESS) {
111 			mutex_exit(&pci_global_mutex);
112 			pci_free_tsb(pci_p);
113 			return (DDI_FAILURE);
114 		}
115 		cmn_p = get_pci_common_soft_state(cmn_id);
116 		cmn_p->pci_common_id = cmn_id;
117 		cmn_p->pci_common_tsb_cookie = IOMMU_TSB_COOKIE_NONE;
118 	}
119 
120 	ASSERT((pci_p->pci_side == 0) || (pci_p->pci_side == 1));
121 	if (cmn_p->pci_p[pci_p->pci_side]) {
122 		/* second side attach */
123 		pci_p->pci_side = PCI_OTHER_SIDE(pci_p->pci_side);
124 		ASSERT(cmn_p->pci_p[pci_p->pci_side] == NULL);
125 	}
126 
127 	cmn_p->pci_p[pci_p->pci_side] = pci_p;
128 	pci_p->pci_common_p = cmn_p;
129 
130 	if (cmn_p->pci_common_refcnt == 0)
131 		cmn_p->pci_chip_id = chip_id;
132 
133 	ib_create(pci_p);
134 
135 	/*
136 	 * The initialization of cb internal interrupts depends on ib
137 	 */
138 	if (cmn_p->pci_common_refcnt == 0) {
139 		cb_create(pci_p);
140 		cmn_p->pci_common_cb_p = pci_p->pci_cb_p;
141 	} else
142 		pci_p->pci_cb_p = cmn_p->pci_common_cb_p;
143 
144 	iommu_create(pci_p);
145 
146 	if (cmn_p->pci_common_refcnt == 0) {
147 		ecc_create(pci_p);
148 		cmn_p->pci_common_ecc_p = pci_p->pci_ecc_p;
149 	} else
150 		pci_p->pci_ecc_p = cmn_p->pci_common_ecc_p;
151 
152 	pbm_create(pci_p);
153 	sc_create(pci_p);
154 
155 	pci_fm_create(pci_p);
156 
157 	if ((ret = pci_intr_setup(pci_p)) != DDI_SUCCESS)
158 		goto done;
159 
160 	pci_kstat_create(pci_p);
161 
162 	cmn_p->pci_common_attachcnt++;
163 	cmn_p->pci_common_refcnt++;
164 done:
165 	mutex_exit(&pci_global_mutex);
166 	if (ret != DDI_SUCCESS)
167 		cmn_err(CE_WARN, "pci_obj_setup failed %x", ret);
168 	return (ret);
169 }
170 
171 /* called by pci_detach() DDI_DETACH to destroy pci objects */
172 void
173 pci_obj_destroy(pci_t *pci_p)
174 {
175 	pci_common_t *cmn_p;
176 	mutex_enter(&pci_global_mutex);
177 
178 	cmn_p = pci_p->pci_common_p;
179 	cmn_p->pci_common_refcnt--;
180 	cmn_p->pci_common_attachcnt--;
181 
182 	pci_kstat_destroy(pci_p);
183 
184 	/* schizo non-shared objects */
185 	pci_fm_destroy(pci_p);
186 
187 	sc_destroy(pci_p);
188 	pbm_destroy(pci_p);
189 	iommu_destroy(pci_p);
190 	ib_destroy(pci_p);
191 
192 	if (cmn_p->pci_common_refcnt != 0) {
193 		pci_intr_teardown(pci_p);
194 		cmn_p->pci_p[pci_p->pci_side] = NULL;
195 		mutex_exit(&pci_global_mutex);
196 		return;
197 	}
198 
199 	/* schizo shared objects - uses cmn_p, must be destroyed before cmn */
200 	ecc_destroy(pci_p);
201 	cb_destroy(pci_p);
202 
203 	free_pci_common_soft_state(cmn_p->pci_common_id);
204 	pci_intr_teardown(pci_p);
205 	mutex_exit(&pci_global_mutex);
206 }
207 
208 /* called by pci_attach() DDI_RESUME to (re)initialize pci objects */
209 void
210 pci_obj_resume(pci_t *pci_p)
211 {
212 	pci_common_t *cmn_p = pci_p->pci_common_p;
213 
214 	mutex_enter(&pci_global_mutex);
215 
216 	ib_configure(pci_p->pci_ib_p);
217 	iommu_configure(pci_p->pci_iommu_p);
218 
219 	if (cmn_p->pci_common_attachcnt == 0)
220 		ecc_configure(pci_p);
221 
222 	ib_resume(pci_p->pci_ib_p);
223 
224 	pbm_configure(pci_p->pci_pbm_p);
225 	sc_configure(pci_p->pci_sc_p);
226 
227 	if (cmn_p->pci_common_attachcnt == 0)
228 		cb_resume(pci_p->pci_cb_p);
229 
230 	pbm_resume(pci_p->pci_pbm_p);
231 
232 	cmn_p->pci_common_attachcnt++;
233 	mutex_exit(&pci_global_mutex);
234 }
235 
236 /* called by pci_detach() DDI_SUSPEND to suspend pci objects */
237 void
238 pci_obj_suspend(pci_t *pci_p)
239 {
240 	mutex_enter(&pci_global_mutex);
241 
242 	pbm_suspend(pci_p->pci_pbm_p);
243 	ib_suspend(pci_p->pci_ib_p);
244 
245 	if (!--pci_p->pci_common_p->pci_common_attachcnt)
246 		cb_suspend(pci_p->pci_cb_p);
247 
248 	mutex_exit(&pci_global_mutex);
249 }
250 
251 /*
252  * Add an additional 0x35 or 0x36 ino interrupt on platforms that don't
253  * have them.  This routine assumes in several places that each interrupt
254  * takes one cell and that the cell size is the same as the integer size.
255  */
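/*
 * Worked example (illustrative sizes only, assuming the usual 4-byte 1275
 * cell): an "interrupts" property of intr_len = 20 bytes yields intr_cnt =
 * BYTES_TO_1275_CELLS(20) = 5 cells.  If intr_cnt == CBNINTR_CDMA the CDMA
 * entry is missing, so the buffer is grown by one cell and that cell is set
 * to PBM_CDMA_INO_BASE plus the bus side (0 or 1).
 */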
256 static int
257 pci_intr_setup(pci_t *pci_p)
258 {
259 	dev_info_t *dip = pci_p->pci_dip;
260 	pbm_t *pbm_p = pci_p->pci_pbm_p;
261 	cb_t *cb_p = pci_p->pci_cb_p;
262 	uint32_t *intr_buf, *new_intr_buf;
263 	int intr_len, intr_cnt, ret;
264 
265 	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
266 		"interrupts", (caddr_t)&intr_buf, &intr_len) != DDI_SUCCESS)
267 		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
268 			ddi_driver_name(dip), ddi_get_instance(dip));
269 
270 	intr_cnt = BYTES_TO_1275_CELLS(intr_len);
271 	if (intr_cnt < CBNINTR_CDMA)	/* CBNINTR_CDMA is 0 based */
272 		cmn_err(CE_PANIC, "%s%d: <%d interrupts", ddi_driver_name(dip),
273 			ddi_get_instance(dip), CBNINTR_CDMA);
274 
275 	if (intr_cnt == CBNINTR_CDMA)
276 		intr_cnt++;
277 
278 	new_intr_buf = kmem_alloc(CELLS_1275_TO_BYTES(intr_cnt), KM_SLEEP);
279 	bcopy(intr_buf, new_intr_buf, intr_len);
280 	kmem_free(intr_buf, intr_len);
281 
282 	new_intr_buf[CBNINTR_CDMA] = PBM_CDMA_INO_BASE + pci_p->pci_side;
283 	pci_p->pci_inos = new_intr_buf;
284 	pci_p->pci_inos_len = CELLS_1275_TO_BYTES(intr_cnt);
285 
286 	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
287 		(int *)new_intr_buf, intr_cnt))
288 		cmn_err(CE_PANIC, "%s%d: cannot update interrupts property\n",
289 			ddi_driver_name(dip), ddi_get_instance(dip));
290 
291 	if (pci_p->pci_common_p->pci_common_refcnt == 0) {
292 		cb_p->cb_no_of_inos = intr_cnt;
293 		if (ret = cb_register_intr(pci_p))
294 			goto teardown;
295 		if (ret = ecc_register_intr(pci_p))
296 			goto teardown;
297 
298 		intr_dist_add(cb_intr_dist, cb_p);
299 		cb_enable_intr(pci_p);
300 		ecc_enable_intr(pci_p);
301 	}
302 
303 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
304 		pbm_p->pbm_sync_ino = pci_p->pci_inos[CBNINTR_PBM];
305 	if (ret = pbm_register_intr(pbm_p)) {
306 		if (pci_p->pci_common_p->pci_common_refcnt == 0)
307 			intr_dist_rem(cb_intr_dist, cb_p);
308 		goto teardown;
309 	}
310 	intr_dist_add(pbm_intr_dist, pbm_p);
311 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_PBM]);
312 	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_CDMA]);
313 
314 	intr_dist_add_weighted(ib_intr_dist_all, pci_p->pci_ib_p);
315 	return (DDI_SUCCESS);
316 teardown:
317 	pci_intr_teardown(pci_p);
318 	return (ret);
319 }
320 
321 uint64_t
322 pci_sc_configure(pci_t *pci_p)
323 {
324 	int instance;
325 	dev_info_t *dip = pci_p->pci_dip;
326 
327 	instance = ddi_get_instance(dip);
328 	if ((pci_xmits_sc_max_prf & (1 << instance)) &&
329 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS))
330 		return (XMITS_SC_MAX_PRF);
331 	else
332 		return (0);
333 }
334 
335 static void
336 pci_schizo_cdma_sync(pbm_t *pbm_p)
337 {
338 	pci_t *pci_p = pbm_p->pbm_pci_p;
339 	hrtime_t start_time;
340 	volatile uint64_t *clr_p = ib_clear_intr_reg_addr(pci_p->pci_ib_p,
341 		pci_p->pci_inos[CBNINTR_CDMA]);
342 	uint32_t fail_cnt = pci_cdma_intr_count;
343 
344 	mutex_enter(&pbm_p->pbm_sync_mutex);
345 #ifdef PBM_CDMA_DEBUG
346 	pbm_p->pbm_cdma_req_cnt++;
347 #endif /* PBM_CDMA_DEBUG */
348 	pbm_p->pbm_cdma_flag = PBM_CDMA_PEND;
349 	IB_INO_INTR_TRIG(clr_p);
350 wait:
351 	start_time = gethrtime();
352 	while (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE) {
353 		if (gethrtime() - start_time <= pci_cdma_intr_timeout)
354 			continue;
355 		if (--fail_cnt > 0)
356 			goto wait;
357 		if (pbm_p->pbm_cdma_flag == PBM_CDMA_DONE)
358 			break;
359 		cmn_err(CE_PANIC, "%s (%s): consistent dma sync timeout",
360 		    pbm_p->pbm_nameinst_str, pbm_p->pbm_nameaddr_str);
361 	}
362 #ifdef PBM_CDMA_DEBUG
363 	if (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE)
364 		pbm_p->pbm_cdma_to_cnt++;
365 	else {
366 		start_time = gethrtime() - start_time;
367 		pbm_p->pbm_cdma_success_cnt++;
368 		pbm_p->pbm_cdma_latency_sum += start_time;
369 		if (start_time > pbm_p->pbm_cdma_latency_max)
370 			pbm_p->pbm_cdma_latency_max = start_time;
371 	}
372 #endif /* PBM_CDMA_DEBUG */
373 	mutex_exit(&pbm_p->pbm_sync_mutex);
374 }
375 
376 #if !defined(lint)
377 #include <sys/cpuvar.h>
378 #endif
379 
380 #define	SYNC_HW_BUSY(pa, mask)	(lddphysio(pa) & (mask))
381 
382 /*
383  * Consistent DMA Sync/Flush
384  *
385  * XMITS and Tomatillo use a multi-threaded sync/flush register.
386  * Called from the interrupt wrapper: the associated ino is used to index
387  *	the distinctive register bit.
388  * Called from pci_dma_sync(): the bit belonging to the PBM is shared
389  *	by all calls from pci_dma_sync(). XMITS requires serialization
390  *	while Tomatillo does not.
391  */
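/*
 * In outline, the handshake with the sync/flush register is (a sketch of the
 * code that follows, not additional behavior):
 *
 *	stdphysio(sync_reg_pa, 1ull << ino);		start the flush
 *	while (SYNC_HW_BUSY(sync_reg_pa, 1ull << ino))	poll until the chip
 *		;					clears our ino bit
 *
 * XMITS serializes users of the shared PBM bit with pbm_sync_mutex, while
 * Schizo lacks this register entirely and uses pci_schizo_cdma_sync().
 */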
392 void
393 pci_pbm_dma_sync(pbm_t *pbm_p, ib_ino_t ino)
394 {
395 	pci_t *pci_p = pbm_p->pbm_pci_p;
396 	hrtime_t start_time;
397 	uint64_t ino_mask, sync_reg_pa;
398 	volatile uint64_t flag_val;
399 	uint32_t locked, chip_type = CHIP_TYPE(pci_p);
400 	int	i;
401 
402 	if (chip_type == PCI_CHIP_SCHIZO) {
403 		pci_schizo_cdma_sync(pbm_p);
404 		return;
405 	}
406 
407 	sync_reg_pa = pbm_p->pbm_sync_reg_pa;
408 
409 	locked = 0;
410 	if (((chip_type == PCI_CHIP_XMITS) && (ino == pbm_p->pbm_sync_ino)) ||
411 	    pci_sync_lock) {
412 		locked = 1;
413 		mutex_enter(&pbm_p->pbm_sync_mutex);
414 	}
415 	ino_mask = 1ull << ino;
416 	stdphysio(sync_reg_pa, ino_mask);
417 
418 	for (i = 0; i < 5; i++) {
419 		if ((flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) == 0)
420 			goto done;
421 	}
422 
423 	start_time = gethrtime();
424 	for (; (flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) != 0; i++) {
425 		if (gethrtime() - start_time > pci_sync_buf_timeout)
426 			break;
427 	}
428 
429 	if (flag_val && SYNC_HW_BUSY(sync_reg_pa, ino_mask) && !panicstr)
430 		cmn_err(CE_PANIC, "%s: pbm dma sync %llx,%llx timeout!",
431 			pbm_p->pbm_nameaddr_str, sync_reg_pa, flag_val);
432 done:
433 	/* optional: stdphysio(sync_reg_pa - 8, ino_mask); */
434 	if (locked)
435 		mutex_exit(&pbm_p->pbm_sync_mutex);
436 
437 	if (tomatillo_store_store_wrka) {
438 #if !defined(lint)
439 		kpreempt_disable();
440 #endif
441 		tomatillo_store_store_order();
442 #if !defined(lint)
443 		kpreempt_enable();
444 #endif
445 	}
446 
447 }
448 
449 /*ARGSUSED*/
450 void
451 pci_fix_ranges(pci_ranges_t *rng_p, int rng_entries)
452 {
453 }
454 
455 /*
456  * map_pci_registers
457  *
458  * This function is called from the attach routine to map the registers
459  * accessed by this driver.
460  *
461  * used by: pci_attach()
462  *
463  * return value: DDI_FAILURE on failure
464  */
465 int
466 map_pci_registers(pci_t *pci_p, dev_info_t *dip)
467 {
468 	ddi_device_acc_attr_t attr;
469 	int len;
470 
471 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
472 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
473 
474 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
475 	/*
476 	 * Register set 0 is PCI CSR Base
477 	 */
478 	if (ddi_regs_map_setup(dip, 0, &pci_p->pci_address[0], 0, 0,
479 	    &attr, &pci_p->pci_ac[0]) != DDI_SUCCESS) {
480 		len = 0;
481 		goto fail;
482 	}
483 	/*
484 	 * Register set 1 is Schizo CSR Base
485 	 */
486 	if (ddi_regs_map_setup(dip, 1, &pci_p->pci_address[1], 0, 0,
487 	    &attr, &pci_p->pci_ac[1]) != DDI_SUCCESS) {
488 		len = 1;
489 		goto fail;
490 	}
491 
492 	/*
493 	 * The third register set contains the bridge's configuration
494 	 * header.  This header is at the very beginning of the bridge's
495 	 * configuration space.  This space has little-endian byte order.
496 	 */
497 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
498 	if (ddi_regs_map_setup(dip, 2, &pci_p->pci_address[2], 0,
499 	    PCI_CONF_HDR_SIZE, &attr, &pci_p->pci_ac[2]) != DDI_SUCCESS) {
500 		len = 2;
501 		goto fail;
502 	}
503 
504 	if (ddi_getproplen(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
505 	    "reg", &len) || (len / sizeof (pci_nexus_regspec_t) < 4))
506 		goto done;
507 
508 	/*
509 	 * The optional fourth register bank points to the
510 	 * interrupt concentrator registers.
511 	 */
512 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
513 	if (ddi_regs_map_setup(dip, 3, &pci_p->pci_address[3], 0,
514 	    0, &attr, &pci_p->pci_ac[3]) != DDI_SUCCESS) {
515 		len = 3;
516 		goto fail;
517 	}
518 
519 done:
520 	DEBUG4(DBG_ATTACH, dip, "address (%p,%p,%p,%p)\n",
521 	    pci_p->pci_address[0], pci_p->pci_address[1],
522 	    pci_p->pci_address[2], pci_p->pci_address[3]);
523 
524 	return (DDI_SUCCESS);
525 
526 
527 fail:
528 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
529 		ddi_driver_name(dip), ddi_get_instance(dip), len);
530 	for (; len--; ddi_regs_map_free(&pci_p->pci_ac[len]));
531 	return (DDI_FAILURE);
532 }
533 
534 /*
535  * unmap_pci_registers:
536  *
537  * This routine unmaps the registers mapped by map_pci_registers.
538  *
539  * used by: pci_detach()
540  *
541  * return value: none
542  */
543 void
544 unmap_pci_registers(pci_t *pci_p)
545 {
546 	int i;
547 
548 	for (i = 0; i < 4; i++) {
549 		if (pci_p->pci_ac[i])
550 			ddi_regs_map_free(&pci_p->pci_ac[i]);
551 	}
552 }
553 
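/*
 * The 10-bit cpu_id below is split across two fields of the interrupt
 * mapping register: bits 4:0 become the agent id (TID field) and bits 9:5
 * the node id (NID field).  For example (illustrative value), cpu_id 0x2a3
 * gives agent_id 0x03 and node_id 0x15; ib_map_reg_get_cpu() reassembles
 * the same cpu_id from those two fields.
 */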
554 uint64_t
555 ib_get_map_reg(ib_mondo_t mondo, uint32_t cpu_id)
556 {
557 	uint32_t agent_id;
558 	uint32_t node_id;
559 
560 	/* ensure that cpu_id is only 10 bits. */
561 	ASSERT((cpu_id & ~0x3ff) == 0);
562 
563 	agent_id = cpu_id & 0x1f;
564 	node_id = (cpu_id >> 5) & 0x1f;
565 
566 	return ((mondo) | (agent_id << COMMON_INTR_MAP_REG_TID_SHIFT) |
567 	    (node_id << SCHIZO_INTR_MAP_REG_NID_SHIFT) |
568 	    COMMON_INTR_MAP_REG_VALID);
569 }
570 
571 uint32_t
572 ib_map_reg_get_cpu(volatile uint64_t reg)
573 {
574 	return (((reg & COMMON_INTR_MAP_REG_TID) >>
575 		COMMON_INTR_MAP_REG_TID_SHIFT) |
576 			((reg & SCHIZO_INTR_MAP_REG_NID) >>
577 			(SCHIZO_INTR_MAP_REG_NID_SHIFT-5)));
578 }
579 
580 uint64_t *
581 ib_intr_map_reg_addr(ib_t *ib_p, ib_ino_t ino)
582 {
583 	/*
584 	 * Schizo maps all interrupts in one contiguous area.
585 	 * (PCI_CSRBase + 0x00.1000 + INO * 8).
586 	 */
587 	return ((uint64_t *)(ib_p->ib_intr_map_regs) + (ino & 0x3f));
588 }
589 
590 uint64_t *
591 ib_clear_intr_reg_addr(ib_t *ib_p, ib_ino_t ino)	/* XXX - needs work */
592 {
593 	/*
594 	 * Schizo maps clear intr. registers in contiguous area.
595 	 * (PCI_CSRBase + 0x00.1400 + INO * 8).
596 	 */
597 	return ((uint64_t *)(ib_p->ib_slot_clear_intr_regs) + (ino & 0x3f));
598 }
599 
600 /*
601  * schizo does not have a mapping register per slot, so no sharing
602  * is done.
603  */
604 /*ARGSUSED*/
605 void
606 ib_ino_map_reg_share(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
607 {
608 }
609 
610 /*
611  * return true if there are interrupts using this mapping register
612  */
613 /*ARGSUSED*/
614 int
615 ib_ino_map_reg_unshare(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
616 {
617 	return (ino_p->ino_ih_size);
618 }
619 
620 void
621 pci_pbm_intr_dist(pbm_t *pbm_p)
622 {
623 	pci_t *pci_p = pbm_p->pbm_pci_p;
624 	ib_t *ib_p = pci_p->pci_ib_p;
625 	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[CBNINTR_CDMA]);
626 
627 	mutex_enter(&pbm_p->pbm_sync_mutex);
628 	ib_intr_dist_nintr(ib_p, ino, ib_intr_map_reg_addr(ib_p, ino));
629 	mutex_exit(&pbm_p->pbm_sync_mutex);
630 }
631 
632 uint32_t
633 pci_xlate_intr(dev_info_t *dip, dev_info_t *rdip, ib_t *ib_p, uint32_t intr)
634 {
635 	return (IB_INO_TO_MONDO(ib_p, intr));
636 }
637 
638 
639 /*
640  * Return the cpuid to be used for an ino.  We have no special cpu
641  * assignment constraints for this nexus, so just call intr_dist_cpuid().
642  */
643 /* ARGSUSED */
644 uint32_t
645 pci_intr_dist_cpuid(ib_t *ib_p, ib_ino_info_t *ino_p)
646 {
647 	return (intr_dist_cpuid());
648 }
649 
650 void
651 pci_cb_teardown(pci_t *pci_p)
652 {
653 	cb_t 	*cb_p = pci_p->pci_cb_p;
654 	uint32_t mondo;
655 
656 	if (!pci_buserr_interrupt)
657 		return;
658 
659 	mondo = ((pci_p->pci_cb_p->cb_ign  << PCI_INO_BITS) |
660 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
661 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
662 
663 	cb_disable_nintr(cb_p, CBNINTR_BUS_ERROR, IB_INTR_WAIT);
664 	rem_ivintr(mondo, NULL);
665 }
666 
667 int
668 cb_register_intr(pci_t *pci_p)
669 {
670 	uint32_t mondo;
671 
672 	if (!pci_buserr_interrupt)
673 		return (DDI_SUCCESS);
674 
675 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
676 	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
677 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
678 
679 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR],
680 	    cb_buserr_intr, (caddr_t)pci_p->pci_cb_p, NULL) == 0);
681 
682 	return (PCI_ATTACH_RETCODE(PCI_CB_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
683 }
684 
685 void
686 cb_enable_intr(pci_t *pci_p)
687 {
688 	if (pci_buserr_interrupt)
689 		cb_enable_nintr(pci_p, CBNINTR_BUS_ERROR);
690 }
691 
692 uint64_t
693 cb_ino_to_map_pa(cb_t *cb_p, ib_ino_t ino)
694 {
695 	return (cb_p->cb_map_pa + (ino << 3));
696 }
697 
698 uint64_t
699 cb_ino_to_clr_pa(cb_t *cb_p, ib_ino_t ino)
700 {
701 	return (cb_p->cb_clr_pa + (ino << 3));
702 }
703 
704 /*
705  * Useful on psycho only.
706  */
707 int
708 cb_remove_xintr(pci_t *pci_p, dev_info_t *dip, dev_info_t *rdip, ib_ino_t ino,
709 ib_mondo_t mondo)
710 {
711 	return (DDI_FAILURE);
712 }
713 
714 void
715 pbm_configure(pbm_t *pbm_p)
716 {
717 	pci_t *pci_p = pbm_p->pbm_pci_p;
718 	dev_info_t *dip = pbm_p->pbm_pci_p->pci_dip;
719 	int instance = ddi_get_instance(dip);
720 	uint64_t l;
721 	uint64_t mask = 1ll << instance;
722 	ushort_t s = 0;
723 
724 	l = *pbm_p->pbm_ctrl_reg;	/* save control register state */
725 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
726 
727 	/*
728 	 * See if any SERR# signals are asserted.  We'll clear them later.
729 	 */
730 	if (l & COMMON_PCI_CTRL_SERR)
731 		cmn_err(CE_WARN, "%s%d: SERR asserted on pci bus\n",
732 		    ddi_driver_name(dip), instance);
733 
734 	/*
735 	 * Determine if PCI bus is running at 33 or 66 mhz.
736 	 */
737 	if (l & COMMON_PCI_CTRL_SPEED)
738 		pbm_p->pbm_speed = PBM_SPEED_66MHZ;
739 	else
740 		pbm_p->pbm_speed = PBM_SPEED_33MHZ;
741 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: %d mhz\n",
742 	    pbm_p->pbm_speed  == PBM_SPEED_66MHZ ? 66 : 33);
743 
744 	if (pci_set_dto_value & mask) {
745 		l &= ~(3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
746 		l |= pci_dto_value << SCHIZO_PCI_CTRL_PTO_SHIFT;
747 	} else if (PCI_CHIP_ID(pci_p) >= TOMATILLO_VER_21) {
748 		l |= (3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
749 	}
750 
751 	/*
752 	 * Enable error interrupts.
753 	 */
754 	if (pci_error_intr_enable & mask)
755 		l |= SCHIZO_PCI_CTRL_ERR_INT_EN;
756 	else
757 		l &= ~SCHIZO_PCI_CTRL_ERR_INT_EN;
758 
759 	/*
760 	 * Enable pci streaming byte errors and error interrupts.
761 	 */
762 	if (pci_sbh_error_intr_enable & mask)
763 		l |= SCHIZO_PCI_CTRL_SBH_INT_EN;
764 	else
765 		l &= ~SCHIZO_PCI_CTRL_SBH_INT_EN;
766 
767 	/*
768 	 * Enable pci discard timeout error interrupt.
769 	 */
770 	if (pci_mmu_error_intr_enable & mask)
771 		l |= SCHIZO_PCI_CTRL_MMU_INT_EN;
772 	else
773 		l &= ~SCHIZO_PCI_CTRL_MMU_INT_EN;
774 
775 	/*
776 	 * Enable PCI-X error interrupts.
777 	 */
778 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
779 
780 		if (xmits_error_intr_enable & mask)
781 			l |= XMITS_PCI_CTRL_X_ERRINT_EN;
782 		else
783 			l &= ~XMITS_PCI_CTRL_X_ERRINT_EN;
784 		/*
785 		 * Panic if older XMITS hardware is found.
786 		 */
787 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)
788 			if (PCI_CHIP_ID(pci_p) <= XMITS_VER_10)
789 				cmn_err(CE_PANIC, "%s (%s): PCIX mode "
790 				"unsupported on XMITS version %d\n",
791 				    pbm_p->pbm_nameinst_str,
792 				    pbm_p->pbm_nameaddr_str, CHIP_VER(pci_p));
793 
794 		if (xmits_perr_recov_int_enable) {
795 			if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
796 				uint64_t pcix_err;
797 				/*
798 				 * Enable interrupt on PERR
799 				 */
800 				pcix_err = *pbm_p->pbm_pcix_err_stat_reg;
801 				pcix_err |= XMITS_PCIX_STAT_PERR_RECOV_INT_EN;
802 				pcix_err &= ~XMITS_PCIX_STAT_SERR_ON_PERR;
803 				*pbm_p->pbm_pcix_err_stat_reg = pcix_err;
804 			}
805 		}
806 
807 		/*
808 		 * Enable parity error detection on internal memories
809 		 */
810 		*pbm_p->pbm_pci_ped_ctrl = 0x3fff;
811 	}
812 
813 	/*
814 	 * Enable/disable bus parking.
815 	 */
816 	if ((pci_bus_parking_enable & mask) &&
817 	    !ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
818 	    "no-bus-parking"))
819 		l |= SCHIZO_PCI_CTRL_ARB_PARK;
820 	else
821 		l &= ~SCHIZO_PCI_CTRL_ARB_PARK;
822 
823 	/*
824 	 * Enable arbitration.
825 	 */
826 	l |= PCI_CHIP_ID(pci_p) == XMITS_VER_10 ? XMITS10_PCI_CTRL_ARB_EN_MASK :
827 		SCHIZO_PCI_CTRL_ARB_EN_MASK;
828 
829 	/*
830 	 * Make sure SERR is clear
831 	 */
832 	l |= COMMON_PCI_CTRL_SERR;
833 
834 
835 	/*
836 	 * Enable DTO interrupt, if desired.
837 	 */
838 
839 	if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_20 || (pci_dto_intr_enable &
840 	    mask))
841 		l |=	 (TOMATILLO_PCI_CTRL_DTO_INT_EN);
842 	else
843 		l &=	 ~(TOMATILLO_PCI_CTRL_DTO_INT_EN);
844 
845 	l |= TOMATILLO_PCI_CTRL_PEN_RD_MLTPL |
846 		TOMATILLO_PCI_CTRL_PEN_RD_ONE |
847 		TOMATILLO_PCI_CTRL_PEN_RD_LINE;
848 
849 	/*
850 	 * Now finally write the control register with the appropriate value.
851 	 */
852 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
853 	*pbm_p->pbm_ctrl_reg = l;
854 
855 	/*
856 	 * Enable IO Prefetch on Tomatillo
857 	 */
858 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
859 		volatile uint64_t *ioc_csr_p = pbm_p->pbm_ctrl_reg +
860 			((TOMATILLO_IOC_CSR_OFF -
861 			SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
862 		*ioc_csr_p = TOMATILLO_WRT_PEN |
863 			(1 << TOMATILLO_POFFSET_SHIFT) |
864 			TOMATILLO_C_PEN_RD_MLTPL |
865 			TOMATILLO_C_PEN_RD_ONE |
866 			TOMATILLO_C_PEN_RD_LINE;
867 	}
868 
869 	/*
870 	 * Allow DMA write parity errors to generate an interrupt.
871 	 * This is implemented on Schizo 2.5 and greater and XMITS 3.0
872 	 * and greater.  Setting this on earlier versions of XMITS
873 	 * has no effect.
874 	 */
875 	if (((CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) &&
876 	    PCI_CHIP_ID(pci_p) >= SCHIZO_VER_25) ||
877 	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)) {
878 		volatile uint64_t *pbm_icd = pbm_p->pbm_ctrl_reg +
879 		    ((SCHIZO_PERF_PCI_ICD_OFFSET -
880 		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
881 
882 		*pbm_icd |= SCHIZO_PERF_PCI_ICD_DMAW_PARITY_INT_ENABLE;
883 	}
884 
885 	/*
886 	 * Clear any PBM errors.
887 	 */
888 	l = (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_PE_SHIFT) |
889 		(SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_SE_SHIFT);
890 	*pbm_p->pbm_async_flt_status_reg = l;
891 
892 	/*
893 	 * Allow the diag register to be set based upon variables that
894 	 * can be configured via /etc/system.
895 	 */
896 	l = *pbm_p->pbm_diag_reg;
897 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
898 
899 	/*
900 	 * Enable/disable retry limit.
901 	 */
902 	if (pci_retry_disable & mask)
903 		l |= COMMON_PCI_DIAG_DIS_RETRY;
904 	else
905 		l &= ~COMMON_PCI_DIAG_DIS_RETRY;
906 
907 	/*
908 	 * Enable/disable DMA write/interrupt synchronization.
909 	 */
910 	if (pci_intsync_disable & mask)
911 		l |= COMMON_PCI_DIAG_DIS_INTSYNC;
912 	else
913 		l &= ~COMMON_PCI_DIAG_DIS_INTSYNC;
914 
915 	/*
916 	 * Enable/disable retry arbitration priority.
917 	 */
918 	if (pci_enable_retry_arb & mask)
919 		l &= ~SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
920 	else
921 		l |= SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
922 
923 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
924 	*pbm_p->pbm_diag_reg = l;
925 
926 	/*
927 	 * Enable SERR# and parity reporting via command register.
928 	 */
929 	s = pci_perr_enable & mask ? PCI_COMM_PARITY_DETECT : 0;
930 	s |= pci_serr_enable & mask ? PCI_COMM_SERR_ENABLE : 0;
931 
932 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg=%x\n", s);
933 	pbm_p->pbm_config_header->ch_command_reg = s;
934 
935 	/*
936 	 * Clear error bits in configuration status register.
937 	 */
938 	s = PCI_STAT_PERROR | PCI_STAT_S_PERROR |
939 		PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |
940 		PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR;
941 	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg=%x\n", s);
942 	pbm_p->pbm_config_header->ch_status_reg = s;
943 
944 	/*
945 	 * The current versions of the obp are supposed to set the latency
946 	 * timer register but do not.  Bug 1234181 is open against this
947 	 * problem.  Until this bug is fixed we check to see if the obp
948 	 * has attempted to set the latency timer register by checking
949 	 * for the existence of a "latency-timer" property.
950 	 */
951 	if (pci_set_latency_timer_register) {
952 		DEBUG1(DBG_ATTACH, dip,
953 		    "pbm_configure: set schizo latency timer to %x\n",
954 			pci_latency_timer);
955 		pbm_p->pbm_config_header->ch_latency_timer_reg =
956 			pci_latency_timer;
957 	}
958 
959 	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
960 		(int)pbm_p->pbm_config_header->ch_latency_timer_reg);
961 }
962 
963 uint_t
964 pbm_disable_pci_errors(pbm_t *pbm_p)
965 {
966 	pci_t *pci_p = pbm_p->pbm_pci_p;
967 	ib_t *ib_p = pci_p->pci_ib_p;
968 
969 	/*
970 	 * Disable error and streaming byte hole interrupts via the
971 	 * PBM control register.
972 	 */
973 	*pbm_p->pbm_ctrl_reg &=
974 		~(SCHIZO_PCI_CTRL_ERR_INT_EN | SCHIZO_PCI_CTRL_SBH_INT_EN |
975 		SCHIZO_PCI_CTRL_MMU_INT_EN);
976 
977 	/*
978 	 * Disable error interrupts via the interrupt mapping register.
979 	 */
980 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_NOWAIT);
981 	return (BF_NONE);
982 }
983 
984 /*
985  * Layout of the dvma context bucket bitmap entry:
986  *
987  *	63 - 56		55 - 0
988  *	8-bit lock	56-bit, each represent one context
989  *	8-bit lock	56-bit, each bit represents one context
990  */
991 #define	DCB_LOCK_BITS	8
992 #define	DCB_BMAP_BITS	(64 - DCB_LOCK_BITS)
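/*
 * A context handle encodes its bucket and bit position as
 * ctx = (bucket_no << 6) | bit_no.  For example (illustrative value),
 * ctx 0x9c denotes bucket 2, bit 0x1c; freeing it clears bit 0x1c of
 * iommu_ctx_bitmap[2].  The most significant byte of each bucket word is
 * the ldstub lock, which is why only the low 56 bits carry context state.
 */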
993 
994 dvma_context_t
995 pci_iommu_get_dvma_context(iommu_t *iommu_p, dvma_addr_t dvma_pg_index)
996 {
997 	dvma_context_t ctx;
998 	int i = (dvma_pg_index >> 6) & 0x1f;	/* 5 bit index within bucket */
999 	uint64_t ctx_mask, test = 1ull << i;
1000 	uint32_t bucket_no = dvma_pg_index & 0x3f;
1001 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1002 
1003 	uint32_t spl = ddi_enter_critical();	/* block interrupts */
1004 	if (ldstub((uint8_t *)bucket_ptr)) {	/* try lock */
1005 		ddi_exit_critical(spl);		/* unblock interrupt */
1006 		pci_iommu_ctx_lock_failure++;
1007 		return (0);
1008 	}
1009 
1010 	/* clear lock bits */
1011 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1012 	ASSERT(*bucket_ptr >> DCB_BMAP_BITS == 0xff);
1013 	ASSERT(ctx_mask >> DCB_BMAP_BITS == 0);
1014 
1015 	if (ctx_mask & test)			/* quick check i bit */
1016 		for (i = 0, test = 1ull; test & ctx_mask; test <<= 1, i++);
1017 	if (i < DCB_BMAP_BITS)
1018 		ctx_mask |= test;
1019 	*bucket_ptr = ctx_mask;			/* unlock */
1020 	ddi_exit_critical(spl);			/* unblock interrupts */
1021 
1022 	ctx = i < DCB_BMAP_BITS ? (bucket_no << 6) | i : 0;
1023 	DEBUG3(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1024 		"get_dvma_context: ctx_mask=0x%x.%x ctx=0x%x\n",
1025 		(uint32_t)(ctx_mask >> 32), (uint32_t)ctx_mask, ctx);
1026 	return (ctx);
1027 }
1028 
1029 void
1030 pci_iommu_free_dvma_context(iommu_t *iommu_p, dvma_context_t ctx)
1031 {
1032 	uint64_t ctx_mask;
1033 	uint32_t spl, bucket_no = ctx >> 6;
1034 	int bit_no = ctx & 0x3f;
1035 	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;
1036 
1037 	DEBUG1(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
1038 		"free_dvma_context: ctx=0x%x\n", ctx);
1039 
1040 	spl = ddi_enter_critical();			/* block interrupts */
1041 	while (ldstub((uint8_t *)bucket_ptr));		/* spin lock */
1042 	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
1043 							/* clear lock bits */
1044 	ASSERT(ctx_mask & (1ull << bit_no));
1045 	*bucket_ptr = ctx_mask ^ (1ull << bit_no);	/* clear & unlock */
1046 	ddi_exit_critical(spl);				/* unblock interrupt */
1047 }
1048 
1049 int
1050 pci_sc_ctx_inv(dev_info_t *dip, sc_t *sc_p, ddi_dma_impl_t *mp)
1051 {
1052 	dvma_context_t ctx = MP2CTX(mp);
1053 	volatile uint64_t *reg_addr = sc_p->sc_ctx_match_reg + ctx;
1054 	uint64_t matchreg;
1055 
1056 	if (!*reg_addr) {
1057 		DEBUG1(DBG_SC, dip, "ctx=%x no match\n", ctx);
1058 		return (DDI_SUCCESS);
1059 	}
1060 
1061 	*sc_p->sc_ctx_invl_reg = ctx;	/* 1st flush write */
1062 	matchreg = *reg_addr;		/* re-fetch after 1st flush */
1063 	if (!matchreg)
1064 		return (DDI_SUCCESS);
1065 
1066 	matchreg = (matchreg << SC_ENT_SHIFT) >> SC_ENT_SHIFT;	/* low 16-bit */
1067 	do {
1068 		if (matchreg & 1)
1069 			*sc_p->sc_ctx_invl_reg = ctx;
1070 		matchreg >>= 1;
1071 	} while (matchreg);
1072 
1073 	if (pci_ctx_no_compat || !*reg_addr)	/* compat: active ctx flush */
1074 		return (DDI_SUCCESS);
1075 
1076 	pci_ctx_unsuccess_count++;
1077 	if (pci_ctx_flush_warn)
1078 		cmn_err(pci_ctx_flush_warn, "%s%d: ctx flush unsuccessful\n",
1079 			NAMEINST(dip));
1080 	return (DDI_FAILURE);
1081 }
1082 
1083 void
1084 pci_cb_setup(pci_t *pci_p)
1085 {
1086 	dev_info_t *dip = pci_p->pci_dip;
1087 	cb_t *cb_p = pci_p->pci_cb_p;
1088 	uint64_t pa;
1089 	uint32_t chip_id = PCI_CHIP_ID(pci_p);
1090 	DEBUG1(DBG_ATTACH, dip, "cb_create: chip id %d\n", chip_id);
1091 
1092 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1093 		if ((!tm_mtlb_gc_manual) &&
1094 		    (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_24))
1095 			tm_mtlb_gc = 1;
1096 
1097 		if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_23) {
1098 			extern int ignore_invalid_vecintr;
1099 			ignore_invalid_vecintr = 1;
1100 			tomatillo_store_store_wrka = 1;
1101 			tomatillo_disallow_bypass = 1;
1102 			if (pci_spurintr_msgs == PCI_SPURINTR_MSG_DEFAULT)
1103 				pci_spurintr_msgs = 0;
1104 		}
1105 	}
1106 
1107 	if (chip_id == TOMATILLO_VER_20 || chip_id == TOMATILLO_VER_21)
1108 		cmn_err(CE_WARN, "Unsupported Tomatillo rev (%x)", chip_id);
1109 
1110 	if (chip_id < SCHIZO_VER_23)
1111 		pci_ctx_no_active_flush = 1;
1112 
1113 	cb_p->cb_node_id = PCI_ID_TO_NODEID(pci_p->pci_id);
1114 	cb_p->cb_ign	 = PCI_ID_TO_IGN(pci_p->pci_id);
1115 
1116 	/*
1117 	 * schizo control status reg bank is on the 2nd "reg" property entry
1118 	 * interrupt mapping/clear/state regs are on the 1st "reg" entry.
1119 	 *
1120 	 * ALL internal interrupts except pbm interrupts are shared by both
1121 	 * sides, 1st-side-attached is used as *the* owner.
1122 	 */
1123 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[1]);
1124 	cb_p->cb_base_pa = pa << MMU_PAGESHIFT;
1125 
1126 	pa = pci_p->pci_address[3] ?
1127 		(uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[3]) : 0;
1128 	cb_p->cb_icbase_pa = (pa == PFN_INVALID) ? 0 : pa << MMU_PAGESHIFT;
1129 
1130 	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[0])
1131 		<< MMU_PAGESHIFT;
1132 	cb_p->cb_map_pa = pa + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1133 	cb_p->cb_clr_pa = pa + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1134 	cb_p->cb_obsta_pa = pa + COMMON_IB_OBIO_INTR_STATE_DIAG_REG;
1135 }
1136 
1137 void
1138 pci_ecc_setup(ecc_t *ecc_p)
1139 {
1140 	ecc_p->ecc_ue.ecc_errpndg_mask = SCHIZO_ECC_UE_AFSR_ERRPNDG;
1141 	ecc_p->ecc_ue.ecc_offset_mask = SCHIZO_ECC_UE_AFSR_QW_OFFSET;
1142 	ecc_p->ecc_ue.ecc_offset_shift = SCHIZO_ECC_UE_AFSR_QW_OFFSET_SHIFT;
1143 	ecc_p->ecc_ue.ecc_size_log2 = 4;
1144 
1145 	ecc_p->ecc_ce.ecc_errpndg_mask = SCHIZO_ECC_CE_AFSR_ERRPNDG;
1146 	ecc_p->ecc_ce.ecc_offset_mask = SCHIZO_ECC_CE_AFSR_QW_OFFSET;
1147 	ecc_p->ecc_ce.ecc_offset_shift = SCHIZO_ECC_CE_AFSR_QW_OFFSET_SHIFT;
1148 	ecc_p->ecc_ce.ecc_size_log2 = 4;
1149 }
1150 
1151 ushort_t
1152 pci_ecc_get_synd(uint64_t afsr)
1153 {
1154 	return ((ushort_t)((afsr & SCHIZO_ECC_CE_AFSR_SYND) >>
1155 	    SCHIZO_ECC_CE_AFSR_SYND_SHIFT));
1156 }
1157 
1158 /*
1159  * overwrite dvma end address (only on virtual-dma systems)
1160  * initialize tsb size
1161  * reset context bits
1162  * return: IOMMU CSR bank base address (VA)
1163  */
1164 
1165 uintptr_t
1166 pci_iommu_setup(iommu_t *iommu_p)
1167 {
1168 	pci_dvma_range_prop_t *dvma_prop;
1169 	int dvma_prop_len;
1170 
1171 	uintptr_t a;
1172 	pci_t *pci_p = iommu_p->iommu_pci_p;
1173 	dev_info_t *dip = pci_p->pci_dip;
1174 	uint_t tsb_size = iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie);
1175 
1176 	/*
1177 	 * Initializations for Tomatillo's micro TLB bug. errata #82
1178 	 */
1179 	if (tm_mtlb_gc) {
1180 		iommu_p->iommu_mtlb_nreq = 0;
1181 		iommu_p->iommu_mtlb_npgs = 0;
1182 		iommu_p->iommu_mtlb_maxpgs = tm_mtlb_maxpgs;
1183 		iommu_p->iommu_mtlb_req_p = (dvma_unbind_req_t *)
1184 		    kmem_zalloc(sizeof (dvma_unbind_req_t) *
1185 		    (tm_mtlb_maxpgs + 1), KM_SLEEP);
1186 		mutex_init(&iommu_p->iommu_mtlb_lock, NULL, MUTEX_DRIVER, NULL);
1187 	}
1188 
1189 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1190 		"virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
1191 		DDI_PROP_SUCCESS)
1192 		goto tsb_done;
1193 
1194 	if (dvma_prop_len != sizeof (pci_dvma_range_prop_t)) {
1195 		cmn_err(CE_WARN, "%s%d: invalid virtual-dma property",
1196 			ddi_driver_name(dip), ddi_get_instance(dip));
1197 		goto tsb_end;
1198 	}
1199 	iommu_p->iommu_dvma_end = dvma_prop->dvma_base +
1200 		(dvma_prop->dvma_len - 1);
1201 tsb_end:
1202 	kmem_free(dvma_prop, dvma_prop_len);
1203 tsb_done:
1204 	iommu_p->iommu_tsb_size = iommu_tsb_size_encode(tsb_size);
1205 	iommu_p->iommu_ctx_bitmap =
1206 		kmem_zalloc(IOMMU_CTX_BITMAP_SIZE, KM_SLEEP);
1207 	*iommu_p->iommu_ctx_bitmap = 1ull;	/* reserve context 0 */
1208 
1209 	/*
1210 	 * Determine the virtual address of the register block
1211 	 * containing the iommu control registers and determine
1212 	 * the virtual address of schizo specific iommu registers.
1213 	 */
1214 	a = (uintptr_t)pci_p->pci_address[0];
1215 	iommu_p->iommu_flush_ctx_reg =
1216 		(uint64_t *)(a + SCHIZO_IOMMU_FLUSH_CTX_REG_OFFSET);
1217 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
1218 		iommu_p->iommu_tfar_reg =
1219 			(uint64_t *)(a + TOMATILLO_IOMMU_ERR_TFAR_OFFSET);
1220 	return (a);	/* PCICSRBase */
1221 }
1222 
1223 void
1224 pci_iommu_teardown(iommu_t *iommu_p)
1225 {
1226 	if (pci_use_contexts)
1227 		iommu_ctx_free(iommu_p);
1228 	if (iommu_p->iommu_mtlb_req_p) {
1229 		kmem_free(iommu_p->iommu_mtlb_req_p,
1230 		    sizeof (dvma_unbind_req_t) * (tm_mtlb_maxpgs + 1));
1231 		mutex_destroy(&iommu_p->iommu_mtlb_lock);
1232 		iommu_p->iommu_mtlb_req_p = NULL;
1233 		iommu_p->iommu_mtlb_nreq = 0;
1234 		iommu_p->iommu_mtlb_npgs = iommu_p->iommu_mtlb_maxpgs = 0;
1235 	}
1236 }
1237 
1238 uintptr_t
1239 get_pbm_reg_base(pci_t *pci_p)
1240 {
1241 	return ((uintptr_t)
1242 		(pci_p->pci_address[0] + SCHIZO_PCI_CTRL_REG_OFFSET));
1243 }
1244 
1245 /* ARGSUSED */
1246 static boolean_t
1247 pci_pbm_panic_callb(void *arg, int code)
1248 {
1249 	pbm_t *pbm_p = (pbm_t *)arg;
1250 	volatile uint64_t *ctrl_reg_p;
1251 
1252 	if (pbm_p->pbm_quiesce_count > 0) {
1253 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1254 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1255 	}
1256 
1257 	return (B_TRUE);
1258 }
1259 
1260 static boolean_t
1261 pci_pbm_debug_callb(void *arg, int code)
1262 {
1263 	pbm_t *pbm_p = (pbm_t *)arg;
1264 	volatile uint64_t *ctrl_reg_p;
1265 	uint64_t ctrl_reg;
1266 
1267 	if (pbm_p->pbm_quiesce_count > 0) {
1268 		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
1269 		if (code == 0) {
1270 			*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
1271 		} else {
1272 			ctrl_reg = pbm_p->pbm_saved_ctrl_reg;
1273 			ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
1274 			    SCHIZO_PCI_CTRL_ARB_PARK);
1275 			*ctrl_reg_p = ctrl_reg;
1276 		}
1277 	}
1278 
1279 	return (B_TRUE);
1280 }
1281 
1282 void
1283 pci_pbm_setup(pbm_t *pbm_p)
1284 {
1285 	pci_t *pci_p = pbm_p->pbm_pci_p;
1286 	caddr_t a = pci_p->pci_address[0]; /* PBM block base VA */
1287 	uint64_t pa = va_to_pa(a);
1288 	extern int segkmem_reloc;
1289 
1290 	mutex_init(&pbm_p->pbm_sync_mutex, NULL, MUTEX_DRIVER,
1291 	    (void *)ipltospl(XCALL_PIL));
1292 
1293 	pbm_p->pbm_config_header = (config_header_t *)pci_p->pci_address[2];
1294 	pbm_p->pbm_ctrl_reg = (uint64_t *)(a + SCHIZO_PCI_CTRL_REG_OFFSET);
1295 	pbm_p->pbm_diag_reg = (uint64_t *)(a + SCHIZO_PCI_DIAG_REG_OFFSET);
1296 	pbm_p->pbm_async_flt_status_reg =
1297 		(uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1298 	pbm_p->pbm_async_flt_addr_reg =
1299 		(uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1300 	pbm_p->pbm_estar_reg = (uint64_t *)(a + SCHIZO_PCI_ESTAR_REG_OFFSET);
1301 	pbm_p->pbm_pcix_err_stat_reg = (uint64_t *)(a +
1302 	    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
1303 	pbm_p->pbm_pci_ped_ctrl = (uint64_t *)(a +
1304 	    XMITS_PARITY_DETECT_REG_OFFSET);
1305 
1306 	/*
1307 	 * Create a property to indicate that this node supports DVMA
1308 	 * page relocation.
1309 	 */
1310 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO && segkmem_reloc != 0) {
1311 		pci_dvma_remap_enabled = 1;
1312 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1313 		    pci_p->pci_dip, "dvma-remap-supported");
1314 	}
1315 
1316 	/*
1317 	 * Register a panic callback so we can unquiesce this bus
1318 	 * if it has been placed in the quiesced state.
1319 	 */
1320 	pbm_p->pbm_panic_cb_id = callb_add(pci_pbm_panic_callb,
1321 	    (void *)pbm_p, CB_CL_PANIC, "pci_panic");
1322 	pbm_p->pbm_debug_cb_id = callb_add(pci_pbm_debug_callb,
1323 	    (void *)pbm_p, CB_CL_ENTER_DEBUGGER, "pci_debug_enter");
1324 
1325 	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
1326 		goto non_schizo;
1327 
1328 	if (PCI_CHIP_ID(pci_p) >= SCHIZO_VER_23) {
1329 
1330 		pbm_p->pbm_sync_reg_pa = pa + SCHIZO_PBM_DMA_SYNC_REG_OFFSET;
1331 
1332 		/*
1333 		 * This is a software workaround for a schizo hardware bug.
1334 		 * Create a boolean property whose existence means that
1335 		 * consistent dma sync should not be done while in the prom.
1336 		 * The usb polled code (OHCI, EHCI) will check for this
1337 		 * property and will not do dma sync if it exists.
1338 		 */
1339 		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
1340 		    pci_p->pci_dip, "no-prom-cdma-sync");
1341 	}
1342 	return;
1343 non_schizo:
1344 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
1345 		pci_dvma_sync_before_unmap = 1;
1346 		pa = pci_p->pci_cb_p->cb_icbase_pa;
1347 	}
1348 	pbm_p->pbm_sync_reg_pa = pa + PBM_DMA_SYNC_PEND_REG_OFFSET;
1349 }
1350 
1351 void
1352 pci_pbm_teardown(pbm_t *pbm_p)
1353 {
1354 	(void) callb_delete(pbm_p->pbm_panic_cb_id);
1355 	(void) callb_delete(pbm_p->pbm_debug_cb_id);
1356 }
1357 
1358 uintptr_t
1359 pci_ib_setup(ib_t *ib_p)
1360 {
1361 	/*
1362 	 * Determine virtual addresses of bridge specific registers.
1363 	 */
1364 	pci_t *pci_p = ib_p->ib_pci_p;
1365 	uintptr_t a = (uintptr_t)pci_p->pci_address[0];
1366 
1367 	ib_p->ib_ign = PCI_ID_TO_IGN(pci_p->pci_id);
1368 	ib_p->ib_max_ino = SCHIZO_MAX_INO;
1369 	ib_p->ib_slot_intr_map_regs = a + SCHIZO_IB_SLOT_INTR_MAP_REG_OFFSET;
1370 	ib_p->ib_intr_map_regs = a + SCHIZO_IB_INTR_MAP_REG_OFFSET;
1371 	ib_p->ib_slot_clear_intr_regs =
1372 		a + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
1373 	return (a);
1374 }
1375 
1376 void
1377 pci_sc_setup(sc_t *sc_p)
1378 {
1379 	pci_t *pci_p = sc_p->sc_pci_p;
1380 	uintptr_t a;
1381 
1382 	/*
1383 	 * Determine the virtual addresses of the stream cache
1384 	 * control/status and flush registers.
1385 	 */
1386 	a = (uintptr_t)pci_p->pci_address[0];	/* PCICSRBase */
1387 	sc_p->sc_ctrl_reg = (uint64_t *)(a + SCHIZO_SC_CTRL_REG_OFFSET);
1388 	sc_p->sc_invl_reg = (uint64_t *)(a + SCHIZO_SC_INVL_REG_OFFSET);
1389 	sc_p->sc_sync_reg = (uint64_t *)(a + SCHIZO_SC_SYNC_REG_OFFSET);
1390 	sc_p->sc_ctx_invl_reg = (uint64_t *)(a + SCHIZO_SC_CTX_INVL_REG_OFFSET);
1391 	sc_p->sc_ctx_match_reg =
1392 		(uint64_t *)(a + SCHIZO_SC_CTX_MATCH_REG_OFFSET);
1393 
1394 	/*
1395 	 * Determine the virtual addresses of the streaming cache
1396 	 * diagnostic access registers.
1397 	 */
1398 	sc_p->sc_data_diag_acc = (uint64_t *)(a + SCHIZO_SC_DATA_DIAG_OFFSET);
1399 	sc_p->sc_tag_diag_acc = (uint64_t *)(a + SCHIZO_SC_TAG_DIAG_OFFSET);
1400 	sc_p->sc_ltag_diag_acc = (uint64_t *)(a + SCHIZO_SC_LTAG_DIAG_OFFSET);
1401 }
1402 
1403 /*ARGSUSED*/
1404 int
1405 pci_get_numproxy(dev_info_t *dip)
1406 {
1407 	/*
1408 	 * Schizo does not support interrupt proxies.
1409 	 */
1410 	return (0);
1411 }
1412 
1413 /*
1414  * pcisch error handling 101:
1415  *
1416  * The various functions below are responsible for error handling. Given
1417  * a particular error, they must gather the appropriate state, report all
1418  * errors with correct payload, and attempt recovery wherever possible.
1419  *
1420  * Recovery in the context of this driver is being able to notify a leaf device
1421  * of the failed transaction. This leaf device may either be the master or
1422  * target for this transaction and may have already received an error
1423  * notification via a PCI interrupt. Notification is done via DMA and access
1424  * handles. If we capture an address for the transaction then we can map it
1425  * to a handle (if the leaf device is fma-compliant) and fault the handle
1426  * as well as call the device driver's registered callback.
1427  *
1428  * The hardware can either interrupt or trap upon detection of an error; in
1429  * some rare cases it also causes a fatal reset.
1430  *
1431  * cb_buserr_intr() is responsible for handling control block
1432  * errors (errors which stem from the host bus side of the bridge). Since
1433  * we support multiple chips and host bus standards, cb_buserr_intr will
1434  * call a bus specific error handler to report and handle the detected
1435  * error. Since this error can affect, or originate from, either of the
1436  * two PCI busses which are connected to the bridge, we need to call
1437  * pci_pbm_err_handler() for each bus as well to report their errors. We
1438  * also need to gather possible errors which have been detected by their
1439  * compliant children (via ndi_fm_handler_dispatch()).
1440  *
1441  * pbm_error_intr() and ecc_intr() are responsible for PCI Block Module
1442  * errors (generic PCI + bridge specific) and ECC errors, respectively. They
1443  * are common between pcisch and pcipsy and therefore exist in pci_pbm.c and
1444  * pci_ecc.c. To support error handling, certain chip specific handlers
1445  * must exist and they are defined below.
1446  *
1447  * cpu_deferred_error() and cpu_async_error() handle the traps that may
1448  * have originated from IO space. They call into the registered IO callbacks
1449  * to report and handle errors that may have caused the trap.
1450  *
1451  * pci_pbm_err_handler() is called by pbm_error_intr() or pci_err_callback()
1452  * (generic fma callback for pcipsy/pcisch, pci_fm.c). pci_err_callback() is
1453  * called when the CPU has trapped because of a possible IO error (TO/BERR/UE).
1454  * It will call pci_pbm_err_handler() to report and handle all PCI/PBM/IOMMU
1455  * related errors which are detected by the chip.
1456  *
1457  * pci_pbm_err_handler() calls a generic interface pbm_afsr_report() (pci_pbm.c)
1458  * to report the pbm specific errors and attempt to map the failed address
1459  * (if captured) to a device instance. pbm_afsr_report() calls a chip specific
1460  * interface to interpret the afsr bits, pci_pbm_classify() (pcisch.c/pcipsy.c).
1461  * pci_pbm_err_handler() also calls iommu_err_handler() to handle IOMMU related
1462  * errors.
1463  *
1464  * iommu_err_handler() can recover from most errors, as long as the requesting
1465  * device is notified and the iommu can be flushed. If an IOMMU error occurs
1466  * due to a UE then it will be passed on to the ecc_err_handler() for
1467  * subsequent handling.
1468  *
1469  * ecc_err_handler() (pci_ecc.c) also calls a chip specific interface to
1470  * interpret the afsr, pci_ecc_classify(). ecc_err_handler() also calls
1471  * pci_pbm_err_handler() to report any pbm errors detected.
1472  *
1473  * To make sure that the trap code and the interrupt code are not going
1474  * to step on each other's toes we have a per chip pci_fm_mutex. This also
1475  * makes it necessary for us to be cautious while we are at a high PIL, so
1476  * that we do not cause a subsequent trap that causes us to hang.
1477  *
1478  * The attempt to commonize code was meant to keep in line with the current
1479  * pci driver implementation and it was not meant to confuse. If you are
1480  * confused then don't worry, I was too.
1481  *
1482  */
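/*
 * A rough call-flow summary of the above (paraphrased from the comment, not
 * an exhaustive list):
 *
 *	cb_buserr_intr() -> safari_err_handler() or jbus_err_handler(),
 *		then pci_pbm_err_handler() for each bus side
 *	pbm_error_intr() / pci_err_callback() -> pci_pbm_err_handler()
 *		-> pbm_afsr_report() -> pci_pbm_classify()
 *		-> iommu_err_handler() -> ecc_err_handler() (UE case)
 *	ecc_intr() -> ecc_err_handler() -> pci_ecc_classify()
 *		-> pci_pbm_err_handler()
 */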
1483 static void
1484 pci_cb_errstate_get(cb_t *cb_p, cb_errstate_t *cb_err_p)
1485 {
1486 	uint64_t pa = cb_p->cb_base_pa;
1487 	int	i;
1488 
1489 	bzero(cb_err_p, sizeof (cb_errstate_t));
1490 
1491 	ASSERT(MUTEX_HELD(&cb_p->cb_pci_cmn_p->pci_fm_mutex));
1492 
1493 	cb_err_p->cb_bridge_type = PCI_BRIDGE_TYPE(cb_p->cb_pci_cmn_p);
1494 
1495 	cb_err_p->cb_csr = lddphysio(pa + SCHIZO_CB_CSR_OFFSET);
1496 	cb_err_p->cb_err = lddphysio(pa + SCHIZO_CB_ERRCTRL_OFFSET);
1497 	cb_err_p->cb_intr = lddphysio(pa + SCHIZO_CB_INTCTRL_OFFSET);
1498 	cb_err_p->cb_elog = lddphysio(pa + SCHIZO_CB_ERRLOG_OFFSET);
1499 	cb_err_p->cb_ecc = lddphysio(pa + SCHIZO_CB_ECCCTRL_OFFSET);
1500 	cb_err_p->cb_ue_afsr = lddphysio(pa + SCHIZO_CB_UEAFSR_OFFSET);
1501 	cb_err_p->cb_ue_afar = lddphysio(pa + SCHIZO_CB_UEAFAR_OFFSET);
1502 	cb_err_p->cb_ce_afsr = lddphysio(pa + SCHIZO_CB_CEAFSR_OFFSET);
1503 	cb_err_p->cb_ce_afar = lddphysio(pa + SCHIZO_CB_CEAFAR_OFFSET);
1504 
1505 	if ((CB_CHIP_TYPE((cb_t *)cb_p)) == PCI_CHIP_XMITS) {
1506 		cb_err_p->cb_first_elog = lddphysio(pa +
1507 				XMITS_CB_FIRST_ERROR_LOG);
1508 		cb_err_p->cb_first_eaddr = lddphysio(pa +
1509 				XMITS_CB_FIRST_ERROR_ADDR);
1510 		cb_err_p->cb_leaf_status = lddphysio(pa +
1511 				XMITS_CB_FIRST_ERROR_ADDR);
1512 	}
1513 
1514 	/* Gather PBM state information for both sides of this chip */
1515 	for (i = 0; i < 2; i++) {
1516 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1517 			continue;
1518 		pci_pbm_errstate_get(((cb_t *)cb_p)->cb_pci_cmn_p->
1519 					    pci_p[i], &cb_err_p->cb_pbm[i]);
1520 	}
1521 }
1522 
1523 static void
1524 pci_cb_clear_error(cb_t *cb_p, cb_errstate_t *cb_err_p)
1525 {
1526 	uint64_t pa = ((cb_t *)cb_p)->cb_base_pa;
1527 
1528 	stdphysio(pa + SCHIZO_CB_ERRLOG_OFFSET, cb_err_p->cb_elog);
1529 }
1530 
1531 static cb_fm_err_t safari_err_tbl[] = {
1532 	SAFARI_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1533 	SAFARI_SSM_DIS,		SCHIZO_CB_ELOG_SSM_DIS,		CB_FATAL,
1534 	SAFARI_BAD_CMD_PCIA, 	SCHIZO_CB_ELOG_BAD_CMD_PCIA,	CB_FATAL,
1535 	SAFARI_BAD_CMD_PCIB, 	SCHIZO_CB_ELOG_BAD_CMD_PCIB,	CB_FATAL,
1536 	SAFARI_PAR_ERR_INT_PCIB, XMITS_CB_ELOG_PAR_ERR_INT_PCIB, CB_FATAL,
1537 	SAFARI_PAR_ERR_INT_PCIA, XMITS_CB_ELOG_PAR_ERR_INT_PCIA, CB_FATAL,
1538 	SAFARI_PAR_ERR_INT_SAF,	XMITS_CB_ELOG_PAR_ERR_INT_SAF,	CB_FATAL,
1539 	SAFARI_PLL_ERR_PCIB,	XMITS_CB_ELOG_PLL_ERR_PCIB,	CB_FATAL,
1540 	SAFARI_PLL_ERR_PCIA,	XMITS_CB_ELOG_PLL_ERR_PCIA,	CB_FATAL,
1541 	SAFARI_PLL_ERR_SAF,	XMITS_CB_ELOG_PLL_ERR_SAF,	CB_FATAL,
1542 	SAFARI_SAF_CIQ_TO,	SCHIZO_CB_ELOG_SAF_CIQ_TO,	CB_FATAL,
1543 	SAFARI_SAF_LPQ_TO,	SCHIZO_CB_ELOG_SAF_LPQ_TO,	CB_FATAL,
1544 	SAFARI_SAF_SFPQ_TO,	SCHIZO_CB_ELOG_SAF_SFPQ_TO,	CB_FATAL,
1545 	SAFARI_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1546 	SAFARI_UNMAP_ERR,	SCHIZO_CB_ELOG_UNMAP_ERR,	CB_FATAL,
1547 	SAFARI_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_FATAL,
1548 	SAFARI_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_FATAL,
1549 	SAFARI_DSTAT_ERR,	SCHIZO_CB_ELOG_DSTAT_ERR,	CB_FATAL,
1550 	SAFARI_SAF_UFPQ_TO,	SCHIZO_CB_ELOG_SAF_UFPQ_TO,	CB_FATAL,
1551 	SAFARI_CPU0_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU0_PAR_SINGLE,	CB_FATAL,
1552 	SAFARI_CPU0_PAR_BIDI,	SCHIZO_CB_ELOG_CPU0_PAR_BIDI,	CB_FATAL,
1553 	SAFARI_CPU1_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU1_PAR_SINGLE,	CB_FATAL,
1554 	SAFARI_CPU1_PAR_BIDI,	SCHIZO_CB_ELOG_CPU1_PAR_BIDI,	CB_FATAL,
1555 	NULL,			NULL,				NULL,
1556 };
1557 
1558 /*
1559  * Function used to handle and log Safari bus errors.
1560  */
1561 static int
1562 safari_err_handler(dev_info_t *dip, uint64_t fme_ena,
1563 		cb_errstate_t *cb_err_p)
1564 {
1565 	int	i;
1566 	int	fatal = 0;
1567 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
1568 	pci_common_t *cmn_p = pci_p->pci_common_p;
1569 
1570 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1571 
1572 	for (i = 0; safari_err_tbl[i].cb_err_class != NULL; i++) {
1573 		if (cb_err_p->cb_elog & safari_err_tbl[i].cb_reg_bit) {
1574 			cb_err_p->cb_err_class = safari_err_tbl[i].cb_err_class;
1575 			cb_ereport_post(dip, fme_ena, cb_err_p);
1576 			fatal += safari_err_tbl[i].cb_fatal;
1577 		}
1578 	}
1579 
1580 	if (fatal)
1581 		return (DDI_FM_FATAL);
1582 	return (DDI_FM_OK);
1583 
1584 }
1585 
1586 /*
1587  * Check the pbm va log register for a captured errant address, and fail
1588  * the handle if it is in the per-device cache.
1589  * Called from jbus_err_handler.
1590  */
1591 static int
1592 jbus_check_va_log(cb_t *cb_p, uint64_t fme_ena,
1593     cb_errstate_t *cb_err_p)
1594 {
1595 	int i;
1596 	int ret = DDI_FM_FATAL;
1597 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1598 
1599 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1600 	/*
1601 	 * Check the VA log register for an address associated with the error;
1602 	 * if no address is registered then return failure.
1603 	 */
1604 	for (i = 0; i < 2; i++) {
1605 
1606 		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
1607 			continue;
1608 		/*
1609 		 * Look up and fault handle associated with
1610 		 * logged DMA address
1611 		 */
1612 		if (cb_err_p->cb_pbm[i].pbm_va_log) {
1613 			ret = pci_handle_lookup(cb_p->cb_pci_cmn_p->pci_p[i]->
1614 					pci_dip, DMA_HANDLE, fme_ena,
1615 					(void *)&cb_err_p->cb_pbm[i].
1616 					pbm_va_log);
1617 			if (ret == DDI_FM_NONFATAL)
1618 				break;
1619 		}
1620 	}
1621 	return (ret);
1622 }
1623 
1624 static cb_fm_err_t jbus_err_tbl[] = {
1625 	JBUS_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
1626 	JBUS_PWR_DATA_PERR,	TOMATILLO_CB_ELOG_WR_DATA_PAR_ERR, CB_FATAL,
1627 	JBUS_DRD_DATA_PERR,	TOMATILLO_CB_ELOG_RD_DATA_PAR_ERR, CB_NONFATAL,
1628 	JBUS_CTL_PERR,		TOMATILLO_CB_ELOG_CTL_PAR_ERR,	CB_FATAL,
1629 	JBUS_ILL_BYTE_EN,	TOMATILLO_CB_ELOG_ILL_BYTE_EN,	CB_FATAL,
1630 	JBUS_ILL_COH_IN,	TOMATILLO_CB_ELOG_ILL_COH_IN,	CB_FATAL,
1631 	JBUS_SNOOP_ERR_RD,	TOMATILLO_CB_ELOG_SNOOP_ERR_RD,	CB_FATAL,
1632 	JBUS_SNOOP_ERR_RDS,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDS, CB_FATAL,
1633 	JBUS_SNOOP_ERR_RDSA,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDSA, CB_FATAL,
1634 	JBUS_SNOOP_ERR_OWN,	TOMATILLO_CB_ELOG_SNOOP_ERR_OWN, CB_FATAL,
1635 	JBUS_SNOOP_ERR_RDO,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDO, CB_FATAL,
1636 	JBUS_SNOOP_ERR_PCI,	TOMATILLO_CB_ELOG_SNOOP_ERR_PCI, CB_FATAL,
1637 	JBUS_SNOOP_ERR_GR,	TOMATILLO_CB_ELOG_SNOOP_ERR_GR,	CB_FATAL,
1638 	JBUS_SNOOP_ERR,		TOMATILLO_CB_ELOG_SNOOP_ERR,	CB_FATAL,
1639 	JBUS_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
1640 	JBUS_UNMAP_ERR,		SCHIZO_CB_ELOG_UNMAP_ERR,	CB_NONFATAL,
1641 	JBUS_TO_EXP_ERR,	TOMATILLO_CB_ELOG_TO_EXP_ERR,	CB_NONFATAL,
1642 	JBUS_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_NONFATAL,
1643 	JBUS_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_NONFATAL,
1644 	NULL,			NULL,				NULL,
1645 };
1646 
1647 /*
1648  * Function used to handle and log Jbus errors.
1649  */
1650 static int
1651 jbus_err_handler(dev_info_t *dip, uint64_t fme_ena,
1652     cb_errstate_t *cb_err_p)
1653 {
1654 	int	fatal = 0;
1655 	int	nonfatal = 0;
1656 	int	i;
1657 	pci_t	*pci_p = get_pci_soft_state(ddi_get_instance(dip));
1658 	cb_t	*cb_p = pci_p->pci_cb_p;
1659 
1660 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1661 
1662 	for (i = 0; jbus_err_tbl[i].cb_err_class != NULL; i++) {
1663 		if (!(cb_err_p->cb_elog & jbus_err_tbl[i].cb_reg_bit))
1664 			continue;
1665 		cb_err_p->cb_err_class = jbus_err_tbl[i].cb_err_class;
1666 		if (jbus_err_tbl[i].cb_fatal) {
1667 			fatal += jbus_err_tbl[i].cb_fatal;
1668 			continue;
1669 		}
1670 		if (jbus_check_va_log(cb_p, fme_ena, cb_err_p)
1671 				!= DDI_FM_NONFATAL) {
1672 			fatal++;
1673 		}
1674 		cb_ereport_post(dip, fme_ena, cb_err_p);
1675 	}
1676 
1677 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1678 				DDI_FM_OK));
1679 }
1680 
1681 /*
1682  * Control Block error interrupt handler.
1683  */
1684 uint_t
1685 cb_buserr_intr(caddr_t a)
1686 {
1687 	cb_t *cb_p = (cb_t *)a;
1688 	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
1689 	pci_t *pci_p = cmn_p->pci_p[0];
1690 	cb_errstate_t cb_err;
1691 	ddi_fm_error_t derr;
1692 	int ret = DDI_FM_FATAL;
1693 	int i;
1694 
1695 	if (pci_p == NULL)
1696 		pci_p = cmn_p->pci_p[1];
1697 
1698 	bzero(&derr, sizeof (ddi_fm_error_t));
1699 	derr.fme_version = DDI_FME_VERSION;
1700 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1701 
1702 	mutex_enter(&cmn_p->pci_fm_mutex);
1703 
1704 	pci_cb_errstate_get(cb_p, &cb_err);
1705 
1706 	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_TOMATILLO)
1707 		ret = jbus_err_handler(pci_p->pci_dip, derr.fme_ena, &cb_err);
1708 	else if ((CB_CHIP_TYPE(cb_p) == PCI_CHIP_SCHIZO) ||
1709 			(CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS))
1710 		ret = safari_err_handler(pci_p->pci_dip, derr.fme_ena,
1711 		    &cb_err);
1712 
1713 	/*
1714 	 * Check for related errors in PBM and IOMMU. The IOMMU could cause
1715 	 * a timeout on the jbus due to an IOMMU miss, so we need to check and
1716 	 * log the IOMMU error registers.
1717 	 */
1718 	for (i = 0; i < 2; i++) {
1719 		if (cmn_p->pci_p[i] == NULL)
1720 			continue;
1721 		if (pci_pbm_err_handler(cmn_p->pci_p[i]->pci_dip, &derr,
1722 		    (void *)cmn_p->pci_p[i], PCI_CB_CALL) == DDI_FM_FATAL)
1723 			ret = DDI_FM_FATAL;
1724 	}
1725 
1726 	/* Cleanup and reset error bits */
1727 	(void) pci_cb_clear_error(cb_p, &cb_err);
1728 	mutex_exit(&cmn_p->pci_fm_mutex);
1729 
1730 	if (ret == DDI_FM_FATAL) {
1731 		fm_panic("Fatal System Bus Error has occurred\n");
1732 	}
1733 
1734 	return (DDI_INTR_CLAIMED);
1735 }
1736 
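/*
 * ECC error classification table used by pci_ecc_classify() below.
 * Each entry lists: ereport class, AFSR error bit, UE/CE interrupt type,
 * primary/secondary flag, PIO region bits and region, and the handle
 * type (access or DMA) to search when faulting a handle.
 */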
1737 static ecc_fm_err_t ecc_err_tbl[] = {
1738 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1739 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_UPA64S, SCH_REG_UPA,
1740 	ACC_HANDLE,
1741 
1742 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1743 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_REG, SCH_REG_PCIA_REG,
1744 	ACC_HANDLE,
1745 
1746 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1747 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_MEM, SCH_REG_PCIA_MEM,
1748 	ACC_HANDLE,
1749 
1750 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1751 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_CFGIO, SCH_REG_PCIA_CFGIO,
1752 	ACC_HANDLE,
1753 
1754 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1755 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_REG, SCH_REG_PCIB_REG,
1756 	ACC_HANDLE,
1757 
1758 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1759 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_MEM, SCH_REG_PCIB_MEM,
1760 	ACC_HANDLE,
1761 
1762 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1763 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_CFGIO, SCH_REG_PCIB_CFGIO,
1764 	ACC_HANDLE,
1765 
1766 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1767 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_SAFARI_REGS, SCH_REG_SAFARI_REGS,
1768 	ACC_HANDLE,
1769 
1770 	PCI_ECC_SEC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_UE,
1771 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1772 
1773 	PCI_ECC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1774 	PBM_PRIMARY, NULL, NULL, ACC_HANDLE,
1775 
1776 	PCI_ECC_SEC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1777 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1778 
1779 	PCI_ECC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1780 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1781 
1782 	PCI_ECC_SEC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1783 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1784 
1785 	PCI_ECC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1786 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1787 
1788 	PCI_ECC_SEC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1789 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1790 
1791 	PCI_ECC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1792 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1793 
1794 	PCI_ECC_SEC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1795 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1796 
1797 	PCI_ECC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1798 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1799 
1800 	PCI_ECC_SEC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1801 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1802 
1803 	NULL, NULL, NULL, NULL, NULL, NULL,
1804 };
1805 
1806 /*
1807  * pci_ecc_classify, called by ecc_handler to classify ecc errors
1808  * and determine if we should panic or not.
1809  */
1810 void
1811 pci_ecc_classify(uint64_t err, ecc_errstate_t *ecc_err_p)
1812 {
1813 	struct async_flt *ecc_p = &ecc_err_p->ecc_aflt;
1814 	uint64_t region, afar = ecc_p->flt_addr;
1815 	int i, j, ret = 0;
1816 	int flag, fatal = 0;
1817 	pci_common_t *cmn_p = ecc_err_p->ecc_ii_p.ecc_p->ecc_pci_cmn_p;
1818 	pci_t *pci_p = cmn_p->pci_p[0];
1819 
1820 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1821 
1822 	ecc_err_p->ecc_bridge_type = PCI_BRIDGE_TYPE(cmn_p);
1823 
1824 	if (pci_p == NULL)
1825 		pci_p = cmn_p->pci_p[1];
1826 
1827 	ecc_err_p->ecc_ctrl = lddphysio(ecc_err_p->ecc_ii_p.ecc_p->ecc_csr_pa);
1828 	ecc_err_p->ecc_err_addr = afar;
1829 	region = afar & SCHIZO_ECC_AFAR_PIOW_MASK;
1830 
1831 	for (i = 0; ecc_err_tbl[i].ecc_err_class != NULL; i++) {
1832 		if (!(err & ecc_err_tbl[i].ecc_reg_bit) ||
1833 			(ecc_err_p->ecc_ii_p.ecc_type !=
1834 			    ecc_err_tbl[i].ecc_type) ||
1835 			(ecc_err_p->ecc_pri != ecc_err_tbl[i].ecc_pri))
1836 			continue;
1837 
1838 		ecc_p->flt_erpt_class = ecc_err_tbl[i].ecc_err_class;
1839 		flag = ecc_err_tbl[i].ecc_flag;
1840 
1841 		if (!ecc_err_tbl[i].ecc_pri ||
1842 				(ecc_err_tbl[i].ecc_type == CBNINTR_CE)) {
1843 			fatal += (ecc_err_tbl[i].ecc_type == CBNINTR_UE) ?
1844 				1 : 0;
1845 			break;
1846 		}
1847 
1848 		if (flag == ACC_HANDLE &&
1849 			(region & ecc_err_tbl[i].ecc_region_bits)) {
1850 			ecc_err_p->ecc_region = ecc_err_tbl[i].ecc_region;
1851 			pci_format_ecc_addr(pci_p->pci_dip,
1852 					&ecc_err_p->ecc_err_addr,
1853 					ecc_err_p->ecc_region);
1854 		}
1855 
1856 		/*
1857 		 * Lookup and fault errant handle
1858 		 */
1859 		for (j = 0; j < 2; ++j) {
1860 			ret = DDI_FM_UNKNOWN;
1861 			if (cmn_p->pci_p[j] == NULL)
1862 				continue;
1863 			ret = pci_handle_lookup(cmn_p->pci_p[j]->pci_dip,
1864 					flag, ecc_err_p->ecc_ena,
1865 					(void *)&ecc_err_p->ecc_err_addr);
1866 			if (ret == DDI_FM_NONFATAL) {
1867 				fatal = 0;
1868 				break;
1869 			} else
1870 				fatal++;
1871 		}
1872 		break;
1873 	}
1874 
1875 	if (fatal)
1876 		ecc_p->flt_panic = 1;
1877 	else if (flag != ACC_HANDLE)
1878 		ecc_err_p->ecc_pg_ret = 1;
1879 }
1880 
1881 /*
1882  * Tables to define PCI-X Split Completion errors
1883  */
1884 
1885 pcix_err_msg_rec_t pcix_completer_errs[] = {
1886 	{PCIX_CPLT_OUT_OF_RANGE,	"pcix", "oor"	},
1887 };
1888 
1889 pcix_err_tbl_t pcix_split_errs_tbl[] = {
1890 	{PCIX_CLASS_CPLT,
1891 		sizeof (pcix_completer_errs)/sizeof (pcix_err_msg_rec_t),
1892 		pcix_completer_errs		},
1893 };
1894 
1895 /*
1896  * Tables for the PCI-X error status messages
1897  */
1898 pcix_err_msg_rec_t pcix_stat_errs[] = {
1899 	{XMITS_PCIX_STAT_SC_DSCRD,	"pcix", "discard"  	},
1900 	{XMITS_PCIX_STAT_SC_TTO,	"xmits.pbmx", "tato" 	},
1901 	{XMITS_PCIX_STAT_SMMU,		"xmits.pbmx", "stmmu"	},
1902 	{XMITS_PCIX_STAT_SDSTAT,	"xmits.pbmx", "stdst"	},
1903 	{XMITS_PCIX_STAT_CMMU,		"xmits.pbmx", "cnmmu"	},
1904 	{XMITS_PCIX_STAT_CDSTAT,	"xmits.pbmx", "cndst"	}
1905 };
1906 
1907 pcix_err_tbl_t pcix_stat_errs_tbl =
1908 	{PCIX_NO_CLASS,
1909 		sizeof (pcix_stat_errs)/sizeof (pcix_err_msg_rec_t),
1910 		pcix_stat_errs		};
1911 
1912 
1913 /*
1914  * walk thru a table of error messages, printing as appropriate
1915  * Walk through a table of error messages, posting ereports as appropriate
1916  * t - the table of messages to parse
1917  * err - the error to match against
1918  * multi - flag, sometimes multiple error bits may be set/desired
1919  */
1920 static int
1921 pcix_lookup_err_msgs(dev_info_t *dip, uint64_t ena, pcix_err_tbl_t t,
1922 		pbm_errstate_t *pbm_err_p)
1923 {
1924 	uint32_t err_bits  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_INDEX_MASK;
1925 	int nerr = 0;
1926 	int j;
1927 	char buf[FM_MAX_CLASS];
1928 
1929 	for (j = 0; j < t.err_rec_num; j++)  {
1930 		uint32_t msg_key = t.err_msg_tbl[j].msg_key;
1931 		if (pbm_err_p->pbm_multi ? !(err_bits & msg_key) : err_bits
1932 				!= msg_key)
1933 			continue;
1934 
1935 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1936 		    t.err_msg_tbl[j].msg_class,
1937 		    pbm_err_p->pbm_pri ? "" : PCIX_SECONDARY,
1938 		    t.err_msg_tbl[j].msg_str);
1939 
1940 		pbm_err_p->pbm_err_class = buf;
1941 		pcix_ereport_post(dip, ena, pbm_err_p);
1942 		nerr++;
1943 	}
1944 	return (nerr ? DDI_FM_FATAL : DDI_FM_OK);
1945 }
1946 
1947 /*
1948  * Decodes the primary (bits 27-24) or secondary (bits 15-12) PCI-X split
1949  * completion error message class and index in the PBM AFSR.
1950  */
1951 static void
1952 pcix_log_split_err(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
1953 {
1954 	uint32_t class  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_CLASS_MASK;
1955 	uint32_t num_classes = sizeof (pcix_split_errs_tbl) /
1956 	    sizeof (struct pcix_err_tbl);
1957 	int i;
1958 
1959 	for (i = 0; i < num_classes; i++) {
1960 		if (class == pcix_split_errs_tbl[i].err_class) {
1961 			pbm_err_p->pbm_multi = PCIX_SINGLE_ERR;
1962 			(void) pcix_lookup_err_msgs(dip, ena,
1963 			    pcix_split_errs_tbl[i], pbm_err_p);
1964 			break;
1965 		}
1966 	}
1967 }
1968 
1969 /*
1970  * Report PBM PCI-X Error Status Register if in PCI-X mode
1971  *
1972  * Once a PCI-X fault tree is constructed, the code below may need to
1973  * change.
1974  */
1975 static int
1976 pcix_log_pbm(pci_t *pci_p, uint64_t ena, pbm_errstate_t *pbm_err_p)
1977 {
1978 	int fatal = 0;
1979 	int nonfatal = 0;
1980 	uint32_t e;
1981 
1982 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1983 
1984 	DEBUG3(DBG_ERR_INTR, pci_p->pci_dip, "pcix_log_pbm: chip_type=%d "
1985 	    "ctr_stat=%lx afsr = 0x%lx", CHIP_TYPE(pci_p),
1986 	    pbm_err_p->pbm_ctl_stat, pbm_err_p->pbm_afsr);
1987 
1988 	if (!(CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) ||
1989 	    !(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE))
1990 		return (DDI_FM_OK);
1991 
1992 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
1993 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
1994 		pbm_err_p->pbm_pri = PBM_PRIMARY;
1995 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
1996 		nonfatal++;
1997 	}
1998 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR) {
1999 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2000 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2001 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2002 		nonfatal++;
2003 	}
2004 
2005 	e = PBM_PCIX_TO_PRIERR(pbm_err_p->pbm_pcix_stat);
2006 	if (e) {
2007 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2008 		pbm_err_p->pbm_err = e;
2009 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2010 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2011 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2012 			fatal++;
2013 		else
2014 			nonfatal++;
2015 	}
2016 
2017 	e = PBM_PCIX_TO_SECERR(pbm_err_p->pbm_pcix_stat);
2018 	if (e) {
2019 		pbm_err_p->pbm_pri = PBM_SECONDARY;
2020 		pbm_err_p->pbm_err = e;
2021 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2022 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2023 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2024 			fatal++;
2025 		else
2026 			nonfatal++;
2027 	}
2028 
2029 	if (!fatal && !nonfatal)
2030 		return (DDI_FM_OK);
2031 	else if (fatal)
2032 		return (DDI_FM_FATAL);
2033 	return (DDI_FM_NONFATAL);
2034 }
2035 
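/*
 * PBM AFSR error classification table used by pci_pbm_classify() below.
 * Each entry lists: ereport class, AFSR error bit, primary/secondary
 * flag, where to log the error (PCI vs. PBM subclass), and the target
 * ereport class, if any.
 */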
2036 static pbm_fm_err_t pbm_err_tbl[] = {
2037 	PCI_MA,			SCHIZO_PCI_AFSR_E_MA,	PBM_PRIMARY,
2038 	FM_LOG_PCI,	PCI_TARG_MA,
2039 
2040 	PCI_SEC_MA,		SCHIZO_PCI_AFSR_E_MA,	PBM_SECONDARY,
2041 	FM_LOG_PBM,	NULL,
2042 
2043 	PCI_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_PRIMARY,
2044 	FM_LOG_PCI,	PCI_TARG_REC_TA,
2045 
2046 	PCI_SEC_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_SECONDARY,
2047 	FM_LOG_PBM,	NULL,
2048 
2049 	PCI_PBM_RETRY,		SCHIZO_PCI_AFSR_E_RTRY,	PBM_PRIMARY,
2050 	FM_LOG_PBM,	PCI_PBM_TARG_RETRY,
2051 
2052 	PCI_SEC_PBM_RETRY,	SCHIZO_PCI_AFSR_E_RTRY,	PBM_SECONDARY,
2053 	FM_LOG_PBM,	NULL,
2054 
2055 	PCI_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_PRIMARY,
2056 	FM_LOG_PCI,	PCI_TARG_MDPE,
2057 
2058 	PCI_SEC_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_SECONDARY,
2059 	FM_LOG_PBM,	NULL,
2060 
2061 	PCI_PBM_TTO,		SCHIZO_PCI_AFSR_E_TTO,	PBM_PRIMARY,
2062 	FM_LOG_PBM,	PCI_PBM_TARG_TTO,
2063 
2064 	PCI_SEC_PBM_TTO,	SCHIZO_PCI_AFSR_E_TTO,	PBM_SECONDARY,
2065 	FM_LOG_PBM,	NULL,
2066 
2067 	PCI_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_PRIMARY,
2068 	FM_LOG_PBM,	NULL,
2069 
2070 	PCI_SEC_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_SECONDARY,
2071 	FM_LOG_PBM,	NULL,
2072 
2073 	NULL,			NULL,			NULL,
2074 	NULL,		NULL,
2075 };
2076 
2077 
2078 /*
2079  * pci_pbm_classify, called by pbm_afsr_report to classify piow afsr.
2080  * pci_pbm_classify, called by pbm_afsr_report to classify PBM AFSR errors.
2081 int
2082 pci_pbm_classify(pbm_errstate_t *pbm_err_p)
2083 {
2084 	uint32_t err;
2085 	int nerr = 0;
2086 	int i;
2087 
2088 	err = pbm_err_p->pbm_pri ? PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr):
2089 		PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
2090 
2091 	for (i = 0; pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2092 		if ((err & pbm_err_tbl[i].pbm_reg_bit) &&
2093 		    (pbm_err_p->pbm_pri == pbm_err_tbl[i].pbm_pri)) {
2094 			if (pbm_err_tbl[i].pbm_flag == FM_LOG_PCI)
2095 				pbm_err_p->pbm_pci.pci_err_class =
2096 					pbm_err_tbl[i].pbm_err_class;
2097 			else
2098 				pbm_err_p->pbm_err_class =
2099 				    pbm_err_tbl[i].pbm_err_class;
2100 
2101 			pbm_err_p->pbm_terr_class =
2102 			    pbm_err_tbl[i].pbm_terr_class;
2103 			pbm_err_p->pbm_log = pbm_err_tbl[i].pbm_flag;
2104 			nerr++;
2105 			break;
2106 		}
2107 	}
2108 
2109 	return (nerr);
2110 }
2111 
2112 /*
2113  * Function used to handle and log IOMMU errors. Called by pci_pbm_err_handler,
2114  * with pci_fm_mutex held.
2115  */
2116 static int
2117 iommu_err_handler(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
2118 {
2119 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2120 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2121 	ecc_t *ecc_p = pci_p->pci_ecc_p;
2122 	uint64_t stat;
2123 	ushort_t ta_signalled;
2124 	int err = 0;
2125 	int fatal = 0;
2126 	int nonfatal = 0;
2127 	int ret;
2128 
2129 	ASSERT(MUTEX_HELD(&ecc_p->ecc_pci_cmn_p->pci_fm_mutex));
2130 	if (!((stat = *iommu_p->iommu_ctrl_reg) & TOMATILLO_IOMMU_ERR)) {
2131 		pbm_err_p->pbm_err_class = PCI_SCH_MMU_ERR;
2132 		iommu_ereport_post(dip, ena, pbm_err_p);
2133 		return (DDI_FM_NONFATAL);
2134 	}
2135 
2136 	/*
2137 	 * Need to make sure a Target Abort was signalled to the device if
2138 	 * we have any hope of recovering. Tomatillo does not send a TA for
2139 	 * DMA Writes that result in a Translation Error, thus fooling the
2140 	 * device into believing everything is as it expects. Ignorance
2141 	 * is bliss, but knowledge is power.
2142 	 */
2143 	ta_signalled = pbm_err_p->pbm_pci.pci_cfg_stat &
2144 		PCI_STAT_S_TARG_AB;
2145 
2146 	if (stat & TOMATILLO_IOMMU_ERR_ILLTSBTBW) {
2147 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_TSBTBW;
2148 		err = 1;
2149 		iommu_ereport_post(dip, ena, pbm_err_p);
2150 		if (!ta_signalled)
2151 			fatal++;
2152 		else
2153 			nonfatal++;
2154 	}
2155 
2156 	if (stat & TOMATILLO_IOMMU_ERR_BAD_VA) {
2157 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_VA;
2158 		err = 1;
2159 		iommu_ereport_post(dip, ena, pbm_err_p);
2160 		if (!ta_signalled)
2161 			fatal++;
2162 		else
2163 			nonfatal++;
2164 	}
2165 
2166 	if (!err) {
2167 		stat = ((stat & TOMATILLO_IOMMU_ERRSTS) >>
2168 		    TOMATILLO_IOMMU_ERRSTS_SHIFT);
2169 		switch (stat) {
2170 		case TOMATILLO_IOMMU_PROTECTION_ERR:
2171 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_PROT_ERR;
2172 			iommu_ereport_post(dip, ena, pbm_err_p);
2173 			fatal++;
2174 			break;
2175 		case TOMATILLO_IOMMU_INVALID_ERR:
2176 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_INVAL_ERR;
2177 			/*
2178 			 * Fault the address in iommu_tfar
2179 			 * register to inform target driver of error
2180 			 */
2181 			ret = pci_handle_lookup(pci_p->pci_dip, DMA_HANDLE,
2182 				ena, (void *)&pbm_err_p->pbm_iommu.iommu_tfar);
2183 
2184 			if (ret == DDI_FM_NONFATAL)
2185 				if (ta_signalled)
2186 					nonfatal++;
2187 				else
2188 					fatal++;
2189 			else
2190 				fatal++;
2191 			iommu_ereport_post(dip, ena, pbm_err_p);
2192 			break;
2193 		case TOMATILLO_IOMMU_TIMEOUT_ERR:
2194 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_TO_ERR;
2195 			fatal++;
2196 			iommu_ereport_post(dip, ena, pbm_err_p);
2197 			break;
2198 		case TOMATILLO_IOMMU_ECC_ERR:
2199 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_UE;
2200 			iommu_ereport_post(dip, ena, pbm_err_p);
2201 			break;
2202 		}
2203 	}
2204 
2205 	if (fatal)
2206 		return (DDI_FM_FATAL);
2207 	else if (nonfatal)
2208 		return (DDI_FM_NONFATAL);
2209 
2210 	return (DDI_FM_OK);
2211 }
2212 
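/*
 * Check the PCI config status register, PBM control/status register and
 * PBM AFSR (and, for XMITS in PCI-X mode, the PCI-X error status register)
 * for pending error conditions.  Returns 1 if an error is present,
 * 0 otherwise.  Called with pci_fm_mutex held.
 */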
2213 int
2214 pci_check_error(pci_t *pci_p)
2215 {
2216 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2217 	uint16_t pci_cfg_stat;
2218 	uint64_t pbm_ctl_stat, pbm_afsr, pbm_pcix_stat;
2219 	caddr_t a = pci_p->pci_address[0];
2220 	uint64_t *pbm_pcix_stat_reg;
2221 
2222 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2223 
2224 	pci_cfg_stat = pbm_p->pbm_config_header->ch_status_reg;
2225 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2226 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2227 
2228 	if ((pci_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2229 				PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2230 				PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2231 			(pbm_ctl_stat & (SCHIZO_PCI_CTRL_BUS_UNUSABLE |
2232 				TOMATILLO_PCI_CTRL_PCI_DTO_ERR |
2233 				SCHIZO_PCI_CTRL_PCI_TTO_ERR |
2234 				SCHIZO_PCI_CTRL_PCI_RTRY_ERR |
2235 				SCHIZO_PCI_CTRL_PCI_MMU_ERR |
2236 				COMMON_PCI_CTRL_SBH_ERR |
2237 				COMMON_PCI_CTRL_SERR)) ||
2238 			(PBM_AFSR_TO_PRIERR(pbm_afsr)))
2239 		return (1);
2240 
2241 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2242 			(pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2243 
2244 		pbm_pcix_stat_reg = (uint64_t *)(a +
2245 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2246 
2247 		pbm_pcix_stat = *pbm_pcix_stat_reg;
2248 
2249 		if (PBM_PCIX_TO_PRIERR(pbm_pcix_stat))
2250 			return (1);
2251 
2252 		if (pbm_pcix_stat & XMITS_PCIX_STAT_PERR_RECOV_INT)
2253 			return (1);
2254 	}
2255 
2256 	return (0);
2257 
2258 }
2259 
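/*
 * Table of PBM control/status error bits handled by pci_pbm_err_handler()
 * below: ereport class, control/status bit, primary flag (unused here),
 * severity flag, and target ereport class.
 */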
2260 static pbm_fm_err_t pci_pbm_err_tbl[] = {
2261 	PCI_PBM_RETRY,			SCHIZO_PCI_CTRL_PCI_RTRY_ERR,
2262 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_RETRY,
2263 
2264 	PCI_PBM_TTO,			SCHIZO_PCI_CTRL_PCI_TTO_ERR,
2265 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_TTO,
2266 
2267 	PCI_SCH_BUS_UNUSABLE_ERR,	SCHIZO_PCI_CTRL_BUS_UNUSABLE,
2268 	NULL,	PBM_NONFATAL,	NULL,
2269 
2270 	NULL,				NULL,
2271 	NULL,	NULL,		NULL
2272 };
2273 
2274 /*
2275  * Function used to log all PCI/PBM/IOMMU errors found in the system.
2276  * It is called by the pbm_error_intr as well as the pci_err_callback(trap
2277  * It is called by pbm_error_intr as well as pci_err_callback (the trap
2278  * callback). To protect access, we hold the pci_fm_mutex when calling
2279  */
2280 int
2281 pci_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
2282 		const void *impl_data, int caller)
2283 {
2284 	int fatal = 0;
2285 	int nonfatal = 0;
2286 	int unknown = 0;
2287 	int rserr = 0;
2288 	uint32_t prierr, secerr;
2289 	pbm_errstate_t pbm_err;
2290 	char buf[FM_MAX_CLASS];
2291 	pci_t *pci_p = (pci_t *)impl_data;
2292 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2293 	pci_target_err_t tgt_err;
2294 	int i, ret = 0;
2295 
2296 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2297 	pci_pbm_errstate_get(pci_p, &pbm_err);
2298 
2299 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
2300 	    fm_ena_generate(0, FM_ENA_FMT1);
2301 
2302 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
2303 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
2304 
2305 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
2306 		if (caller == PCI_TRAP_CALL) {
2307 			/*
2308 			 * For ddi_caut_get treat all events as nonfatal.
2309 			 * The trampoline will set err_ena = 0, err_status =
2310 			 * NONFATAL. We only really call this function so that
2311 			 * pci_clear_error() and ndi_fm_handler_dispatch() will
2312 			 * get called.
2313 			 */
2314 			derr->fme_status = DDI_FM_NONFATAL;
2315 			nonfatal++;
2316 			goto done;
2317 		} else {
2318 			/*
2319 			 * For ddi_caut_put treat all events as nonfatal. Here
2320 			 * we have the handle and can call ndi_fm_acc_err_set().
2321 			 */
2322 			derr->fme_status = DDI_FM_NONFATAL;
2323 			ndi_fm_acc_err_set(pbm_p->pbm_excl_handle, derr);
2324 			nonfatal++;
2325 			goto done;
2326 		}
2327 	} else if (derr->fme_flag == DDI_FM_ERR_PEEK) {
2328 		/*
2329 		 * For ddi_peek treat all events as nonfatal. We only
2330 		 * really call this function so that pci_clear_error()
2331 		 * and ndi_fm_handler_dispatch() will get called.
2332 		 */
2333 		nonfatal++;
2334 		goto done;
2335 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
2336 		/*
2337 		 * For ddi_poke we can treat as nonfatal if the
2338 		 * following conditions are met :
2339 		 * 1. Make sure only primary error is MA/TA
2340 		 * 2. Make sure no secondary error bits set
2341 		 * 3. check pci config header stat reg to see MA/TA is
2342 		 *    logged. We cannot verify only MA/TA is recorded
2343 		 *    since it gets much more complicated when a
2344 		 *    PCI-to-PCI bridge is present.
2345 		 */
2346 		if ((prierr == SCHIZO_PCI_AFSR_E_MA) && !secerr &&
2347 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_MAST_AB)) {
2348 			nonfatal++;
2349 			goto done;
2350 		} else if ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) &&
2351 		    pcix_ma_behind_bridge(&pbm_err)) {
2352 			/*
2353 			 * MAs behind a PCI-X bridge get sent back to
2354 			 * the host as a Split Completion Error Message.
2355 			 * We handle this the same as the above check.
2356 			 */
2357 			nonfatal++;
2358 			goto done;
2359 		}
2360 		if ((prierr == SCHIZO_PCI_AFSR_E_TA) && !secerr &&
2361 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_TARG_AB)) {
2362 			nonfatal++;
2363 			goto done;
2364 		}
2365 	}
2366 
2367 	DEBUG2(DBG_ERR_INTR, dip, "pci_pbm_err_handler: prierr=0x%x "
2368 	    "secerr=0x%x", prierr, secerr);
2369 
2370 	if (prierr || secerr) {
2371 		ret = pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
2372 		if (ret == DDI_FM_FATAL)
2373 			fatal++;
2374 		else
2375 			nonfatal++;
2376 	}
2377 	if ((ret = pcix_log_pbm(pci_p, derr->fme_ena, &pbm_err))
2378 			== DDI_FM_FATAL)
2379 		fatal++;
2380 	else if (ret == DDI_FM_NONFATAL)
2381 		nonfatal++;
2382 
2383 	if ((ret = pci_cfg_report(dip, derr, &pbm_err.pbm_pci, caller, prierr))
2384 			== DDI_FM_FATAL)
2385 		fatal++;
2386 	else if (ret == DDI_FM_NONFATAL)
2387 		nonfatal++;
2388 
2389 	for (i = 0; pci_pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2390 		if ((pbm_err.pbm_ctl_stat & pci_pbm_err_tbl[i].pbm_reg_bit) &&
2391 		    !prierr) {
2392 			pbm_err.pbm_err_class =
2393 				pci_pbm_err_tbl[i].pbm_err_class;
2394 			pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2395 			if (pci_pbm_err_tbl[i].pbm_flag)
2396 				fatal++;
2397 			else
2398 				nonfatal++;
2399 			if (caller == PCI_TRAP_CALL &&
2400 			    pci_pbm_err_tbl[i].pbm_terr_class) {
2401 				tgt_err.tgt_err_ena = derr->fme_ena;
2402 				tgt_err.tgt_err_class =
2403 				    pci_pbm_err_tbl[i].pbm_terr_class;
2404 				tgt_err.tgt_bridge_type =
2405 				    pbm_err.pbm_bridge_type;
2406 				tgt_err.tgt_err_addr =
2407 				    (uint64_t)derr->fme_bus_specific;
2408 				errorq_dispatch(pci_target_queue,
2409 				    (void *)&tgt_err, sizeof (pci_target_err_t),
2410 				    ERRORQ_ASYNC);
2411 			}
2412 		}
2413 	}
2414 
2415 	if ((pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SBH_ERR) &&
2416 	    (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)) {
2417 		pbm_err.pbm_err_class = PCI_SCH_SBH;
2418 		pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2419 		if (pci_panic_on_sbh_errors)
2420 			fatal++;
2421 		else
2422 			nonfatal++;
2423 	}
2424 
2425 	/*
2426 	 * PBM Received System Error - During any transaction, or
2427 	 * at any point on the bus, some device may detect a critical
2428 	 * error and signal a system error to the system.
2429 	 */
2430 	if (pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SERR) {
2431 		/*
2432 		 * may be expected (master abort from pci-pci bridge during
2433 		 * poke will generate SERR)
2434 		 */
2435 		if (derr->fme_flag != DDI_FM_ERR_POKE) {
2436 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2437 			    PCI_ERROR_SUBCLASS, PCI_REC_SERR);
2438 			DEBUG1(DBG_ERR_INTR, dip, "pci_pbm_err_handler: "
2439 			    "ereport_post: %s", buf);
2440 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2441 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2442 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2443 			    pbm_err.pbm_pci.pci_cfg_stat, PCI_CONFIG_COMMAND,
2444 			    DATA_TYPE_UINT16, pbm_err.pbm_pci.pci_cfg_comm,
2445 			    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2446 		}
2447 		rserr++;
2448 	}
2449 
2450 	/*
2451 	 * PCI Retry Timeout - Device fails to retry deferred
2452 	 * transaction within timeout. Only Tomatillo
2453 	 */
2454 	if (pbm_err.pbm_ctl_stat & TOMATILLO_PCI_CTRL_PCI_DTO_ERR) {
2455 		if (pci_dto_fault_warn == CE_PANIC)
2456 			fatal++;
2457 		else
2458 			nonfatal++;
2459 
2460 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2461 			PCI_ERROR_SUBCLASS, PCI_DTO);
2462 		ddi_fm_ereport_post(dip, buf, derr->fme_ena, DDI_NOSLEEP,
2463 		    FM_VERSION, DATA_TYPE_UINT8, 0,
2464 		    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2465 		    pbm_err.pbm_pci.pci_cfg_stat,
2466 		    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2467 		    pbm_err.pbm_pci.pci_cfg_comm,
2468 		    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2469 	}
2470 
2471 	/*
2472 	 * PBM Detected Data Parity Error - DPE detected during a DMA Write
2473  * or PIO Read. The latter case is taken care of by cpu_deferred_error
2474 	 * and sent here to be logged.
2475 	 */
2476 	if ((pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_PERROR) &&
2477 			!(pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_S_SYSERR)) {
2478 		/*
2479 		 * If we have an address then fault
2480 		 * it, if not probe for errant device
2481 		 */
2482 		ret = DDI_FM_FATAL;
2483 		if (caller != PCI_TRAP_CALL) {
2484 			if (pbm_err.pbm_va_log)
2485 				ret = pci_handle_lookup(dip, DMA_HANDLE,
2486 						derr->fme_ena,
2487 						(void *)&pbm_err.pbm_va_log);
2488 			if (ret == DDI_FM_NONFATAL)
2489 				nonfatal++;
2490 			else
2491 				fatal++;
2492 		} else
2493 			nonfatal++;
2494 
2495 	}
2496 
2497 	/* PBM Detected IOMMU Error */
2498 	if (pbm_err.pbm_ctl_stat & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2499 		if (iommu_err_handler(dip, derr->fme_ena, &pbm_err)
2500 				== DDI_FM_FATAL)
2501 			fatal++;
2502 		else
2503 			nonfatal++;
2504 	}
2505 
2506 done:
2507 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
2508 	if (ret == DDI_FM_FATAL) {
2509 		fatal++;
2510 	} else if (ret == DDI_FM_NONFATAL) {
2511 		nonfatal++;
2512 	} else if (ret == DDI_FM_UNKNOWN) {
2513 		unknown++;
2514 	}
2515 
2516 	/*
2517 	 * RSERR not claimed as nonfatal by a child is considered fatal
2518 	 */
2519 	if (rserr && ret != DDI_FM_NONFATAL)
2520 		fatal++;
2521 
2522 	/* Cleanup and reset error bits */
2523 	pci_clear_error(pci_p, &pbm_err);
2524 
2525 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2526 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2527 }
2528 
2529 /*
2530  * Function returns TRUE if a Primary error is a Split Completion Error
2531  * that indicates a Master Abort occurred behind a PCI-X bridge.
2532  * This function should only be called for buses running in PCI-X mode.
2533  */
2534 static int
2535 pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p)
2536 {
2537 	uint64_t msg;
2538 
2539 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR)
2540 		return (0);
2541 
2542 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2543 		msg = (pbm_err_p->pbm_afsr >> XMITS_PCI_X_P_MSG_SHIFT) &
2544 		    XMITS_PCIX_MSG_MASK;
2545 		if (msg & PCIX_CLASS_BRIDGE)
2546 			if (msg & PCIX_BRIDGE_MASTER_ABORT) {
2547 				return (1);
2548 			}
2549 	}
2550 
2551 	return (0);
2552 }
2553 
2554 /*
2555  * Function used to gather PBM/PCI/IOMMU error state for the
2556  * pci_pbm_err_handler and the cb_buserr_intr. This function must be
2557  * called while pci_fm_mutex is held.
2558  */
2559 static void
2560 pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2561 {
2562 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2563 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2564 	caddr_t a = pci_p->pci_address[0];
2565 	uint64_t *pbm_pcix_stat_reg;
2566 
2567 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2568 	bzero(pbm_err_p, sizeof (pbm_errstate_t));
2569 
2570 	/*
2571 	 * Capture all pbm error state for later logging
2572 	 */
2573 	pbm_err_p->pbm_bridge_type = PCI_BRIDGE_TYPE(pci_p->pci_common_p);
2574 
2575 	pbm_err_p->pbm_pci.pci_cfg_stat =
2576 		pbm_p->pbm_config_header->ch_status_reg;
2577 	pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2578 	pbm_err_p->pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2579 	pbm_err_p->pbm_afar = *pbm_p->pbm_async_flt_addr_reg;
2580 	pbm_err_p->pbm_iommu.iommu_stat = *iommu_p->iommu_ctrl_reg;
2581 	pbm_err_p->pbm_pci.pci_cfg_comm =
2582 		pbm_p->pbm_config_header->ch_command_reg;
2583 	pbm_err_p->pbm_pci.pci_pa = *pbm_p->pbm_async_flt_addr_reg;
2584 
2585 	/*
2586 	 * Record errant slot for Xmits and Schizo
2587 	 * Not stored in Tomatillo
2588 	 */
2589 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS ||
2590 			CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) {
2591 		pbm_err_p->pbm_err_sl = (pbm_err_p->pbm_ctl_stat &
2592 				SCHIZO_PCI_CTRL_ERR_SLOT) >>
2593 			SCHIZO_PCI_CTRL_ERR_SLOT_SHIFT;
2594 
2595 		/*
2596 		 * Bit 51 on XMITS rev 1.0 is the same as
2597 		 * SCHIZO_PCI_CTRL_ERR_SLOT_LOCK on Schizo 2.3, and it
2598 		 * needs to be cleared to be able to latch the slot
2599 		 * info on the next fault.
2600 		 * On XMITS rev 2.0, however, this bit indicates a DMA
2601 		 * Write Parity error.
2602 		 */
2603 		if (pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_DMA_WR_PERR) {
2604 			if ((PCI_CHIP_ID(pci_p) == XMITS_VER_10) ||
2605 				(PCI_CHIP_ID(pci_p) <= SCHIZO_VER_23)) {
2606 				/*
2607 				 * top 32 bits are W1C and we just want to
2608 				 * clear SLOT_LOCK. Leave bottom 32 bits
2609 				 * unchanged
2610 				 */
2611 				*pbm_p->pbm_ctrl_reg =
2612 					pbm_err_p->pbm_ctl_stat &
2613 					(SCHIZO_PCI_CTRL_ERR_SLOT_LOCK |
2614 					0xffffffff);
2615 				pbm_err_p->pbm_ctl_stat =
2616 					*pbm_p->pbm_ctrl_reg;
2617 			}
2618 		}
2619 	}
2620 
2621 	/*
2622 	 * Tomatillo specific registers
2623 	 */
2624 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2625 		pbm_err_p->pbm_va_log = (uint64_t)va_to_pa((void *)*(a +
2626 				TOMATILLO_TGT_ERR_VALOG_OFFSET));
2627 		pbm_err_p->pbm_iommu.iommu_tfar = *iommu_p->iommu_tfar_reg;
2628 	}
2629 
2630 	/*
2631 	 * Xmits PCI-X register
2632 	 */
2633 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2634 			(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2635 
2636 		pbm_pcix_stat_reg = (uint64_t *)(a +
2637 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2638 
2639 		pbm_err_p->pbm_pcix_stat = *pbm_pcix_stat_reg;
2640 		pbm_err_p->pbm_pcix_pfar = pbm_err_p->pbm_pcix_stat &
2641 				XMITS_PCI_X_STATUS_PFAR_MASK;
2642 	}
2643 }
2644 
2645 /*
2646  * Function used to clear PBM/PCI/IOMMU error state after error handling
2647  * is complete. Only clearing error bits which have been logged. Called by
2648  * pci_pbm_err_handler and pci_bus_exit.
2649  */
2650 static void
2651 pci_clear_error(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2652 {
2653 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2654 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2655 
2656 	ASSERT(MUTEX_HELD(&pbm_p->pbm_pci_p->pci_common_p->pci_fm_mutex));
2657 
2658 	if (*pbm_p->pbm_ctrl_reg & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2659 		iommu_tlb_scrub(pci_p->pci_iommu_p, 1);
2660 	}
2661 	pbm_p->pbm_config_header->ch_status_reg =
2662 		pbm_err_p->pbm_pci.pci_cfg_stat;
2663 	*pbm_p->pbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
2664 	*pbm_p->pbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
2665 	*iommu_p->iommu_ctrl_reg = pbm_err_p->pbm_iommu.iommu_stat;
2666 }
2667 
2668 void
2669 pbm_clear_error(pbm_t *pbm_p)
2670 {
2671 	uint64_t pbm_afsr, pbm_ctl_stat;
2672 
2673 	/*
2674 	 * for poke() support - called from POKE_FLUSH. Spin waiting
2675 	 * for MA, TA or SERR to be cleared by a pbm_error_intr().
2676 	 * We have to wait for SERR too in case the device is beyond
2677 	 * a pci-pci bridge.
2678 	 */
2679 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2680 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2681 	while (((pbm_afsr >> SCHIZO_PCI_AFSR_PE_SHIFT) &
2682 	    (SCHIZO_PCI_AFSR_E_MA | SCHIZO_PCI_AFSR_E_TA)) ||
2683 	    (pbm_ctl_stat & COMMON_PCI_CTRL_SERR)) {
2684 		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2685 		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2686 	}
2687 }
2688 
2689 /*
2690  * Function used to convert the 32 bit captured PCI error address
2691  * to the full Safari or Jbus address. This is so we can look this address
2692  * up in our handle caches.
2693  */
2694 void
2695 pci_format_addr(dev_info_t *dip, uint64_t *afar, uint64_t afsr)
2696 {
2697 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2698 	pci_ranges_t *io_range, *mem_range;
2699 	uint64_t err_pa = 0;
2700 
2701 	if (afsr & SCHIZO_PCI_AFSR_CONF_SPACE) {
2702 		err_pa |= pci_p->pci_ranges->parent_high;
2703 		err_pa = err_pa << 32;
2704 		err_pa |= pci_p->pci_ranges->parent_low;
2705 	} else if (afsr & SCHIZO_PCI_AFSR_IO_SPACE) {
2706 		io_range = pci_p->pci_ranges + 1;
2707 		err_pa |= io_range->parent_high;
2708 		err_pa = err_pa << 32;
2709 		err_pa |= io_range->parent_low;
2710 	} else if (afsr & SCHIZO_PCI_AFSR_MEM_SPACE) {
2711 		mem_range = pci_p->pci_ranges + 2;
2712 		err_pa |= mem_range->parent_high;
2713 		err_pa = err_pa << 32;
2714 		err_pa |= mem_range->parent_low;
2715 	}
2716 	*afar |= err_pa;
2717 }
2718 
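/*
 * Table mapping each ECC PIO region to the corresponding PCI address
 * space indicator and bus side; used by pci_format_ecc_addr() below.
 */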
2719 static ecc_format_t ecc_format_tbl[] = {
2720 	SCH_REG_UPA,		NULL,				NULL,
2721 	SCH_REG_PCIA_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEA,
2722 	SCH_REG_PCIA_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEA,
2723 	SCH_REG_PCIA_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEA,
2724 	SCH_REG_PCIB_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEB,
2725 	SCH_REG_PCIB_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEB,
2726 	SCH_REG_PCIB_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEB,
2727 	SCH_REG_SAFARI_REGS,	NULL,				NULL,
2728 	NULL,			NULL,				NULL,
2729 };
2730 
2731 /*
2732  * Function used to convert the 32 bit PIO address captured for a
2733  * Safari Bus UE(during PIO Rd/Wr) to a full Safari Bus Address.
2734  */
2735 static void
2736 pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar, ecc_region_t region)
2737 {
2738 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2739 	pci_common_t *cmn_p = pci_p->pci_common_p;
2740 	cb_t *cb_p = pci_p->pci_cb_p;
2741 	int i, pci_side = 0;
2742 	int swap = 0;
2743 	uint64_t pa = cb_p->cb_base_pa;
2744 	uint64_t flag, schizo_base, pci_csr_base;
2745 
2746 	if (pci_p == NULL)
2747 		return;
2748 
2749 	pci_csr_base = va_to_pa(pci_p->pci_address[0]);
2750 
2751 	/*
2752 	 * Using the csr_base address to determine which side
2753 	 * we are on.
2754 	 */
2755 	if (pci_csr_base & PCI_SIDE_ADDR_MASK)
2756 		pci_side = 1;
2757 	else
2758 		pci_side = 0;
2759 
2760 	schizo_base = pa - PBM_CTRL_OFFSET;
2761 
2762 	for (i = 0; ecc_format_tbl[i].ecc_region != NULL; i++) {
2763 		if (region == ecc_format_tbl[i].ecc_region) {
2764 			flag = ecc_format_tbl[i].ecc_space;
2765 			if (ecc_format_tbl[i].ecc_side != pci_side)
2766 				swap = 1;
2767 			if (region == SCH_REG_SAFARI_REGS)
2768 				*afar |= schizo_base;
2769 			break;
2770 		}
2771 	}
2772 
2773 	if (swap) {
2774 		pci_p = cmn_p->pci_p[PCI_OTHER_SIDE(pci_p->pci_side)];
2775 
2776 		if (pci_p == NULL)
2777 			return;
2778 	}
2779 	pci_format_addr(pci_p->pci_dip, afar, flag);
2780 }
2781 
2782 /*
2783  * Function used to post control block specific ereports.
2784  */
2785 static void
2786 cb_ereport_post(dev_info_t *dip, uint64_t ena, cb_errstate_t *cb_err)
2787 {
2788 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2789 	char buf[FM_MAX_CLASS], dev_path[MAXPATHLEN], *ptr;
2790 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2791 	nvlist_t *ereport, *detector;
2792 	errorq_elem_t *eqep;
2793 	nv_alloc_t *nva;
2794 
2795 	DEBUG1(DBG_ATTACH, dip, "cb_ereport_post: elog 0x%lx",
2796 	    cb_err->cb_elog);
2797 
2798 	/*
2799 	 * We do not use ddi_fm_ereport_post because we need to set a
2800 	 * special detector here. Since we do not have a device path for
2801 	 * the bridge chip, we use what we think it should be to aid in
2802 	 * diagnosis.
2803 	 */
2804 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", DDI_IO_CLASS,
2805 	    cb_err->cb_bridge_type, cb_err->cb_err_class);
2806 
2807 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2808 
2809 	eqep = errorq_reserve(fmhdl->fh_errorq);
2810 	if (eqep == NULL)
2811 		return;
2812 
2813 	ereport = errorq_elem_nvl(fmhdl->fh_errorq, eqep);
2814 	nva = errorq_elem_nva(fmhdl->fh_errorq, eqep);
2815 	detector = fm_nvlist_create(nva);
2816 
2817 	ASSERT(ereport);
2818 	ASSERT(nva);
2819 	ASSERT(detector);
2820 
2821 	ddi_pathname(dip, dev_path);
2822 	ptr = strrchr(dev_path, (int)',');
2823 
2824 	if (ptr)
2825 		*ptr = '\0';
2826 
2827 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL, dev_path, NULL);
2828 
2829 	DEBUG1(DBG_ERR_INTR, dip, "cb_ereport_post: ereport_set: %s", buf);
2830 
2831 	if (CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO ||
2832 	    CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
2833 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2834 		    SAFARI_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2835 		    SAFARI_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2836 		    SAFARI_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2837 		    SAFARI_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2838 		    SAFARI_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2839 		    NULL);
2840 	} else if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2841 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2842 		    JBUS_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2843 		    JBUS_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2844 		    JBUS_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2845 		    JBUS_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2846 		    JBUS_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2847 		    NULL);
2848 	}
2849 	errorq_commit(fmhdl->fh_errorq, eqep, ERRORQ_ASYNC);
2850 }
2851 
2852 /*
2853  * Function used to post IOMMU specific ereports.
2854  */
2855 static void
2856 iommu_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2857 {
2858 	char buf[FM_MAX_CLASS];
2859 
2860 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2861 		    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2862 
2863 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2864 
2865 	DEBUG1(DBG_ERR_INTR, dip, "iommu_ereport_post: ereport_set: %s", buf);
2866 
2867 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2868 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2869 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2870 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2871 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2872 	    PCI_PBM_IOMMU_CTRL, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_stat,
2873 	    PCI_PBM_IOMMU_TFAR, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_tfar,
2874 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2875 	    PCI_PBM_VALOG, DATA_TYPE_UINT64, pbm_err->pbm_va_log,
2876 	    NULL);
2877 }
2878 
2879 /*
2880  * Function used to post PCI-X generic ereports.
2881  * This function needs to be fixed once the Fault Boundary Analysis
2882  * for PCI-X is conducted. The payload should be made more generic.
2883  */
2884 static void
2885 pcix_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2886 {
2887 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2888 
2889 	DEBUG1(DBG_ERR_INTR, dip, "pcix_ereport_post: ereport_post: %s",
2890 	    pbm_err->pbm_err_class);
2892 
2893 	ddi_fm_ereport_post(dip, pbm_err->pbm_err_class, ena, DDI_NOSLEEP,
2894 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2895 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2896 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2897 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2898 	    PCI_PBM_AFSR, DATA_TYPE_UINT64, pbm_err->pbm_afsr,
2899 	    PCI_PBM_AFAR, DATA_TYPE_UINT64, pbm_err->pbm_afar,
2900 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2901 	    PCIX_STAT, DATA_TYPE_UINT64, pbm_err->pbm_pcix_stat,
2902 	    PCIX_PFAR, DATA_TYPE_UINT32, pbm_err->pbm_pcix_pfar,
2903 	    NULL);
2904 }
2905 
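/*
 * Free the IOMMU context bitmap.
 */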
2906 static void
2907 iommu_ctx_free(iommu_t *iommu_p)
2908 {
2909 	kmem_free(iommu_p->iommu_ctx_bitmap, IOMMU_CTX_BITMAP_SIZE);
2910 }
2911 
2912 /*
2913  * iommu_tlb_scrub():
2914  *	Examine TLB entries through the TLB diagnostic registers and look for
2915  *	errors.
2915  *	scrub = 1 : cleanup all error bits in tlb, called in FAULT_RESET case
2916  *	scrub = 0 : log all error conditions to console, FAULT_LOG case
2917  *	In both cases, it returns number of errors found in tlb entries.
2918  */
2919 static int
2920 iommu_tlb_scrub(iommu_t *iommu_p, int scrub)
2921 {
2922 	int i, nerr = 0;
2923 	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
2924 	char *neg = "not ";
2925 
2926 	uint64_t base = (uint64_t)iommu_p->iommu_ctrl_reg -
2927 		COMMON_IOMMU_CTRL_REG_OFFSET;
2928 
2929 	volatile uint64_t *tlb_tag = (volatile uint64_t *)
2930 		(base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
2931 	volatile uint64_t *tlb_data = (volatile uint64_t *)
2932 		(base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
2933 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
2934 		uint64_t tag = tlb_tag[i];
2935 		uint64_t data = tlb_data[i];
2936 		uint32_t errstat;
2937 		iopfn_t pfn;
2938 
2939 		if (!(tag & TLBTAG_ERR_BIT))
2940 			continue;
2941 
2942 		pfn = (iopfn_t)(data & TLBDATA_MEMPA_BITS);
2943 		errstat = (uint32_t)
2944 			((tag & TLBTAG_ERRSTAT_BITS) >> TLBTAG_ERRSTAT_SHIFT);
2945 		if (errstat == TLBTAG_ERRSTAT_INVALID) {
2946 			if (scrub)
2947 				tlb_tag[i] = tlb_data[i] = 0ull;
2948 		} else
2949 			nerr++;
2950 
2951 		if (scrub)
2952 			continue;
2953 
2954 		cmn_err(CE_CONT, "%s%d: Error %x on IOMMU TLB entry %x:\n"
2955 		"\tContext=%x %sWritable %sStreamable\n"
2956 		"\tPCI Page Size=%sk Address in page %x\n",
2957 			ddi_driver_name(dip), ddi_get_instance(dip), errstat, i,
2958 			(tag & TLBTAG_CONTEXT_BITS) >> TLBTAG_CONTEXT_SHIFT,
2959 			(tag & TLBTAG_WRITABLE_BIT) ? "" : neg,
2960 			(tag & TLBTAG_STREAM_BIT) ? "" : neg,
2961 			(tag & TLBTAG_PGSIZE_BIT) ? "64" : "8",
2962 			(tag & TLBTAG_PCIVPN_BITS) << 13);
2963 		cmn_err(CE_CONT, "Memory: %sValid %sCacheable Page Frame=%x\n",
2964 			(data & TLBDATA_VALID_BIT) ? "" : neg,
2965 			(data & TLBDATA_CACHE_BIT) ? "" : neg, pfn);
2966 	}
2967 	return (nerr);
2968 }
2969 
2970 /*
2971  * pci_iommu_disp: calculates the displacement needed in tomatillo's
2972  *	iommu control register and modifies the control value template
2973  *	from the caller. It also clears any error status bits that are new
2974  *	in tomatillo.
2975  * return value: an 8-bit mask to enable corresponding 512 MB segments
2976  *	suitable for tomatillo's target address register.
2977  *	0x00: no programming is needed, use existing value from prom
2978  *	0x60: use segment 5 and 6 to form a 1GB dvma range
2979  */
2980 static uint64_t
2981 pci_iommu_disp(iommu_t *iommu_p, uint64_t *ctl_p)
2982 {
2983 	uint64_t ctl_old;
2984 	if (CHIP_TYPE(iommu_p->iommu_pci_p) != PCI_CHIP_TOMATILLO)
2985 		return (0);
2986 
2987 	ctl_old = *iommu_p->iommu_ctrl_reg;
2988 	/* iommu ctrl reg error bits are W1C */
2989 	if (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT) {
2990 		cmn_err(CE_WARN, "Tomatillo iommu err: %lx", ctl_old);
2991 		*ctl_p |= (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT)
2992 		    << TOMATIILO_IOMMU_ERR_REG_SHIFT;
2993 	}
2994 
2995 	if (iommu_p->iommu_tsb_size != TOMATILLO_IOMMU_TSB_MAX)
2996 		return (0);
2997 
2998 	/* Tomatillo 2.0 and later, and 1GB DVMA range */
2999 	*ctl_p |= 1 << TOMATILLO_IOMMU_SEG_DISP_SHIFT;
3000 	return (3 << (iommu_p->iommu_dvma_base >> (32 - 3)));
3001 }
3002 
3003 void
3004 pci_iommu_config(iommu_t *iommu_p, uint64_t iommu_ctl, uint64_t cfgpa)
3005 {
3006 	uintptr_t pbm_regbase = get_pbm_reg_base(iommu_p->iommu_pci_p);
3007 	volatile uint64_t *pbm_csr_p = (volatile uint64_t *)pbm_regbase;
3008 	volatile uint64_t *tgt_space_p = (volatile uint64_t *)(pbm_regbase |
3009 		(TOMATILLO_TGT_ADDR_SPACE_OFFSET - SCHIZO_PCI_CTRL_REG_OFFSET));
3010 	volatile uint64_t pbm_ctl = *pbm_csr_p;
3011 
3012 	volatile uint64_t *iommu_ctl_p = iommu_p->iommu_ctrl_reg;
3013 	volatile uint64_t tsb_bar_val = iommu_p->iommu_tsb_paddr;
3014 	volatile uint64_t *tsb_bar_p = iommu_p->iommu_tsb_base_addr_reg;
3015 	uint64_t mask = pci_iommu_disp(iommu_p, &iommu_ctl);
3016 
3017 	DEBUG2(DBG_ATTACH, iommu_p->iommu_pci_p->pci_dip,
3018 		"\npci_iommu_config: pbm_csr_p=%llx pbm_ctl=%llx",
3019 		pbm_csr_p, pbm_ctl);
3020 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3021 		"\n\tiommu_ctl_p=%llx iommu_ctl=%llx",
3022 		iommu_ctl_p, iommu_ctl);
3023 	DEBUG4(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3024 		"\n\tcfgpa=%llx tgt_space_p=%llx mask=%x tsb=%llx\n",
3025 		cfgpa, tgt_space_p, mask, tsb_bar_val);
3026 
3027 	if (!cfgpa)
3028 		goto reprog;
3029 
3030 	/* disable PBM arbiters - turn off bits 0-7 */
3031 	*pbm_csr_p = (pbm_ctl >> 8) << 8;
3032 
3033 	/*
3034 	 * For non-XMITS, flush any previous writes. This is only
3035 	 * necessary for host bridges that may have a USB keyboard
3036 	 * attached.  XMITS does not need this.
3037 	 */
3038 	if (!(CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_XMITS))
3039 		(void) ldphysio(cfgpa);
3040 
3041 reprog:
3042 	if (mask)
3043 		*tgt_space_p = mask;
3044 
3045 	*tsb_bar_p = tsb_bar_val;
3046 	*iommu_ctl_p = iommu_ctl;
3047 
3048 	*pbm_csr_p = pbm_ctl;	/* re-enable bus arbitration */
3049 	pbm_ctl = *pbm_csr_p;	/* flush all prev writes */
3050 }
3051 
3052 
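/*
 * Return the "portid" property of the given node, or -1 if it is not
 * present.
 */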
3053 int
3054 pci_get_portid(dev_info_t *dip)
3055 {
3056 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3057 	    "portid", -1));
3058 }
3059 
3060 /*
3061  * Schizo Safari Performance Events.
3062  */
3063 pci_kev_mask_t
3064 schizo_saf_events[] = {
3065 	{"saf_bus_cycles", 0x1},	{"saf_pause_asserted_cycles", 0x2},
3066 	{"saf_frn_coherent_cmds", 0x3},	{"saf_frn_coherent_hits", 0x4},
3067 	{"saf_my_coherent_cmds", 0x5},	{"saf_my_coherent_hits", 0x6},
3068 	{"saf_frn_io_cmds", 0x7}, 	{"saf_frn_io_hits", 0x8},
3069 	{"merge_buffer", 0x9}, 		{"interrupts", 0xa},
3070 	{"csr_pios", 0xc}, 		{"upa_pios", 0xd},
3071 	{"pcia_pios", 0xe}, 		{"pcib_pios", 0xf},
3072 	{"saf_pause_seen_cycles", 0x11}, 	{"dvma_reads", 0x12},
3073 	{"dvma_writes", 0x13},		{"saf_orq_full_cycles", 0x14},
3074 	{"saf_data_in_cycles", 0x15},	{"saf_data_out_cycles", 0x16},
3075 	{"clear_pic", 0x1f}
3076 };
3077 
3078 
3079 /*
3080  * Schizo PCI Performance Events.
3081  */
3082 pci_kev_mask_t
3083 schizo_pci_events[] = {
3084 	{"dvma_stream_rd", 0x0}, 	{"dvma_stream_wr", 0x1},
3085 	{"dvma_const_rd", 0x2},		{"dvma_const_wr", 0x3},
3086 	{"dvma_stream_buf_mis", 0x4},	{"dvma_cycles", 0x5},
3087 	{"dvma_wd_xfr", 0x6},		{"pio_cycles", 0x7},
3088 	{"dvma_tlb_misses", 0x10},	{"interrupts", 0x11},
3089 	{"saf_inter_nack", 0x12},	{"pio_reads", 0x13},
3090 	{"pio_writes", 0x14},		{"dvma_rd_buf_timeout", 0x15},
3091 	{"dvma_rd_rtry_stc", 0x16},	{"dvma_wr_rtry_stc", 0x17},
3092 	{"dvma_rd_rtry_nonstc", 0x18},	{"dvma_wr_rtry_nonstc", 0x19},
3093 	{"E*_slow_transitions", 0x1a},	{"E*_slow_cycles_per_64", 0x1b},
3094 	{"clear_pic", 0x1f}
3095 };
3096 
3097 
3098 /*
3099  * Create the picN kstats for the pci
3100  * and safari events.
3101  */
3102 void
3103 pci_kstat_init()
3104 {
3105 	pci_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3106 		KM_NOSLEEP);
3107 
3108 	if (pci_name_kstat == NULL) {
3109 		cmn_err(CE_WARN, "pcisch : no space for kstat\n");
3110 	} else {
3111 		pci_name_kstat->pic_no_evs =
3112 			sizeof (schizo_pci_events) / sizeof (pci_kev_mask_t);
3113 		pci_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3114 		pci_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3115 		pci_create_name_kstat("pcis",
3116 			pci_name_kstat, schizo_pci_events);
3117 	}
3118 
3119 	saf_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3120 		KM_NOSLEEP);
3121 	if (saf_name_kstat == NULL) {
3122 		cmn_err(CE_WARN, "pcisch : no space for kstat\n");
3123 	} else {
3124 		saf_name_kstat->pic_no_evs =
3125 			sizeof (schizo_saf_events) / sizeof (pci_kev_mask_t);
3126 		saf_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3127 		saf_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3128 		pci_create_name_kstat("saf", saf_name_kstat, schizo_saf_events);
3129 	}
3130 }
3131 
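/*
 * Tear down the picN name kstats created by pci_kstat_init().
 */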
3132 void
3133 pci_kstat_fini()
3134 {
3135 	if (pci_name_kstat != NULL) {
3136 		pci_delete_name_kstat(pci_name_kstat);
3137 		kmem_free(pci_name_kstat, sizeof (pci_ksinfo_t));
3138 		pci_name_kstat = NULL;
3139 	}
3140 
3141 	if (saf_name_kstat != NULL) {
3142 		pci_delete_name_kstat(saf_name_kstat);
3143 		kmem_free(saf_name_kstat, sizeof (pci_ksinfo_t));
3144 		saf_name_kstat = NULL;
3145 	}
3146 }
3147 
3148 /*
3149  * Create 'counters' kstat for pci events.
3150  */
3151 void
3152 pci_add_pci_kstat(pci_t *pci_p)
3153 {
3154 	pci_cntr_addr_t *cntr_addr_p = &pci_p->pci_ks_addr;
3155 	uintptr_t regbase = (uintptr_t)pci_p->pci_address[0];
3156 
3157 	cntr_addr_p->pcr_addr = (uint64_t *)
3158 		(regbase + SCHIZO_PERF_PCI_PCR_OFFSET);
3159 	cntr_addr_p->pic_addr = (uint64_t *)
3160 		(regbase + SCHIZO_PERF_PCI_PIC_OFFSET);
3161 
3162 	pci_p->pci_ksp = pci_create_cntr_kstat(pci_p, "pcis",
3163 		NUM_OF_PICS, pci_cntr_kstat_update, cntr_addr_p);
3164 
3165 	if (pci_p->pci_ksp == NULL) {
3166 		cmn_err(CE_WARN, "pcisch : cannot create counter kstat");
3167 	}
3168 }
3169 
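/*
 * Delete the per-leaf 'counters' kstat created by pci_add_pci_kstat().
 */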
3170 void
3171 pci_rem_pci_kstat(pci_t *pci_p)
3172 {
3173 	if (pci_p->pci_ksp != NULL)
3174 		kstat_delete(pci_p->pci_ksp);
3175 	pci_p->pci_ksp = NULL;
3176 }
3177 
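/*
 * Create the 'counters' kstat for the upstream (Safari) performance
 * counters; the counter addresses and kstat are shared through
 * pci_common_t.
 */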
3178 void
3179 pci_add_upstream_kstat(pci_t *pci_p)
3180 {
3181 	pci_common_t	*cmn_p = pci_p->pci_common_p;
3182 	pci_cntr_pa_t	*cntr_pa_p = &cmn_p->pci_cmn_uks_pa;
3183 	uint64_t regbase = va_to_pa(pci_p->pci_address[1]);
3184 
3185 	cntr_pa_p->pcr_pa =
3186 		regbase + SCHIZO_PERF_SAF_PCR_OFFSET;
3187 	cntr_pa_p->pic_pa =
3188 		regbase + SCHIZO_PERF_SAF_PIC_OFFSET;
3189 
3190 	cmn_p->pci_common_uksp = pci_create_cntr_kstat(pci_p, "saf",
3191 		NUM_OF_PICS, pci_cntr_kstat_pa_update, cntr_pa_p);
3192 }
3193 
3194 /*
3195  * Extract the driver's binding name to identify which chip
3196  * we're binding to.  Whenever a new bus bridge is created, the driver alias
3197  * entry should be added here to identify the device if needed.  If a device
3198  * isn't added, the identity defaults to PCI_CHIP_UNIDENTIFIED.
3199  */
3200 static uint32_t
3201 pci_identity_init(pci_t *pci_p)
3202 {
3203 	dev_info_t *dip = pci_p->pci_dip;
3204 	char *name = ddi_binding_name(dip);
3205 	uint32_t ver = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3206 		"version#", 0);
3207 
3208 	if (strcmp(name, "pci108e,a801") == 0)
3209 		return (CHIP_ID(PCI_CHIP_TOMATILLO, ver, 0x00));
3210 
3211 	if (strcmp(name, "pci108e,8001") == 0)
3212 		return (CHIP_ID(PCI_CHIP_SCHIZO, ver, 0x00));
3213 
3214 	if (strcmp(name, "pci108e,8002") == 0) {
3215 		uint32_t mod_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3216 			DDI_PROP_DONTPASS, "module-revision#", 0);
3217 		return (CHIP_ID(PCI_CHIP_XMITS, ver, mod_rev));
3218 	}
3219 
3220 	cmn_err(CE_WARN, "%s%d: Unknown PCI Host bridge %s %x\n",
3221 		ddi_driver_name(dip), ddi_get_instance(dip), name, ver);
3222 
3223 	return (PCI_CHIP_UNIDENTIFIED);
3224 }
3225 
3226 /*
3227  * Setup a physical pointer to one leaf config space area. This
3228  * is used in several places in order to do a dummy read which
3229  * guarantees the nexus (and not a bus master) has gained control
3230  * of the bus.
3231  */
3232 static void
3233 pci_setup_cfgpa(pci_t *pci_p)
3234 {
3235 	dev_info_t *dip = pci_p->pci_dip;
3236 	dev_info_t *cdip;
3237 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3238 	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
3239 	uint32_t *reg_p;
3240 	int reg_len;
3241 
3242 	for (cdip = ddi_get_child(dip); cdip != NULL;
3243 	    cdip = ddi_get_next_sibling(cdip)) {
3244 		if (ddi_getlongprop(DDI_DEV_T_NONE, cdip, DDI_PROP_DONTPASS,
3245 		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
3246 			continue;
3247 		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
3248 		kmem_free(reg_p, reg_len);
3249 		break;
3250 	}
3251 	pbm_p->pbm_anychild_cfgpa = cfgpa;
3252 }
3253 
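/*
 * Per-child post-initialization: refresh the config space probe address
 * and apply child-specific workarounds (bus parking removal for children
 * with the "pci-req-removal" property, and XMITS PCI-X command register
 * setup).
 */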
3254 void
3255 pci_post_init_child(pci_t *pci_p, dev_info_t *child)
3256 {
3257 	volatile uint64_t *ctrl_reg_p;
3258 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3259 
3260 	pci_setup_cfgpa(pci_p);
3261 
3262 	/*
3263 	 * This is a hack for the skyhawk/casinni combination to address
3264 	 * hardware problems between the request and grant signals which
3265 	 * cause a bus hang.  One workaround, which is applied here,
3266 	 * is to disable bus parking if the child contains the property
3267 	 * pci-req-removal.  Note that if the bus is quiesced we must mask
3268 	 * off the parking bit in the saved control registers, since the
3269 	 * quiesce operation temporarily turns off PCI bus parking.
3270 	 */
3271 	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
3272 		"pci-req-removal") == 1) {
3273 
3274 		if (pbm_p->pbm_quiesce_count > 0) {
3275 			pbm_p->pbm_saved_ctrl_reg &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3276 		} else {
3277 			ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3278 			*ctrl_reg_p &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3279 		}
3280 	}
3281 
3282 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
3283 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) {
3284 			int value;
3285 
3286 			/*
3287 			 * Due to a XMITS bug, we need to set the outstanding
3288 			 * split transactions to 1 for all PCI-X functions
3289 			 * behind the leaf.
3290 			 */
3291 			value = (xmits_max_transactions << 4) |
3292 			    (xmits_max_read_bytes << 2);
3293 
3294 			DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
3295 			    "Workaround: value = %x\n", value);
3296 
3297 			pcix_set_cmd_reg(child, value);
3298 
3299 			(void) ndi_prop_update_int(DDI_DEV_T_NONE,
3300 			    child, "pcix-update-cmd-reg", value);
3301 		}
3302 	}
3303 }
3304 
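/*
 * Refresh the config space probe address after a child node is
 * uninitialized.
 */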
3305 void
3306 pci_post_uninit_child(pci_t *pci_p)
3307 {
3308 	pci_setup_cfgpa(pci_p);
3309 }
3310 
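/*
 * Add or remove an interrupt handler on the NB mondo for the given
 * interrupt number; used below for the Tomatillo ECC interrupts.
 */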
3311 static int
3312 pci_tom_nbintr_op(pci_t *pci_p, uint32_t inum, intrfunc f, caddr_t arg,
3313     int flag)
3314 {
3315 	uint32_t ino = pci_p->pci_inos[inum];
3316 	uint32_t mondo = IB_INO_TO_NBMONDO(pci_p->pci_ib_p, ino);
3317 	int ret = DDI_SUCCESS;
3318 
3319 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo); /* no op on tom */
3320 
3321 	switch (flag) {
3322 	case PCI_OBJ_INTR_ADD:
3323 		VERIFY(add_ivintr(mondo, pci_pil[inum], f, arg, NULL) == 0);
3324 		break;
3325 	case PCI_OBJ_INTR_REMOVE:
3326 		rem_ivintr(mondo, NULL);
3327 		break;
3328 	default:
3329 		ret = DDI_FAILURE;
3330 		break;
3331 	}
3332 
3333 	return (ret);
3334 }
3335 
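/*
 * Register the ECC interrupt handler for the given inum; on Tomatillo
 * also add it to the NB mondo via pci_tom_nbintr_op().
 */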
3336 int
3337 pci_ecc_add_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3338 {
3339 	uint32_t mondo;
3340 	int	r;
3341 
3342 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3343 	    pci_p->pci_inos[inum]);
3344 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3345 
3346 	VERIFY(add_ivintr(mondo, pci_pil[inum], ecc_intr,
3347 	    (caddr_t)eii_p, NULL) == 0);
3348 
3349 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)
3350 		return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD,
3351 		    DDI_SUCCESS));
3352 
3353 	r = pci_tom_nbintr_op(pci_p, inum, ecc_intr,
3354 	    (caddr_t)eii_p, PCI_OBJ_INTR_ADD);
3355 	return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD, r));
3356 }
3357 
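/*
 * Remove the ECC interrupt handler added by pci_ecc_add_intr().
 */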
3358 void
3359 pci_ecc_rem_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3360 {
3361 	uint32_t mondo;
3362 
3363 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3364 	    pci_p->pci_inos[inum]);
3365 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3366 
3367 	rem_ivintr(mondo, NULL);
3368 
3369 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
3370 		pci_tom_nbintr_op(pci_p, inum, ecc_intr,
3371 			(caddr_t)eii_p, PCI_OBJ_INTR_REMOVE);
3372 }
3373 
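/*
 * CDMA interrupt handler: mark the pending CDMA flush as done.
 */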
3374 static uint_t
3375 pci_pbm_cdma_intr(caddr_t a)
3376 {
3377 	pbm_t *pbm_p = (pbm_t *)a;
3378 	pbm_p->pbm_cdma_flag = PBM_CDMA_DONE;
3379 #ifdef PBM_CDMA_DEBUG
3380 	pbm_p->pbm_cdma_intr_cnt++;
3381 #endif /* PBM_CDMA_DEBUG */
3382 	return (DDI_INTR_CLAIMED);
3383 }
3384 
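/*
 * Register pci_pbm_cdma_intr() on the CBNINTR_CDMA mondo.
 */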
3385 int
3386 pci_pbm_add_intr(pci_t *pci_p)
3387 {
3388 	uint32_t mondo;
3389 
3390 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3391 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3392 
3393 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_CDMA],
3394 	    pci_pbm_cdma_intr, (caddr_t)pci_p->pci_pbm_p, NULL) == 0);
3395 
3396 	return (DDI_SUCCESS);
3397 }
3398 
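/*
 * Disable the CDMA interrupt and remove its handler.
 */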
3399 void
3400 pci_pbm_rem_intr(pci_t *pci_p)
3401 {
3402 	ib_t		*ib_p = pci_p->pci_ib_p;
3403 	uint32_t	mondo;
3404 
3405 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3406 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3407 
3408 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_CDMA], IB_INTR_NOWAIT);
3409 	rem_ivintr(mondo, NULL);
3410 }
3411 
3412 void
3413 pci_pbm_suspend(pci_t *pci_p)
3414 {
3415 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3416 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3417 
3418 	/* Save CDMA interrupt state */
3419 	pbm_p->pbm_cdma_imr_save = *ib_intr_map_reg_addr(pci_p->pci_ib_p, ino);
3420 }
3421 
3422 void
3423 pci_pbm_resume(pci_t *pci_p)
3424 {
3425 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3426 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3427 
3428 	/* Restore CDMA interrupt state */
3429 	*ib_intr_map_reg_addr(pci_p->pci_ib_p, ino) = pbm_p->pbm_cdma_imr_save;
3430 }
3431 
3432 /*
3433  * pci_bus_quiesce
3434  *
3435  * This function is called as the corresponding control ops routine
3436  * to a DDI_CTLOPS_QUIESCE command.  Its mission is to halt all DMA
3437  * activity on the bus by disabling arbitration/parking.
3438  */
3439 int
3440 pci_bus_quiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3441 {
3442 	volatile uint64_t *ctrl_reg_p;
3443 	volatile uint64_t ctrl_reg;
3444 	pbm_t *pbm_p;
3445 
3446 	pbm_p = pci_p->pci_pbm_p;
3447 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3448 
3449 	if (pbm_p->pbm_quiesce_count++ == 0) {
3450 
3451 		DEBUG0(DBG_PWR, dip, "quiescing bus\n");
3452 
3453 		ctrl_reg = *ctrl_reg_p;
3454 		pbm_p->pbm_saved_ctrl_reg = ctrl_reg;
3455 		ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
3456 		    SCHIZO_PCI_CTRL_ARB_PARK);
3457 		*ctrl_reg_p = ctrl_reg;
3458 #ifdef	DEBUG
3459 		ctrl_reg = *ctrl_reg_p;
3460 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3461 		    SCHIZO_PCI_CTRL_ARB_PARK)) != 0)
3462 			panic("ctrl_reg didn't quiesce: 0x%lx\n", ctrl_reg);
3463 #endif
3464 		if (pbm_p->pbm_anychild_cfgpa)
3465 			(void) ldphysio(pbm_p->pbm_anychild_cfgpa);
3466 	}
3467 
3468 	return (DDI_SUCCESS);
3469 }
3470 
3471 /*
3472  * pci_bus_unquiesce
3473  *
3474  * This function is called as the corresponding control ops routine
3475  * to a DDI_CTLOPS_UNQUIESCE command.  Its mission is to resume DMA
3476  * activity on the bus by restoring the saved arbitration/parking settings.
3477  */
3478 int
3479 pci_bus_unquiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3480 {
3481 	volatile uint64_t *ctrl_reg_p;
3482 	pbm_t *pbm_p;
3483 #ifdef	DEBUG
3484 	volatile uint64_t ctrl_reg;
3485 #endif
3486 
3487 	pbm_p = pci_p->pci_pbm_p;
3488 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3489 
3490 	ASSERT(pbm_p->pbm_quiesce_count > 0);
3491 	if (--pbm_p->pbm_quiesce_count == 0) {
3492 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
3493 #ifdef	DEBUG
3494 		ctrl_reg = *ctrl_reg_p;
3495 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3496 		    SCHIZO_PCI_CTRL_ARB_PARK)) == 0)
3497 			panic("ctrl_reg didn't unquiesce: 0x%lx\n", ctrl_reg);
3498 #endif
3499 	}
3500 
3501 	return (DDI_SUCCESS);
3502 }
3503 
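/*
 * tm_vmem_free
 *
 * Deferred DVMA free path used when tm_mtlb_gc is set.  The unbind
 * request is queued; once the total number of queued pages exceeds
 * iommu_mtlb_maxpgs, the IOMMU TLB tags are read and every queued
 * range that is no longer present in the TLB is returned to the DVMA
 * arena through pci_vmem_do_free().  Ranges still present in the TLB
 * remain queued for a later pass.
 */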
3504 static void
3505 tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p, dvma_addr_t dvma_pg,
3506     int npages)
3507 {
3508 	uint32_t dur_max, dur_base;
3509 	dvma_unbind_req_t *req_p, *req_max_p;
3510 	dvma_unbind_req_t *req_base_p = iommu_p->iommu_mtlb_req_p;
3511 	uint32_t tlb_vpn[IOMMU_TLB_ENTRIES];
3512 	caddr_t reg_base;
3513 	volatile uint64_t *tag_p;
3514 	int i, preserv_count = 0;
3515 
3516 	mutex_enter(&iommu_p->iommu_mtlb_lock);
3517 
3518 	iommu_p->iommu_mtlb_npgs += npages;
3519 	req_max_p = req_base_p + iommu_p->iommu_mtlb_nreq++;
3520 	req_max_p->dur_npg = npages;
3521 	req_max_p->dur_base = dvma_pg;
3522 	req_max_p->dur_flags = mp->dmai_flags & DMAI_FLAGS_VMEMCACHE;
3523 
3524 
3525 	if (iommu_p->iommu_mtlb_npgs <= iommu_p->iommu_mtlb_maxpgs)
3526 		goto done;
3527 
3528 	/* read TLB */
3529 	reg_base = iommu_p->iommu_pci_p->pci_address[0];
3530 	tag_p = (volatile uint64_t *)
3531 	    (reg_base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
3532 
3533 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
3534 		tlb_vpn[i] = tag_p[i] & SCHIZO_VPN_MASK;
3535 
3536 	/* for each request search the TLB for a matching address */
3537 	for (req_p = req_base_p; req_p <= req_max_p; req_p++) {
3538 		dur_base = req_p->dur_base;
3539 		dur_max = req_p->dur_base + req_p->dur_npg;
3540 
3541 		for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
3542 			uint_t vpn = tlb_vpn[i];
3543 			if (vpn >= dur_base && vpn < dur_max)
3544 				break;
3545 		}
3546 		if (i >= IOMMU_TLB_ENTRIES) {
3547 			pci_vmem_do_free(iommu_p,
3548 			    (void *)IOMMU_PTOB(req_p->dur_base),
3549 			    req_p->dur_npg, req_p->dur_flags);
3550 			iommu_p->iommu_mtlb_npgs -= req_p->dur_npg;
3551 			continue;
3552 		}
3553 		/* compact remaining requests into any freed slots */
3554 		if ((req_p - req_base_p) != preserv_count)
3555 			*(req_base_p + preserv_count) = *req_p;
3556 		preserv_count++;
3557 	}
3558 
3559 	iommu_p->iommu_mtlb_nreq = preserv_count;
3560 done:
3561 	mutex_exit(&iommu_p->iommu_mtlb_lock);
3562 }
3563 
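/*
 * pci_vmem_free
 *
 * Free DVMA space: defer the free through tm_vmem_free() when
 * tm_mtlb_gc is set, otherwise release it immediately via
 * pci_vmem_do_free().
 */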
3564 void
3565 pci_vmem_free(iommu_t *iommu_p, ddi_dma_impl_t *mp, void *dvma_addr,
3566     size_t npages)
3567 {
3568 	if (tm_mtlb_gc)
3569 		tm_vmem_free(mp, iommu_p,
3570 		    (dvma_addr_t)IOMMU_BTOP((dvma_addr_t)dvma_addr), npages);
3571 	else
3572 		pci_vmem_do_free(iommu_p, dvma_addr, npages,
3573 		    (mp->dmai_flags & DMAI_FLAGS_VMEMCACHE));
3574 }
3575