xref: /titanic_41/usr/src/uts/sun4u/io/pci/pcisch.c (revision 4f85d229295a756a4e6f1759b47df7b97412db7d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Schizo specifics implementation:
 *	interrupt mapping register
 *	PBM configuration
 *	ECC and PBM error handling
 *	Iommu mapping handling
 *	Streaming Cache flushing
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/async.h>
#include <sys/ivintr.h>
#include <sys/systm.h>
#include <sys/intr.h>
#include <sys/machsystm.h>	/* lddphys, intr_dist_add */
#include <sys/iommutsb.h>
#include <sys/promif.h>		/* prom_printf */
#include <sys/map.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/spl.h>
#include <sys/fm/util.h>
#include <sys/ddi_impldefs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/io/sun4upci.h>
#include <sys/fm/io/ddi.h>
#include <sys/fm/io/pci.h>
#include <sys/pci/pci_obj.h>
#include <sys/pci/pcisch.h>
#include <sys/pci/pcisch_asm.h>
#include <sys/x_call.h>		/* XCALL_PIL */

/*LINTLIBRARY*/

extern uint8_t ldstub(uint8_t *);

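/*
 * One bit per context: 2^12 bits / 8 bits per byte = 2^(12 - 3)
 * = 512 bytes of bitmap.
 */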
#define	IOMMU_CTX_BITMAP_SIZE	(1 << (12 - 3))
static void iommu_ctx_free(iommu_t *);
static int iommu_tlb_scrub(iommu_t *, int);
static uint32_t pci_identity_init(pci_t *);

static void pci_cb_clear_error(cb_t *, cb_errstate_t *);
static void pci_clear_error(pci_t *, pbm_errstate_t *);
static int pci_intr_setup(pci_t *pci_p);
static void iommu_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
static void cb_ereport_post(dev_info_t *, uint64_t, cb_errstate_t *);
static void pcix_ereport_post(dev_info_t *, uint64_t, pbm_errstate_t *);
static void pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar,
		ecc_region_t region);
static void pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p);
static void tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p,
		dvma_addr_t dvma_pg, int npages);

static int pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p);

static pci_ksinfo_t	*pci_name_kstat;
static pci_ksinfo_t	*saf_name_kstat;

extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);

/* called by pci_attach() DDI_ATTACH to initialize pci objects */
int
pci_obj_setup(pci_t *pci_p)
{
	pci_common_t *cmn_p;
	uint32_t chip_id = pci_identity_init(pci_p);
	uint32_t cmn_id = PCI_CMN_ID(ID_CHIP_TYPE(chip_id), pci_p->pci_id);
	int ret;

	/* Perform allocations first to avoid delicate unwinding. */
	if (pci_alloc_tsb(pci_p) != DDI_SUCCESS)
		return (DDI_FAILURE);

	mutex_enter(&pci_global_mutex);
	cmn_p = get_pci_common_soft_state(cmn_id);
	if (cmn_p == NULL) {
		if (alloc_pci_common_soft_state(cmn_id) != DDI_SUCCESS) {
			mutex_exit(&pci_global_mutex);
			pci_free_tsb(pci_p);
			return (DDI_FAILURE);
		}
		cmn_p = get_pci_common_soft_state(cmn_id);
		cmn_p->pci_common_id = cmn_id;
		cmn_p->pci_common_tsb_cookie = IOMMU_TSB_COOKIE_NONE;
	}

	ASSERT((pci_p->pci_side == 0) || (pci_p->pci_side == 1));
	if (cmn_p->pci_p[pci_p->pci_side]) {
		/* second side attach */
		pci_p->pci_side = PCI_OTHER_SIDE(pci_p->pci_side);
		ASSERT(cmn_p->pci_p[pci_p->pci_side] == NULL);
	}

	cmn_p->pci_p[pci_p->pci_side] = pci_p;
	pci_p->pci_common_p = cmn_p;

	if (cmn_p->pci_common_refcnt == 0)
		cmn_p->pci_chip_id = chip_id;

	ib_create(pci_p);

	/*
	 * The initialization of cb internal interrupts depends on ib
	 */
	if (cmn_p->pci_common_refcnt == 0) {
		cb_create(pci_p);
		cmn_p->pci_common_cb_p = pci_p->pci_cb_p;
	} else
		pci_p->pci_cb_p = cmn_p->pci_common_cb_p;

	iommu_create(pci_p);

	if (cmn_p->pci_common_refcnt == 0) {
		ecc_create(pci_p);
		cmn_p->pci_common_ecc_p = pci_p->pci_ecc_p;
	} else
		pci_p->pci_ecc_p = cmn_p->pci_common_ecc_p;

	pbm_create(pci_p);
	sc_create(pci_p);

	pci_fm_create(pci_p);

	if ((ret = pci_intr_setup(pci_p)) != DDI_SUCCESS)
		goto done;

	pci_kstat_create(pci_p);

	cmn_p->pci_common_attachcnt++;
	cmn_p->pci_common_refcnt++;
done:
	mutex_exit(&pci_global_mutex);
	if (ret != DDI_SUCCESS)
		cmn_err(CE_WARN, "pci_obj_setup failed %x", ret);
	return (ret);
}

/* called by pci_detach() DDI_DETACH to destroy pci objects */
void
pci_obj_destroy(pci_t *pci_p)
{
	pci_common_t *cmn_p;

	mutex_enter(&pci_global_mutex);

	cmn_p = pci_p->pci_common_p;
	cmn_p->pci_common_refcnt--;
	cmn_p->pci_common_attachcnt--;

	pci_kstat_destroy(pci_p);

	/* schizo non-shared objects */
	pci_fm_destroy(pci_p);

	sc_destroy(pci_p);
	pbm_destroy(pci_p);
	iommu_destroy(pci_p);
	ib_destroy(pci_p);

	if (cmn_p->pci_common_refcnt != 0) {
		pci_intr_teardown(pci_p);
		cmn_p->pci_p[pci_p->pci_side] = NULL;
		mutex_exit(&pci_global_mutex);
		return;
	}

	/* schizo shared objects - uses cmn_p, must be destroyed before cmn */
	ecc_destroy(pci_p);
	cb_destroy(pci_p);

	free_pci_common_soft_state(cmn_p->pci_common_id);
	pci_intr_teardown(pci_p);
	mutex_exit(&pci_global_mutex);
}

/* called by pci_attach() DDI_RESUME to (re)initialize pci objects */
void
pci_obj_resume(pci_t *pci_p)
{
	pci_common_t *cmn_p = pci_p->pci_common_p;

	mutex_enter(&pci_global_mutex);

	ib_configure(pci_p->pci_ib_p);
	iommu_configure(pci_p->pci_iommu_p);

	if (cmn_p->pci_common_attachcnt == 0)
		ecc_configure(pci_p);

	ib_resume(pci_p->pci_ib_p);

	pbm_configure(pci_p->pci_pbm_p);
	sc_configure(pci_p->pci_sc_p);

	if (cmn_p->pci_common_attachcnt == 0)
		cb_resume(pci_p->pci_cb_p);

	pbm_resume(pci_p->pci_pbm_p);

	cmn_p->pci_common_attachcnt++;
	mutex_exit(&pci_global_mutex);
}

/* called by pci_detach() DDI_SUSPEND to suspend pci objects */
void
pci_obj_suspend(pci_t *pci_p)
{
	mutex_enter(&pci_global_mutex);

	pbm_suspend(pci_p->pci_pbm_p);
	ib_suspend(pci_p->pci_ib_p);

	if (!--pci_p->pci_common_p->pci_common_attachcnt)
		cb_suspend(pci_p->pci_cb_p);

	mutex_exit(&pci_global_mutex);
}

/*
 * Add an additional 0x35 or 0x36 ino interrupt on platforms that don't
 * have them.  This routine assumes in multiple places that each
 * interrupt takes one cell and that the cell size is the same as the
 * integer size.
 */
static int
pci_intr_setup(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	uint32_t *intr_buf, *new_intr_buf;
	int intr_len, intr_cnt, ret;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&intr_buf, &intr_len) != DDI_SUCCESS)
		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

	intr_cnt = BYTES_TO_1275_CELLS(intr_len);
	if (intr_cnt < CBNINTR_CDMA)	/* CBNINTR_CDMA is 0 based */
		cmn_err(CE_PANIC, "%s%d: <%d interrupts", ddi_driver_name(dip),
		    ddi_get_instance(dip), CBNINTR_CDMA);

	if (intr_cnt == CBNINTR_CDMA)
		intr_cnt++;
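
	/*
	 * Reallocate the cell list with room for the CDMA ino that is
	 * filled in below.
	 */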
	new_intr_buf = kmem_alloc(CELLS_1275_TO_BYTES(intr_cnt), KM_SLEEP);
	bcopy(intr_buf, new_intr_buf, intr_len);
	kmem_free(intr_buf, intr_len);

	new_intr_buf[CBNINTR_CDMA] = PBM_CDMA_INO_BASE + pci_p->pci_side;
	pci_p->pci_inos = new_intr_buf;
	pci_p->pci_inos_len = CELLS_1275_TO_BYTES(intr_cnt);

	if (ndi_prop_update_int_array(DDI_DEV_T_NONE, dip, "interrupts",
	    (int *)new_intr_buf, intr_cnt))
		cmn_err(CE_PANIC, "%s%d: cannot update interrupts property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

	if (pci_p->pci_common_p->pci_common_refcnt == 0) {
		cb_p->cb_no_of_inos = intr_cnt;
		if (ret = cb_register_intr(pci_p))
			goto teardown;
		if (ret = ecc_register_intr(pci_p))
			goto teardown;

		intr_dist_add(cb_intr_dist, cb_p);
		cb_enable_intr(pci_p);
		ecc_enable_intr(pci_p);
	}

	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
		pbm_p->pbm_sync_ino = pci_p->pci_inos[CBNINTR_PBM];
	if (ret = pbm_register_intr(pbm_p)) {
		if (pci_p->pci_common_p->pci_common_refcnt == 0)
			intr_dist_rem(cb_intr_dist, cb_p);
		goto teardown;
	}
	intr_dist_add(pbm_intr_dist, pbm_p);
	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_PBM]);
	ib_intr_enable(pci_p, pci_p->pci_inos[CBNINTR_CDMA]);

	intr_dist_add_weighted(ib_intr_dist_all, pci_p->pci_ib_p);
	return (DDI_SUCCESS);
teardown:
	pci_intr_teardown(pci_p);
	return (ret);
}

uint64_t
pci_sc_configure(pci_t *pci_p)
{
	int instance;
	dev_info_t *dip = pci_p->pci_dip;

	instance = ddi_get_instance(dip);
	if ((pci_xmits_sc_max_prf & (1 << instance)) &&
	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS))
		return (XMITS_SC_MAX_PRF);
	else
		return (0);
}

static void
pci_schizo_cdma_sync(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	hrtime_t start_time;
	volatile uint64_t *clr_p = ib_clear_intr_reg_addr(pci_p->pci_ib_p,
	    pci_p->pci_inos[CBNINTR_CDMA]);
	uint32_t fail_cnt = pci_cdma_intr_count;

	mutex_enter(&pbm_p->pbm_sync_mutex);
#ifdef PBM_CDMA_DEBUG
	pbm_p->pbm_cdma_req_cnt++;
#endif /* PBM_CDMA_DEBUG */
	pbm_p->pbm_cdma_flag = PBM_CDMA_PEND;
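	/*
	 * Trigger the CDMA ino; its interrupt handler performs the
	 * sync and sets pbm_cdma_flag to PBM_CDMA_DONE.  The loop below
	 * polls for that, allowing up to pci_cdma_intr_count timeout
	 * intervals to elapse before panicking.
	 */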
	IB_INO_INTR_TRIG(clr_p);
wait:
	start_time = gethrtime();
	while (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE) {
		if (gethrtime() - start_time <= pci_cdma_intr_timeout)
			continue;
		if (--fail_cnt > 0)
			goto wait;
		if (pbm_p->pbm_cdma_flag == PBM_CDMA_DONE)
			break;
		cmn_err(CE_PANIC, "%s (%s): consistent dma sync timeout",
		    pbm_p->pbm_nameinst_str, pbm_p->pbm_nameaddr_str);
	}
#ifdef PBM_CDMA_DEBUG
	if (pbm_p->pbm_cdma_flag != PBM_CDMA_DONE)
		pbm_p->pbm_cdma_to_cnt++;
	else {
		start_time = gethrtime() - start_time;
		pbm_p->pbm_cdma_success_cnt++;
		pbm_p->pbm_cdma_latency_sum += start_time;
		if (start_time > pbm_p->pbm_cdma_latency_max)
			pbm_p->pbm_cdma_latency_max = start_time;
	}
#endif /* PBM_CDMA_DEBUG */
	mutex_exit(&pbm_p->pbm_sync_mutex);
}

#if !defined(lint)
#include <sys/cpuvar.h>
#endif

#define	SYNC_HW_BUSY(pa, mask)	(lddphysio(pa) & (mask))

/*
 * Consistent DMA Sync/Flush
 *
 * XMITS and Tomatillo use a multi-threaded sync/flush register.
 * Called from the interrupt wrapper: the associated ino is used to index
 *	the distinctive register bit.
 * Called from pci_dma_sync(): the bit belonging to the PBM is shared
 *	by all calls from pci_dma_sync().  XMITS requires serialization
 *	while Tomatillo does not.
 */
void
pci_pbm_dma_sync(pbm_t *pbm_p, ib_ino_t ino)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	hrtime_t start_time;
	uint64_t ino_mask, sync_reg_pa;
	volatile uint64_t flag_val;
	uint32_t locked, chip_type = CHIP_TYPE(pci_p);
	int	i;

	if (chip_type == PCI_CHIP_SCHIZO) {
		pci_schizo_cdma_sync(pbm_p);
		return;
	}

	sync_reg_pa = pbm_p->pbm_sync_reg_pa;

	locked = 0;
	if (((chip_type == PCI_CHIP_XMITS) && (ino == pbm_p->pbm_sync_ino)) ||
	    pci_sync_lock) {
		locked = 1;
		mutex_enter(&pbm_p->pbm_sync_mutex);
	}
	ino_mask = 1ull << ino;
	stdphysio(sync_reg_pa, ino_mask);
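
	/*
	 * Writing the ino's bit starts the sync; the hardware clears
	 * the bit once the flush completes.  Poll with SYNC_HW_BUSY():
	 * a few quick spins first, then a timed loop.
	 */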
	for (i = 0; i < 5; i++) {
		if ((flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) == 0)
			goto done;
	}

	start_time = gethrtime();
	for (; (flag_val = SYNC_HW_BUSY(sync_reg_pa, ino_mask)) != 0; i++) {
		if (gethrtime() - start_time > pci_sync_buf_timeout)
			break;
	}

	if (flag_val && SYNC_HW_BUSY(sync_reg_pa, ino_mask) && !panicstr)
		cmn_err(CE_PANIC, "%s: pbm dma sync %lx,%lx timeout!",
		    pbm_p->pbm_nameaddr_str, sync_reg_pa, flag_val);
done:
	/* optional: stdphysio(sync_reg_pa - 8, ino_mask); */
	if (locked)
		mutex_exit(&pbm_p->pbm_sync_mutex);

	if (tomatillo_store_store_wrka) {
#if !defined(lint)
		kpreempt_disable();
#endif
		tomatillo_store_store_order();
#if !defined(lint)
		kpreempt_enable();
#endif
	}
}

/*ARGSUSED*/
void
pci_fix_ranges(pci_ranges_t *rng_p, int rng_entries)
{
}

/*
 * map_pci_registers
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: pci_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
map_pci_registers(pci_t *pci_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t attr;
	int len;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	/*
	 * Register set 0 is the PCI CSR base.
	 */
	if (ddi_regs_map_setup(dip, 0, &pci_p->pci_address[0], 0, 0,
	    &attr, &pci_p->pci_ac[0]) != DDI_SUCCESS) {
		len = 0;
		goto fail;
	}
	/*
	 * Register set 1 is the Schizo CSR base.
	 */
	if (ddi_regs_map_setup(dip, 1, &pci_p->pci_address[1], 0, 0,
	    &attr, &pci_p->pci_ac[1]) != DDI_SUCCESS) {
		len = 1;
		goto fail;
	}

	/*
	 * The third register set contains the bridge's configuration
	 * header.  This header is at the very beginning of the bridge's
	 * configuration space.  This space has little-endian byte order.
	 */
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	if (ddi_regs_map_setup(dip, 2, &pci_p->pci_address[2], 0,
	    PCI_CONF_HDR_SIZE, &attr, &pci_p->pci_ac[2]) != DDI_SUCCESS) {
		len = 2;
		goto fail;
	}

	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &len) || (len / sizeof (pci_nexus_regspec_t) < 4))
		goto done;

	/*
	 * The optional fourth register bank points to the
	 * interrupt concentrator registers.
	 */
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	if (ddi_regs_map_setup(dip, 3, &pci_p->pci_address[3], 0,
	    0, &attr, &pci_p->pci_ac[3]) != DDI_SUCCESS) {
		len = 3;
		goto fail;
	}

done:
	DEBUG4(DBG_ATTACH, dip, "address (%p,%p,%p,%p)\n",
	    pci_p->pci_address[0], pci_p->pci_address[1],
	    pci_p->pci_address[2], pci_p->pci_address[3]);

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), len);
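	/* Unwind: release the register sets (0 .. len-1) mapped so far. */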
	for (; len--; ddi_regs_map_free(&pci_p->pci_ac[len]));
	return (DDI_FAILURE);
}

/*
 * unmap_pci_registers:
 *
 * This routine unmaps the registers mapped by map_pci_registers.
 *
 * used by: pci_detach()
 *
 * return value: none
 */
void
unmap_pci_registers(pci_t *pci_p)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (pci_p->pci_ac[i])
			ddi_regs_map_free(&pci_p->pci_ac[i]);
	}
}

uint64_t
ib_get_map_reg(ib_mondo_t mondo, uint32_t cpu_id)
{
	uint32_t agent_id;
	uint32_t node_id;

	/* ensure that cpu_id is only 10 bits. */
	ASSERT((cpu_id & ~0x3ff) == 0);

	agent_id = cpu_id & 0x1f;
	node_id = (cpu_id >> 5) & 0x1f;
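
	/*
	 * Illustrative example: cpu_id 0xa7 (binary 00101 00111) splits
	 * into node_id 0x5 and agent_id 0x7; they are re-encoded into
	 * the mapping register's NID and TID fields below.
	 */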
	return ((mondo) | (agent_id << COMMON_INTR_MAP_REG_TID_SHIFT) |
	    (node_id << SCHIZO_INTR_MAP_REG_NID_SHIFT) |
	    COMMON_INTR_MAP_REG_VALID);
}

uint32_t
ib_map_reg_get_cpu(volatile uint64_t reg)
{
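	/*
	 * Inverse of ib_get_map_reg(): the NID field is shifted down by
	 * (SCHIZO_INTR_MAP_REG_NID_SHIFT - 5) so it lands just above the
	 * 5-bit TID, reassembling the original node_id:agent_id cpu id.
	 */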
	return (((reg & COMMON_INTR_MAP_REG_TID) >>
	    COMMON_INTR_MAP_REG_TID_SHIFT) |
	    ((reg & SCHIZO_INTR_MAP_REG_NID) >>
	    (SCHIZO_INTR_MAP_REG_NID_SHIFT - 5)));
}

uint64_t *
ib_intr_map_reg_addr(ib_t *ib_p, ib_ino_t ino)
{
	/*
	 * Schizo maps all interrupts in one contiguous area.
	 * (PCI_CSRBase + 0x00.1000 + INO * 8).
	 */
	return ((uint64_t *)(ib_p->ib_intr_map_regs) + (ino & 0x3f));
}

uint64_t *
ib_clear_intr_reg_addr(ib_t *ib_p, ib_ino_t ino)	/* XXX - needs work */
{
	/*
	 * Schizo maps the clear interrupt registers in a contiguous area.
	 * (PCI_CSRBase + 0x00.1400 + INO * 8).
	 */
	return ((uint64_t *)(ib_p->ib_slot_clear_intr_regs) + (ino & 0x3f));
}

/*
 * Schizo does not have a mapping register per slot, so no sharing
 * is done.
 */
/*ARGSUSED*/
void
ib_ino_map_reg_share(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
{
}

/*
 * return true if there are interrupts using this mapping register
 */
/*ARGSUSED*/
int
ib_ino_map_reg_unshare(ib_t *ib_p, ib_ino_t ino, ib_ino_info_t *ino_p)
{
	return (ino_p->ino_ih_size);
}

void
pci_pbm_intr_dist(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	ib_t *ib_p = pci_p->pci_ib_p;
	ib_ino_t ino = IB_MONDO_TO_INO(pci_p->pci_inos[CBNINTR_CDMA]);

	mutex_enter(&pbm_p->pbm_sync_mutex);
	ib_intr_dist_nintr(ib_p, ino, ib_intr_map_reg_addr(ib_p, ino));
	mutex_exit(&pbm_p->pbm_sync_mutex);
}

uint32_t
pci_xlate_intr(dev_info_t *dip, dev_info_t *rdip, ib_t *ib_p, uint32_t intr)
{
	return (IB_INO_TO_MONDO(ib_p, intr));
}

/*
 * Return the cpuid to be used for an ino.  We have no special cpu
 * assignment constraints for this nexus, so just call intr_dist_cpuid().
 */
/* ARGSUSED */
uint32_t
pci_intr_dist_cpuid(ib_t *ib_p, ib_ino_info_t *ino_p)
{
	return (intr_dist_cpuid());
}

void
pci_cb_teardown(pci_t *pci_p)
{
	cb_t	*cb_p = pci_p->pci_cb_p;
	uint32_t mondo;

	if (!pci_buserr_interrupt)
		return;

	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);

	cb_disable_nintr(cb_p, CBNINTR_BUS_ERROR, IB_INTR_WAIT);
	rem_ivintr(mondo, NULL);
}

int
cb_register_intr(pci_t *pci_p)
{
	uint32_t mondo;

	if (!pci_buserr_interrupt)
		return (DDI_SUCCESS);

	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
	    pci_p->pci_inos[CBNINTR_BUS_ERROR]);
	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);

	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_BUS_ERROR],
	    cb_buserr_intr, (caddr_t)pci_p->pci_cb_p, NULL) == 0);

	return (PCI_ATTACH_RETCODE(PCI_CB_OBJ, PCI_OBJ_INTR_ADD, DDI_SUCCESS));
}

void
cb_enable_intr(pci_t *pci_p)
{
	if (pci_buserr_interrupt)
		cb_enable_nintr(pci_p, CBNINTR_BUS_ERROR);
}

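/*
 * The interrupt map and clear registers are 8 bytes apart, so the ino
 * is scaled by 8 (<< 3) to form each register's physical address.
 */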
uint64_t
cb_ino_to_map_pa(cb_t *cb_p, ib_ino_t ino)
{
	return (cb_p->cb_map_pa + (ino << 3));
}

uint64_t
cb_ino_to_clr_pa(cb_t *cb_p, ib_ino_t ino)
{
	return (cb_p->cb_clr_pa + (ino << 3));
}

/*
 * Useful on psycho only.
 */
int
cb_remove_xintr(pci_t *pci_p, dev_info_t *dip, dev_info_t *rdip, ib_ino_t ino,
    ib_mondo_t mondo)
{
	return (DDI_FAILURE);
}

void
pbm_configure(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	dev_info_t *dip = pbm_p->pbm_pci_p->pci_dip;
	int instance = ddi_get_instance(dip);
	uint64_t l;
	uint64_t mask = 1ll << instance;
	ushort_t s = 0;

	l = *pbm_p->pbm_ctrl_reg;	/* save control register state */
	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);

	/*
	 * See if any SERR# signals are asserted.  We'll clear them later.
	 */
	if (l & COMMON_PCI_CTRL_SERR)
		cmn_err(CE_WARN, "%s%d: SERR asserted on pci bus\n",
		    ddi_driver_name(dip), instance);

	/*
	 * Determine if the PCI bus is running at 33 or 66 MHz.
	 */
	if (l & COMMON_PCI_CTRL_SPEED)
		pbm_p->pbm_speed = PBM_SPEED_66MHZ;
	else
		pbm_p->pbm_speed = PBM_SPEED_33MHZ;
	DEBUG1(DBG_ATTACH, dip, "pbm_configure: %d mhz\n",
	    pbm_p->pbm_speed == PBM_SPEED_66MHZ ? 66 : 33);

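	/*
	 * Program the control register's PTO (timeout) field: use the
	 * tunable pci_dto_value when pci_set_dto_value enables it for
	 * this instance, otherwise force the maximum on Tomatillo 2.1
	 * and later.
	 */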
	if (pci_set_dto_value & mask) {
		l &= ~(3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
		l |= pci_dto_value << SCHIZO_PCI_CTRL_PTO_SHIFT;
	} else if (PCI_CHIP_ID(pci_p) >= TOMATILLO_VER_21) {
		l |= (3ull << SCHIZO_PCI_CTRL_PTO_SHIFT);
	}

	/*
	 * Enable error interrupts.
	 */
	if (pci_error_intr_enable & mask)
		l |= SCHIZO_PCI_CTRL_ERR_INT_EN;
	else
		l &= ~SCHIZO_PCI_CTRL_ERR_INT_EN;

	/*
	 * Enable pci streaming byte errors and error interrupts.
	 */
	if (pci_sbh_error_intr_enable & mask)
		l |= SCHIZO_PCI_CTRL_SBH_INT_EN;
	else
		l &= ~SCHIZO_PCI_CTRL_SBH_INT_EN;

	/*
	 * Enable pci discard timeout error interrupt.
	 */
	if (pci_mmu_error_intr_enable & mask)
		l |= SCHIZO_PCI_CTRL_MMU_INT_EN;
	else
		l &= ~SCHIZO_PCI_CTRL_MMU_INT_EN;

	/*
	 * Enable PCI-X error interrupts.
	 */
	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
		if (xmits_error_intr_enable & mask)
			l |= XMITS_PCI_CTRL_X_ERRINT_EN;
		else
			l &= ~XMITS_PCI_CTRL_X_ERRINT_EN;
		/*
		 * Panic if older XMITS hardware is found.
		 */
		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE)
			if (PCI_CHIP_ID(pci_p) <= XMITS_VER_10)
				cmn_err(CE_PANIC, "%s (%s): PCIX mode "
				    "unsupported on XMITS version %d\n",
				    pbm_p->pbm_nameinst_str,
				    pbm_p->pbm_nameaddr_str, CHIP_VER(pci_p));

		if (xmits_perr_recov_int_enable) {
			if (PCI_CHIP_ID(pci_p) >= XMITS_VER_30) {
				uint64_t pcix_err;
				/*
				 * Enable interrupt on PERR
				 */
				pcix_err = *pbm_p->pbm_pcix_err_stat_reg;
				pcix_err |= XMITS_PCIX_STAT_PERR_RECOV_INT_EN;
				pcix_err &= ~XMITS_PCIX_STAT_SERR_ON_PERR;
				*pbm_p->pbm_pcix_err_stat_reg = pcix_err;
			}
		}

		/*
		 * Enable parity error detection on internal memories
		 */
		*pbm_p->pbm_pci_ped_ctrl = 0x3fff;
	}

	/*
	 * Enable/disable bus parking.
	 */
	if ((pci_bus_parking_enable & mask) &&
	    !ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "no-bus-parking"))
		l |= SCHIZO_PCI_CTRL_ARB_PARK;
	else
		l &= ~SCHIZO_PCI_CTRL_ARB_PARK;

	/*
	 * Enable arbitration.
	 */
	l |= PCI_CHIP_ID(pci_p) == XMITS_VER_10 ? XMITS10_PCI_CTRL_ARB_EN_MASK :
	    SCHIZO_PCI_CTRL_ARB_EN_MASK;

	/*
	 * Make sure SERR is clear
	 */
	l |= COMMON_PCI_CTRL_SERR;

	/*
	 * Enable the DTO interrupt, if desired.
	 */
	if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_20 || (pci_dto_intr_enable &
	    mask))
		l |= TOMATILLO_PCI_CTRL_DTO_INT_EN;
	else
		l &= ~TOMATILLO_PCI_CTRL_DTO_INT_EN;

	l |= TOMATILLO_PCI_CTRL_PEN_RD_MLTPL |
	    TOMATILLO_PCI_CTRL_PEN_RD_ONE |
	    TOMATILLO_PCI_CTRL_PEN_RD_LINE;

	/*
	 * Now finally write the control register with the appropriate value.
	 */
	DEBUG1(DBG_ATTACH, dip, "pbm_configure: ctrl reg=%llx\n", l);
	*pbm_p->pbm_ctrl_reg = l;

	/*
	 * Enable IO Prefetch on Tomatillo
	 */
	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
		volatile uint64_t *ioc_csr_p = pbm_p->pbm_ctrl_reg +
		    ((TOMATILLO_IOC_CSR_OFF -
		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);
		*ioc_csr_p = TOMATILLO_WRT_PEN |
		    (1 << TOMATILLO_POFFSET_SHIFT) |
		    TOMATILLO_C_PEN_RD_MLTPL |
		    TOMATILLO_C_PEN_RD_ONE |
		    TOMATILLO_C_PEN_RD_LINE;
	}

	/*
	 * Allow DMA write parity errors to generate an interrupt.
	 * This is implemented on Schizo 2.5 and greater and XMITS 3.0
	 * and greater.  Setting this on XMITS versions earlier than 3.0
	 * has no effect.
	 */
	if (((CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) &&
	    PCI_CHIP_ID(pci_p) >= SCHIZO_VER_25) ||
	    (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS)) {
		volatile uint64_t *pbm_icd = pbm_p->pbm_ctrl_reg +
		    ((SCHIZO_PERF_PCI_ICD_OFFSET -
		    SCHIZO_PCI_CTRL_REG_OFFSET) >> 3);

		*pbm_icd |= SCHIZO_PERF_PCI_ICD_DMAW_PARITY_INT_ENABLE;
	}

	/*
	 * Clear any PBM errors.
	 */
	l = (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_PE_SHIFT) |
	    (SCHIZO_PCI_AFSR_E_MASK << SCHIZO_PCI_AFSR_SE_SHIFT);
	*pbm_p->pbm_async_flt_status_reg = l;

	/*
	 * Allow the diag register to be set based upon variables that
	 * can be configured via /etc/system.
	 */
	l = *pbm_p->pbm_diag_reg;
	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);

	/*
	 * Enable/disable retry limit.
	 */
	if (pci_retry_disable & mask)
		l |= COMMON_PCI_DIAG_DIS_RETRY;
	else
		l &= ~COMMON_PCI_DIAG_DIS_RETRY;

	/*
	 * Enable/disable DMA write/interrupt synchronization.
	 */
	if (pci_intsync_disable & mask)
		l |= COMMON_PCI_DIAG_DIS_INTSYNC;
	else
		l &= ~COMMON_PCI_DIAG_DIS_INTSYNC;

	/*
	 * Enable/disable retry arbitration priority.
	 */
	if (pci_enable_retry_arb & mask)
		l &= ~SCHIZO_PCI_DIAG_DIS_RTRY_ARB;
	else
		l |= SCHIZO_PCI_DIAG_DIS_RTRY_ARB;

	DEBUG1(DBG_ATTACH, dip, "pbm_configure: PCI diag reg=%llx\n", l);
	*pbm_p->pbm_diag_reg = l;

	/*
	 * Enable SERR# and parity reporting via the command register.
	 */
	s = pci_perr_enable & mask ? PCI_COMM_PARITY_DETECT : 0;
	s |= pci_serr_enable & mask ? PCI_COMM_SERR_ENABLE : 0;

	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf command reg=%x\n", s);
	pbm_p->pbm_config_header->ch_command_reg = s;

	/*
	 * Clear error bits in the configuration status register.
	 */
	s = PCI_STAT_PERROR | PCI_STAT_S_PERROR |
	    PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |
	    PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR;
	DEBUG1(DBG_ATTACH, dip, "pbm_configure: conf status reg=%x\n", s);
	pbm_p->pbm_config_header->ch_status_reg = s;

	/*
	 * The current versions of the obp are supposed to set the latency
	 * timer register but do not.  Bug 1234181 is open against this
	 * problem.  Until this bug is fixed we check to see if the obp
	 * has attempted to set the latency timer register by checking
	 * for the existence of a "latency-timer" property.
	 */
	if (pci_set_latency_timer_register) {
		DEBUG1(DBG_ATTACH, dip,
		    "pbm_configure: set schizo latency timer to %x\n",
		    pci_latency_timer);
		pbm_p->pbm_config_header->ch_latency_timer_reg =
		    pci_latency_timer;
	}

	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
	    (int)pbm_p->pbm_config_header->ch_latency_timer_reg);
}

uint_t
pbm_disable_pci_errors(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	ib_t *ib_p = pci_p->pci_ib_p;

	/*
	 * Disable error and streaming byte hole interrupts via the
	 * PBM control register.
	 */
	*pbm_p->pbm_ctrl_reg &=
	    ~(SCHIZO_PCI_CTRL_ERR_INT_EN | SCHIZO_PCI_CTRL_SBH_INT_EN |
	    SCHIZO_PCI_CTRL_MMU_INT_EN);

	/*
	 * Disable error interrupts via the interrupt mapping register.
	 */
	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_PBM], IB_INTR_NOWAIT);
	return (BF_NONE);
}

/*
 * Layout of the dvma context bucket bitmap entry:
 *
 *	63 - 56		55 - 0
 *	8-bit lock	56 bits, each representing one context
 *	DCB_LOCK_BITS	DCB_BMAP_BITS
 */
#define	DCB_LOCK_BITS	8
#define	DCB_BMAP_BITS	(64 - DCB_LOCK_BITS)

dvma_context_t
pci_iommu_get_dvma_context(iommu_t *iommu_p, dvma_addr_t dvma_pg_index)
{
	dvma_context_t ctx;
	int i = (dvma_pg_index >> 6) & 0x1f;	/* 5 bit index within bucket */
	uint64_t ctx_mask, test = 1ull << i;
	uint32_t bucket_no = dvma_pg_index & 0x3f;
	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;

	uint32_t spl = ddi_enter_critical();	/* block interrupts */
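	/*
	 * ldstub() atomically sets the addressed byte to 0xff and returns
	 * its previous value.  On big-endian SPARC, (uint8_t *)bucket_ptr
	 * is the bucket's most significant byte, i.e. the DCB_LOCK_BITS,
	 * so a nonzero return means the bucket was already locked.
	 */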
	if (ldstub((uint8_t *)bucket_ptr)) {	/* try lock */
		ddi_exit_critical(spl);		/* unblock interrupt */
		pci_iommu_ctx_lock_failure++;
		return (0);
	}

	/* clear lock bits */
	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
	ASSERT(*bucket_ptr >> DCB_BMAP_BITS == 0xff);
	ASSERT(ctx_mask >> DCB_BMAP_BITS == 0);

	if (ctx_mask & test)			/* quick check i bit */
		for (i = 0, test = 1ull; test & ctx_mask; test <<= 1, i++);
	if (i < DCB_BMAP_BITS)
		ctx_mask |= test;
	*bucket_ptr = ctx_mask;			/* unlock */
	ddi_exit_critical(spl);			/* unblock interrupts */

	ctx = i < DCB_BMAP_BITS ? (bucket_no << 6) | i : 0;
	DEBUG3(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
	    "get_dvma_context: ctx_mask=0x%x.%x ctx=0x%x\n",
	    (uint32_t)(ctx_mask >> 32), (uint32_t)ctx_mask, ctx);
	return (ctx);
}

void
pci_iommu_free_dvma_context(iommu_t *iommu_p, dvma_context_t ctx)
{
	uint64_t ctx_mask;
	uint32_t spl, bucket_no = ctx >> 6;
	int bit_no = ctx & 0x3f;
	uint64_t *bucket_ptr = iommu_p->iommu_ctx_bitmap + bucket_no;

	DEBUG1(DBG_DMA_MAP, iommu_p->iommu_pci_p->pci_dip,
	    "free_dvma_context: ctx=0x%x\n", ctx);

	spl = ddi_enter_critical();			/* block interrupts */
	while (ldstub((uint8_t *)bucket_ptr));		/* spin lock */
	ctx_mask = (*bucket_ptr << DCB_LOCK_BITS) >> DCB_LOCK_BITS;
							/* clear lock bits */
	ASSERT(ctx_mask & (1ull << bit_no));
	*bucket_ptr = ctx_mask ^ (1ull << bit_no);	/* clear & unlock */
	ddi_exit_critical(spl);				/* unblock interrupt */
}

int
pci_sc_ctx_inv(dev_info_t *dip, sc_t *sc_p, ddi_dma_impl_t *mp)
{
	dvma_context_t ctx = MP2CTX(mp);
	volatile uint64_t *reg_addr = sc_p->sc_ctx_match_reg + ctx;
	uint64_t matchreg;

	if (!*reg_addr) {
		DEBUG1(DBG_SC, dip, "ctx=%x no match\n", ctx);
		return (DDI_SUCCESS);
	}

	*sc_p->sc_ctx_invl_reg = ctx;	/* 1st flush write */
	matchreg = *reg_addr;		/* re-fetch after 1st flush */
	if (!matchreg)
		return (DDI_SUCCESS);

	matchreg = (matchreg << SC_ENT_SHIFT) >> SC_ENT_SHIFT;	/* low 16-bit */
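	/*
	 * Each set bit is a streaming cache entry still holding this
	 * context; issue one invalidate write per remaining entry.
	 */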
	do {
		if (matchreg & 1)
			*sc_p->sc_ctx_invl_reg = ctx;
		matchreg >>= 1;
	} while (matchreg);

	if (pci_ctx_no_compat || !*reg_addr)	/* compat: active ctx flush */
		return (DDI_SUCCESS);

	pci_ctx_unsuccess_count++;
	if (pci_ctx_flush_warn)
		cmn_err(pci_ctx_flush_warn, "%s%d: ctx flush unsuccessful\n",
		    NAMEINST(dip));
	return (DDI_FAILURE);
}

void
pci_cb_setup(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	cb_t *cb_p = pci_p->pci_cb_p;
	uint64_t pa;
	uint32_t chip_id = PCI_CHIP_ID(pci_p);

	DEBUG1(DBG_ATTACH, dip, "cb_create: chip id %d\n", chip_id);

	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
		if ((!tm_mtlb_gc_manual) &&
		    (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_24))
			tm_mtlb_gc = 1;

		if (PCI_CHIP_ID(pci_p) <= TOMATILLO_VER_23) {
			extern int ignore_invalid_vecintr;
			ignore_invalid_vecintr = 1;
			tomatillo_store_store_wrka = 1;
			tomatillo_disallow_bypass = 1;
			if (pci_spurintr_msgs == PCI_SPURINTR_MSG_DEFAULT)
				pci_spurintr_msgs = 0;
		}
	}

	if (chip_id == TOMATILLO_VER_20 || chip_id == TOMATILLO_VER_21)
		cmn_err(CE_WARN, "Unsupported Tomatillo rev (%x)", chip_id);

	if (chip_id < SCHIZO_VER_23)
		pci_ctx_no_active_flush = 1;

	cb_p->cb_node_id = PCI_ID_TO_NODEID(pci_p->pci_id);
	cb_p->cb_ign = PCI_ID_TO_IGN(pci_p->pci_id);

	/*
	 * The schizo control/status reg bank is on the 2nd "reg" property
	 * entry; the interrupt mapping/clear/state regs are on the 1st.
	 *
	 * ALL internal interrupts except pbm interrupts are shared by both
	 * sides; the 1st-side-attached is used as *the* owner.
	 */
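	/*
	 * Convert the mapped VAs to physical addresses: hat_getpfnum()
	 * yields the page frame number, which is shifted up by the MMU
	 * page size to form the base PA.
	 */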
	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[1]);
	cb_p->cb_base_pa = pa << MMU_PAGESHIFT;

	pa = pci_p->pci_address[3] ?
	    (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[3]) : 0;
	cb_p->cb_icbase_pa = (pa == PFN_INVALID) ? 0 : pa << MMU_PAGESHIFT;

	pa = (uint64_t)hat_getpfnum(kas.a_hat, pci_p->pci_address[0])
	    << MMU_PAGESHIFT;
	cb_p->cb_map_pa = pa + SCHIZO_IB_INTR_MAP_REG_OFFSET;
	cb_p->cb_clr_pa = pa + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
	cb_p->cb_obsta_pa = pa + COMMON_IB_OBIO_INTR_STATE_DIAG_REG;
}

void
pci_ecc_setup(ecc_t *ecc_p)
{
	ecc_p->ecc_ue.ecc_errpndg_mask = SCHIZO_ECC_UE_AFSR_ERRPNDG;
	ecc_p->ecc_ue.ecc_offset_mask = SCHIZO_ECC_UE_AFSR_QW_OFFSET;
	ecc_p->ecc_ue.ecc_offset_shift = SCHIZO_ECC_UE_AFSR_QW_OFFSET_SHIFT;
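	/* 2^4 = 16-byte (quad-word) error reporting granularity */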
	ecc_p->ecc_ue.ecc_size_log2 = 4;

	ecc_p->ecc_ce.ecc_errpndg_mask = SCHIZO_ECC_CE_AFSR_ERRPNDG;
	ecc_p->ecc_ce.ecc_offset_mask = SCHIZO_ECC_CE_AFSR_QW_OFFSET;
	ecc_p->ecc_ce.ecc_offset_shift = SCHIZO_ECC_CE_AFSR_QW_OFFSET_SHIFT;
	ecc_p->ecc_ce.ecc_size_log2 = 4;
}

ushort_t
pci_ecc_get_synd(uint64_t afsr)
{
	return ((ushort_t)((afsr & SCHIZO_ECC_CE_AFSR_SYND) >>
	    SCHIZO_ECC_CE_AFSR_SYND_SHIFT));
}

/*
 * overwrite dvma end address (only on virtual-dma systems)
 * initialize tsb size
 * reset context bits
 * return: IOMMU CSR bank base address (VA)
 */
uintptr_t
pci_iommu_setup(iommu_t *iommu_p)
{
	pci_dvma_range_prop_t *dvma_prop;
	int dvma_prop_len;

	uintptr_t a;
	pci_t *pci_p = iommu_p->iommu_pci_p;
	dev_info_t *dip = pci_p->pci_dip;
	uint_t tsb_size = iommu_tsb_cookie_to_size(pci_p->pci_tsb_cookie);
	uint_t tsb_size_prop;

	/*
	 * Initializations for Tomatillo's micro TLB bug (errata #82).
	 */
	if (tm_mtlb_gc) {
		iommu_p->iommu_mtlb_nreq = 0;
		iommu_p->iommu_mtlb_npgs = 0;
		iommu_p->iommu_mtlb_maxpgs = tm_mtlb_maxpgs;
		iommu_p->iommu_mtlb_req_p = (dvma_unbind_req_t *)
		    kmem_zalloc(sizeof (dvma_unbind_req_t) *
		    (tm_mtlb_maxpgs + 1), KM_SLEEP);
		mutex_init(&iommu_p->iommu_mtlb_lock, NULL, MUTEX_DRIVER, NULL);
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
	    DDI_PROP_SUCCESS)
		goto tsb_done;

	if (dvma_prop_len != sizeof (pci_dvma_range_prop_t)) {
		cmn_err(CE_WARN, "%s%d: invalid virtual-dma property",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		goto tsb_end;
	}
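	/*
	 * Size the TSB to cover the virtual-dma range: one 8-byte TTE
	 * per IOMMU page, capped by the preallocated TSB size.
	 */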
	iommu_p->iommu_dvma_end = dvma_prop->dvma_base +
	    (dvma_prop->dvma_len - 1);
	tsb_size_prop = IOMMU_BTOP(dvma_prop->dvma_len) * sizeof (uint64_t);
	tsb_size = MIN(tsb_size_prop, tsb_size);
tsb_end:
	kmem_free(dvma_prop, dvma_prop_len);
tsb_done:
	iommu_p->iommu_tsb_size = iommu_tsb_size_encode(tsb_size);
	iommu_p->iommu_ctx_bitmap =
	    kmem_zalloc(IOMMU_CTX_BITMAP_SIZE, KM_SLEEP);
	*iommu_p->iommu_ctx_bitmap = 1ull;	/* reserve context 0 */

	/*
	 * Determine the virtual address of the register block
	 * containing the iommu control registers and determine
	 * the virtual address of schizo specific iommu registers.
	 */
	a = (uintptr_t)pci_p->pci_address[0];
	iommu_p->iommu_flush_ctx_reg =
	    (uint64_t *)(a + SCHIZO_IOMMU_FLUSH_CTX_REG_OFFSET);
	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
		iommu_p->iommu_tfar_reg =
		    (uint64_t *)(a + TOMATILLO_IOMMU_ERR_TFAR_OFFSET);
	return (a);	/* PCICSRBase */
}

void
pci_iommu_teardown(iommu_t *iommu_p)
{
	if (pci_use_contexts)
		iommu_ctx_free(iommu_p);
	if (iommu_p->iommu_mtlb_req_p) {
		kmem_free(iommu_p->iommu_mtlb_req_p,
		    sizeof (dvma_unbind_req_t) * (tm_mtlb_maxpgs + 1));
		mutex_destroy(&iommu_p->iommu_mtlb_lock);
		iommu_p->iommu_mtlb_req_p = NULL;
		iommu_p->iommu_mtlb_nreq = 0;
		iommu_p->iommu_mtlb_npgs = iommu_p->iommu_mtlb_maxpgs = 0;
	}
}

uintptr_t
get_pbm_reg_base(pci_t *pci_p)
{
	return ((uintptr_t)
	    (pci_p->pci_address[0] + SCHIZO_PCI_CTRL_REG_OFFSET));
}

/* ARGSUSED */
static boolean_t
pci_pbm_panic_callb(void *arg, int code)
{
	pbm_t *pbm_p = (pbm_t *)arg;
	volatile uint64_t *ctrl_reg_p;

	if (pbm_p->pbm_quiesce_count > 0) {
		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
	}

	return (B_TRUE);
}

static boolean_t
pci_pbm_debug_callb(void *arg, int code)
{
	pbm_t *pbm_p = (pbm_t *)arg;
	volatile uint64_t *ctrl_reg_p;
	uint64_t ctrl_reg;

	if (pbm_p->pbm_quiesce_count > 0) {
		ctrl_reg_p = pbm_p->pbm_ctrl_reg;
		if (code == 0) {
			*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
		} else {
			ctrl_reg = pbm_p->pbm_saved_ctrl_reg;
			ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
			    SCHIZO_PCI_CTRL_ARB_PARK);
			*ctrl_reg_p = ctrl_reg;
		}
	}

	return (B_TRUE);
}

void
pci_pbm_setup(pbm_t *pbm_p)
{
	pci_t *pci_p = pbm_p->pbm_pci_p;
	caddr_t a = pci_p->pci_address[0]; /* PBM block base VA */
	uint64_t pa = va_to_pa(a);
	extern int segkmem_reloc;

	mutex_init(&pbm_p->pbm_sync_mutex, NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XCALL_PIL));

	pbm_p->pbm_config_header = (config_header_t *)pci_p->pci_address[2];
	pbm_p->pbm_ctrl_reg = (uint64_t *)(a + SCHIZO_PCI_CTRL_REG_OFFSET);
	pbm_p->pbm_diag_reg = (uint64_t *)(a + SCHIZO_PCI_DIAG_REG_OFFSET);
	pbm_p->pbm_async_flt_status_reg =
	    (uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
	pbm_p->pbm_async_flt_addr_reg =
	    (uint64_t *)(a + SCHIZO_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
	pbm_p->pbm_estar_reg = (uint64_t *)(a + SCHIZO_PCI_ESTAR_REG_OFFSET);
	pbm_p->pbm_pcix_err_stat_reg = (uint64_t *)(a +
	    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
	pbm_p->pbm_pci_ped_ctrl = (uint64_t *)(a +
	    XMITS_PARITY_DETECT_REG_OFFSET);

	/*
	 * Create a property to indicate that this node supports DVMA
	 * page relocation.
	 */
	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO && segkmem_reloc != 0) {
		pci_dvma_remap_enabled = 1;
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    pci_p->pci_dip, "dvma-remap-supported");
	}

	/*
	 * Register a panic callback so we can unquiesce this bus
	 * if it has been placed in the quiesced state.
	 */
	pbm_p->pbm_panic_cb_id = callb_add(pci_pbm_panic_callb,
	    (void *)pbm_p, CB_CL_PANIC, "pci_panic");
	pbm_p->pbm_debug_cb_id = callb_add(pci_pbm_debug_callb,
	    (void *)pbm_p, CB_CL_ENTER_DEBUGGER, "pci_debug_enter");

	if (CHIP_TYPE(pci_p) != PCI_CHIP_SCHIZO)
		goto non_schizo;

	if (PCI_CHIP_ID(pci_p) >= SCHIZO_VER_23) {
		pbm_p->pbm_sync_reg_pa = pa + SCHIZO_PBM_DMA_SYNC_REG_OFFSET;

		/*
		 * This is a software workaround for a schizo hardware
		 * bug.  Create a boolean property whose existence means
		 * that consistent dma sync should not be done while in
		 * prom.  The usb polled code (OHCI, EHCI) will check for
		 * this property and will not do dma sync if it exists.
		 */
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    pci_p->pci_dip, "no-prom-cdma-sync");
	}
	return;
non_schizo:
	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
		pci_dvma_sync_before_unmap = 1;
		pa = pci_p->pci_cb_p->cb_icbase_pa;
	}
	pbm_p->pbm_sync_reg_pa = pa + PBM_DMA_SYNC_PEND_REG_OFFSET;
}

void
pci_pbm_teardown(pbm_t *pbm_p)
{
	(void) callb_delete(pbm_p->pbm_panic_cb_id);
	(void) callb_delete(pbm_p->pbm_debug_cb_id);
}

uintptr_t
pci_ib_setup(ib_t *ib_p)
{
	/*
	 * Determine the virtual addresses of bridge specific registers.
	 */
	pci_t *pci_p = ib_p->ib_pci_p;
	uintptr_t a = (uintptr_t)pci_p->pci_address[0];

	ib_p->ib_ign = PCI_ID_TO_IGN(pci_p->pci_id);
	ib_p->ib_max_ino = SCHIZO_MAX_INO;
	ib_p->ib_slot_intr_map_regs = a + SCHIZO_IB_SLOT_INTR_MAP_REG_OFFSET;
	ib_p->ib_intr_map_regs = a + SCHIZO_IB_INTR_MAP_REG_OFFSET;
	ib_p->ib_slot_clear_intr_regs =
	    a + SCHIZO_IB_CLEAR_INTR_REG_OFFSET;
	return (a);
}

void
pci_sc_setup(sc_t *sc_p)
{
	pci_t *pci_p = sc_p->sc_pci_p;
	uintptr_t a;

	/*
	 * Determine the virtual addresses of the stream cache
	 * control/status and flush registers.
	 */
	a = (uintptr_t)pci_p->pci_address[0];	/* PCICSRBase */
	sc_p->sc_ctrl_reg = (uint64_t *)(a + SCHIZO_SC_CTRL_REG_OFFSET);
	sc_p->sc_invl_reg = (uint64_t *)(a + SCHIZO_SC_INVL_REG_OFFSET);
	sc_p->sc_sync_reg = (uint64_t *)(a + SCHIZO_SC_SYNC_REG_OFFSET);
	sc_p->sc_ctx_invl_reg = (uint64_t *)(a + SCHIZO_SC_CTX_INVL_REG_OFFSET);
	sc_p->sc_ctx_match_reg =
	    (uint64_t *)(a + SCHIZO_SC_CTX_MATCH_REG_OFFSET);

	/*
	 * Determine the virtual addresses of the streaming cache
	 * diagnostic access registers.
	 */
	sc_p->sc_data_diag_acc = (uint64_t *)(a + SCHIZO_SC_DATA_DIAG_OFFSET);
	sc_p->sc_tag_diag_acc = (uint64_t *)(a + SCHIZO_SC_TAG_DIAG_OFFSET);
	sc_p->sc_ltag_diag_acc = (uint64_t *)(a + SCHIZO_SC_LTAG_DIAG_OFFSET);
}

/*ARGSUSED*/
int
pci_get_numproxy(dev_info_t *dip)
{
	/*
	 * Schizo does not support interrupt proxies.
	 */
	return (0);
}

/*
 * pcisch error handling 101:
 *
 * The various functions below are responsible for error handling.  Given
 * a particular error, they must gather the appropriate state, report all
 * errors with correct payload, and attempt recovery wherever possible.
 *
 * Recovery in the context of this driver is being able to notify a leaf
 * device of the failed transaction.  This leaf device may either be the
 * master or target for this transaction and may have already received an
 * error notification via a PCI interrupt.  Notification is done via DMA
 * and access handles.  If we capture an address for the transaction then
 * we can map it to a handle (if the leaf device is fma-compliant) and
 * fault the handle as well as call the device driver registered callback.
 *
 * The hardware can either interrupt or trap upon detection of an error;
 * in some rare cases it also causes a fatal reset.
 *
 * cb_buserr_intr() is responsible for handling control block
 * errors (errors which stem from the host bus side of the bridge).  Since
 * we support multiple chips and host bus standards, cb_buserr_intr will
 * call a bus specific error handler to report and handle the detected
 * error.  Since this error can either affect or originate from either of
 * the two PCI busses which are connected to the bridge, we need to call
 * pci_pbm_err_handler() for each bus as well to report their errors.  We
 * also need to gather possible errors which have been detected by their
 * compliant children (via ndi_fm_handler_dispatch()).
 *
 * pbm_error_intr() and ecc_intr() are responsible for PCI Block Module
 * errors (generic PCI + bridge specific) and ECC errors, respectively.
 * They are common between pcisch and pcipsy and therefore exist in
 * pci_pbm.c and pci_ecc.c.  To support error handling certain chip
 * specific handlers must exist and they are defined below.
 *
 * cpu_deferred_error() and cpu_async_error() handle the traps that may
 * have originated from IO space.  They call into the registered IO
 * callbacks to report and handle errors that may have caused the trap.
 *
 * pci_pbm_err_handler() is called by pbm_error_intr() or pci_err_callback()
 * (the generic fma callback for pcipsy/pcisch, pci_fm.c).  pci_err_callback()
 * is called when the CPU has trapped because of a possible IO error
 * (TO/BERR/UE).  It will call pci_pbm_err_handler() to report and handle
 * all PCI/PBM/IOMMU related errors which are detected by the chip.
 *
 * pci_pbm_err_handler() calls a generic interface pbm_afsr_report()
 * (pci_pbm.c) to report the pbm specific errors and attempt to map the
 * failed address (if captured) to a device instance.  pbm_afsr_report()
 * calls a chip specific interface to interpret the afsr bits,
 * pci_pbm_classify() (pcisch.c/pcipsy.c).  pci_pbm_err_handler() also
 * calls iommu_err_handler() to handle IOMMU related errors.
 *
 * iommu_err_handler() can recover from most errors, as long as the
 * requesting device is notified and the iommu can be flushed.  If an
 * IOMMU error occurs due to a UE then it will be passed on to
 * ecc_err_handler() for subsequent handling.
 *
 * ecc_err_handler() (pci_ecc.c) also calls a chip specific interface to
 * interpret the afsr, pci_ecc_classify().  ecc_err_handler() also calls
 * pci_pbm_err_handler() to report any pbm errors detected.
 *
 * To make sure that the trap code and the interrupt code are not going
 * to step on each other's toes we have a per chip pci_fm_mutex.  This also
 * makes it necessary for us to be cautious while we are at a high PIL, so
 * that we do not cause a subsequent trap that causes us to hang.
 *
 * The attempt to commonize code was meant to keep in line with the current
 * pci driver implementation and it was not meant to confuse.  If you are
 * confused then don't worry, I was too.
 */
static void
pci_cb_errstate_get(cb_t *cb_p, cb_errstate_t *cb_err_p)
{
	uint64_t pa = cb_p->cb_base_pa;
	int	i;

	bzero(cb_err_p, sizeof (cb_errstate_t));

	ASSERT(MUTEX_HELD(&cb_p->cb_pci_cmn_p->pci_fm_mutex));

	cb_err_p->cb_bridge_type = PCI_BRIDGE_TYPE(cb_p->cb_pci_cmn_p);

	cb_err_p->cb_csr = lddphysio(pa + SCHIZO_CB_CSR_OFFSET);
	cb_err_p->cb_err = lddphysio(pa + SCHIZO_CB_ERRCTRL_OFFSET);
	cb_err_p->cb_intr = lddphysio(pa + SCHIZO_CB_INTCTRL_OFFSET);
	cb_err_p->cb_elog = lddphysio(pa + SCHIZO_CB_ERRLOG_OFFSET);
	cb_err_p->cb_ecc = lddphysio(pa + SCHIZO_CB_ECCCTRL_OFFSET);
	cb_err_p->cb_ue_afsr = lddphysio(pa + SCHIZO_CB_UEAFSR_OFFSET);
	cb_err_p->cb_ue_afar = lddphysio(pa + SCHIZO_CB_UEAFAR_OFFSET);
	cb_err_p->cb_ce_afsr = lddphysio(pa + SCHIZO_CB_CEAFSR_OFFSET);
	cb_err_p->cb_ce_afar = lddphysio(pa + SCHIZO_CB_CEAFAR_OFFSET);

	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS) {
		cb_err_p->cb_first_elog = lddphysio(pa +
		    XMITS_CB_FIRST_ERROR_LOG);
		cb_err_p->cb_first_eaddr = lddphysio(pa +
		    XMITS_CB_FIRST_ERROR_ADDR);
		cb_err_p->cb_leaf_status = lddphysio(pa +
		    XMITS_CB_FIRST_ERROR_ADDR);
	}

	/* Gather PBM state information for both sides of this chip */
	for (i = 0; i < 2; i++) {
		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
			continue;
		pci_pbm_errstate_get(cb_p->cb_pci_cmn_p->pci_p[i],
		    &cb_err_p->cb_pbm[i]);
	}
}

static void
pci_cb_clear_error(cb_t *cb_p, cb_errstate_t *cb_err_p)
{
	uint64_t pa = cb_p->cb_base_pa;

	stdphysio(pa + SCHIZO_CB_ERRLOG_OFFSET, cb_err_p->cb_elog);
}

static cb_fm_err_t safari_err_tbl[] = {
	SAFARI_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
	SAFARI_SSM_DIS,		SCHIZO_CB_ELOG_SSM_DIS,		CB_FATAL,
	SAFARI_BAD_CMD_PCIA,	SCHIZO_CB_ELOG_BAD_CMD_PCIA,	CB_FATAL,
	SAFARI_BAD_CMD_PCIB,	SCHIZO_CB_ELOG_BAD_CMD_PCIB,	CB_FATAL,
	SAFARI_PAR_ERR_INT_PCIB, XMITS_CB_ELOG_PAR_ERR_INT_PCIB, CB_FATAL,
	SAFARI_PAR_ERR_INT_PCIA, XMITS_CB_ELOG_PAR_ERR_INT_PCIA, CB_FATAL,
	SAFARI_PAR_ERR_INT_SAF,	XMITS_CB_ELOG_PAR_ERR_INT_SAF,	CB_FATAL,
	SAFARI_PLL_ERR_PCIB,	XMITS_CB_ELOG_PLL_ERR_PCIB,	CB_FATAL,
	SAFARI_PLL_ERR_PCIA,	XMITS_CB_ELOG_PLL_ERR_PCIA,	CB_FATAL,
	SAFARI_PLL_ERR_SAF,	XMITS_CB_ELOG_PLL_ERR_SAF,	CB_FATAL,
	SAFARI_SAF_CIQ_TO,	SCHIZO_CB_ELOG_SAF_CIQ_TO,	CB_FATAL,
	SAFARI_SAF_LPQ_TO,	SCHIZO_CB_ELOG_SAF_LPQ_TO,	CB_FATAL,
	SAFARI_SAF_SFPQ_TO,	SCHIZO_CB_ELOG_SAF_SFPQ_TO,	CB_FATAL,
	SAFARI_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
	SAFARI_UNMAP_ERR,	SCHIZO_CB_ELOG_UNMAP_ERR,	CB_FATAL,
	SAFARI_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_FATAL,
	SAFARI_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_FATAL,
	SAFARI_DSTAT_ERR,	SCHIZO_CB_ELOG_DSTAT_ERR,	CB_FATAL,
	SAFARI_SAF_UFPQ_TO,	SCHIZO_CB_ELOG_SAF_UFPQ_TO,	CB_FATAL,
	SAFARI_CPU0_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU0_PAR_SINGLE,	CB_FATAL,
	SAFARI_CPU0_PAR_BIDI,	SCHIZO_CB_ELOG_CPU0_PAR_BIDI,	CB_FATAL,
	SAFARI_CPU1_PAR_SINGLE,	SCHIZO_CB_ELOG_CPU1_PAR_SINGLE,	CB_FATAL,
	SAFARI_CPU1_PAR_BIDI,	SCHIZO_CB_ELOG_CPU1_PAR_BIDI,	CB_FATAL,
	NULL,			NULL,				NULL,
};

/*
 * Function used to handle and log Safari bus errors.
 */
static int
safari_err_handler(dev_info_t *dip, uint64_t fme_ena,
    cb_errstate_t *cb_err_p)
{
	int	i;
	int	fatal = 0;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	pci_common_t *cmn_p = pci_p->pci_common_p;

	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));

	for (i = 0; safari_err_tbl[i].cb_err_class != NULL; i++) {
		if (cb_err_p->cb_elog & safari_err_tbl[i].cb_reg_bit) {
			cb_err_p->cb_err_class = safari_err_tbl[i].cb_err_class;
			cb_ereport_post(dip, fme_ena, cb_err_p);
			fatal += safari_err_tbl[i].cb_fatal;
		}
	}

	if (fatal)
		return (DDI_FM_FATAL);
	return (DDI_FM_OK);
}

/*
 * Check the pbm va log register for a captured errant address, and fail
 * the handle if it is in the per device cache.
 * Called from jbus_err_handler.
 */
static int
jbus_check_va_log(cb_t *cb_p, uint64_t fme_ena,
    cb_errstate_t *cb_err_p)
{
	int i;
	int ret = DDI_FM_FATAL;
	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;

	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));

	/*
	 * Check the VA log register for an address associated with the
	 * error; if no address is registered then return failure.
	 */
	for (i = 0; i < 2; i++) {

		if (cb_p->cb_pci_cmn_p->pci_p[i] == NULL)
			continue;
		/*
		 * Look up and fault the handle associated with the
		 * logged DMA address.
		 */
		if (cb_err_p->cb_pbm[i].pbm_va_log) {
			ret = pci_handle_lookup(cb_p->cb_pci_cmn_p->pci_p[i]->
			    pci_dip, DMA_HANDLE, fme_ena,
			    (void *)&cb_err_p->cb_pbm[i].pbm_va_log);
			if (ret == DDI_FM_NONFATAL)
				break;
		}
	}
	return (ret);
}

static cb_fm_err_t jbus_err_tbl[] = {
	JBUS_APERR,		SCHIZO_CB_ELOG_ADDR_PAR_ERR,	CB_FATAL,
	JBUS_PWR_DATA_PERR,	TOMATILLO_CB_ELOG_WR_DATA_PAR_ERR, CB_FATAL,
	JBUS_DRD_DATA_PERR,	TOMATILLO_CB_ELOG_RD_DATA_PAR_ERR, CB_NONFATAL,
	JBUS_CTL_PERR,		TOMATILLO_CB_ELOG_CTL_PAR_ERR,	CB_FATAL,
	JBUS_ILL_BYTE_EN,	TOMATILLO_CB_ELOG_ILL_BYTE_EN,	CB_FATAL,
	JBUS_ILL_COH_IN,	TOMATILLO_CB_ELOG_ILL_COH_IN,	CB_FATAL,
	JBUS_SNOOP_ERR_RD,	TOMATILLO_CB_ELOG_SNOOP_ERR_RD,	CB_FATAL,
	JBUS_SNOOP_ERR_RDS,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDS, CB_FATAL,
	JBUS_SNOOP_ERR_RDSA,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDSA, CB_FATAL,
	JBUS_SNOOP_ERR_OWN,	TOMATILLO_CB_ELOG_SNOOP_ERR_OWN, CB_FATAL,
	JBUS_SNOOP_ERR_RDO,	TOMATILLO_CB_ELOG_SNOOP_ERR_RDO, CB_FATAL,
	JBUS_SNOOP_ERR_PCI,	TOMATILLO_CB_ELOG_SNOOP_ERR_PCI, CB_FATAL,
	JBUS_SNOOP_ERR_GR,	TOMATILLO_CB_ELOG_SNOOP_ERR_GR,	CB_FATAL,
	JBUS_SNOOP_ERR,		TOMATILLO_CB_ELOG_SNOOP_ERR,	CB_FATAL,
	JBUS_BAD_CMD,		SCHIZO_CB_ELOG_BAD_CMD,		CB_FATAL,
	JBUS_UNMAP_ERR,		SCHIZO_CB_ELOG_UNMAP_ERR,	CB_NONFATAL,
	JBUS_TO_EXP_ERR,	TOMATILLO_CB_ELOG_TO_EXP_ERR,	CB_NONFATAL,
	JBUS_TO_ERR,		SCHIZO_CB_ELOG_TO_ERR,		CB_NONFATAL,
	JBUS_BUS_ERR,		SCHIZO_CB_ELOG_BUS_ERR,		CB_NONFATAL,
	NULL,			NULL,				NULL,
};

/*
 * Function used to handle and log Jbus errors.
 */
static int
jbus_err_handler(dev_info_t *dip, uint64_t fme_ena,
    cb_errstate_t *cb_err_p)
{
	int	fatal = 0;
	int	nonfatal = 0;
	int	i;
	pci_t	*pci_p = get_pci_soft_state(ddi_get_instance(dip));
	cb_t	*cb_p = pci_p->pci_cb_p;

	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));

	for (i = 0; jbus_err_tbl[i].cb_err_class != NULL; i++) {
		if (!(cb_err_p->cb_elog & jbus_err_tbl[i].cb_reg_bit))
			continue;
		cb_err_p->cb_err_class = jbus_err_tbl[i].cb_err_class;
		if (jbus_err_tbl[i].cb_fatal) {
			fatal += jbus_err_tbl[i].cb_fatal;
			continue;
		}
		if (jbus_check_va_log(cb_p, fme_ena, cb_err_p)
		    != DDI_FM_NONFATAL)
			fatal++;
		else
			nonfatal++;
		cb_ereport_post(dip, fme_ena, cb_err_p);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    DDI_FM_OK));
}

/*
 * Control Block error interrupt handler.
 */
uint_t
cb_buserr_intr(caddr_t a)
{
	cb_t *cb_p = (cb_t *)a;
	pci_common_t *cmn_p = cb_p->cb_pci_cmn_p;
	pci_t *pci_p = cmn_p->pci_p[0];
	cb_errstate_t cb_err;
	ddi_fm_error_t derr;
	int ret = DDI_FM_FATAL;
	int i;

	if (pci_p == NULL)
		pci_p = cmn_p->pci_p[1];

	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);

	mutex_enter(&cmn_p->pci_fm_mutex);

	pci_cb_errstate_get(cb_p, &cb_err);

	if (CB_CHIP_TYPE(cb_p) == PCI_CHIP_TOMATILLO)
		ret = jbus_err_handler(pci_p->pci_dip, derr.fme_ena, &cb_err);
	else if ((CB_CHIP_TYPE(cb_p) == PCI_CHIP_SCHIZO) ||
	    (CB_CHIP_TYPE(cb_p) == PCI_CHIP_XMITS))
		ret = safari_err_handler(pci_p->pci_dip, derr.fme_ena,
		    &cb_err);

	/*
	 * Check for related errors in the PBM and IOMMU.  The IOMMU could
	 * cause a timeout on the jbus due to an IOMMU miss, so we need to
	 * check and log the IOMMU error registers.
	 */
	for (i = 0; i < 2; i++) {
		if (cmn_p->pci_p[i] == NULL)
			continue;
		if (pci_pbm_err_handler(cmn_p->pci_p[i]->pci_dip, &derr,
		    (void *)cmn_p->pci_p[i], PCI_CB_CALL) == DDI_FM_FATAL)
			ret = DDI_FM_FATAL;
	}

	/* Cleanup and reset error bits */
	pci_cb_clear_error(cb_p, &cb_err);
	mutex_exit(&cmn_p->pci_fm_mutex);

	if (ret == DDI_FM_FATAL)
		fm_panic("Fatal System Bus Error has occurred\n");

	return (DDI_INTR_CLAIMED);
}
1739 
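/*
 * ECC error classification table. Each entry supplies: the ereport
 * class, the AFSR error bit, the interrupt type (CBNINTR_UE/CE), a
 * primary/secondary flag, the PIO region bit and region id (primary
 * PIO UEs only), and which handle cache (access or DMA) to search
 * when faulting an errant handle.
 */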
1740 static ecc_fm_err_t ecc_err_tbl[] = {
1741 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1742 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_UPA64S, SCH_REG_UPA,
1743 	ACC_HANDLE,
1744 
1745 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1746 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_REG, SCH_REG_PCIA_REG,
1747 	ACC_HANDLE,
1748 
1749 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1750 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_MEM, SCH_REG_PCIA_MEM,
1751 	ACC_HANDLE,
1752 
1753 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1754 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIA_CFGIO, SCH_REG_PCIA_CFGIO,
1755 	ACC_HANDLE,
1756 
1757 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1758 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_REG, SCH_REG_PCIB_REG,
1759 	ACC_HANDLE,
1760 
1761 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1762 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_MEM, SCH_REG_PCIB_MEM,
1763 	ACC_HANDLE,
1764 
1765 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1766 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_PCIB_CFGIO, SCH_REG_PCIB_CFGIO,
1767 	ACC_HANDLE,
1768 
1769 	PCI_ECC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO, CBNINTR_UE,
1770 	PBM_PRIMARY, SCHIZO_ECC_AFAR_PIOW_SAFARI_REGS, SCH_REG_SAFARI_REGS,
1771 	ACC_HANDLE,
1772 
1773 	PCI_ECC_SEC_PIO_UE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_UE,
1774 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1775 
1776 	PCI_ECC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1777 	PBM_PRIMARY, NULL, NULL, ACC_HANDLE,
1778 
1779 	PCI_ECC_SEC_PIO_CE, COMMON_ECC_UE_AFSR_E_PIO,  CBNINTR_CE,
1780 	PBM_SECONDARY, NULL, NULL, ACC_HANDLE,
1781 
1782 	PCI_ECC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1783 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1784 
1785 	PCI_ECC_SEC_DRD_UE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_UE,
1786 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1787 
1788 	PCI_ECC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1789 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1790 
1791 	PCI_ECC_SEC_DRD_CE, COMMON_ECC_UE_AFSR_E_DRD, CBNINTR_CE,
1792 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1793 
1794 	PCI_ECC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1795 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1796 
1797 	PCI_ECC_SEC_DWR_UE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_UE,
1798 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1799 
1800 	PCI_ECC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1801 	PBM_PRIMARY, NULL, NULL, DMA_HANDLE,
1802 
1803 	PCI_ECC_SEC_DWR_CE, COMMON_ECC_UE_AFSR_E_DWR, CBNINTR_CE,
1804 	PBM_SECONDARY, NULL, NULL, DMA_HANDLE,
1805 
1806 	NULL, NULL, NULL, NULL, NULL, NULL,
1807 };
1808 
1809 /*
1810  * pci_ecc_classify, called by ecc_handler to classify ecc errors
1811  * and determine if we should panic or not.
1812  */
1813 void
1814 pci_ecc_classify(uint64_t err, ecc_errstate_t *ecc_err_p)
1815 {
1816 	struct async_flt *ecc_p = &ecc_err_p->ecc_aflt;
1817 	uint64_t region, afar = ecc_p->flt_addr;
1818 	int i, j, ret = 0;
1819 	int flag = 0, fatal = 0;
1820 	pci_common_t *cmn_p = ecc_err_p->ecc_ii_p.ecc_p->ecc_pci_cmn_p;
1821 	pci_t *pci_p = cmn_p->pci_p[0];
1822 
1823 	ASSERT(MUTEX_HELD(&cmn_p->pci_fm_mutex));
1824 
1825 	ecc_err_p->ecc_bridge_type = PCI_BRIDGE_TYPE(cmn_p);
1826 
1827 	if (pci_p == NULL)
1828 		pci_p = cmn_p->pci_p[1];
1829 
1830 	ecc_err_p->ecc_ctrl = lddphysio(ecc_err_p->ecc_ii_p.ecc_p->ecc_csr_pa);
1831 	ecc_err_p->ecc_err_addr = afar;
1832 	region = afar & SCHIZO_ECC_AFAR_PIOW_MASK;
1833 
1834 	for (i = 0; ecc_err_tbl[i].ecc_err_class != NULL; i++) {
1835 		if (!(err & ecc_err_tbl[i].ecc_reg_bit) ||
1836 			(ecc_err_p->ecc_ii_p.ecc_type !=
1837 			    ecc_err_tbl[i].ecc_type) ||
1838 			(ecc_err_p->ecc_pri != ecc_err_tbl[i].ecc_pri))
1839 			continue;
1840 
1841 		ecc_p->flt_erpt_class = ecc_err_tbl[i].ecc_err_class;
1842 		flag = ecc_err_tbl[i].ecc_flag;
1843 
1844 		if (!ecc_err_tbl[i].ecc_pri ||
1845 				(ecc_err_tbl[i].ecc_type == CBNINTR_CE)) {
1846 			fatal += (ecc_err_tbl[i].ecc_type == CBNINTR_UE) ?
1847 				1 : 0;
1848 			break;
1849 		}
1850 
1851 		if (flag == ACC_HANDLE &&
1852 			(region & ecc_err_tbl[i].ecc_region_bits)) {
1853 			ecc_err_p->ecc_region = ecc_err_tbl[i].ecc_region;
1854 			pci_format_ecc_addr(pci_p->pci_dip,
1855 					&ecc_err_p->ecc_err_addr,
1856 					ecc_err_p->ecc_region);
1857 		}
1858 
1859 		/*
1860 		 * Lookup and fault errant handle
1861 		 */
1862 		for (j = 0; j < 2; ++j) {
1863 			ret = DDI_FM_UNKNOWN;
1864 			if (cmn_p->pci_p[j] == NULL)
1865 				continue;
1866 			ret = pci_handle_lookup(cmn_p->pci_p[j]->pci_dip,
1867 					flag, ecc_err_p->ecc_ena,
1868 					(void *)&ecc_err_p->ecc_err_addr);
1869 			if (ret == DDI_FM_NONFATAL) {
1870 				fatal = 0;
1871 				break;
1872 			} else
1873 				fatal++;
1874 		}
1875 		break;
1876 	}
1877 
1878 	if (fatal)
1879 		ecc_p->flt_panic = 1;
1880 	else if (flag != ACC_HANDLE)
1881 		ecc_err_p->ecc_pg_ret = 1;
1882 }
1883 
1884 /*
1885  * Tables to define PCI-X Split Completion errors
1886  */
1887 
1888 pcix_err_msg_rec_t pcix_completer_errs[] = {
1889 	{PCIX_CPLT_OUT_OF_RANGE,	"pcix", "oor"	},
1890 };
1891 
1892 pcix_err_tbl_t pcix_split_errs_tbl[] = {
1893 	{PCIX_CLASS_CPLT,
1894 		sizeof (pcix_completer_errs)/sizeof (pcix_err_msg_rec_t),
1895 		pcix_completer_errs		},
1896 };
1897 
1898 /*
1899  * Tables for the PCI-X error status messages
1900  */
1901 pcix_err_msg_rec_t pcix_stat_errs[] = {
1902 	{XMITS_PCIX_STAT_SC_DSCRD,	"pcix", "discard"  	},
1903 	{XMITS_PCIX_STAT_SC_TTO,	"xmits.pbmx", "tato" 	},
1904 	{XMITS_PCIX_STAT_SMMU,		"xmits.pbmx", "stmmu"	},
1905 	{XMITS_PCIX_STAT_SDSTAT,	"xmits.pbmx", "stdst"	},
1906 	{XMITS_PCIX_STAT_CMMU,		"xmits.pbmx", "cnmmu"	},
1907 	{XMITS_PCIX_STAT_CDSTAT,	"xmits.pbmx", "cndst"	}
1908 };
1909 
1910 pcix_err_tbl_t pcix_stat_errs_tbl =
1911 	{PCIX_NO_CLASS,
1912 		sizeof (pcix_stat_errs)/sizeof (pcix_err_msg_rec_t),
1913 		pcix_stat_errs		};
1914 
1915 
1916 /*
1917  * walk through a table of error messages, posting ereports as appropriate
1918  *
1919  * t - the table of messages to parse
1920  * err - the error to match against
1921  * multi - flag, sometimes multiple error bits may be set/desired
1922  */
1923 static int
1924 pcix_lookup_err_msgs(dev_info_t *dip, uint64_t ena, pcix_err_tbl_t t,
1925 		pbm_errstate_t *pbm_err_p)
1926 {
1927 	uint32_t err_bits  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_INDEX_MASK;
1928 	int nerr = 0;
1929 	int j;
1930 	char buf[FM_MAX_CLASS];
1931 
1932 	for (j = 0; j < t.err_rec_num; j++)  {
1933 		uint32_t msg_key = t.err_msg_tbl[j].msg_key;
1934 		if (pbm_err_p->pbm_multi ? !(err_bits & msg_key) :
1935 		    (err_bits != msg_key))
1936 			continue;
1937 
1938 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1939 		    t.err_msg_tbl[j].msg_class,
1940 		    pbm_err_p->pbm_pri ? "" : PCIX_SECONDARY,
1941 		    t.err_msg_tbl[j].msg_str);
1942 
1943 		pbm_err_p->pbm_err_class = buf;
1944 		pcix_ereport_post(dip, ena, pbm_err_p);
1945 		nerr++;
1946 	}
1947 	return (nerr ? DDI_FM_FATAL : DDI_FM_OK);
1948 }
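
/*
 * Example of the class string built above (illustrative): the
 * split-completion timeout entry {XMITS_PCIX_STAT_SC_TTO, "xmits.pbmx",
 * "tato"} yields "xmits.pbmx.tato" for a primary error; a secondary
 * match carries the PCIX_SECONDARY prefix before "tato".
 */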
1949 
1950 /*
1951  * Decodes primary(bit 27-24) or secondary(bit 15-12) PCI-X split
1952  * completion error message class and index in PBM AFSR.
1953  */
1954 static void
1955 pcix_log_split_err(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
1956 {
1957 	uint32_t class  = pbm_err_p->pbm_err & XMITS_PCIX_MSG_CLASS_MASK;
1958 	uint32_t num_classes = sizeof (pcix_split_errs_tbl) /
1959 	    sizeof (struct pcix_err_tbl);
1960 	int i;
1961 
1962 	for (i = 0; i < num_classes; i++) {
1963 		if (class == pcix_split_errs_tbl[i].err_class) {
1964 			pbm_err_p->pbm_multi = PCIX_SINGLE_ERR;
1965 			(void) pcix_lookup_err_msgs(dip, ena,
1966 			    pcix_split_errs_tbl[i], pbm_err_p);
1967 			break;
1968 		}
1969 	}
1970 }
1971 
1972 /*
1973  * Report PBM PCI-X Error Status Register if in PCI-X mode
1974  *
1975  * Once a PCI-X fault tree is constructed, the code below may need to
1976  * change.
1977  */
1978 static int
1979 pcix_log_pbm(pci_t *pci_p, uint64_t ena, pbm_errstate_t *pbm_err_p)
1980 {
1981 	int fatal = 0;
1982 	int nonfatal = 0;
1983 	uint32_t e;
1984 
1985 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
1986 
1987 	DEBUG3(DBG_ERR_INTR, pci_p->pci_dip, "pcix_log_pbm: chip_type=%d "
1988 	    "ctr_stat=%lx afsr = 0x%lx", CHIP_TYPE(pci_p),
1989 	    pbm_err_p->pbm_ctl_stat, pbm_err_p->pbm_afsr);
1990 
1991 	if (!(CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) ||
1992 	    !(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE))
1993 		return (DDI_FM_OK);
1994 
1995 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
1996 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
1997 		pbm_err_p->pbm_pri = PBM_PRIMARY;
1998 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
1999 		nonfatal++;
2000 	}
2001 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR) {
2002 		pbm_err_p->pbm_err = PBM_AFSR_TO_PRISPLIT(pbm_err_p->pbm_afsr);
2003 		pbm_err_p->pbm_pri = PBM_SECONDARY;
2004 		pcix_log_split_err(pci_p->pci_dip, ena, pbm_err_p);
2005 		nonfatal++;
2006 	}
2007 
2008 	e = PBM_PCIX_TO_PRIERR(pbm_err_p->pbm_pcix_stat);
2009 	if (e) {
2010 		pbm_err_p->pbm_pri = PBM_PRIMARY;
2011 		pbm_err_p->pbm_err = e;
2012 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2013 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2014 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2015 			fatal++;
2016 		else
2017 			nonfatal++;
2018 	}
2019 
2020 	e = PBM_PCIX_TO_SECERR(pbm_err_p->pbm_pcix_stat);
2021 	if (e) {
2022 		pbm_err_p->pbm_pri = PBM_SECONDARY;
2023 		pbm_err_p->pbm_err = e;
2024 		pbm_err_p->pbm_multi = PCIX_MULTI_ERR;
2025 		if (pcix_lookup_err_msgs(pci_p->pci_dip, ena,
2026 		    pcix_stat_errs_tbl, pbm_err_p) == DDI_FM_FATAL)
2027 			fatal++;
2028 		else
2029 			nonfatal++;
2030 	}
2031 
2032 	if (!fatal && !nonfatal)
2033 		return (DDI_FM_OK);
2034 	else if (fatal)
2035 		return (DDI_FM_FATAL);
2036 	return (DDI_FM_NONFATAL);
2037 }
2038 
2039 static pbm_fm_err_t pbm_err_tbl[] = {
2040 	PCI_MA,			SCHIZO_PCI_AFSR_E_MA,	PBM_PRIMARY,
2041 	FM_LOG_PCI,	PCI_TARG_MA,
2042 
2043 	PCI_SEC_MA,		SCHIZO_PCI_AFSR_E_MA,	PBM_SECONDARY,
2044 	FM_LOG_PBM,	NULL,
2045 
2046 	PCI_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_PRIMARY,
2047 	FM_LOG_PCI,	PCI_TARG_REC_TA,
2048 
2049 	PCI_SEC_REC_TA,		SCHIZO_PCI_AFSR_E_TA,	PBM_SECONDARY,
2050 	FM_LOG_PBM,	NULL,
2051 
2052 	PCI_PBM_RETRY,		SCHIZO_PCI_AFSR_E_RTRY,	PBM_PRIMARY,
2053 	FM_LOG_PBM,	PCI_PBM_TARG_RETRY,
2054 
2055 	PCI_SEC_PBM_RETRY,	SCHIZO_PCI_AFSR_E_RTRY,	PBM_SECONDARY,
2056 	FM_LOG_PBM,	NULL,
2057 
2058 	PCI_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_PRIMARY,
2059 	FM_LOG_PCI,	PCI_TARG_MDPE,
2060 
2061 	PCI_SEC_MDPE,		SCHIZO_PCI_AFSR_E_PERR,	PBM_SECONDARY,
2062 	FM_LOG_PBM,	NULL,
2063 
2064 	PCI_PBM_TTO,		SCHIZO_PCI_AFSR_E_TTO,	PBM_PRIMARY,
2065 	FM_LOG_PBM,	PCI_PBM_TARG_TTO,
2066 
2067 	PCI_SEC_PBM_TTO,	SCHIZO_PCI_AFSR_E_TTO,	PBM_SECONDARY,
2068 	FM_LOG_PBM,	NULL,
2069 
2070 	PCI_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_PRIMARY,
2071 	FM_LOG_PBM,	NULL,
2072 
2073 	PCI_SEC_SCH_BUS_UNUSABLE_ERR, SCHIZO_PCI_AFSR_E_UNUSABLE, PBM_SECONDARY,
2074 	FM_LOG_PBM,	NULL,
2075 
2076 	NULL,			NULL,			NULL,
2077 	NULL,		NULL,
2078 };
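
/*
 * AFSR classification table: each entry is (ereport class, AFSR error
 * bit, primary/secondary, FM_LOG_PCI vs. FM_LOG_PBM disposition,
 * target ereport class). Scanned by pci_pbm_classify() below.
 */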
2079 
2080 
2081 /*
2082  * pci_pbm_classify, called by pbm_afsr_report to classify PIO write
 * AFSR errors.
2083  */
2084 int
2085 pci_pbm_classify(pbm_errstate_t *pbm_err_p)
2086 {
2087 	uint32_t err;
2088 	int nerr = 0;
2089 	int i;
2090 
2091 	err = pbm_err_p->pbm_pri ? PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr):
2092 		PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
2093 
2094 	for (i = 0; pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2095 		if ((err & pbm_err_tbl[i].pbm_reg_bit) &&
2096 		    (pbm_err_p->pbm_pri == pbm_err_tbl[i].pbm_pri)) {
2097 			if (pbm_err_tbl[i].pbm_flag == FM_LOG_PCI)
2098 				pbm_err_p->pbm_pci.pci_err_class =
2099 					pbm_err_tbl[i].pbm_err_class;
2100 			else
2101 				pbm_err_p->pbm_err_class =
2102 				    pbm_err_tbl[i].pbm_err_class;
2103 
2104 			pbm_err_p->pbm_terr_class =
2105 			    pbm_err_tbl[i].pbm_terr_class;
2106 			pbm_err_p->pbm_log = pbm_err_tbl[i].pbm_flag;
2107 			nerr++;
2108 			break;
2109 		}
2110 	}
2111 
2112 	return (nerr);
2113 }
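
/*
 * Illustrative caller sketch (not taken verbatim from pbm_afsr_report):
 *
 *	pbm_err.pbm_pri = PBM_PRIMARY;
 *	if (pci_pbm_classify(&pbm_err) != 0)
 *		(post the ereport named by pbm_err_class/pci_err_class)
 *
 * pbm_log then records whether the error was filed against the PCI
 * bus (FM_LOG_PCI) or the PBM itself (FM_LOG_PBM).
 */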
2114 
2115 /*
2116  * Function used to handle and log IOMMU errors. Called by pci_pbm_err_handler,
2117  * with pci_fm_mutex held.
2118  */
2119 static int
2120 iommu_err_handler(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err_p)
2121 {
2122 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2123 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2124 	ecc_t *ecc_p = pci_p->pci_ecc_p;
2125 	uint64_t stat;
2126 	ushort_t ta_signalled;
2127 	int err = 0;
2128 	int fatal = 0;
2129 	int nonfatal = 0;
2130 	int ret;
2131 
2132 	ASSERT(MUTEX_HELD(&ecc_p->ecc_pci_cmn_p->pci_fm_mutex));
2133 	if (!((stat = *iommu_p->iommu_ctrl_reg) & TOMATILLO_IOMMU_ERR)) {
2134 		pbm_err_p->pbm_err_class = PCI_SCH_MMU_ERR;
2135 		iommu_ereport_post(dip, ena, pbm_err_p);
2136 		return (DDI_FM_NONFATAL);
2137 	}
2138 
2139 	/*
2140 	 * Need to make sure a Target Abort was signalled to the device if
2141 	 * we have any hope of recovering. Tomatillo does not send a TA for
2142 	 * DMA Writes that result in a Translation Error, thus fooling the
2143 	 * device into believing everything is as it expects. Ignorance
2144 	 * is bliss, but knowledge is power.
2145 	 */
2146 	ta_signalled = pbm_err_p->pbm_pci.pci_cfg_stat &
2147 		PCI_STAT_S_TARG_AB;
2148 
2149 	if (stat & TOMATILLO_IOMMU_ERR_ILLTSBTBW) {
2150 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_TSBTBW;
2151 		err = 1;
2152 		iommu_ereport_post(dip, ena, pbm_err_p);
2153 		if (!ta_signalled)
2154 			fatal++;
2155 		else
2156 			nonfatal++;
2157 	}
2158 
2159 	if (stat & TOMATILLO_IOMMU_ERR_BAD_VA) {
2160 		pbm_err_p->pbm_err_class = PCI_TOM_MMU_BAD_VA;
2161 		err = 1;
2162 		iommu_ereport_post(dip, ena, pbm_err_p);
2163 		if (!ta_signalled)
2164 			fatal++;
2165 		else
2166 			nonfatal++;
2167 	}
2168 
2169 	if (!err) {
2170 		stat = ((stat & TOMATILLO_IOMMU_ERRSTS) >>
2171 		    TOMATILLO_IOMMU_ERRSTS_SHIFT);
2172 		switch (stat) {
2173 		case TOMATILLO_IOMMU_PROTECTION_ERR:
2174 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_PROT_ERR;
2175 			iommu_ereport_post(dip, ena, pbm_err_p);
2176 			fatal++;
2177 			break;
2178 		case TOMATILLO_IOMMU_INVALID_ERR:
2179 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_INVAL_ERR;
2180 			/*
2181 			 * Fault the address in iommu_tfar
2182 			 * register to inform target driver of error
2183 			 */
2184 			ret = pci_handle_lookup(pci_p->pci_dip, DMA_HANDLE,
2185 				ena, (void *)&pbm_err_p->pbm_iommu.iommu_tfar);
2186 
2187 			if (ret == DDI_FM_NONFATAL) {
2188 				if (ta_signalled)
2189 					nonfatal++;
2190 				else
2191 					fatal++;
2192 			} else {
2193 				fatal++;
			}
2194 			iommu_ereport_post(dip, ena, pbm_err_p);
2195 			break;
2196 		case TOMATILLO_IOMMU_TIMEOUT_ERR:
2197 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_TO_ERR;
2198 			fatal++;
2199 			iommu_ereport_post(dip, ena, pbm_err_p);
2200 			break;
2201 		case TOMATILLO_IOMMU_ECC_ERR:
2202 			pbm_err_p->pbm_err_class = PCI_TOM_MMU_UE;
2203 			iommu_ereport_post(dip, ena, pbm_err_p);
2204 			break;
2205 		}
2206 	}
2207 
2208 	if (fatal)
2209 		return (DDI_FM_FATAL);
2210 	else if (nonfatal)
2211 		return (DDI_FM_NONFATAL);
2212 
2213 	return (DDI_FM_OK);
2214 }
2215 
2216 int
2217 pci_check_error(pci_t *pci_p)
2218 {
2219 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2220 	uint16_t pci_cfg_stat;
2221 	uint64_t pbm_ctl_stat, pbm_afsr, pbm_pcix_stat;
2222 	caddr_t a = pci_p->pci_address[0];
2223 	uint64_t *pbm_pcix_stat_reg;
2224 
2225 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2226 
2227 	pci_cfg_stat = pbm_p->pbm_config_header->ch_status_reg;
2228 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2229 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2230 
2231 	if ((pci_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2232 				PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2233 				PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2234 			(pbm_ctl_stat & (SCHIZO_PCI_CTRL_BUS_UNUSABLE |
2235 				TOMATILLO_PCI_CTRL_PCI_DTO_ERR |
2236 				SCHIZO_PCI_CTRL_PCI_TTO_ERR |
2237 				SCHIZO_PCI_CTRL_PCI_RTRY_ERR |
2238 				SCHIZO_PCI_CTRL_PCI_MMU_ERR |
2239 				COMMON_PCI_CTRL_SBH_ERR |
2240 				COMMON_PCI_CTRL_SERR)) ||
2241 			(PBM_AFSR_TO_PRIERR(pbm_afsr)))
2242 		return (1);
2243 
2244 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2245 			(pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2246 
2247 		pbm_pcix_stat_reg = (uint64_t *)(a +
2248 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2249 
2250 		pbm_pcix_stat = *pbm_pcix_stat_reg;
2251 
2252 		if (PBM_PCIX_TO_PRIERR(pbm_pcix_stat))
2253 			return (1);
2254 
2255 		if (pbm_pcix_stat & XMITS_PCIX_STAT_PERR_RECOV_INT)
2256 			return (1);
2257 	}
2258 
2259 	return (0);
2260 
2261 }
2262 
2263 static pbm_fm_err_t pci_pbm_err_tbl[] = {
2264 	PCI_PBM_RETRY,			SCHIZO_PCI_CTRL_PCI_RTRY_ERR,
2265 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_RETRY,
2266 
2267 	PCI_PBM_TTO,			SCHIZO_PCI_CTRL_PCI_TTO_ERR,
2268 	NULL,	PBM_NONFATAL,	PCI_PBM_TARG_TTO,
2269 
2270 	PCI_SCH_BUS_UNUSABLE_ERR,	SCHIZO_PCI_CTRL_BUS_UNUSABLE,
2271 	NULL,	PBM_NONFATAL,	NULL,
2272 
2273 	NULL,				NULL,
2274 	NULL,	NULL,		NULL
2275 };
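
/*
 * Control/status register error table: (ereport class, PBM control
 * register error bit, an unused primary/secondary slot, severity flag,
 * target ereport class). pci_pbm_err_handler() below scans it for
 * error bits that assert without a corresponding primary AFSR error.
 */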
2276 
2277 /*
2278  * Function used to log all PCI/PBM/IOMMU errors found in the system.
2279  * It is called by pbm_error_intr as well as pci_err_callback (the trap
2280  * callback). To protect access we hold the pci_fm_mutex when calling
2281  * this function.
2282  */
2283 int
2284 pci_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
2285 		const void *impl_data, int caller)
2286 {
2287 	int fatal = 0;
2288 	int nonfatal = 0;
2289 	int unknown = 0;
2290 	int rserr = 0;
2291 	uint32_t prierr, secerr;
2292 	pbm_errstate_t pbm_err;
2293 	char buf[FM_MAX_CLASS];
2294 	pci_t *pci_p = (pci_t *)impl_data;
2295 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2296 	pci_target_err_t tgt_err;
2297 	int i, ret = 0;
2298 
2299 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2300 	pci_pbm_errstate_get(pci_p, &pbm_err);
2301 
2302 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
2303 	    fm_ena_generate(0, FM_ENA_FMT1);
2304 
2305 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
2306 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
2307 
2308 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
2309 		if (caller == PCI_TRAP_CALL) {
2310 			/*
2311 			 * For ddi_caut_get treat all events as nonfatal.
2312 			 * The trampoline will set err_ena = 0, err_status =
2313 			 * NONFATAL. We only really call this function so that
2314 			 * pci_clear_error() and ndi_fm_handler_dispatch() will
2315 			 * get called.
2316 			 */
2317 			derr->fme_status = DDI_FM_NONFATAL;
2318 			nonfatal++;
2319 			goto done;
2320 		} else {
2321 			/*
2322 			 * For ddi_caut_put treat all events as nonfatal. Here
2323 			 * we have the handle and can call ndi_fm_acc_err_set().
2324 			 */
2325 			derr->fme_status = DDI_FM_NONFATAL;
2326 			ndi_fm_acc_err_set(pbm_p->pbm_excl_handle, derr);
2327 			nonfatal++;
2328 			goto done;
2329 		}
2330 	} else if (derr->fme_flag == DDI_FM_ERR_PEEK) {
2331 		/*
2332 		 * For ddi_peek treat all events as nonfatal. We only
2333 		 * really call this function so that pci_clear_error()
2334 		 * and ndi_fm_handler_dispatch() will get called.
2335 		 */
2336 		nonfatal++;
2337 		goto done;
2338 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
2339 		/*
2340 		 * For ddi_poke we can treat as nonfatal if the
2341 		 * following conditions are met :
2342 		 * 1. Make sure only primary error is MA/TA
2343 		 * 2. Make sure no secondary error bits set
2344 		 * 3. check pci config header stat reg to see MA/TA is
2345 		 *    logged. We cannot verify only MA/TA is recorded
2346 		 *    since it gets much more complicated when a
2347 		 *    PCI-to-PCI bridge is present.
2348 		 */
2349 		if ((prierr == SCHIZO_PCI_AFSR_E_MA) && !secerr &&
2350 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_MAST_AB)) {
2351 			nonfatal++;
2352 			goto done;
2353 		} else if ((*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) &&
2354 		    pcix_ma_behind_bridge(&pbm_err)) {
2355 			/*
2356 			 * MAs behind a PCI-X bridge get sent back to
2357 			 * the host as a Split Completion Error Message.
2358 			 * We handle this the same as the above check.
2359 			 */
2360 			nonfatal++;
2361 			goto done;
2362 		}
2363 		if ((prierr == SCHIZO_PCI_AFSR_E_TA) && !secerr &&
2364 		    (pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_R_TARG_AB)) {
2365 			nonfatal++;
2366 			goto done;
2367 		}
2368 	}
2369 
2370 	DEBUG2(DBG_ERR_INTR, dip, "pci_pbm_err_handler: prierr=0x%x "
2371 	    "secerr=0x%x", prierr, secerr);
2372 
2373 	if (prierr || secerr) {
2374 		ret = pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
2375 		if (ret == DDI_FM_FATAL)
2376 			fatal++;
2377 		else
2378 			nonfatal++;
2379 	}
2380 	if ((ret = pcix_log_pbm(pci_p, derr->fme_ena, &pbm_err))
2381 			== DDI_FM_FATAL)
2382 		fatal++;
2383 	else if (ret == DDI_FM_NONFATAL)
2384 		nonfatal++;
2385 
2386 	if ((ret = pci_cfg_report(dip, derr, &pbm_err.pbm_pci, caller, prierr))
2387 			== DDI_FM_FATAL)
2388 		fatal++;
2389 	else if (ret == DDI_FM_NONFATAL)
2390 		nonfatal++;
2391 
2392 	for (i = 0; pci_pbm_err_tbl[i].pbm_err_class != NULL; i++) {
2393 		if ((pbm_err.pbm_ctl_stat & pci_pbm_err_tbl[i].pbm_reg_bit) &&
2394 		    !prierr) {
2395 			pbm_err.pbm_err_class =
2396 				pci_pbm_err_tbl[i].pbm_err_class;
2397 			pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2398 			if (pci_pbm_err_tbl[i].pbm_flag)
2399 				fatal++;
2400 			else
2401 				nonfatal++;
2402 			if (caller == PCI_TRAP_CALL &&
2403 			    pci_pbm_err_tbl[i].pbm_terr_class) {
2404 				tgt_err.tgt_err_ena = derr->fme_ena;
2405 				tgt_err.tgt_err_class =
2406 				    pci_pbm_err_tbl[i].pbm_terr_class;
2407 				tgt_err.tgt_bridge_type =
2408 				    pbm_err.pbm_bridge_type;
2409 				tgt_err.tgt_err_addr =
2410 				    (uint64_t)derr->fme_bus_specific;
2411 				errorq_dispatch(pci_target_queue,
2412 				    (void *)&tgt_err, sizeof (pci_target_err_t),
2413 				    ERRORQ_ASYNC);
2414 			}
2415 		}
2416 	}
2417 
2418 	if ((pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SBH_ERR) &&
2419 	    (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)) {
2420 		pbm_err.pbm_err_class = PCI_SCH_SBH;
2421 		pbm_ereport_post(dip, derr->fme_ena, &pbm_err);
2422 		if (pci_panic_on_sbh_errors)
2423 			fatal++;
2424 		else
2425 			nonfatal++;
2426 	}
2427 
2428 	/*
2429 	 * PBM Received System Error - During any transaction, or
2430 	 * at any point on the bus, some device may detect a critical
2431 	 * error and signal a system error to the system.
2432 	 */
2433 	if (pbm_err.pbm_ctl_stat & COMMON_PCI_CTRL_SERR) {
2434 		/*
2435 		 * may be expected (master abort from pci-pci bridge during
2436 		 * poke will generate SERR)
2437 		 */
2438 		if (derr->fme_flag != DDI_FM_ERR_POKE) {
2439 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2440 			    PCI_ERROR_SUBCLASS, PCI_REC_SERR);
2441 			DEBUG1(DBG_ERR_INTR, dip, "pci_pbm_err_handler: "
2442 			    "ereport_post: %s", buf);
2443 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2444 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2445 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2446 			    pbm_err.pbm_pci.pci_cfg_stat, PCI_CONFIG_COMMAND,
2447 			    DATA_TYPE_UINT16, pbm_err.pbm_pci.pci_cfg_comm,
2448 			    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2449 		}
2450 		rserr++;
2451 	}
2452 
2453 	/*
2454 	 * PCI Retry Timeout - Device fails to retry deferred
2455 	 * transaction within timeout. Only Tomatillo
2456 	 */
2457 	if (pbm_err.pbm_ctl_stat & TOMATILLO_PCI_CTRL_PCI_DTO_ERR) {
2458 		if (pci_dto_fault_warn == CE_PANIC)
2459 			fatal++;
2460 		else
2461 			nonfatal++;
2462 
2463 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2464 			PCI_ERROR_SUBCLASS, PCI_DTO);
2465 		ddi_fm_ereport_post(dip, buf, derr->fme_ena, DDI_NOSLEEP,
2466 		    FM_VERSION, DATA_TYPE_UINT8, 0,
2467 		    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2468 		    pbm_err.pbm_pci.pci_cfg_stat,
2469 		    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2470 		    pbm_err.pbm_pci.pci_cfg_comm,
2471 		    PCI_PA, DATA_TYPE_UINT64, (uint64_t)0, NULL);
2472 	}
2473 
2474 	/*
2475 	 * PBM Detected Data Parity Error - DPE detected during a DMA Write
2476 	 * or PIO Read. The latter case is taken care of by cpu_deferred_error
2477 	 * and sent here to be logged.
2478 	 */
2479 	if ((pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_PERROR) &&
2480 			!(pbm_err.pbm_pci.pci_cfg_stat & PCI_STAT_S_SYSERR)) {
2481 		/*
2482 		 * If we have an address then fault
2483 		 * it, if not probe for errant device
2484 		 */
2485 		ret = DDI_FM_FATAL;
2486 		if (caller != PCI_TRAP_CALL) {
2487 			if (pbm_err.pbm_va_log)
2488 				ret = pci_handle_lookup(dip, DMA_HANDLE,
2489 						derr->fme_ena,
2490 						(void *)&pbm_err.pbm_va_log);
2491 			if (ret == DDI_FM_NONFATAL)
2492 				nonfatal++;
2493 			else
2494 				fatal++;
2495 		} else
2496 			nonfatal++;
2497 
2498 	}
2499 
2500 	/* PBM Detected IOMMU Error */
2501 	if (pbm_err.pbm_ctl_stat & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2502 		if (iommu_err_handler(dip, derr->fme_ena, &pbm_err)
2503 				== DDI_FM_FATAL)
2504 			fatal++;
2505 		else
2506 			nonfatal++;
2507 	}
2508 
2509 done:
2510 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
2511 	if (ret == DDI_FM_FATAL) {
2512 		fatal++;
2513 	} else if (ret == DDI_FM_NONFATAL) {
2514 		nonfatal++;
2515 	} else if (ret == DDI_FM_UNKNOWN) {
2516 		unknown++;
2517 	}
2518 
2519 	/*
2520 	 * RSERR not claimed as nonfatal by a child is considered fatal
2521 	 */
2522 	if (rserr && ret != DDI_FM_NONFATAL)
2523 		fatal++;
2524 
2525 	/* Cleanup and reset error bits */
2526 	pci_clear_error(pci_p, &pbm_err);
2527 
2528 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2529 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2530 }
2531 
2532 /*
2533  * Function returns TRUE if the primary error is a Split Completion Error
2534  * that indicates a Master Abort occurred behind a PCI-X bridge.
2535  * This function should only be called for buses running in PCI-X mode.
2536  */
2537 static int
2538 pcix_ma_behind_bridge(pbm_errstate_t *pbm_err_p)
2539 {
2540 	uint64_t msg;
2541 
2542 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_S_SC_ERR)
2543 		return (0);
2544 
2545 	if (pbm_err_p->pbm_afsr & XMITS_PCI_X_AFSR_P_SC_ERR) {
2546 		msg = (pbm_err_p->pbm_afsr >> XMITS_PCI_X_P_MSG_SHIFT) &
2547 		    XMITS_PCIX_MSG_MASK;
2548 		if (msg & PCIX_CLASS_BRIDGE)
2549 			if (msg & PCIX_BRIDGE_MASTER_ABORT) {
2550 				return (1);
2551 			}
2552 	}
2553 
2554 	return (0);
2555 }
2556 
2557 /*
2558  * Function used to gather PBM/PCI/IOMMU error state for the
2559  * pci_pbm_err_handler and the cb_buserr_intr. This function must be
2560  * called while pci_fm_mutex is held.
2561  */
2562 static void
2563 pci_pbm_errstate_get(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2564 {
2565 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2566 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2567 	caddr_t a = pci_p->pci_address[0];
2568 	uint64_t *pbm_pcix_stat_reg;
2569 
2570 	ASSERT(MUTEX_HELD(&pci_p->pci_common_p->pci_fm_mutex));
2571 	bzero(pbm_err_p, sizeof (pbm_errstate_t));
2572 
2573 	/*
2574 	 * Capture all pbm error state for later logging
2575 	 */
2576 	pbm_err_p->pbm_bridge_type = PCI_BRIDGE_TYPE(pci_p->pci_common_p);
2577 
2578 	pbm_err_p->pbm_pci.pci_cfg_stat =
2579 		pbm_p->pbm_config_header->ch_status_reg;
2580 	pbm_err_p->pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2581 	pbm_err_p->pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2582 	pbm_err_p->pbm_afar = *pbm_p->pbm_async_flt_addr_reg;
2583 	pbm_err_p->pbm_iommu.iommu_stat = *iommu_p->iommu_ctrl_reg;
2584 	pbm_err_p->pbm_pci.pci_cfg_comm =
2585 		pbm_p->pbm_config_header->ch_command_reg;
2586 	pbm_err_p->pbm_pci.pci_pa = *pbm_p->pbm_async_flt_addr_reg;
2587 
2588 	/*
2589 	 * Record errant slot for Xmits and Schizo
2590 	 * Not stored in Tomatillo
2591 	 */
2592 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS ||
2593 			CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO) {
2594 		pbm_err_p->pbm_err_sl = (pbm_err_p->pbm_ctl_stat &
2595 				SCHIZO_PCI_CTRL_ERR_SLOT) >>
2596 			SCHIZO_PCI_CTRL_ERR_SLOT_SHIFT;
2597 
2598 		/*
2599 		 * Bit 51 on XMITS rev 1.0 is the same as
2600 		 * SCHIZO_PCI_CTRL_ERR_SLOT_LOCK on Schizo 2.3, and it
2601 		 * must be cleared to be able to latch the slot info
2602 		 * on the next fault.
2603 		 * On XMITS rev 2.0, however, this bit indicates a DMA
2604 		 * Write Parity error.
2605 		 */
2606 		if (pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_DMA_WR_PERR) {
2607 			if ((PCI_CHIP_ID(pci_p) == XMITS_VER_10) ||
2608 				(PCI_CHIP_ID(pci_p) <= SCHIZO_VER_23)) {
2609 				/*
2610 				 * top 32 bits are W1C and we just want to
2611 				 * clear SLOT_LOCK. Leave bottom 32 bits
2612 				 * unchanged
2613 				 */
2614 				*pbm_p->pbm_ctrl_reg =
2615 					pbm_err_p->pbm_ctl_stat &
2616 					(SCHIZO_PCI_CTRL_ERR_SLOT_LOCK |
2617 					0xffffffff);
2618 				pbm_err_p->pbm_ctl_stat =
2619 					*pbm_p->pbm_ctrl_reg;
2620 			}
2621 		}
2622 	}
2623 
2624 	/*
2625 	 * Tomatillo specific registers
2626 	 */
2627 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2628 		pbm_err_p->pbm_va_log = (uint64_t)va_to_pa((void *)(uintptr_t)
2629 		    *(uint64_t *)(a + TOMATILLO_TGT_ERR_VALOG_OFFSET));
2630 		pbm_err_p->pbm_iommu.iommu_tfar = *iommu_p->iommu_tfar_reg;
2631 	}
2632 
2633 	/*
2634 	 * Xmits PCI-X register
2635 	 */
2636 	if ((CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) &&
2637 			(pbm_err_p->pbm_ctl_stat & XMITS_PCI_CTRL_X_MODE)) {
2638 
2639 		pbm_pcix_stat_reg = (uint64_t *)(a +
2640 		    XMITS_PCI_X_ERROR_STATUS_REG_OFFSET);
2641 
2642 		pbm_err_p->pbm_pcix_stat = *pbm_pcix_stat_reg;
2643 		pbm_err_p->pbm_pcix_pfar = pbm_err_p->pbm_pcix_stat &
2644 				XMITS_PCI_X_STATUS_PFAR_MASK;
2645 	}
2646 }
2647 
2648 /*
2649  * Function used to clear PBM/PCI/IOMMU error state after error handling
2650  * is complete. Only error bits that have been logged are cleared. Called by
2651  * pci_pbm_err_handler and pci_bus_exit.
2652  */
2653 static void
2654 pci_clear_error(pci_t *pci_p, pbm_errstate_t *pbm_err_p)
2655 {
2656 	pbm_t *pbm_p = pci_p->pci_pbm_p;
2657 	iommu_t *iommu_p = pci_p->pci_iommu_p;
2658 
2659 	ASSERT(MUTEX_HELD(&pbm_p->pbm_pci_p->pci_common_p->pci_fm_mutex));
2660 
2661 	if (*pbm_p->pbm_ctrl_reg & SCHIZO_PCI_CTRL_PCI_MMU_ERR) {
2662 		iommu_tlb_scrub(pci_p->pci_iommu_p, 1);
2663 	}
2664 	pbm_p->pbm_config_header->ch_status_reg =
2665 		pbm_err_p->pbm_pci.pci_cfg_stat;
2666 	*pbm_p->pbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
2667 	*pbm_p->pbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
2668 	*iommu_p->iommu_ctrl_reg = pbm_err_p->pbm_iommu.iommu_stat;
2669 }
2670 
2671 void
2672 pbm_clear_error(pbm_t *pbm_p)
2673 {
2674 	uint64_t pbm_afsr, pbm_ctl_stat;
2675 
2676 	/*
2677 	 * for poke() support - called from POKE_FLUSH. Spin waiting
2678 	 * for MA, TA or SERR to be cleared by a pbm_error_intr().
2679 	 * We have to wait for SERR too in case the device is beyond
2680 	 * a pci-pci bridge.
2681 	 */
2682 	pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2683 	pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2684 	while (((pbm_afsr >> SCHIZO_PCI_AFSR_PE_SHIFT) &
2685 	    (SCHIZO_PCI_AFSR_E_MA | SCHIZO_PCI_AFSR_E_TA)) ||
2686 	    (pbm_ctl_stat & COMMON_PCI_CTRL_SERR)) {
2687 		pbm_ctl_stat = *pbm_p->pbm_ctrl_reg;
2688 		pbm_afsr = *pbm_p->pbm_async_flt_status_reg;
2689 	}
2690 }
2691 
2692 /*
2693  * Function used to convert the 32-bit captured PCI error address
2694  * to the full Safari or Jbus address. This is so we can look this address
2695  * up in our handle caches.
2696  */
2697 void
2698 pci_format_addr(dev_info_t *dip, uint64_t *afar, uint64_t afsr)
2699 {
2700 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2701 	pci_ranges_t *io_range, *mem_range;
2702 	uint64_t err_pa = 0;
2703 
2704 	if (afsr & SCHIZO_PCI_AFSR_CONF_SPACE) {
2705 		err_pa |= pci_p->pci_ranges->parent_high;
2706 		err_pa = err_pa << 32;
2707 		err_pa |= pci_p->pci_ranges->parent_low;
2708 	} else if (afsr & SCHIZO_PCI_AFSR_IO_SPACE) {
2709 		io_range = pci_p->pci_ranges + 1;
2710 		err_pa |= io_range->parent_high;
2711 		err_pa = err_pa << 32;
2712 		err_pa |= io_range->parent_low;
2713 	} else if (afsr & SCHIZO_PCI_AFSR_MEM_SPACE) {
2714 		mem_range = pci_p->pci_ranges + 2;
2715 		err_pa |= mem_range->parent_high;
2716 		err_pa = err_pa << 32;
2717 		err_pa |= mem_range->parent_low;
2718 	}
2719 	*afar |= err_pa;
2720 }
2721 
2722 static ecc_format_t ecc_format_tbl[] = {
2723 	SCH_REG_UPA,		NULL,				NULL,
2724 	SCH_REG_PCIA_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEA,
2725 	SCH_REG_PCIA_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEA,
2726 	SCH_REG_PCIA_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEA,
2727 	SCH_REG_PCIB_REG,	SCHIZO_PCI_AFSR_CONF_SPACE,	PCI_SIDEB,
2728 	SCH_REG_PCIB_MEM,	SCHIZO_PCI_AFSR_MEM_SPACE,	PCI_SIDEB,
2729 	SCH_REG_PCIB_CFGIO,	SCHIZO_PCI_AFSR_IO_SPACE,	PCI_SIDEB,
2730 	SCH_REG_SAFARI_REGS,	NULL,				NULL,
2731 	NULL,			NULL,				NULL,
2732 };
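
/*
 * Maps each ECC PIO region to the PBM AFSR space bit and PCI leaf
 * (side) that pci_format_ecc_addr() below uses to rebuild the full
 * Safari address.
 */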
2733 
2734 /*
2735  * Function used to convert the 32-bit PIO address captured for a
2736  * Safari Bus UE (during PIO Rd/Wr) to a full Safari Bus Address.
2737  */
2738 static void
2739 pci_format_ecc_addr(dev_info_t *dip, uint64_t *afar, ecc_region_t region)
2740 {
2741 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2742 	pci_common_t *cmn_p = pci_p->pci_common_p;
2743 	cb_t *cb_p = pci_p->pci_cb_p;
2744 	int i, pci_side = 0;
2745 	int swap = 0;
2746 	uint64_t pa = cb_p->cb_base_pa;
2747 	uint64_t flag = 0, schizo_base, pci_csr_base;
2748 
2749 	if (pci_p == NULL)
2750 		return;
2751 
2752 	pci_csr_base = va_to_pa(pci_p->pci_address[0]);
2753 
2754 	/*
2755 	 * Using the csr_base address to determine which side
2756 	 * we are on.
2757 	 */
2758 	if (pci_csr_base & PCI_SIDE_ADDR_MASK)
2759 		pci_side = 1;
2760 	else
2761 		pci_side = 0;
2762 
2763 	schizo_base = pa - PBM_CTRL_OFFSET;
2764 
2765 	for (i = 0; ecc_format_tbl[i].ecc_region != NULL; i++) {
2766 		if (region == ecc_format_tbl[i].ecc_region) {
2767 			flag = ecc_format_tbl[i].ecc_space;
2768 			if (ecc_format_tbl[i].ecc_side != pci_side)
2769 				swap = 1;
2770 			if (region == SCH_REG_SAFARI_REGS)
2771 				*afar |= schizo_base;
2772 			break;
2773 		}
2774 	}
2775 
2776 	if (swap) {
2777 		pci_p = cmn_p->pci_p[PCI_OTHER_SIDE(pci_p->pci_side)];
2778 
2779 		if (pci_p == NULL)
2780 			return;
2781 	}
2782 	pci_format_addr(pci_p->pci_dip, afar, flag);
2783 }
2784 
2785 /*
2786  * Function used to post control block specific ereports.
2787  */
2788 static void
2789 cb_ereport_post(dev_info_t *dip, uint64_t ena, cb_errstate_t *cb_err)
2790 {
2791 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
2792 	char buf[FM_MAX_CLASS], dev_path[MAXPATHLEN], *ptr;
2793 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2794 	nvlist_t *ereport, *detector;
2795 	errorq_elem_t *eqep;
2796 	nv_alloc_t *nva;
2797 
2798 	DEBUG1(DBG_ATTACH, dip, "cb_ereport_post: elog 0x%lx",
2799 	    cb_err->cb_elog);
2800 
2801 	/*
2802 	 * We do not use ddi_fm_ereport_post because we need to set a
2803 	 * special detector here. Since we do not have a device path for
2804 	 * the bridge chip we use what we think it should be to aid in
2805 	 * diagnosis.
2806 	 */
2807 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s", DDI_IO_CLASS,
2808 	    cb_err->cb_bridge_type, cb_err->cb_err_class);
2809 
2810 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2811 
2812 	eqep = errorq_reserve(fmhdl->fh_errorq);
2813 	if (eqep == NULL)
2814 		return;
2815 
2816 	ereport = errorq_elem_nvl(fmhdl->fh_errorq, eqep);
2817 	nva = errorq_elem_nva(fmhdl->fh_errorq, eqep);
2818 	detector = fm_nvlist_create(nva);
2819 
2820 	ASSERT(ereport);
2821 	ASSERT(nva);
2822 	ASSERT(detector);
2823 
2824 	ddi_pathname(dip, dev_path);
2825 	ptr = strrchr(dev_path, (int)',');
2826 
2827 	if (ptr)
2828 		*ptr = '\0';
2829 
2830 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL, dev_path, NULL);
2831 
2832 	DEBUG1(DBG_ERR_INTR, dip, "cb_ereport_post: ereport_set: %s", buf);
2833 
2834 	if (CHIP_TYPE(pci_p) == PCI_CHIP_SCHIZO ||
2835 	    CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
2836 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2837 		    SAFARI_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2838 		    SAFARI_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2839 		    SAFARI_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2840 		    SAFARI_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2841 		    SAFARI_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2842 		    NULL);
2843 	} else if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO) {
2844 		fm_ereport_set(ereport, FM_EREPORT_VERSION, buf, ena, detector,
2845 		    JBUS_CSR, DATA_TYPE_UINT64, cb_err->cb_csr,
2846 		    JBUS_ERR, DATA_TYPE_UINT64, cb_err->cb_err,
2847 		    JBUS_INTR, DATA_TYPE_UINT64, cb_err->cb_intr,
2848 		    JBUS_ELOG, DATA_TYPE_UINT64, cb_err->cb_elog,
2849 		    JBUS_PCR, DATA_TYPE_UINT64, cb_err->cb_pcr,
2850 		    NULL);
2851 	}
2852 	errorq_commit(fmhdl->fh_errorq, eqep, ERRORQ_ASYNC);
2853 }
2854 
2855 /*
2856  * Function used to post IOMMU specific ereports.
2857  */
2858 static void
2859 iommu_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2860 {
2861 	char buf[FM_MAX_CLASS];
2862 
2863 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2864 		    pbm_err->pbm_bridge_type, pbm_err->pbm_err_class);
2865 
2866 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2867 
2868 	DEBUG1(DBG_ERR_INTR, dip, "iommu_ereport_post: ereport_set: %s", buf);
2869 
2870 	ddi_fm_ereport_post(dip, buf, ena, DDI_NOSLEEP,
2871 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2872 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2873 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2874 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2875 	    PCI_PBM_IOMMU_CTRL, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_stat,
2876 	    PCI_PBM_IOMMU_TFAR, DATA_TYPE_UINT64, pbm_err->pbm_iommu.iommu_tfar,
2877 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2878 	    PCI_PBM_VALOG, DATA_TYPE_UINT64, pbm_err->pbm_va_log,
2879 	    NULL);
2880 }
2881 
2882 /*
2883  * Function used to post PCI-X generic ereports.
2884  * This function needs to be fixed once the Fault Boundary Analysis
2885  * for PCI-X is conducted. The payload should be made more generic.
2886  */
2887 static void
2888 pcix_ereport_post(dev_info_t *dip, uint64_t ena, pbm_errstate_t *pbm_err)
2889 {
2890 	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
2891 
2892 	DEBUG1(DBG_ERR_INTR, dip, "pcix_ereport_post: ereport_post: %s",
2893 	    pbm_err->pbm_err_class);
2895 
2896 	ddi_fm_ereport_post(dip, pbm_err->pbm_err_class, ena, DDI_NOSLEEP,
2897 	    FM_VERSION, DATA_TYPE_UINT8, 0,
2898 	    PCI_CONFIG_STATUS, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_stat,
2899 	    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, pbm_err->pbm_pci.pci_cfg_comm,
2900 	    PCI_PBM_CSR, DATA_TYPE_UINT64, pbm_err->pbm_ctl_stat,
2901 	    PCI_PBM_AFSR, DATA_TYPE_UINT64, pbm_err->pbm_afsr,
2902 	    PCI_PBM_AFAR, DATA_TYPE_UINT64, pbm_err->pbm_afar,
2903 	    PCI_PBM_SLOT, DATA_TYPE_UINT64, pbm_err->pbm_err_sl,
2904 	    PCIX_STAT, DATA_TYPE_UINT64, pbm_err->pbm_pcix_stat,
2905 	    PCIX_PFAR, DATA_TYPE_UINT32, pbm_err->pbm_pcix_pfar,
2906 	    NULL);
2907 }
2908 
2909 static void
2910 iommu_ctx_free(iommu_t *iommu_p)
2911 {
2912 	kmem_free(iommu_p->iommu_ctx_bitmap, IOMMU_CTX_BITMAP_SIZE);
2913 }
2914 
2915 /*
2916  * iommu_tlb_scrub():
2917  *	Examine TLB entries through the TLB diagnostic registers for errors.
2918  *	scrub = 1 : clean up all error bits in the TLB (FAULT_RESET case)
2919  *	scrub = 0 : log all error conditions to the console (FAULT_LOG case)
2920  *	In both cases, return the number of errors found in the TLB entries.
2921  */
2922 static int
2923 iommu_tlb_scrub(iommu_t *iommu_p, int scrub)
2924 {
2925 	int i, nerr = 0;
2926 	dev_info_t *dip = iommu_p->iommu_pci_p->pci_dip;
2927 	char *neg = "not ";
2928 
2929 	uint64_t base = (uint64_t)iommu_p->iommu_ctrl_reg -
2930 		COMMON_IOMMU_CTRL_REG_OFFSET;
2931 
2932 	volatile uint64_t *tlb_tag = (volatile uint64_t *)
2933 		(base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
2934 	volatile uint64_t *tlb_data = (volatile uint64_t *)
2935 		(base + COMMON_IOMMU_TLB_DATA_DIAG_ACC_OFFSET);
2936 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
2937 		uint64_t tag = tlb_tag[i];
2938 		uint64_t data = tlb_data[i];
2939 		uint32_t errstat;
2940 		iopfn_t pfn;
2941 
2942 		if (!(tag & TLBTAG_ERR_BIT))
2943 			continue;
2944 
2945 		pfn = (iopfn_t)(data & TLBDATA_MEMPA_BITS);
2946 		errstat = (uint32_t)
2947 			((tag & TLBTAG_ERRSTAT_BITS) >> TLBTAG_ERRSTAT_SHIFT);
2948 		if (errstat == TLBTAG_ERRSTAT_INVALID) {
2949 			if (scrub)
2950 				tlb_tag[i] = tlb_data[i] = 0ull;
2951 		} else
2952 			nerr++;
2953 
2954 		if (scrub)
2955 			continue;
2956 
2957 		cmn_err(CE_CONT, "%s%d: Error %x on IOMMU TLB entry %x:\n"
2958 		"\tContext=%lx %sWritable %sStreamable\n"
2959 		"\tPCI Page Size=%sk Address in page %lx\n",
2960 			ddi_driver_name(dip), ddi_get_instance(dip), errstat, i,
2961 			(tag & TLBTAG_CONTEXT_BITS) >> TLBTAG_CONTEXT_SHIFT,
2962 			(tag & TLBTAG_WRITABLE_BIT) ? "" : neg,
2963 			(tag & TLBTAG_STREAM_BIT) ? "" : neg,
2964 			(tag & TLBTAG_PGSIZE_BIT) ? "64" : "8",
2965 			(tag & TLBTAG_PCIVPN_BITS) << 13);
2966 		cmn_err(CE_CONT, "Memory: %sValid %sCacheable Page Frame=%lx\n",
2967 			(data & TLBDATA_VALID_BIT) ? "" : neg,
2968 			(data & TLBDATA_CACHE_BIT) ? "" : neg, pfn);
2969 	}
2970 	return (nerr);
2971 }
2972 
2973 /*
2974  * pci_iommu_disp: calculates the displacement needed in tomatillo's
2975  *	iommu control register and modifies the control value template
2976  *	from the caller. It also clears any error status bits that are
2977  *	new in tomatillo.
2978  * return value: an 8-bit mask to enable corresponding 512 MB segments
2979  *	suitable for tomatillo's target address register.
2980  *	0x00: no programming is needed, use existing value from prom
2981  *	0x60: use segment 5 and 6 to form a 1GB dvma range
2982  */
2983 static uint64_t
2984 pci_iommu_disp(iommu_t *iommu_p, uint64_t *ctl_p)
2985 {
2986 	uint64_t ctl_old;
2987 	if (CHIP_TYPE(iommu_p->iommu_pci_p) != PCI_CHIP_TOMATILLO)
2988 		return (0);
2989 
2990 	ctl_old = *iommu_p->iommu_ctrl_reg;
2991 	/* iommu ctrl reg error bits are W1C */
2992 	if (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT) {
2993 		cmn_err(CE_WARN, "Tomatillo iommu err: %lx", ctl_old);
2994 		*ctl_p |= (ctl_old >> TOMATIILO_IOMMU_ERR_REG_SHIFT)
2995 		    << TOMATIILO_IOMMU_ERR_REG_SHIFT;
2996 	}
2997 
2998 	if (iommu_p->iommu_tsb_size != TOMATILLO_IOMMU_TSB_MAX)
2999 		return (0);
3000 
3001 	/* Tomatillo 2.0 and later, and 1GB DVMA range */
3002 	*ctl_p |= 1 << TOMATILLO_IOMMU_SEG_DISP_SHIFT;
3003 	return (3 << (iommu_p->iommu_dvma_base >> (32 - 3)));
3004 }
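
/*
 * Worked example: with iommu_dvma_base at 0xa0000000 the return value
 * is 3 << (0xa0000000 >> 29) == 3 << 5 == 0x60, i.e. segments 5 and 6,
 * matching the 1GB range described above.
 */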
3005 
3006 void
3007 pci_iommu_config(iommu_t *iommu_p, uint64_t iommu_ctl, uint64_t cfgpa)
3008 {
3009 	uintptr_t pbm_regbase = get_pbm_reg_base(iommu_p->iommu_pci_p);
3010 	volatile uint64_t *pbm_csr_p = (volatile uint64_t *)pbm_regbase;
3011 	volatile uint64_t *tgt_space_p = (volatile uint64_t *)(pbm_regbase |
3012 		(TOMATILLO_TGT_ADDR_SPACE_OFFSET - SCHIZO_PCI_CTRL_REG_OFFSET));
3013 	volatile uint64_t pbm_ctl = *pbm_csr_p;
3014 
3015 	volatile uint64_t *iommu_ctl_p = iommu_p->iommu_ctrl_reg;
3016 	volatile uint64_t tsb_bar_val = iommu_p->iommu_tsb_paddr;
3017 	volatile uint64_t *tsb_bar_p = iommu_p->iommu_tsb_base_addr_reg;
3018 	uint64_t mask = pci_iommu_disp(iommu_p, &iommu_ctl);
3019 
3020 	DEBUG2(DBG_ATTACH, iommu_p->iommu_pci_p->pci_dip,
3021 		"\npci_iommu_config: pbm_csr_p=%llx pbm_ctl=%llx",
3022 		pbm_csr_p, pbm_ctl);
3023 	DEBUG2(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3024 		"\n\tiommu_ctl_p=%llx iommu_ctl=%llx",
3025 		iommu_ctl_p, iommu_ctl);
3026 	DEBUG4(DBG_ATTACH|DBG_CONT, iommu_p->iommu_pci_p->pci_dip,
3027 		"\n\tcfgpa=%llx tgt_space_p=%llx mask=%x tsb=%llx\n",
3028 		cfgpa, tgt_space_p, mask, tsb_bar_val);
3029 
3030 	if (!cfgpa)
3031 		goto reprog;
3032 
3033 	/* disable PBM arbiters - turn off bits 0-7 */
3034 	*pbm_csr_p = (pbm_ctl >> 8) << 8;
3035 
3036 	/*
3037 	 * For non-XMITS, flush any previous writes. This is only
3038 	 * necessary for host bridges that may have a USB keyboard
3039 	 * attached; XMITS does not need the flush.
3040 	 */
3041 	if (!(CHIP_TYPE(iommu_p->iommu_pci_p) == PCI_CHIP_XMITS))
3042 		(void) ldphysio(cfgpa);
3043 
3044 reprog:
3045 	if (mask)
3046 		*tgt_space_p = mask;
3047 
3048 	*tsb_bar_p = tsb_bar_val;
3049 	*iommu_ctl_p = iommu_ctl;
3050 
3051 	*pbm_csr_p = pbm_ctl;	/* re-enable bus arbitration */
3052 	pbm_ctl = *pbm_csr_p;	/* flush all prev writes */
3053 }
3054 
3055 
3056 int
3057 pci_get_portid(dev_info_t *dip)
3058 {
3059 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3060 	    "portid", -1));
3061 }
3062 
3063 /*
3064  * Schizo Safari Performance Events.
3065  */
3066 pci_kev_mask_t
3067 schizo_saf_events[] = {
3068 	{"saf_bus_cycles", 0x1},	{"saf_pause_asserted_cycles", 0x2},
3069 	{"saf_frn_coherent_cmds", 0x3},	{"saf_frn_coherent_hits", 0x4},
3070 	{"saf_my_coherent_cmds", 0x5},	{"saf_my_coherent_hits", 0x6},
3071 	{"saf_frn_io_cmds", 0x7}, 	{"saf_frn_io_hits", 0x8},
3072 	{"merge_buffer", 0x9}, 		{"interrupts", 0xa},
3073 	{"csr_pios", 0xc}, 		{"upa_pios", 0xd},
3074 	{"pcia_pios", 0xe}, 		{"pcib_pios", 0xf},
3075 	{"saf_pause_seen_cycles", 0x11}, 	{"dvma_reads", 0x12},
3076 	{"dvma_writes", 0x13},		{"saf_orq_full_cycles", 0x14},
3077 	{"saf_data_in_cycles", 0x15},	{"saf_data_out_cycles", 0x16},
3078 	{"clear_pic", 0x1f}
3079 };
3080 
3081 
3082 /*
3083  * Schizo PCI Performance Events.
3084  */
3085 pci_kev_mask_t
3086 schizo_pci_events[] = {
3087 	{"dvma_stream_rd", 0x0}, 	{"dvma_stream_wr", 0x1},
3088 	{"dvma_const_rd", 0x2},		{"dvma_const_wr", 0x3},
3089 	{"dvma_stream_buf_mis", 0x4},	{"dvma_cycles", 0x5},
3090 	{"dvma_wd_xfr", 0x6},		{"pio_cycles", 0x7},
3091 	{"dvma_tlb_misses", 0x10},	{"interrupts", 0x11},
3092 	{"saf_inter_nack", 0x12},	{"pio_reads", 0x13},
3093 	{"pio_writes", 0x14},		{"dvma_rd_buf_timeout", 0x15},
3094 	{"dvma_rd_rtry_stc", 0x16},	{"dvma_wr_rtry_stc", 0x17},
3095 	{"dvma_rd_rtry_nonstc", 0x18},	{"dvma_wr_rtry_nonstc", 0x19},
3096 	{"E*_slow_transitions", 0x1a},	{"E*_slow_cycles_per_64", 0x1b},
3097 	{"clear_pic", 0x1f}
3098 };
3099 
3100 
3101 /*
3102  * Create the picN kstats for the pci
3103  * and safari events.
3104  */
3105 void
3106 pci_kstat_init()
3107 {
3108 	pci_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3109 		KM_NOSLEEP);
3110 
3111 	if (pci_name_kstat == NULL) {
3112 		cmn_err(CE_WARN, "pcisch : no space for kstat\n");
3113 	} else {
3114 		pci_name_kstat->pic_no_evs =
3115 			sizeof (schizo_pci_events) / sizeof (pci_kev_mask_t);
3116 		pci_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3117 		pci_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3118 		pci_create_name_kstat("pcis",
3119 			pci_name_kstat, schizo_pci_events);
3120 	}
3121 
3122 	saf_name_kstat = (pci_ksinfo_t *)kmem_alloc(sizeof (pci_ksinfo_t),
3123 		KM_NOSLEEP);
3124 	if (saf_name_kstat == NULL) {
3125 		cmn_err(CE_WARN, "pcisch : no space for kstat\n");
3126 	} else {
3127 		saf_name_kstat->pic_no_evs =
3128 			sizeof (schizo_saf_events) / sizeof (pci_kev_mask_t);
3129 		saf_name_kstat->pic_shift[0] = SCHIZO_SHIFT_PIC0;
3130 		saf_name_kstat->pic_shift[1] = SCHIZO_SHIFT_PIC1;
3131 		pci_create_name_kstat("saf", saf_name_kstat, schizo_saf_events);
3132 	}
3133 }
3134 
3135 void
3136 pci_kstat_fini()
3137 {
3138 	if (pci_name_kstat != NULL) {
3139 		pci_delete_name_kstat(pci_name_kstat);
3140 		kmem_free(pci_name_kstat, sizeof (pci_ksinfo_t));
3141 		pci_name_kstat = NULL;
3142 	}
3143 
3144 	if (saf_name_kstat != NULL) {
3145 		pci_delete_name_kstat(saf_name_kstat);
3146 		kmem_free(saf_name_kstat, sizeof (pci_ksinfo_t));
3147 		saf_name_kstat = NULL;
3148 	}
3149 }
3150 
3151 /*
3152  * Create 'counters' kstat for pci events.
3153  */
3154 void
3155 pci_add_pci_kstat(pci_t *pci_p)
3156 {
3157 	pci_cntr_addr_t *cntr_addr_p = &pci_p->pci_ks_addr;
3158 	uintptr_t regbase = (uintptr_t)pci_p->pci_address[0];
3159 
3160 	cntr_addr_p->pcr_addr = (uint64_t *)
3161 		(regbase + SCHIZO_PERF_PCI_PCR_OFFSET);
3162 	cntr_addr_p->pic_addr = (uint64_t *)
3163 		(regbase + SCHIZO_PERF_PCI_PIC_OFFSET);
3164 
3165 	pci_p->pci_ksp = pci_create_cntr_kstat(pci_p, "pcis",
3166 		NUM_OF_PICS, pci_cntr_kstat_update, cntr_addr_p);
3167 
3168 	if (pci_p->pci_ksp == NULL) {
3169 		cmn_err(CE_WARN, "pcisch : cannot create counter kstat");
3170 	}
3171 }
3172 
3173 void
3174 pci_rem_pci_kstat(pci_t *pci_p)
3175 {
3176 	if (pci_p->pci_ksp != NULL)
3177 		kstat_delete(pci_p->pci_ksp);
3178 	pci_p->pci_ksp = NULL;
3179 }
3180 
3181 void
3182 pci_add_upstream_kstat(pci_t *pci_p)
3183 {
3184 	pci_common_t	*cmn_p = pci_p->pci_common_p;
3185 	pci_cntr_pa_t	*cntr_pa_p = &cmn_p->pci_cmn_uks_pa;
3186 	uint64_t regbase = va_to_pa(pci_p->pci_address[1]);
3187 
3188 	cntr_pa_p->pcr_pa =
3189 		regbase + SCHIZO_PERF_SAF_PCR_OFFSET;
3190 	cntr_pa_p->pic_pa =
3191 		regbase + SCHIZO_PERF_SAF_PIC_OFFSET;
3192 
3193 	cmn_p->pci_common_uksp = pci_create_cntr_kstat(pci_p, "saf",
3194 		NUM_OF_PICS, pci_cntr_kstat_pa_update, cntr_pa_p);
3195 }
3196 
3197 /*
3198  * Extract the driver's binding name to identify which chip
3199  * we're binding to.  Whenever a new bus bridge is created, the driver alias
3200  * entry should be added here to identify the device if needed.  If a device
3201  * isn't added, the identity defaults to PCI_CHIP_UNIDENTIFIED.
3202  */
3203 static uint32_t
3204 pci_identity_init(pci_t *pci_p)
3205 {
3206 	dev_info_t *dip = pci_p->pci_dip;
3207 	char *name = ddi_binding_name(dip);
3208 	uint32_t ver = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3209 		"version#", 0);
3210 
3211 	if (strcmp(name, "pci108e,a801") == 0)
3212 		return (CHIP_ID(PCI_CHIP_TOMATILLO, ver, 0x00));
3213 
3214 	if (strcmp(name, "pci108e,8001") == 0)
3215 		return (CHIP_ID(PCI_CHIP_SCHIZO, ver, 0x00));
3216 
3217 	if (strcmp(name, "pci108e,8002") == 0) {
3218 		uint32_t mod_rev = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3219 			DDI_PROP_DONTPASS, "module-revision#", 0);
3220 		return (CHIP_ID(PCI_CHIP_XMITS, ver, mod_rev));
3221 	}
3222 
3223 	cmn_err(CE_WARN, "%s%d: Unknown PCI Host bridge %s %x\n",
3224 		ddi_driver_name(dip), ddi_get_instance(dip), name, ver);
3225 
3226 	return (PCI_CHIP_UNIDENTIFIED);
3227 }
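
/*
 * Example: a Tomatillo leaf binds as "pci108e,a801", so with a
 * "version#" property of 4 the identity returned above would be
 * CHIP_ID(PCI_CHIP_TOMATILLO, 4, 0x00).
 */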
3228 
3229 /*
3230  * Setup a physical pointer to one leaf config space area. This
3231  * is used in several places in order to do a dummy read which
3232  * guarantees the nexus (and not a bus master) has gained control
3233  * of the bus.
3234  */
3235 static void
3236 pci_setup_cfgpa(pci_t *pci_p)
3237 {
3238 	dev_info_t *dip = pci_p->pci_dip;
3239 	dev_info_t *cdip;
3240 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3241 	uint64_t cfgpa = pci_get_cfg_pabase(pci_p);
3242 	uint32_t *reg_p;
3243 	int reg_len;
3244 
3245 	for (cdip = ddi_get_child(dip); cdip != NULL;
3246 	    cdip = ddi_get_next_sibling(cdip)) {
3247 		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
3248 		    "reg", (caddr_t)&reg_p, &reg_len) != DDI_PROP_SUCCESS)
3249 			continue;
3250 		cfgpa += (*reg_p) & (PCI_CONF_ADDR_MASK ^ PCI_REG_REG_M);
3251 		kmem_free(reg_p, reg_len);
3252 		break;
3253 	}
3254 	pbm_p->pbm_anychild_cfgpa = cfgpa;
3255 }
3256 
3257 void
3258 pci_post_init_child(pci_t *pci_p, dev_info_t *child)
3259 {
3260 	volatile uint64_t *ctrl_reg_p;
3261 	pbm_t *pbm_p = pci_p->pci_pbm_p;
3262 
3263 	pci_setup_cfgpa(pci_p);
3264 
3265 	/*
3266 	 * This is a hack for the skyhawk/casinni combination to address
3267 	 * hardware problems between the request and grant signals which
3268 	 * causes a bus hang.  One workaround, which is applied here,
3269 	 * is to disable bus parking if the child contains the property
3270 	 * pci-req-removal.  Note that if the bus is quiesced we must mask
3271 	 * off the parking bit in the saved control registers, since the
3272 	 * quiesce operation temporarily turns off PCI bus parking.
3273 	 */
3274 	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
3275 		"pci-req-removal") == 1) {
3276 
3277 		if (pbm_p->pbm_quiesce_count > 0) {
3278 			pbm_p->pbm_saved_ctrl_reg &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3279 		} else {
3280 			ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3281 			*ctrl_reg_p &= ~SCHIZO_PCI_CTRL_ARB_PARK;
3282 		}
3283 	}
3284 
3285 	if (CHIP_TYPE(pci_p) == PCI_CHIP_XMITS) {
3286 		if (*pbm_p->pbm_ctrl_reg & XMITS_PCI_CTRL_X_MODE) {
3287 			int value;
3288 
3289 			/*
3290 			 * Due to a XMITS bug, we need to set the outstanding
3291 			 * split transactions to 1 for all PCI-X functions
3292 			 * behind the leaf.
3293 			 */
3294 			value = (xmits_max_transactions << 4) |
3295 			    (xmits_max_read_bytes << 2);
3296 
3297 			DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
3298 			    "Workaround: value = %x\n", value);
3299 
3300 			pcix_set_cmd_reg(child, value);
3301 
3302 			(void) ndi_prop_update_int(DDI_DEV_T_NONE,
3303 			    child, "pcix-update-cmd-reg", value);
3304 		}
3305 	}
3306 }
3307 
3308 void
3309 pci_post_uninit_child(pci_t *pci_p)
3310 {
3311 	pci_setup_cfgpa(pci_p);
3312 }
3313 
3314 static int
3315 pci_tom_nbintr_op(pci_t *pci_p, uint32_t inum, intrfunc f, caddr_t arg,
3316     int flag)
3317 {
3318 	uint32_t ino = pci_p->pci_inos[inum];
3319 	uint32_t mondo = IB_INO_TO_NBMONDO(pci_p->pci_ib_p, ino);
3320 	int ret = DDI_SUCCESS;
3321 
3322 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo); /* no op on tom */
3323 
3324 	switch (flag) {
3325 	case PCI_OBJ_INTR_ADD:
3326 		VERIFY(add_ivintr(mondo, pci_pil[inum], f, arg, NULL) == 0);
3327 		break;
3328 	case PCI_OBJ_INTR_REMOVE:
3329 		rem_ivintr(mondo, NULL);
3330 		break;
3331 	default:
3332 		ret = DDI_FAILURE;
3333 		break;
3334 	}
3335 
3336 	return (ret);
3337 }
3338 
3339 int
3340 pci_ecc_add_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3341 {
3342 	uint32_t mondo;
3343 	int	r;
3344 
3345 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3346 	    pci_p->pci_inos[inum]);
3347 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3348 
3349 	VERIFY(add_ivintr(mondo, pci_pil[inum], ecc_intr,
3350 	    (caddr_t)eii_p, NULL) == 0);
3351 
3352 	if (CHIP_TYPE(pci_p) != PCI_CHIP_TOMATILLO)
3353 		return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD,
3354 		    DDI_SUCCESS));
3355 
3356 	r = pci_tom_nbintr_op(pci_p, inum, ecc_intr,
3357 	    (caddr_t)eii_p, PCI_OBJ_INTR_ADD);
3358 	return (PCI_ATTACH_RETCODE(PCI_ECC_OBJ, PCI_OBJ_INTR_ADD, r));
3359 }
3360 
3361 void
3362 pci_ecc_rem_intr(pci_t *pci_p, int inum, ecc_intr_info_t *eii_p)
3363 {
3364 	uint32_t mondo;
3365 
3366 	mondo = ((pci_p->pci_cb_p->cb_ign << PCI_INO_BITS) |
3367 	    pci_p->pci_inos[inum]);
3368 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3369 
3370 	rem_ivintr(mondo, NULL);
3371 
3372 	if (CHIP_TYPE(pci_p) == PCI_CHIP_TOMATILLO)
3373 		pci_tom_nbintr_op(pci_p, inum, ecc_intr,
3374 			(caddr_t)eii_p, PCI_OBJ_INTR_REMOVE);
3375 }
3376 
3377 static uint_t
3378 pci_pbm_cdma_intr(caddr_t a)
3379 {
3380 	pbm_t *pbm_p = (pbm_t *)a;
3381 	pbm_p->pbm_cdma_flag = PBM_CDMA_DONE;
3382 #ifdef PBM_CDMA_DEBUG
3383 	pbm_p->pbm_cdma_intr_cnt++;
3384 #endif /* PBM_CDMA_DEBUG */
3385 	return (DDI_INTR_CLAIMED);
3386 }
3387 
3388 int
3389 pci_pbm_add_intr(pci_t *pci_p)
3390 {
3391 	uint32_t mondo;
3392 
3393 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3394 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3395 
3396 	VERIFY(add_ivintr(mondo, pci_pil[CBNINTR_CDMA],
3397 	    pci_pbm_cdma_intr, (caddr_t)pci_p->pci_pbm_p, NULL) == 0);
3398 
3399 	return (DDI_SUCCESS);
3400 }
3401 
3402 void
3403 pci_pbm_rem_intr(pci_t *pci_p)
3404 {
3405 	ib_t		*ib_p = pci_p->pci_ib_p;
3406 	uint32_t	mondo;
3407 
3408 	mondo = IB_INO_TO_MONDO(pci_p->pci_ib_p, pci_p->pci_inos[CBNINTR_CDMA]);
3409 	mondo = CB_MONDO_TO_XMONDO(pci_p->pci_cb_p, mondo);
3410 
3411 	ib_intr_disable(ib_p, pci_p->pci_inos[CBNINTR_CDMA], IB_INTR_NOWAIT);
3412 	rem_ivintr(mondo, NULL);
3413 }
3414 
3415 void
3416 pci_pbm_suspend(pci_t *pci_p)
3417 {
3418 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3419 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3420 
3421 	/* Save CDMA interrupt state */
3422 	pbm_p->pbm_cdma_imr_save = *ib_intr_map_reg_addr(pci_p->pci_ib_p, ino);
3423 }
3424 
3425 void
3426 pci_pbm_resume(pci_t *pci_p)
3427 {
3428 	pbm_t		*pbm_p = pci_p->pci_pbm_p;
3429 	ib_ino_t	ino = pci_p->pci_inos[CBNINTR_CDMA];
3430 
3431 	/* Restore CDMA interrupt state */
3432 	*ib_intr_map_reg_addr(pci_p->pci_ib_p, ino) = pbm_p->pbm_cdma_imr_save;
3433 }
3434 
3435 /*
3436  * pci_bus_quiesce
3437  *
3438  * This function is called as the corresponding control ops routine
3439  * to a DDI_CTLOPS_QUIESCE command.  Its mission is to halt all DMA
3440  * activity on the bus by disabling arbitration/parking.
3441  */
3442 int
3443 pci_bus_quiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3444 {
3445 	volatile uint64_t *ctrl_reg_p;
3446 	volatile uint64_t ctrl_reg;
3447 	pbm_t *pbm_p;
3448 
3449 	pbm_p = pci_p->pci_pbm_p;
3450 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3451 
3452 	if (pbm_p->pbm_quiesce_count++ == 0) {
3453 
3454 		DEBUG0(DBG_PWR, dip, "quiescing bus\n");
3455 
3456 		ctrl_reg = *ctrl_reg_p;
3457 		pbm_p->pbm_saved_ctrl_reg = ctrl_reg;
3458 		ctrl_reg &= ~(SCHIZO_PCI_CTRL_ARB_EN_MASK |
3459 		    SCHIZO_PCI_CTRL_ARB_PARK);
3460 		*ctrl_reg_p = ctrl_reg;
3461 #ifdef	DEBUG
3462 		ctrl_reg = *ctrl_reg_p;
3463 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3464 		    SCHIZO_PCI_CTRL_ARB_PARK)) != 0)
3465 			panic("ctrl_reg didn't quiesce: 0x%lx\n", ctrl_reg);
3466 #endif
3467 		if (pbm_p->pbm_anychild_cfgpa)
3468 			(void) ldphysio(pbm_p->pbm_anychild_cfgpa);
3469 	}
3470 
3471 	return (DDI_SUCCESS);
3472 }
3473 
3474 /*
3475  * pci_bus_unquiesce
3476  *
3477  * This function is called as the corresponding control ops routine
3478  * to a DDI_CTLOPS_UNQUIESCE command.  Its mission is to resume paused
3479  * DMA activity on the bus by restoring the saved arbitration/parking state.
3480  */
3481 int
3482 pci_bus_unquiesce(pci_t *pci_p, dev_info_t *dip, void *result)
3483 {
3484 	volatile uint64_t *ctrl_reg_p;
3485 	pbm_t *pbm_p;
3486 #ifdef	DEBUG
3487 	volatile uint64_t ctrl_reg;
3488 #endif
3489 
3490 	pbm_p = pci_p->pci_pbm_p;
3491 	ctrl_reg_p = pbm_p->pbm_ctrl_reg;
3492 
3493 	ASSERT(pbm_p->pbm_quiesce_count > 0);
3494 	if (--pbm_p->pbm_quiesce_count == 0) {
3495 		*ctrl_reg_p = pbm_p->pbm_saved_ctrl_reg;
3496 #ifdef	DEBUG
3497 		ctrl_reg = *ctrl_reg_p;
3498 		if ((ctrl_reg & (SCHIZO_PCI_CTRL_ARB_EN_MASK |
3499 		    SCHIZO_PCI_CTRL_ARB_PARK)) == 0)
3500 			panic("ctrl_reg didn't unquiesce: 0x%lx\n", ctrl_reg);
3501 #endif
3502 	}
3503 
3504 	return (DDI_SUCCESS);
3505 }
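
/*
 * Usage sketch (illustrative, not taken from an actual caller): these
 * routines are reached through the bus_ctl(9E) entry point rather than
 * invoked directly, along the lines of:
 *
 *	if (ddi_ctlops(dip, rdip, DDI_CTLOPS_QUIESCE, NULL, NULL) ==
 *	    DDI_SUCCESS) {
 *		... work that requires a quiet bus ...
 *		(void) ddi_ctlops(dip, rdip, DDI_CTLOPS_UNQUIESCE,
 *		    NULL, NULL);
 *	}
 */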
3506 
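/*
 * tm_vmem_free
 *
 * Tomatillo IOMMU TLB workaround: rather than freeing a DVMA range
 * immediately, queue the unbind request.  Once the number of queued
 * pages exceeds iommu_mtlb_maxpgs, read the TLB tags through the
 * diagnostic access registers and free every queued range whose VPNs
 * no longer appear in the TLB; ranges still resident stay queued for
 * a later pass.
 */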
3507 static void
3508 tm_vmem_free(ddi_dma_impl_t *mp, iommu_t *iommu_p, dvma_addr_t dvma_pg,
3509     int npages)
3510 {
3511 	uint32_t dur_max, dur_base;
3512 	dvma_unbind_req_t *req_p, *req_max_p;
3513 	dvma_unbind_req_t *req_base_p = iommu_p->iommu_mtlb_req_p;
3514 	uint32_t tlb_vpn[IOMMU_TLB_ENTRIES];
3515 	caddr_t reg_base;
3516 	volatile uint64_t *tag_p;
3517 	int i, preserv_count = 0;
3518 
3519 	mutex_enter(&iommu_p->iommu_mtlb_lock);
3520 
3521 	iommu_p->iommu_mtlb_npgs += npages;
3522 	req_max_p = req_base_p + iommu_p->iommu_mtlb_nreq++;
3523 	req_max_p->dur_npg = npages;
3524 	req_max_p->dur_base = dvma_pg;
3525 	req_max_p->dur_flags = mp->dmai_flags & DMAI_FLAGS_VMEMCACHE;
3526 
3528 	if (iommu_p->iommu_mtlb_npgs <= iommu_p->iommu_mtlb_maxpgs)
3529 		goto done;
3530 
3531 	/* read TLB */
3532 	reg_base = iommu_p->iommu_pci_p->pci_address[0];
3533 	tag_p = (volatile uint64_t *)
3534 	    (reg_base + COMMON_IOMMU_TLB_TAG_DIAG_ACC_OFFSET);
3535 
3536 	for (i = 0; i < IOMMU_TLB_ENTRIES; i++)
3537 		tlb_vpn[i] = tag_p[i] & SCHIZO_VPN_MASK;
3538 
3539 	/* for each request search the TLB for a matching address */
3540 	/* for each request, search the TLB for a matching address */
3541 		dur_base = req_p->dur_base;
3542 		dur_max = req_p->dur_base + req_p->dur_npg;
3543 
3544 		for (i = 0; i < IOMMU_TLB_ENTRIES; i++) {
3545 			uint_t vpn = tlb_vpn[i];
3546 			if (vpn >= dur_base && vpn < dur_max)
3547 				break;
3548 		}
3549 		if (i >= IOMMU_TLB_ENTRIES) {
3550 			pci_vmem_do_free(iommu_p,
3551 			    (void *)IOMMU_PTOB(req_p->dur_base),
3552 			    req_p->dur_npg, req_p->dur_flags);
3553 			iommu_p->iommu_mtlb_npgs -= req_p->dur_npg;
3554 			continue;
3555 		}
3556 		/* still in the TLB: compact the surviving request toward the front */
3557 		if ((req_p - req_base_p) != preserv_count)
3558 			*(req_base_p + preserv_count) = *req_p;
3559 		preserv_count++;
3560 	}
3561 
3562 	iommu_p->iommu_mtlb_nreq = preserv_count;
3563 done:
3564 	mutex_exit(&iommu_p->iommu_mtlb_lock);
3565 }
3566 
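/*
 * pci_vmem_free
 *
 * Free a DVMA range, deferring through tm_vmem_free() when the
 * Tomatillo multi-TLB garbage collection workaround (tm_mtlb_gc) is
 * enabled.
 */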
3567 void
3568 pci_vmem_free(iommu_t *iommu_p, ddi_dma_impl_t *mp, void *dvma_addr,
3569     size_t npages)
3570 {
3571 	if (tm_mtlb_gc)
3572 		tm_vmem_free(mp, iommu_p,
3573 		    (dvma_addr_t)IOMMU_BTOP((dvma_addr_t)dvma_addr), npages);
3574 	else
3575 		pci_vmem_do_free(iommu_p, dvma_addr, npages,
3576 		    (mp->dmai_flags & DMAI_FLAGS_VMEMCACHE));
3577 }
3578