xref: /titanic_52/usr/src/uts/sun4u/opl/io/pcicmu/pcicmu.c (revision c1ecd8b9404ee0d96d93f02e82c441b9bb149a3d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * OPL CMU-CH PCI nexus driver.
30  *
31  */
32 
33 #include <sys/types.h>
34 #include <sys/sysmacros.h>
35 #include <sys/systm.h>
36 #include <sys/intreg.h>
37 #include <sys/intr.h>
38 #include <sys/machsystm.h>
39 #include <sys/conf.h>
40 #include <sys/stat.h>
41 #include <sys/kmem.h>
42 #include <sys/async.h>
43 #include <sys/ivintr.h>
44 #include <sys/sunddi.h>
45 #include <sys/sunndi.h>
46 #include <sys/ndifm.h>
47 #include <sys/ontrap.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/ddi_subrdefs.h>
50 #include <sys/epm.h>
51 #include <sys/spl.h>
52 #include <sys/fm/util.h>
53 #include <sys/fm/util.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/io/pci.h>
56 #include <sys/fm/io/sun4upci.h>
57 #include <sys/pcicmu/pcicmu.h>
58 
59 #include <sys/cmn_err.h>
60 #include <sys/time.h>
61 #include <sys/pci.h>
62 #include <sys/modctl.h>
63 #include <sys/open.h>
64 #include <sys/errno.h>
65 #include <sys/file.h>
66 
67 
/*
 * Tolerated duration of spurious-interrupt activity; consumed by the
 * interrupt code outside this file.  The value together with the
 * "One minute" annotation suggests microseconds -- TODO confirm at
 * the use site.
 */
uint32_t pcmu_spurintr_duration = 60000000; /* One minute */

/*
 * The variable controls the default setting of the command register
 * for pci devices.  See pcmu_init_child() for details.
 *
 * This flag also controls the setting of bits in the bridge control
 * register of pci to pci bridges.  See pcmu_init_child() for details.
 */
ushort_t pcmu_command_default = PCI_COMM_SERR_ENABLE |
				PCI_COMM_WAIT_CYC_ENAB |
				PCI_COMM_PARITY_DETECT |
				PCI_COMM_ME |
				PCI_COMM_MAE |
				PCI_COMM_IO;
/*
 * The following driver parameters are defined as variables to allow
 * patching for debugging and tuning.  Flags that can be set on a per
 * PBM basis are bit fields where the PBM device instance number maps
 * to the bit position.
 */
#ifdef DEBUG
uint64_t pcmu_debug_flags = 0;		/* PCMU_DBG_* trace mask */
#endif
uint_t ecc_error_intr_enable = 1;	/* nonzero: ECC error intr enabled */

uint_t pcmu_ecc_afsr_retries = 100;	/* XXX - what's a good value? */

uint_t pcmu_intr_retry_intv = 5;	/* for interrupt retry reg */
uint_t pcmu_panic_on_fatal_errors = 1;	/* should be 1 at beta */

hrtime_t pcmu_intrpend_timeout = 5ll * NANOSEC;	/* 5 seconds in nanoseconds */

/* error-trigger physical address; presumably 0 = disabled, TODO confirm */
uint64_t pcmu_errtrig_pa = 0x0;


/*
 * The following value is the number of consecutive unclaimed interrupts that
 * will be tolerated for a particular ino_p before the interrupt is deemed to
 * be jabbering and is blocked.
 */
uint_t pcmu_unclaimed_intr_max = 20;

/*
 * function prototypes for dev ops routines:
 */
static int pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp);
static int pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp);
static int pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
						cred_t *credp, int *rvalp);
static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp);
static int pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args);
static int pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args,
    void *result);

/* register-mapping and PBM error helpers */
static int map_pcmu_registers(pcmu_t *, dev_info_t *);
static void unmap_pcmu_registers(pcmu_t *);
static void pcmu_pbm_clear_error(pcmu_pbm_t *);

/* bus ops entry points */
static int pcmu_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
    void *, void *);
static int pcmu_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
    off_t, off_t, caddr_t *);
static int pcmu_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
    ddi_intr_handle_impl_t *, void *);

/* CMU-CH object construction, teardown and suspend/resume helpers */
static uint32_t pcmu_identity_init(pcmu_t *pcmu_p);
static int pcmu_intr_setup(pcmu_t *pcmu_p);
static void pcmu_pbm_errstate_get(pcmu_t *pcmu_p,
    pcmu_pbm_errstate_t *pbm_err_p);
static int pcmu_obj_setup(pcmu_t *pcmu_p);
static void pcmu_obj_destroy(pcmu_t *pcmu_p);
static void pcmu_obj_resume(pcmu_t *pcmu_p);
static void pcmu_obj_suspend(pcmu_t *pcmu_p);

/* U2U interrupt-translation (IT) helpers */
static void u2u_ittrans_init(pcmu_t *, u2u_ittrans_data_t **);
static void u2u_ittrans_resume(u2u_ittrans_data_t **);
static void u2u_ittrans_uninit(u2u_ittrans_data_t *);

/* name-kstat bookkeeping shared by all driver instances */
static pcmu_ksinfo_t	*pcmu_name_kstat;
153 
/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pcmu_bus_ops = {
	BUSO_REV,
	pcmu_map,			/* (*bus_map)(); */
	0,				/* (*bus_get_intrspec)(); */
	0,				/* (*bus_add_intrspec)(); */
	0,				/* (*bus_remove_intrspec)(); */
	i_ddi_map_fault,		/* (*bus_map_fault)(); */
	0,				/* (*bus_dma_map)(); */
	0,				/* (*bus_dma_allochdl)(); */
	0,				/* (*bus_dma_freehdl)(); */
	0,				/* (*bus_dma_bindhdl)(); */
	0,				/* (*bus_dma_unbindhdl)(); */
	0,				/* (*bus_dma_flush)(); */
	0,				/* (*bus_dma_win)(); */
	0,				/* (*bus_dma_ctl)(); */
	pcmu_ctlops,			/* (*bus_ctl)(); */
	ddi_bus_prop_op,		/* (*bus_prop_op)(); */
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	NULL,				/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	NULL,				/* (*bus_fm_access_enter)(); */
	NULL,				/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pcmu_intr_ops			/* (*bus_intr_op)(); */
};
188 
/*
 * Character device ops backing the "devctl" minor node created in
 * pcmu_attach(); only open/close/ioctl/prop_op are implemented.
 */
struct cb_ops pcmu_cb_ops = {
	pcmu_open,			/* open */
	pcmu_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	pcmu_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	pcmu_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};
209 
static struct dev_ops pcmu_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	pcmu_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	0,				/* devo_probe */
	pcmu_attach,			/* devo_attach */
	pcmu_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&pcmu_cb_ops,			/* devo_cb_ops */
	&pcmu_bus_ops,			/* devo_bus_ops */
	0				/* devo_power */
};
223 
/*
 * module definitions:
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
	"OPL CMU-CH PCI Nexus driver %I%",	/* Name of module. */
	&pcmu_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pcmu_state;			/* per-pbm soft state pointer */
kmutex_t pcmu_global_mutex;		/* attach/detach common struct lock */
errorq_t *pcmu_ecc_queue = NULL;	/* per-system ecc handling queue */

/* children's config-space save/restore, implemented elsewhere in the driver */
extern void pcmu_child_cfg_save(dev_info_t *dip);
extern void pcmu_child_cfg_restore(dev_info_t *dip);
248 
249 int
250 _init(void)
251 {
252 	int e;
253 
254 	/*
255 	 * Initialize per-pci bus soft state pointer.
256 	 */
257 	e = ddi_soft_state_init(&per_pcmu_state, sizeof (pcmu_t), 1);
258 	if (e != 0)
259 		return (e);
260 
261 	/*
262 	 * Initialize global mutexes.
263 	 */
264 	mutex_init(&pcmu_global_mutex, NULL, MUTEX_DRIVER, NULL);
265 
266 	/*
267 	 * Create the performance kstats.
268 	 */
269 	pcmu_kstat_init();
270 
271 	/*
272 	 * Install the module.
273 	 */
274 	e = mod_install(&modlinkage);
275 	if (e != 0) {
276 		ddi_soft_state_fini(&per_pcmu_state);
277 		mutex_destroy(&pcmu_global_mutex);
278 	}
279 	return (e);
280 }
281 
282 int
283 _fini(void)
284 {
285 	int e;
286 
287 	/*
288 	 * Remove the module.
289 	 */
290 	e = mod_remove(&modlinkage);
291 	if (e != 0) {
292 		return (e);
293 	}
294 
295 	/*
296 	 * Destroy pcmu_ecc_queue, and set it to NULL.
297 	 */
298 	if (pcmu_ecc_queue) {
299 		errorq_destroy(pcmu_ecc_queue);
300 		pcmu_ecc_queue = NULL;
301 	}
302 
303 	/*
304 	 * Destroy the performance kstats.
305 	 */
306 	pcmu_kstat_fini();
307 
308 	/*
309 	 * Free the per-pci and per-CMU-CH soft state info and destroy
310 	 * mutex for per-CMU-CH soft state.
311 	 */
312 	ddi_soft_state_fini(&per_pcmu_state);
313 	mutex_destroy(&pcmu_global_mutex);
314 	return (e);
315 }
316 
317 int
318 _info(struct modinfo *modinfop)
319 {
320 	return (mod_info(&modlinkage, modinfop));
321 }
322 
323 /*ARGSUSED*/
324 static int
325 pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
326 {
327 	int	instance = getminor((dev_t)arg) >> 8;
328 	pcmu_t	*pcmu_p = get_pcmu_soft_state(instance);
329 
330 	switch (infocmd) {
331 	case DDI_INFO_DEVT2INSTANCE:
332 		*result = (void *)(uintptr_t)instance;
333 		return (DDI_SUCCESS);
334 
335 	case DDI_INFO_DEVT2DEVINFO:
336 		if (pcmu_p == NULL)
337 			return (DDI_FAILURE);
338 		*result = (void *)pcmu_p->pcmu_dip;
339 		return (DDI_SUCCESS);
340 
341 	default:
342 		return (DDI_FAILURE);
343 	}
344 }
345 
346 
347 /* device driver entry points */
/*
 * attach entry point:
 *
 * DDI_ATTACH: allocate the per-instance soft state, read the node's
 * properties, map the CMU-CH registers, build the nexus objects and
 * create the "devctl" minor node.
 *
 * DDI_RESUME: reprogram the CMU-CH control registers and restore the
 * children's config space after a suspend.
 */
static int
pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pcmu_t *pcmu_p;
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pcmu_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pcmu_softstate;
		}
		pcmu_p = get_pcmu_soft_state(instance);
		pcmu_p->pcmu_dip = dip;
		mutex_init(&pcmu_p->pcmu_mutex, NULL, MUTEX_DRIVER, NULL);
		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
		pcmu_p->pcmu_open_count = 0;

		/*
		 * Get key properties of the pci bridge node.
		 */
		if (get_pcmu_properties(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_pcmu_prop;
		}

		/*
		 * Map in the registers.
		 */
		if (map_pcmu_registers(pcmu_p, dip) == DDI_FAILURE) {
			goto err_bad_reg_prop;
		}
		if (pcmu_obj_setup(pcmu_p) != DDI_SUCCESS) {
			goto err_bad_objs;
		}

		/* Minor number carries the instance in its upper bits. */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    (uint_t)instance<<8 | 0xff,
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		/*
		 * Due to unresolved hardware issues, disable PCIPM until
		 * the problem is fully understood.
		 *
		 * pcmu_pwr_setup(pcmu_p, dip);
		 */

		ddi_report_dev(dip);

		pcmu_p->pcmu_state = PCMU_ATTACHED;
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "attach success\n");
		break;

		/*
		 * Error exits: unwind in reverse order of setup.
		 * NOTE(review): a pcmu_obj_setup() failure jumps to
		 * err_bad_objs, which removes a "devctl" minor node that
		 * was never created, and no error path tears down the
		 * objects built by pcmu_obj_setup() -- verify intent.
		 */
err_bad_objs:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		unmap_pcmu_registers(pcmu_p);
err_bad_reg_prop:
		free_pcmu_properties(pcmu_p);
err_bad_pcmu_prop:
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);
err_bad_pcmu_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the CMU-CH control registers
		 * are configured properly.
		 */
		pcmu_p = get_pcmu_soft_state(instance);
		mutex_enter(&pcmu_p->pcmu_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pcmu_p->pcmu_state != PCMU_SUSPENDED) {
			PCMU_DBG0(PCMU_DBG_ATTACH, dip,
			    "instance NOT suspended\n");
			mutex_exit(&pcmu_p->pcmu_mutex);
			return (DDI_FAILURE);
		}
		pcmu_obj_resume(pcmu_p);
		pcmu_p->pcmu_state = PCMU_ATTACHED;

		pcmu_child_cfg_restore(dip);

		mutex_exit(&pcmu_p->pcmu_mutex);
		break;

	default:
		PCMU_DBG0(PCMU_DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
457 
/*
 * detach entry point:
 *
 * DDI_DETACH: tear down the nexus objects, release registers, free
 * the soft state, and remove the "interrupt-priorities" property if
 * this driver created it.
 *
 * DDI_SUSPEND: save children's config space and quiesce the objects.
 */
static int
pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pcmu_t *pcmu_p = get_pcmu_soft_state(instance);
	int len;

	/*
	 * Make sure we are currently attached.
	 * (State is examined before pcmu_mutex is taken --
	 * NOTE(review): confirm concurrent detaches are impossible.)
	 */
	if (pcmu_p->pcmu_state != PCMU_ATTACHED) {
		PCMU_DBG0(PCMU_DBG_ATTACH, dip,
		    "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pcmu_p->pcmu_mutex);

	switch (cmd) {
	case DDI_DETACH:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "DDI_DETACH\n");
		pcmu_obj_destroy(pcmu_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pcmu_properties(pcmu_p);
		unmap_pcmu_registers(pcmu_p);
		mutex_exit(&pcmu_p->pcmu_mutex);
		mutex_destroy(&pcmu_p->pcmu_mutex);
		free_pcmu_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		if (ddi_getproplen(DDI_DEV_T_ANY, dip,
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    "interrupt-priorities", &len) == DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* Save children's config space, then suspend the objects. */
		pcmu_child_cfg_save(dip);
		pcmu_obj_suspend(pcmu_p);
		pcmu_p->pcmu_state = PCMU_SUSPENDED;

		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_SUCCESS);

	default:
		PCMU_DBG0(PCMU_DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pcmu_p->pcmu_mutex);
		return (DDI_FAILURE);
	}
}
517 
518 /* ARGSUSED3 */
519 static int
520 pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp)
521 {
522 	pcmu_t *pcmu_p;
523 
524 	if (otyp != OTYP_CHR) {
525 		return (EINVAL);
526 	}
527 
528 	/*
529 	 * Get the soft state structure for the device.
530 	 */
531 	pcmu_p = DEV_TO_SOFTSTATE(*devp);
532 	if (pcmu_p == NULL) {
533 		return (ENXIO);
534 	}
535 
536 	/*
537 	 * Handle the open by tracking the device state.
538 	 */
539 	PCMU_DBG2(PCMU_DBG_OPEN, pcmu_p->pcmu_dip,
540 	    "devp=%x: flags=%x\n", devp, flags);
541 	mutex_enter(&pcmu_p->pcmu_mutex);
542 	if (flags & FEXCL) {
543 		if (pcmu_p->pcmu_soft_state != PCMU_SOFT_STATE_CLOSED) {
544 			mutex_exit(&pcmu_p->pcmu_mutex);
545 			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
546 			return (EBUSY);
547 		}
548 		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN_EXCL;
549 	} else {
550 		if (pcmu_p->pcmu_soft_state == PCMU_SOFT_STATE_OPEN_EXCL) {
551 			mutex_exit(&pcmu_p->pcmu_mutex);
552 			PCMU_DBG0(PCMU_DBG_OPEN, pcmu_p->pcmu_dip, "busy\n");
553 			return (EBUSY);
554 		}
555 		pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_OPEN;
556 	}
557 	pcmu_p->pcmu_open_count++;
558 	mutex_exit(&pcmu_p->pcmu_mutex);
559 	return (0);
560 }
561 
562 
563 /* ARGSUSED */
564 static int
565 pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp)
566 {
567 	pcmu_t *pcmu_p;
568 
569 	if (otyp != OTYP_CHR) {
570 		return (EINVAL);
571 	}
572 
573 	pcmu_p = DEV_TO_SOFTSTATE(dev);
574 	if (pcmu_p == NULL) {
575 		return (ENXIO);
576 	}
577 
578 	PCMU_DBG2(PCMU_DBG_CLOSE, pcmu_p->pcmu_dip,
579 	    "dev=%x: flags=%x\n", dev, flags);
580 	mutex_enter(&pcmu_p->pcmu_mutex);
581 	pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
582 	pcmu_p->pcmu_open_count = 0;
583 	mutex_exit(&pcmu_p->pcmu_mutex);
584 	return (0);
585 }
586 
/*
 * ioctl entry point for the "devctl" minor node.
 *
 * Generic device-state operations are forwarded to ndi_devctl_ioctl();
 * quiesce/unquiesce manipulate the NDI bus state, and the reset
 * operations are not supported on this nexus.
 */
/* ARGSUSED */
static int
pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	pcmu_t *pcmu_p;
	dev_info_t *dip;
	struct devctl_iocdata *dcp;
	uint_t bus_state;
	int rv = 0;

	pcmu_p = DEV_TO_SOFTSTATE(dev);
	if (pcmu_p == NULL) {
		return (ENXIO);
	}

	dip = pcmu_p->pcmu_dip;
	PCMU_DBG2(PCMU_DBG_IOCTL, dip, "dev=%x: cmd=%x\n", dev, cmd);

	/*
	 * We can use the generic implementation for these ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	}

	/*
	 * read devctl ioctl data; the handle is freed at the bottom
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {

	case DEVCTL_DEVICE_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_DEVICE_RESET\n");
		rv = ENOTSUP;
		break;


	case DEVCTL_BUS_QUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_QUIESCE\n");
		/* nothing to do if the bus is already quiesced */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_QUIESCED) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;

	case DEVCTL_BUS_UNQUIESCE:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_UNQUIESCE\n");
		/* nothing to do if the bus is already active */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
			if (bus_state == BUS_ACTIVE) {
				break;
			}
		}
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;

	case DEVCTL_BUS_RESET:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESET\n");
		rv = ENOTSUP;
		break;

	case DEVCTL_BUS_RESETALL:
		PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESETALL\n");
		rv = ENOTSUP;
		break;

	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}
668 
669 static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
670     int flags, char *name, caddr_t valuep, int *lengthp)
671 {
672 	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
673 }
674 /* bus driver entry points */
675 
/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
static int
pcmu_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	PCMU_DBG2(PCMU_DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* user (mmap-style) mappings are not supported by this nexus */
	if (mp->map_flags & DDI_MF_USER_MAPPING) {
		return (DDI_ME_UNIMPLEMENTED);
	}

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		/*
		 * Look up the child's "reg" property; ddi_getlongprop()
		 * allocates the buffer, which is freed at 'done' below.
		 */
		r_no = mp->map_obj.rnumber;
		PCMU_DBG1(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS) {
			return (DDI_ME_RNUMBER_RANGE);
		}

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		/* advance rp to the requested entry; undone at 'done' */
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	PCMU_DBG0(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pcmu_reloc_reg(dip, rdip, pcmu_p, rp)) {
		goto done;
	}

	/* adjust regspec according to mapping request */
	if (len) {
		rp->pci_size_low = (uint_t)len;
	}
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pcmu_xlate_reg(pcmu_p, rp, &p_regspec)) {
		goto done;
	}

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

done:
	if (mp->map_type == DDI_MT_RNUMBER) {
		/* rebase to the start of the "reg" buffer before freeing */
		kmem_free(rp - r_no, reglen);
	}
	return (rval);
}
755 
#ifdef  DEBUG
int	pcmu_peekfault_cnt = 0;	/* failed protected peeks (see pcmu_do_peek) */
int	pcmu_pokefault_cnt = 0;	/* failed protected pokes (see pcmu_do_poke) */
#endif  /* DEBUG */
760 
/*
 * Perform a fault-protected write ("poke") to PCI space.
 *
 * The write runs under on_trap() protection with the trap trampoline
 * temporarily pointed at poke_fault; the PBM error state is then
 * cleared and the trap flags examined to decide whether the poke
 * succeeded.  pcbm_pokeflt_mutex appears to serialize pokes and the
 * publication of the on-trap data for the fault handler.
 */
static int
pcmu_do_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pcbm_p->pcbm_pokeflt_mutex);
	pcbm_p->pcbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else {
		err = DDI_FAILURE;
	}

	/*
	 * Read the async fault register for the PBM to see if it saw
	 * a master-abort.
	 */
	pcmu_pbm_clear_error(pcbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS) {
		/* the protected access trapped: the poke failed */
		err = DDI_FAILURE;
	}

	/* Take down protected environment. */
	no_trap();

	pcbm_p->pcbm_ontrap_data = NULL;
	mutex_exit(&pcbm_p->pcbm_pokeflt_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pcmu_pokefault_cnt++;
#endif
	return (err);
}
805 
806 
807 static int
808 pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
809 {
810 	return (pcmu_do_poke(pcmu_p, in_args));
811 }
812 
/*
 * Perform a fault-protected read ("peek") from PCI space.
 *
 * Runs do_peek() under on_trap() protection with the trampoline
 * pointed at peek_fault, so a faulting access is reported as
 * DDI_FAILURE instead of taking the system down.
 */
/* ARGSUSED */
static int
pcmu_do_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/* dismantle the protected environment */
	no_trap();

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pcmu_peekfault_cnt++;
#endif
	return (err);
}
838 
839 
840 static int
841 pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args, void *result)
842 {
843 	result = (void *)in_args->host_addr;
844 	return (pcmu_do_peek(pcmu_p, in_args));
845 }
846 
847 /*
848  * control ops entry point:
849  *
850  * Requests handled completely:
851  *	DDI_CTLOPS_INITCHILD	see pcmu_init_child() for details
852  *	DDI_CTLOPS_UNINITCHILD
853  *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
854  *	DDI_CTLOPS_XLATE_INTRS	nothing to do
855  *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
856  *	DDI_CTLOPS_REGSIZE
857  *	DDI_CTLOPS_NREGS
858  *	DDI_CTLOPS_NINTRS
859  *	DDI_CTLOPS_DVMAPAGESIZE
860  *	DDI_CTLOPS_POKE
861  *	DDI_CTLOPS_PEEK
862  *	DDI_CTLOPS_QUIESCE
863  *	DDI_CTLOPS_UNQUIESCE
864  *
865  * All others passed to parent.
866  */
/*
 * bus_ctl entry point (the handled requests are listed in the block
 * comment above); anything not handled here is passed to the parent.
 */
static int
pcmu_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (pcmu_init_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (pcmu_uninit_child(pcmu_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (pcmu_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary. Otherwise use
		 * whatever alignment is passed in.
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = pcmu_get_reg_set_size(rdip, *((int *)arg));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = pcmu_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		/* a zero page size is reported for DVMA */
		*((ulong_t *)result) = 0;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pcmu_ctlops_poke(pcmu_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pcmu_ctlops_peek(pcmu_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (DDI_FAILURE);

	case DDI_CTLOPS_UNQUIESCE:
		return (DDI_FAILURE);

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	PCMU_DBG2(PCMU_DBG_CTLOPS, dip,
	    "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
931 
932 
/*
 * bus_intr_op entry point: service fixed (INTx-style) interrupt
 * operations on behalf of child devices.  Most operations delegate to
 * the generic pci_intx_* helpers; ADDISR/REMISR and ENABLE/DISABLE go
 * through this driver's interrupt-block code.
 */
/* ARGSUSED */
static int
pcmu_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	pcmu_t		*pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	int		ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ? hdlp->ih_pri : 0;
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = pcmu_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = pcmu_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
		    PCMU_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
998 
999 /*
1000  * CMU-CH specifics implementation:
1001  *	interrupt mapping register
1002  *	PBM configuration
1003  *	ECC and PBM error handling
1004  */
1005 
/* called by pcmu_attach() DDI_ATTACH to initialize pci objects */
static int
pcmu_obj_setup(pcmu_t *pcmu_p)
{
	int ret;

	/* pcmu_global_mutex serializes object setup/teardown globally */
	mutex_enter(&pcmu_global_mutex);
	pcmu_p->pcmu_rev = ddi_prop_get_int(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
	    DDI_PROP_DONTPASS, "module-revision#", 0);

	/* create the sub-objects, then wire up the interrupts */
	pcmu_ib_create(pcmu_p);
	pcmu_cb_create(pcmu_p);
	pcmu_ecc_create(pcmu_p);
	pcmu_pbm_create(pcmu_p);
	pcmu_err_create(pcmu_p);
	if ((ret = pcmu_intr_setup(pcmu_p)) != DDI_SUCCESS)
		goto done;

	/*
	 * Due to a hardware bug, do not create kstat for DC systems
	 * with PCI hw revision less than 5.
	 */
	if ((strncmp(ddi_binding_name(pcmu_p->pcmu_dip),
	    PCICMU_OPL_DC_BINDING_NAME, strlen(PCICMU_OPL_DC_BINDING_NAME))
	    != 0) || (pcmu_p->pcmu_rev > 4)) {
		pcmu_kstat_create(pcmu_p);
	}
done:
	mutex_exit(&pcmu_global_mutex);
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_NOTE, "Interrupt register failure, returning 0x%x\n",
		    ret);
	}
	return (ret);
}
1041 
/* called by pcmu_detach() DDI_DETACH to destroy pci objects */
static void
pcmu_obj_destroy(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/* dismantle the sub-objects; interrupt teardown comes last */
	pcmu_kstat_destroy(pcmu_p);
	pcmu_pbm_destroy(pcmu_p);
	pcmu_err_destroy(pcmu_p);
	pcmu_ecc_destroy(pcmu_p);
	pcmu_cb_destroy(pcmu_p);
	pcmu_ib_destroy(pcmu_p);
	pcmu_intr_teardown(pcmu_p);

	mutex_exit(&pcmu_global_mutex);
}
1058 
/* called by pcmu_attach() DDI_RESUME to (re)initialize pci objects */
static void
pcmu_obj_resume(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/* reprogram interrupt-block and ECC state */
	pcmu_ib_configure(pcmu_p->pcmu_ib_p);
	pcmu_ecc_configure(pcmu_p);
	pcmu_ib_resume(pcmu_p->pcmu_ib_p);
	/* restore the U2U interrupt-translation data */
	u2u_ittrans_resume((u2u_ittrans_data_t **)
	    &(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie));

	pcmu_pbm_configure(pcmu_p->pcmu_pcbm_p);

	pcmu_cb_resume(pcmu_p->pcmu_cb_p);

	pcmu_pbm_resume(pcmu_p->pcmu_pcbm_p);

	mutex_exit(&pcmu_global_mutex);
}
1079 
/* called by pcmu_detach() DDI_SUSPEND to suspend pci objects */
static void
pcmu_obj_suspend(pcmu_t *pcmu_p)
{
	mutex_enter(&pcmu_global_mutex);

	/* quiesce the PBM, interrupt block and CB sub-objects */
	pcmu_pbm_suspend(pcmu_p->pcmu_pcbm_p);
	pcmu_ib_suspend(pcmu_p->pcmu_ib_p);
	pcmu_cb_suspend(pcmu_p->pcmu_cb_p);

	mutex_exit(&pcmu_global_mutex);
}
1092 
/*
 * Set up the CMU-CH interrupt sources: parse the "interrupts"
 * property into INO numbers, then register and enable the ECC and
 * PBM interrupts.  Returns DDI_SUCCESS, or the failing sub-call's
 * error code after tearing down whatever was registered.
 */
static int
pcmu_intr_setup(pcmu_t *pcmu_p)
{
	dev_info_t *dip = pcmu_p->pcmu_dip;
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
	int i, no_of_intrs;

	/*
	 * Get the interrupts property.  (cmn_err(CE_PANIC) does not
	 * return, so a missing property stops the system here.)
	 */
	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&pcmu_p->pcmu_inos,
	    &pcmu_p->pcmu_inos_len) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}

	/*
	 * figure out number of interrupts in the "interrupts" property
	 * and convert them all into ino.
	 */
	i = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "#interrupt-cells", 1);
	i = CELLS_1275_TO_BYTES(i);
	no_of_intrs = pcmu_p->pcmu_inos_len / i;
	for (i = 0; i < no_of_intrs; i++) {
		pcmu_p->pcmu_inos[i] =
		    PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[i]);
	}

	pcb_p->pcb_no_of_inos = no_of_intrs;
	/* from here on, 'i' is reused as an error code */
	if (i = pcmu_ecc_register_intr(pcmu_p)) {
		goto teardown;
	}

	intr_dist_add(pcmu_cb_intr_dist, pcb_p);
	pcmu_ecc_enable_intr(pcmu_p);

	if (i = pcmu_pbm_register_intr(pcbm_p)) {
		/* undo the CB distribution registration made above */
		intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
		goto teardown;
	}
	intr_dist_add(pcmu_pbm_intr_dist, pcbm_p);
	pcmu_ib_intr_enable(pcmu_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);

	intr_dist_add_weighted(pcmu_ib_intr_dist_all, pcmu_p->pcmu_ib_p);
	return (DDI_SUCCESS);
teardown:
	pcmu_intr_teardown(pcmu_p);
	return (i);
}
1144 
1145 /*
1146  * pcmu_fix_ranges - fixes the config space entry of the "ranges"
1147  *	property on CMU-CH platforms
1148  */
1149 void
1150 pcmu_fix_ranges(pcmu_ranges_t *rng_p, int rng_entries)
1151 {
1152 	int i;
1153 	for (i = 0; i < rng_entries; i++, rng_p++) {
1154 		if ((rng_p->child_high & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG)
1155 			rng_p->parent_low |= rng_p->child_high;
1156 	}
1157 }
1158 
1159 /*
1160  * map_pcmu_registers
1161  *
1162  * This function is called from the attach routine to map the registers
1163  * accessed by this driver.
1164  *
1165  * used by: pcmu_attach()
1166  *
1167  * return value: DDI_FAILURE on failure
1168  */
1169 static int
1170 map_pcmu_registers(pcmu_t *pcmu_p, dev_info_t *dip)
1171 {
1172 	ddi_device_acc_attr_t attr;
1173 
1174 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1175 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1176 
1177 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
1178 	if (ddi_regs_map_setup(dip, 0, &pcmu_p->pcmu_address[0], 0, 0,
1179 	    &attr, &pcmu_p->pcmu_ac[0]) != DDI_SUCCESS) {
1180 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 0\n",
1181 		    ddi_driver_name(dip), ddi_get_instance(dip));
1182 		return (DDI_FAILURE);
1183 	}
1184 
1185 	/*
1186 	 * We still use pcmu_address[2]
1187 	 */
1188 	if (ddi_regs_map_setup(dip, 2, &pcmu_p->pcmu_address[2], 0, 0,
1189 	    &attr, &pcmu_p->pcmu_ac[2]) != DDI_SUCCESS) {
1190 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 2\n",
1191 		    ddi_driver_name(dip), ddi_get_instance(dip));
1192 		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1193 		return (DDI_FAILURE);
1194 	}
1195 
1196 	/*
1197 	 * The second register set contains the bridge's configuration
1198 	 * header.  This header is at the very beginning of the bridge's
1199 	 * configuration space.  This space has litte-endian byte order.
1200 	 */
1201 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1202 	if (ddi_regs_map_setup(dip, 1, &pcmu_p->pcmu_address[1], 0,
1203 	    PCI_CONF_HDR_SIZE, &attr, &pcmu_p->pcmu_ac[1]) != DDI_SUCCESS) {
1204 
1205 		cmn_err(CE_WARN, "%s%d: unable to map reg entry 1\n",
1206 		    ddi_driver_name(dip), ddi_get_instance(dip));
1207 		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1208 		return (DDI_FAILURE);
1209 	}
1210 	PCMU_DBG2(PCMU_DBG_ATTACH, dip, "address (%p,%p)\n",
1211 	    pcmu_p->pcmu_address[0], pcmu_p->pcmu_address[1]);
1212 	return (DDI_SUCCESS);
1213 }
1214 
1215 /*
1216  * unmap_pcmu_registers:
1217  *
1218  * This routine unmap the registers mapped by map_pcmu_registers.
1219  *
1220  * used by: pcmu_detach()
1221  *
1222  * return value: none
1223  */
1224 static void
1225 unmap_pcmu_registers(pcmu_t *pcmu_p)
1226 {
1227 	ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
1228 	ddi_regs_map_free(&pcmu_p->pcmu_ac[1]);
1229 	ddi_regs_map_free(&pcmu_p->pcmu_ac[2]);
1230 }
1231 
1232 /*
 * These convenience wrappers rely on map_pcmu_registers() to set up
1234  * pcmu_address[0-2] correctly at first.
1235  */
1236 static uintptr_t
1237 get_reg_base(pcmu_t *pcmu_p)
1238 {
1239 	return ((uintptr_t)pcmu_p->pcmu_address[2]);
1240 }
1241 
1242 /* The CMU-CH config reg base is always the 2nd reg entry */
1243 static uintptr_t
1244 get_config_reg_base(pcmu_t *pcmu_p)
1245 {
1246 	return ((uintptr_t)(pcmu_p->pcmu_address[1]));
1247 }
1248 
1249 uint64_t
1250 ib_get_map_reg(pcmu_ib_mondo_t mondo, uint32_t cpu_id)
1251 {
1252 	return ((mondo) | (cpu_id << PCMU_INTR_MAP_REG_TID_SHIFT) |
1253 	    PCMU_INTR_MAP_REG_VALID);
1254 
1255 }
1256 
1257 uint32_t
1258 ib_map_reg_get_cpu(volatile uint64_t reg)
1259 {
1260 	return ((reg & PCMU_INTR_MAP_REG_TID) >>
1261 	    PCMU_INTR_MAP_REG_TID_SHIFT);
1262 }
1263 
1264 uint64_t *
1265 ib_intr_map_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
1266 {
1267 	uint64_t *addr;
1268 
1269 	ASSERT(ino & 0x20);
1270 	addr = (uint64_t *)(pib_p->pib_obio_intr_map_regs +
1271 	    (((uint_t)ino & 0x1f) << 3));
1272 	return (addr);
1273 }
1274 
1275 uint64_t *
1276 ib_clear_intr_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
1277 {
1278 	uint64_t *addr;
1279 
1280 	ASSERT(ino & 0x20);
1281 	addr = (uint64_t *)(pib_p->pib_obio_clear_intr_regs +
1282 	    (((uint_t)ino & 0x1f) << 3));
1283 	return (addr);
1284 }
1285 
1286 uintptr_t
1287 pcmu_ib_setup(pcmu_ib_t *pib_p)
1288 {
1289 	pcmu_t *pcmu_p = pib_p->pib_pcmu_p;
1290 	uintptr_t a = get_reg_base(pcmu_p);
1291 
1292 	pib_p->pib_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
1293 	pib_p->pib_max_ino = PCMU_MAX_INO;
1294 	pib_p->pib_obio_intr_map_regs = a + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
1295 	pib_p->pib_obio_clear_intr_regs =
1296 	    a + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
1297 	return (a);
1298 }
1299 
1300 /*
 * Return the cpuid to be used for an ino.
1302  *
1303  * On multi-function pci devices, functions have separate devinfo nodes and
1304  * interrupts.
1305  *
1306  * This function determines if there is already an established slot-oriented
1307  * interrupt-to-cpu binding established, if there is then it returns that
1308  * cpu.  Otherwise a new cpu is selected by intr_dist_cpuid().
1309  *
1310  * The devinfo node we are trying to associate a cpu with is
1311  * ino_p->pino_ih_head->ih_dip.
1312  */
uint32_t
pcmu_intr_dist_cpuid(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p)
{
	dev_info_t	*rdip = ino_p->pino_ih_head->ih_dip;
	dev_info_t	*prdip = ddi_get_parent(rdip);
	pcmu_ib_ino_info_t	*sino_p;
	dev_info_t	*sdip;
	dev_info_t	*psdip;
	char		*buf1 = NULL, *buf2 = NULL;
	char		*s1, *s2, *s3;
	int		l2;
	int		cpu_id;

	/* must be CMU-CH driver parent (not ebus) */
	if (strcmp(ddi_driver_name(prdip), "pcicmu") != 0)
		goto newcpu;

	/*
	 * From PCI 1275 binding: 2.2.1.3 Unit Address representation:
	 *   Since the "unit-number" is the address that appears in on Open
	 *   Firmware 'device path', it follows that only the DD and DD,FF
	 *   forms of the text representation can appear in a 'device path'.
	 *
	 * The rdip unit address is of the form "DD[,FF]".  Define two
	 * unit address strings that represent same-slot use: "DD" and "DD,".
	 * The first compare uses strcmp, the second uses strncmp.
	 */
	s1 = ddi_get_name_addr(rdip);
	if (s1 == NULL) {
		goto newcpu;
	}

	buf1 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strcmp */
	buf2 = kmem_alloc(MAXNAMELEN, KM_SLEEP);	/* strncmp */
	s1 = strcpy(buf1, s1);
	s2 = strcpy(buf2, s1);

	s1 = strrchr(s1, ',');
	if (s1) {
		*s1 = '\0';			/* have "DD,FF" */
		s1 = buf1;			/* search via strcmp "DD" */

		s2 = strrchr(s2, ',');
		*(s2 + 1) = '\0';
		s2 = buf2;
		l2 = strlen(s2);		/* search via strncmp "DD," */
	} else {
		/* s1 is NULL here: only the "DD," prefix compare applies */
		(void) strcat(s2, ",");		/* have "DD" */
		l2 = strlen(s2);		/* search via strncmp "DD," */
	}

	/*
	 * Search the established ino list for devinfo nodes bound
	 * to an ino that matches one of the slot use strings.
	 */
	ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
	for (sino_p = pib_p->pib_ino_lst; sino_p; sino_p = sino_p->pino_next) {
		/* skip self and non-established */
		if ((sino_p == ino_p) || (sino_p->pino_established == 0))
			continue;

		/* skip non-siblings */
		sdip = sino_p->pino_ih_head->ih_dip;
		psdip = ddi_get_parent(sdip);
		if (psdip != prdip)
			continue;

		/* must be CMU-CH driver parent (not ebus) */
		if (strcmp(ddi_driver_name(psdip), "pcicmu") != 0)
			continue;

		/* match either "DD" exactly or the "DD," prefix */
		s3 = ddi_get_name_addr(sdip);
		if ((s1 && (strcmp(s1, s3) == 0)) ||
		    (strncmp(s2, s3, l2) == 0)) {
			extern int intr_dist_debug;

			if (intr_dist_debug) {
				cmn_err(CE_CONT, "intr_dist: "
				    "pcicmu`pcmu_intr_dist_cpuid "
				    "%s#%d %s: cpu %d established "
				    "by %s#%d %s\n", ddi_driver_name(rdip),
				    ddi_get_instance(rdip),
				    ddi_deviname(rdip, buf1),
				    sino_p->pino_cpuid,
				    ddi_driver_name(sdip),
				    ddi_get_instance(sdip),
				    ddi_deviname(sdip, buf2));
			}
			break;
		}
	}

	/* If a slot use match is found then use established cpu */
	if (sino_p) {
		cpu_id = sino_p->pino_cpuid;	/* target established cpu */
		goto out;
	}

newcpu:	cpu_id = intr_dist_cpuid();		/* target new cpu */

out:	if (buf1)
		kmem_free(buf1, MAXNAMELEN);
	if (buf2)
		kmem_free(buf2, MAXNAMELEN);
	return (cpu_id);
}
1419 
1420 void
1421 pcmu_cb_teardown(pcmu_t *pcmu_p)
1422 {
1423 	pcmu_cb_t	*pcb_p = pcmu_p->pcmu_cb_p;
1424 
1425 	u2u_ittrans_uninit((u2u_ittrans_data_t *)pcb_p->pcb_ittrans_cookie);
1426 }
1427 
1428 int
1429 pcmu_ecc_add_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
1430 {
1431 	uint32_t mondo;
1432 
1433 	mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) |
1434 	    pcmu_p->pcmu_inos[inum]);
1435 
1436 	VERIFY(add_ivintr(mondo, pcmu_pil[inum], (intrfunc)pcmu_ecc_intr,
1437 	    (caddr_t)eii_p, NULL, NULL) == 0);
1438 
1439 	return (PCMU_ATTACH_RETCODE(PCMU_ECC_OBJ,
1440 	    PCMU_OBJ_INTR_ADD, DDI_SUCCESS));
1441 }
1442 
1443 /* ARGSUSED */
1444 void
1445 pcmu_ecc_rem_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
1446 {
1447 	uint32_t mondo;
1448 
1449 	mondo = ((pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS) |
1450 	    pcmu_p->pcmu_inos[inum]);
1451 
1452 	VERIFY(rem_ivintr(mondo, pcmu_pil[inum]) == 0);
1453 }
1454 
1455 void
1456 pcmu_pbm_configure(pcmu_pbm_t *pcbm_p)
1457 {
1458 	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
1459 	dev_info_t *dip = pcmu_p->pcmu_dip;
1460 
1461 #define	pbm_err	((PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_PE_SHIFT) |	\
1462 		(PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_SE_SHIFT))
1463 #define	csr_err	(PCI_STAT_PERROR | PCI_STAT_S_PERROR |		\
1464 		PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB |	\
1465 		PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR)
1466 
1467 	/*
1468 	 * Clear any PBM errors.
1469 	 */
1470 	*pcbm_p->pcbm_async_flt_status_reg = pbm_err;
1471 
1472 	/*
1473 	 * Clear error bits in configuration status register.
1474 	 */
1475 	PCMU_DBG1(PCMU_DBG_ATTACH, dip,
1476 	    "pcmu_pbm_configure: conf status reg=%x\n", csr_err);
1477 
1478 	pcbm_p->pcbm_config_header->ch_status_reg = csr_err;
1479 
1480 	PCMU_DBG1(PCMU_DBG_ATTACH, dip,
1481 	    "pcmu_pbm_configure: conf status reg==%x\n",
1482 	    pcbm_p->pcbm_config_header->ch_status_reg);
1483 
1484 	(void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
1485 	    (int)pcbm_p->pcbm_config_header->ch_latency_timer_reg);
1486 #undef	pbm_err
1487 #undef	csr_err
1488 }
1489 
1490 uint_t
1491 pcmu_pbm_disable_errors(pcmu_pbm_t *pcbm_p)
1492 {
1493 	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
1494 	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
1495 
1496 	/*
1497 	 * Disable error and streaming byte hole interrupts via the
1498 	 * PBM control register.
1499 	 */
1500 	*pcbm_p->pcbm_ctrl_reg &= ~PCMU_PCI_CTRL_ERR_INT_EN;
1501 
1502 	/*
1503 	 * Disable error interrupts via the interrupt mapping register.
1504 	 */
1505 	pcmu_ib_intr_disable(pib_p,
1506 	    pcmu_p->pcmu_inos[CBNINTR_PBM], PCMU_IB_INTR_NOWAIT);
1507 	return (BF_NONE);
1508 }
1509 
/*
 * Initialize the control block: record the IGN, compute the physical
 * addresses of the interrupt map/clear/state register banks, clear any
 * latched UPA address parity error, enable parity checking, and set up
 * the U2U interrupt-target translation state.
 */
void
pcmu_cb_setup(pcmu_t *pcmu_p)
{
	uint64_t csr, csr_pa, pa;
	pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;

	pcb_p->pcb_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
	/* Convert the mapped register VA to its physical base address. */
	pa = (uint64_t)hat_getpfnum(kas.a_hat, pcmu_p->pcmu_address[0]);
	pcb_p->pcb_base_pa  = pa = pa >> (32 - MMU_PAGESHIFT) << 32;
	pcb_p->pcb_map_pa = pa + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
	pcb_p->pcb_clr_pa = pa + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
	pcb_p->pcb_obsta_pa = pa + PCMU_IB_OBIO_INTR_STATE_DIAG_REG;

	/* Read the control/status register via its physical address. */
	csr_pa = pa + PCMU_CB_CONTROL_STATUS_REG_OFFSET;
	csr = lddphysio(csr_pa);

	/*
	 * Clear any pending address parity errors.
	 */
	if (csr & PCMU_CB_CONTROL_STATUS_APERR) {
		/* write-one-to-clear the latched parity error bit */
		csr |= PCMU_CB_CONTROL_STATUS_APERR;
		cmn_err(CE_WARN, "clearing UPA address parity error\n");
	}
	/* Enable address parity checking; clear the IAP bit. */
	csr |= PCMU_CB_CONTROL_STATUS_APCKEN;
	csr &= ~PCMU_CB_CONTROL_STATUS_IAP;
	stdphysio(csr_pa, csr);

	u2u_ittrans_init(pcmu_p,
	    (u2u_ittrans_data_t **)&pcb_p->pcb_ittrans_cookie);
}
1540 
/*
 * Describe the uncorrectable-error AFSR layout for the common ECC
 * support code: no pending-error mask, and the doubleword offset field
 * location/width (2^3 = 8-byte granularity).
 */
void
pcmu_ecc_setup(pcmu_ecc_t *pecc_p)
{
	pecc_p->pecc_ue.pecc_errpndg_mask = 0;
	pecc_p->pecc_ue.pecc_offset_mask = PCMU_ECC_UE_AFSR_DW_OFFSET;
	pecc_p->pecc_ue.pecc_offset_shift = PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
	pecc_p->pecc_ue.pecc_size_log2 = 3;
}
1549 
1550 static uintptr_t
1551 get_pbm_reg_base(pcmu_t *pcmu_p)
1552 {
1553 	return ((uintptr_t)(pcmu_p->pcmu_address[0]));
1554 }
1555 
1556 void
1557 pcmu_pbm_setup(pcmu_pbm_t *pcbm_p)
1558 {
1559 	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
1560 
1561 	/*
1562 	 * Get the base virtual address for the PBM control block.
1563 	 */
1564 	uintptr_t a = get_pbm_reg_base(pcmu_p);
1565 
1566 	/*
1567 	 * Get the virtual address of the PCI configuration header.
1568 	 * This should be mapped little-endian.
1569 	 */
1570 	pcbm_p->pcbm_config_header =
1571 	    (config_header_t *)get_config_reg_base(pcmu_p);
1572 
1573 	/*
1574 	 * Get the virtual addresses for control, error and diag
1575 	 * registers.
1576 	 */
1577 	pcbm_p->pcbm_ctrl_reg = (uint64_t *)(a + PCMU_PCI_CTRL_REG_OFFSET);
1578 	pcbm_p->pcbm_diag_reg = (uint64_t *)(a + PCMU_PCI_DIAG_REG_OFFSET);
1579 	pcbm_p->pcbm_async_flt_status_reg =
1580 	    (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
1581 	pcbm_p->pcbm_async_flt_addr_reg =
1582 	    (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
1583 }
1584 
/*ARGSUSED*/
void
pcmu_pbm_teardown(pcmu_pbm_t *pcbm_p)
{
	/* Intentional no-op: no PBM-specific state to release here. */
}
1590 
1591 int
1592 pcmu_get_numproxy(dev_info_t *dip)
1593 {
1594 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1595 	    "#upa-interrupt-proxies", 1));
1596 }
1597 
1598 int
1599 pcmu_get_portid(dev_info_t *dip)
1600 {
1601 	return (ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1602 	    "portid", -1));
1603 }
1604 
/*
 * CMU-CH Performance Events.
 *
 * Each entry pairs a performance-counter event name with the PCR
 * select code programmed for it; "clear_pic" (0x1f) resets the PIC.
 */
static pcmu_kev_mask_t
pcicmu_pcmu_events[] = {
	{"pio_cycles_b", 0xf},		{"interrupts", 0x11},
	{"upa_inter_nack", 0x12},	{"pio_reads", 0x13},
	{"pio_writes", 0x14},
	{"clear_pic", 0x1f}
};
1615 
1616 /*
1617  * Create the picN kstat's.
1618  */
1619 void
1620 pcmu_kstat_init()
1621 {
1622 	pcmu_name_kstat = (pcmu_ksinfo_t *)kmem_alloc(sizeof (pcmu_ksinfo_t),
1623 	    KM_NOSLEEP);
1624 
1625 	if (pcmu_name_kstat == NULL) {
1626 		cmn_err(CE_WARN, "pcicmu : no space for kstat\n");
1627 	} else {
1628 		pcmu_name_kstat->pic_no_evs =
1629 		    sizeof (pcicmu_pcmu_events) / sizeof (pcmu_kev_mask_t);
1630 		pcmu_name_kstat->pic_shift[0] = PCMU_SHIFT_PIC0;
1631 		pcmu_name_kstat->pic_shift[1] = PCMU_SHIFT_PIC1;
1632 		pcmu_create_name_kstat("pcmup",
1633 		    pcmu_name_kstat, pcicmu_pcmu_events);
1634 	}
1635 }
1636 
1637 /*
1638  * Called from _fini()
1639  */
1640 void
1641 pcmu_kstat_fini()
1642 {
1643 	if (pcmu_name_kstat != NULL) {
1644 		pcmu_delete_name_kstat(pcmu_name_kstat);
1645 		kmem_free(pcmu_name_kstat, sizeof (pcmu_ksinfo_t));
1646 		pcmu_name_kstat = NULL;
1647 	}
1648 }
1649 
1650 /*
1651  * Create the performance 'counters' kstat.
1652  */
1653 void
1654 pcmu_add_upstream_kstat(pcmu_t *pcmu_p)
1655 {
1656 	pcmu_cntr_pa_t	*cntr_pa_p = &pcmu_p->pcmu_uks_pa;
1657 	uint64_t regbase = va_to_pa((void *)get_reg_base(pcmu_p));
1658 
1659 	cntr_pa_p->pcr_pa = regbase + PCMU_PERF_PCR_OFFSET;
1660 	cntr_pa_p->pic_pa = regbase + PCMU_PERF_PIC_OFFSET;
1661 	pcmu_p->pcmu_uksp = pcmu_create_cntr_kstat(pcmu_p, "pcmup",
1662 	    NUM_OF_PICS, pcmu_cntr_kstat_pa_update, cntr_pa_p);
1663 }
1664 
1665 /*
 * u2u_ittrans_init() is called from pci.c's pcmu_cb_setup() per CMU.
1667  * Second argument "ittrans_cookie" is address of pcb_ittrans_cookie in
1668  * pcb_p member. allocated interrupt block is returned in it.
1669  */
static void
u2u_ittrans_init(pcmu_t *pcmu_p, u2u_ittrans_data_t **ittrans_cookie)
{

	u2u_ittrans_data_t *u2u_trans_p;
	ddi_device_acc_attr_t attr;
	int ret;
	int board;

	/*
	 * Allocate the data structure to support U2U's
	 * interrupt target translations.
	 */
	u2u_trans_p = (u2u_ittrans_data_t *)
	    kmem_zalloc(sizeof (u2u_ittrans_data_t), KM_SLEEP);

	/*
	 * Get the "board#" property; it identifies this CMU in all of
	 * the u2u error messages below.
	 */
	board = ddi_getprop(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
	    DDI_PROP_DONTPASS, "board#", -1);

	u2u_trans_p->u2u_board = board;

	if (board == -1) {
		/* this cannot happen on production systems */
		cmn_err(CE_PANIC, "u2u:Invalid property;board = %d", board);
	}

	/*
	 * Initialize interrupt target translations mutex.
	 */
	mutex_init(&(u2u_trans_p->u2u_ittrans_lock), "u2u_ittrans_lock",
	    MUTEX_DEFAULT, NULL);

	/*
	 * Map the U2U register space (strict-order, no byte swapping).
	 */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	ret = ddi_regs_map_setup(pcmu_p->pcmu_dip,
	    REGS_INDEX_OF_U2U, (caddr_t *)(&(u2u_trans_p->u2u_regs_base)),
	    0, 0, &attr, &(u2u_trans_p->u2u_acc));

	/*
	 * A failed mapping leaves the CMU unusable, hence the panic.
	 */
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "u2u%d: registers map setup failed", board);
	}

	/*
	 * Read the U2U port-id register and cross-check it against the
	 * CMU id this driver instance was attached with.
	 */
	u2u_trans_p->u2u_port_id = *(volatile int32_t *)
	    (u2u_trans_p->u2u_regs_base + U2U_PID_REGISTER_OFFSET);

	if (pcmu_p->pcmu_id != u2u_trans_p->u2u_port_id) {
		cmn_err(CE_PANIC, "u2u%d: Invalid Port-ID", board);
	}

	*ittrans_cookie = u2u_trans_p;
}
1735 
1736 /*
 * u2u_ittrans_resume() is called from pcmu_obj_resume() at DDI_RESUME entry.
1738  */
1739 static void
1740 u2u_ittrans_resume(u2u_ittrans_data_t **ittrans_cookie)
1741 {
1742 
1743 	u2u_ittrans_data_t *u2u_trans_p;
1744 	u2u_ittrans_id_t *ittrans_id_p;
1745 	uintptr_t  data_reg_addr;
1746 	int ix;
1747 
1748 	u2u_trans_p = *ittrans_cookie;
1749 
1750 	/*
1751 	 * Set U2U Data Register
1752 	 */
1753 	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1754 		ittrans_id_p = &(u2u_trans_p->u2u_ittrans_id[ix]);
1755 		data_reg_addr = u2u_trans_p->u2u_regs_base +
1756 		    U2U_DATA_REGISTER_OFFSET + (ix * sizeof (uint64_t));
1757 		if (ittrans_id_p->u2u_ino_map_reg == NULL) {
1758 			/* This index was not set */
1759 			continue;
1760 		}
1761 		*(volatile uint32_t *) (data_reg_addr) =
1762 		    (uint32_t)ittrans_id_p->u2u_tgt_cpu_id;
1763 
1764 	}
1765 }
1766 
1767 /*
 * u2u_ittrans_uninit() is called from ib_destroy() at detach time, or
 * when an error occurs during attach.
1770  */
1771 static void
1772 u2u_ittrans_uninit(u2u_ittrans_data_t *ittrans_cookie)
1773 {
1774 
1775 	if (ittrans_cookie == NULL) {
1776 		return;	/* not support */
1777 	}
1778 
1779 	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1780 		return;	 /* illeagal case */
1781 	}
1782 
1783 	ddi_regs_map_free(&(ittrans_cookie->u2u_acc));
1784 	mutex_destroy(&(ittrans_cookie->u2u_ittrans_lock));
1785 	kmem_free((void *)ittrans_cookie, sizeof (u2u_ittrans_data_t));
1786 }
1787 
1788 /*
 * u2u_translate_tgtid(pcmu_p, cpu_id, pino_map_reg) searches the U2U
 * data registers for an index already bound to pino_map_reg (or for an
 * empty one), stores cpu_id into the data register at that index, and
 * returns the index.
1793  */
1794 int
1795 u2u_translate_tgtid(pcmu_t *pcmu_p, uint_t cpu_id,
1796     volatile uint64_t *pino_map_reg)
1797 {
1798 
1799 	int index = -1;
1800 	int ix;
1801 	int err_level;	/* severity level for cmn_err */
1802 	u2u_ittrans_id_t *ittrans_id_p;
1803 	uintptr_t  data_reg_addr;
1804 	u2u_ittrans_data_t *ittrans_cookie;
1805 
1806 	ittrans_cookie =
1807 	    (u2u_ittrans_data_t *)(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie);
1808 
1809 	if (ittrans_cookie == NULL) {
1810 		return (cpu_id);
1811 	}
1812 
1813 	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1814 		return (-1);	 /* illeagal case */
1815 	}
1816 
1817 	mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
1818 
1819 	/*
1820 	 * Decide index No. of U2U Data registers in either
1821 	 * already used by same pino_map_reg, or empty.
1822 	 */
1823 	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1824 		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
1825 		if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
1826 			/* already used this pino_map_reg */
1827 			index = ix;
1828 			break;
1829 		}
1830 		if (index == -1 &&
1831 		    ittrans_id_p->u2u_ino_map_reg == NULL) {
1832 			index = ix;
1833 		}
1834 	}
1835 
1836 	if (index == -1) {
1837 		if (panicstr) {
1838 			err_level = CE_WARN;
1839 		} else {
1840 			err_level = CE_PANIC;
1841 		}
1842 		cmn_err(err_level, "u2u%d:No more U2U-Data regs!!",
1843 		    ittrans_cookie->u2u_board);
1844 		return (cpu_id);
1845 	}
1846 
1847 	/*
1848 	 * For U2U
1849 	 * set cpu_id into u2u_data_reg by index.
1850 	 * ((uint64_t)(u2u_regs_base
1851 	 *	+ U2U_DATA_REGISTER_OFFSET))[index] = cpu_id;
1852 	 */
1853 
1854 	data_reg_addr = ittrans_cookie->u2u_regs_base
1855 	    + U2U_DATA_REGISTER_OFFSET
1856 	    + (index * sizeof (uint64_t));
1857 
1858 	/*
1859 	 * Set cpu_id into U2U Data register[index]
1860 	 */
1861 	*(volatile uint32_t *) (data_reg_addr) = (uint32_t)cpu_id;
1862 
1863 	/*
1864 	 * Setup for software, excepting at panicing.
1865 	 * and rebooting, etc...?
1866 	 */
1867 	if (!panicstr) {
1868 		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[index]);
1869 		ittrans_id_p->u2u_tgt_cpu_id = cpu_id;
1870 		ittrans_id_p->u2u_ino_map_reg = pino_map_reg;
1871 	}
1872 
1873 	mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
1874 
1875 	return (index);
1876 }
1877 
1878 /*
1879  * u2u_ittrans_cleanup() is called from common_pcmu_ib_intr_disable()
1880  * after called intr_rem_cpu(mondo).
1881  */
1882 void
1883 u2u_ittrans_cleanup(u2u_ittrans_data_t *ittrans_cookie,
1884 			volatile uint64_t *pino_map_reg)
1885 {
1886 
1887 	int ix;
1888 	u2u_ittrans_id_t *ittrans_id_p;
1889 
1890 	if (ittrans_cookie == NULL) {
1891 		return;
1892 	}
1893 
1894 	if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
1895 		return;	 /* illeagal case */
1896 	}
1897 
1898 	mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
1899 
1900 	for (ix = 0; ix < U2U_DATA_NUM; ix++) {
1901 		ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
1902 		if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
1903 			ittrans_id_p->u2u_ino_map_reg = NULL;
1904 			break;
1905 		}
1906 	}
1907 
1908 	mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
1909 }
1910 
1911 /*
1912  * pcmu_ecc_classify, called by ecc_handler to classify ecc errors
1913  * and determine if we should panic or not.
1914  */
void
pcmu_ecc_classify(uint64_t err, pcmu_ecc_errstate_t *ecc_err_p)
{
	struct async_flt *ecc = &ecc_err_p->ecc_aflt;
	/* LINTED */
	pcmu_t *pcmu_p = ecc_err_p->ecc_ii_p.pecc_p->pecc_pcmu_p;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));

	ecc_err_p->ecc_bridge_type = PCI_OPLCMU;	/* RAGS */
	/*
	 * Get the parent bus id that caused the error.
	 */
	ecc_err_p->ecc_dev_id = (ecc_err_p->ecc_afsr & PCMU_ECC_UE_AFSR_ID)
	    >> PCMU_ECC_UE_AFSR_ID_SHIFT;
	/*
	 * Determine the doubleword offset of the error.
	 */
	ecc_err_p->ecc_dw_offset = (ecc_err_p->ecc_afsr &
	    PCMU_ECC_UE_AFSR_DW_OFFSET) >> PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
	/*
	 * Determine the primary error type.  Only the PIO UE type is
	 * classified here; any other value leaves ecc_err_p unchanged.
	 */
	switch (err) {
	case PCMU_ECC_UE_AFSR_E_PIO:
		/* primary vs. secondary determines the ereport class */
		if (ecc_err_p->pecc_pri) {
			ecc->flt_erpt_class = PCI_ECC_PIO_UE;
		} else {
			ecc->flt_erpt_class = PCI_ECC_SEC_PIO_UE;
		}
		/* For CMU-CH, a UE is always fatal. */
		ecc->flt_panic = 1;
		break;

	default:
		return;
	}
}
1953 
1954 /*
1955  * pcmu_pbm_classify, called by pcmu_pbm_afsr_report to classify piow afsr.
1956  */
1957 int
1958 pcmu_pbm_classify(pcmu_pbm_errstate_t *pbm_err_p)
1959 {
1960 	uint32_t e;
1961 	int nerr = 0;
1962 	char **tmp_class;
1963 
1964 	if (pbm_err_p->pcbm_pri) {
1965 		tmp_class = &pbm_err_p->pcbm_pci.pcmu_err_class;
1966 		e = PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr);
1967 		pbm_err_p->pbm_log = FM_LOG_PCI;
1968 	} else {
1969 		tmp_class = &pbm_err_p->pbm_err_class;
1970 		e = PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
1971 		pbm_err_p->pbm_log = FM_LOG_PBM;
1972 	}
1973 
1974 	if (e & PCMU_PCI_AFSR_E_MA) {
1975 		*tmp_class = pbm_err_p->pcbm_pri ? PCI_MA : PCI_SEC_MA;
1976 		nerr++;
1977 	}
1978 	return (nerr);
1979 }
1980 
1981 /*
1982  * Function used to clear PBM/PCI/IOMMU error state after error handling
1983  * is complete. Only clearing error bits which have been logged. Called by
1984  * pcmu_pbm_err_handler and pcmu_bus_exit.
1985  */
static void
pcmu_clear_error(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcbm_p->pcbm_pcmu_p->pcmu_err_mutex));

	/*
	 * Write back the captured images; only the error bits that were
	 * observed (and logged) get cleared this way.
	 */
	*pcbm_p->pcbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
	*pcbm_p->pcbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
	pcbm_p->pcbm_config_header->ch_status_reg =
	    pbm_err_p->pcbm_pci.pcmu_cfg_stat;
}
1998 
1999 /*ARGSUSED*/
2000 int
2001 pcmu_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
2002 		const void *impl_data, int caller)
2003 {
2004 	int fatal = 0;
2005 	int nonfatal = 0;
2006 	int unknown = 0;
2007 	uint32_t prierr, secerr;
2008 	pcmu_pbm_errstate_t pbm_err;
2009 	pcmu_t *pcmu_p = (pcmu_t *)impl_data;
2010 	int ret = 0;
2011 
2012 	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
2013 	pcmu_pbm_errstate_get(pcmu_p, &pbm_err);
2014 
2015 	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
2016 	    fm_ena_generate(0, FM_ENA_FMT1);
2017 
2018 	prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
2019 	secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
2020 
2021 	if (derr->fme_flag == DDI_FM_ERR_PEEK) {
2022 		/*
2023 		 * For ddi_peek treat all events as nonfatal. We only
2024 		 * really call this function so that pcmu_clear_error()
2025 		 * and ndi_fm_handler_dispatch() will get called.
2026 		 */
2027 		nonfatal++;
2028 		goto done;
2029 	} else if (derr->fme_flag == DDI_FM_ERR_POKE) {
2030 		/*
2031 		 * For ddi_poke we can treat as nonfatal if the
2032 		 * following conditions are met :
2033 		 * 1. Make sure only primary error is MA/TA
2034 		 * 2. Make sure no secondary error
2035 		 * 3. check pci config header stat reg to see MA/TA is
2036 		 *    logged. We cannot verify only MA/TA is recorded
2037 		 *    since it gets much more complicated when a
2038 		 *    PCI-to-PCI bridge is present.
2039 		 */
2040 		if ((prierr == PCMU_PCI_AFSR_E_MA) && !secerr &&
2041 		    (pbm_err.pcbm_pci.pcmu_cfg_stat & PCI_STAT_R_MAST_AB)) {
2042 			nonfatal++;
2043 			goto done;
2044 		}
2045 	}
2046 
2047 	if (prierr || secerr) {
2048 		ret = pcmu_pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
2049 		if (ret == DDI_FM_FATAL) {
2050 			fatal++;
2051 		} else {
2052 			nonfatal++;
2053 		}
2054 	}
2055 
2056 	ret = pcmu_cfg_report(dip, derr, &pbm_err.pcbm_pci, caller, prierr);
2057 	if (ret == DDI_FM_FATAL) {
2058 		fatal++;
2059 	} else if (ret == DDI_FM_NONFATAL) {
2060 		nonfatal++;
2061 	}
2062 
2063 done:
2064 	if (ret == DDI_FM_FATAL) {
2065 		fatal++;
2066 	} else if (ret == DDI_FM_NONFATAL) {
2067 		nonfatal++;
2068 	} else if (ret == DDI_FM_UNKNOWN) {
2069 		unknown++;
2070 	}
2071 
2072 	/* Cleanup and reset error bits */
2073 	pcmu_clear_error(pcmu_p, &pbm_err);
2074 
2075 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2076 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2077 }
2078 
2079 int
2080 pcmu_check_error(pcmu_t *pcmu_p)
2081 {
2082 	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
2083 	uint16_t pcmu_cfg_stat;
2084 	uint64_t pbm_afsr;
2085 
2086 	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
2087 
2088 	pcmu_cfg_stat = pcbm_p->pcbm_config_header->ch_status_reg;
2089 	pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
2090 
2091 	if ((pcmu_cfg_stat & (PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
2092 	    PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
2093 	    PCI_STAT_S_SYSERR | PCI_STAT_PERROR)) ||
2094 	    (PBM_AFSR_TO_PRIERR(pbm_afsr))) {
2095 		return (1);
2096 	}
2097 	return (0);
2098 
2099 }
2100 
2101 /*
2102  * Function used to gather PBM/PCI error state for the
2103  * pcmu_pbm_err_handler. This function must be called while pcmu_err_mutex
2104  * is held.
2105  */
static void
pcmu_pbm_errstate_get(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
{
	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;

	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
	bzero(pbm_err_p, sizeof (pcmu_pbm_errstate_t));

	/*
	 * Capture all pbm error state for later logging.  Each
	 * dereference below is a hardware register read; the read
	 * order and count are significant and must be preserved.
	 */
	pbm_err_p->pbm_bridge_type = PCI_OPLCMU;	/* RAGS */
	pbm_err_p->pcbm_pci.pcmu_cfg_stat =
	    pcbm_p->pcbm_config_header->ch_status_reg;
	pbm_err_p->pbm_ctl_stat = *pcbm_p->pcbm_ctrl_reg;
	pbm_err_p->pcbm_pci.pcmu_cfg_comm =
	    pcbm_p->pcbm_config_header->ch_command_reg;
	pbm_err_p->pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	/* AFAR is read twice, into both the PBM and PCI views. */
	pbm_err_p->pbm_afar = *pcbm_p->pcbm_async_flt_addr_reg;
	pbm_err_p->pcbm_pci.pcmu_pa = *pcbm_p->pcbm_async_flt_addr_reg;
}
2127 
static void
pcmu_pbm_clear_error(pcmu_pbm_t *pcbm_p)
{
	uint64_t pbm_afsr;

	/*
	 * for poke() support - called from POKE_FLUSH. Spin waiting
	 * for MA, TA or SERR to be cleared by a pcmu_pbm_error_intr().
	 * We have to wait for SERR too in case the device is beyond
	 * a pci-pci bridge.
	 */
	pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	/* busy-wait: the interrupt handler clears the AFSR bits */
	while (((pbm_afsr >> PCMU_PCI_AFSR_PE_SHIFT) &
	    (PCMU_PCI_AFSR_E_MA | PCMU_PCI_AFSR_E_TA))) {
		pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
	}
}
2145 
void
pcmu_err_create(pcmu_t *pcmu_p)
{
	/*
	 * PCI detected ECC errorq, to schedule async handling
	 * of ECC errors and logging.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.  It is shared by all instances, hence the
	 * NULL check before creation.
	 */
	if (pcmu_ecc_queue == NULL) {
		pcmu_ecc_queue = errorq_create("pcmu_ecc_queue",
		    (errorq_func_t)pcmu_ecc_err_drain,
		    (void *)NULL,
		    ECC_MAX_ERRS, sizeof (pcmu_ecc_errstate_t),
		    PIL_2, ERRORQ_VITAL);
		if (pcmu_ecc_queue == NULL)
			panic("failed to create required system error queue");
	}

	/*
	 * Initialize error handling mutex, keyed to the FM iblock
	 * cookie so it can be taken from the error interrupt handler.
	 */
	mutex_init(&pcmu_p->pcmu_err_mutex, NULL, MUTEX_DRIVER,
	    (void *)pcmu_p->pcmu_fm_ibc);
}
2171 
/* Tear down the per-instance error-handling mutex. */
void
pcmu_err_destroy(pcmu_t *pcmu_p)
{
	mutex_destroy(&pcmu_p->pcmu_err_mutex);
}
2177 
2178 /*
2179  * Function used to post PCI block module specific ereports.
2180  */
void
pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena,
    pcmu_pbm_errstate_t *pbm_err)
{
	char *aux_msg;
	uint32_t prierr, secerr;
	pcmu_t *pcmu_p;
	int instance = ddi_get_instance(dip);

	/*
	 * NOTE(review): ena is normalized here but never used below --
	 * looks like a leftover from an ereport-posting implementation;
	 * confirm whether an fm_ereport_post() call is missing.
	 */
	ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);

	/*
	 * NOTE(review): pcmu_p is not checked for NULL before the
	 * dereferences in cmn_err below -- confirm the soft state
	 * always exists when this is called.
	 */
	pcmu_p = get_pcmu_soft_state(instance);
	prierr = PBM_AFSR_TO_PRIERR(pbm_err->pbm_afsr);
	secerr = PBM_AFSR_TO_SECERR(pbm_err->pbm_afsr);
	/* Primary errors take precedence in the message text. */
	if (prierr)
		aux_msg = "PCI primary error: Master Abort";
	else if (secerr)
		aux_msg = "PCI secondary error: Master Abort";
	else
		aux_msg = "";
	cmn_err(CE_WARN, "%s %s: %s %s=0x%lx, %s=0x%lx, %s=0x%lx %s=0x%x",
	    (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str,
	    (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str,
	    aux_msg,
	    PCI_PBM_AFAR, pbm_err->pbm_afar,
	    PCI_PBM_AFSR, pbm_err->pbm_afsr,
	    PCI_PBM_CSR, pbm_err->pbm_ctl_stat,
	    "portid", pcmu_p->pcmu_id);
}
2210