/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *	Host to PCI-Express local bus driver
 */
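
/*
 * npe is the nexus driver for the PCI Express root complex on x86.  It maps
 * child register and configuration space requests (npe_bus_map), handles
 * nexus ctlops and interrupt operations largely by delegating to the common
 * PCI code, and registers with the I/O fault management framework (FMA).
 * Configuration space is reached either through memory-mapped (MMCFG)
 * access or, for legacy PCI devices and certain AMD northbridges, through
 * I/O port based access set up by pci_fm_acc_setup().
 */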

#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/sysmacros.h>
#include <sys/ddi_intr.h>
#include <sys/sunndi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/ndifm.h>
#include <sys/fm/util.h>
#include <sys/hotplug/pci/pcihp.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <io/pciex/pcie_nvidia.h>

/*
 * Bus Operation functions
 */
static int	npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
		    off_t, off_t, caddr_t *);
static int	npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
		    void *, void *);
static int	npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
		    ddi_intr_handle_impl_t *, void *);
static int	npe_fm_init(dev_info_t *, dev_info_t *, int,
		    ddi_iblock_cookie_t *);

static int	npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);

/*
 * Disable URs and Received MA for all PCIe devices.  Until x86 software is
 * changed so that random drivers do not do PIO accesses on devices they do
 * not own, these error bits must be disabled.  SERR must also be disabled
 * if URs have been masked.
 */
uint32_t	npe_aer_uce_mask = PCIE_AER_UCE_UR;
uint32_t	npe_aer_ce_mask = 0;
uint32_t	npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;
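
/*
 * These masks are OR'd into the PCIe module's global AER masks in
 * npe_initchild().  Being ordinary kernel globals, they can also be tuned
 * (for example via /etc/system) before the driver attaches its children.
 */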

struct bus_ops npe_bus_ops = {
	BUSO_REV,
	npe_bus_map,
	NULL,
	NULL,
	NULL,
	i_ddi_map_fault,
	ddi_dma_map,
	ddi_dma_allochdl,
	ddi_dma_freehdl,
	ddi_dma_bindhdl,
	ddi_dma_unbindhdl,
	ddi_dma_flush,
	ddi_dma_win,
	ddi_dma_mctl,
	npe_ctlops,
	ddi_bus_prop_op,
	0,		/* (*bus_get_eventcookie)();	*/
	0,		/* (*bus_add_eventcall)();	*/
	0,		/* (*bus_remove_eventcall)();	*/
	0,		/* (*bus_post_event)();		*/
	0,		/* (*bus_intr_ctl)(); */
	0,		/* (*bus_config)(); */
	0,		/* (*bus_unconfig)(); */
	npe_fm_init,	/* (*bus_fm_init)(); */
	NULL,		/* (*bus_fm_fini)(); */
	NULL,		/* (*bus_fm_access_enter)(); */
	NULL,		/* (*bus_fm_access_exit)(); */
	NULL,		/* (*bus_power)(); */
	npe_intr_ops	/* (*bus_intr_op)(); */
};

/*
 * One goal here is to leverage the pcihp.c source without making changes
 * to it.  Call into its cb_ops directly if needed, piggybacking anything
 * else needed by the pci_tools.c module.  Only pci_tools and pcihp will
 * be using the PCI devctl node.
 */
static int	npe_open(dev_t *, int, int, cred_t *);
static int	npe_close(dev_t, int, int, cred_t *);
static int	npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	npe_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
		    caddr_t, int *);
static int	npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);

struct cb_ops npe_cb_ops = {
	npe_open,			/* open */
	npe_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	npe_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	npe_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};


/*
 * Device Node Operation functions
 */
static int	npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);

struct dev_ops npe_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	npe_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	npe_attach,		/* attach */
	npe_detach,		/* detach */
	nulldev,		/* reset */
	&npe_cb_ops,		/* driver operations */
	&npe_bus_ops,		/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

/*
 * Internal routines in support of particular npe_ctlops.
 */
static int npe_removechild(dev_info_t *child);
static int npe_initchild(dev_info_t *child);

/*
 * External support routines
 */
extern void	npe_query_acpi_mcfg(dev_info_t *dip);
extern void	npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
extern int	npe_disable_empty_bridges_workaround(dev_info_t *child);
extern void	npe_nvidia_error_mask(ddi_acc_handle_t cfg_hdl);
extern void	npe_intel_error_mask(ddi_acc_handle_t cfg_hdl);

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module */
	"Host to PCIe nexus driver",
	&npe_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

/* Save minimal state. */
void *npe_statep;

int
_init(void)
{
	int e;

	/*
	 * Initialize the per-PCI-bus soft state pointer.
	 */
	e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
	if (e != 0)
		return (e);

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&npe_statep);

	return (e);
}


int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

	ddi_soft_state_fini(&npe_statep);
	return (rc);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	/*
	 * Use the minor number constructed by pcihp as the index value for
	 * ddi_soft_state_zalloc.
	 */
	int instance = ddi_get_instance(devi);
	pci_state_t *pcip = NULL;

	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);

	if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
	    "pciex") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "npe:  'device_type' prop create failed");
	}

	if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
		pcip = ddi_get_soft_state(npe_statep, instance);

	if (pcip == NULL)
		return (DDI_FAILURE);

	pcip->pci_dip = devi;

	pcie_rc_init_bus(devi);

	/*
	 * Initialize hotplug support on this bus.  At a minimum (for a
	 * non-hotplug bus) this creates the ":devctl" minor node to support
	 * DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls on this bus.
	 */
	if (pcihp_init(devi) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "npe: Failed to setup hotplug framework");
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_FAILURE);
	}

	/* Second arg: initialize for pci_express root nexus */
	if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS) {
		(void) pcihp_uninit(devi);
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_FAILURE);
	}

	pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);

	if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
		ddi_fm_handler_register(devi, npe_fm_callback, NULL);
	}

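	/*
	 * Preallocate and initialize the fault data structure (pf_data_t)
	 * for this root complex, for use by the PCIe fault management code.
	 */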
	PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
	pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));

	npe_query_acpi_mcfg(devi);
	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(devi);
	pci_state_t *pcip;

	pcip = ddi_get_soft_state(npe_statep, ddi_get_instance(devi));

	switch (cmd) {
	case DDI_DETACH:

		/* Uninitialize pcitool support. */
		pcitool_uninit(devi);

		/*
		 * Uninitialize hotplug support on this bus.
		 */
		(void) pcihp_uninit(devi);

		if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
			ddi_fm_handler_unregister(devi);

		pcie_rc_fini_bus(devi);
		pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
		kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));

		ddi_fm_fini(devi);
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

static int
npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	int		rnumber;
	int		length;
	int		space;
	ddi_acc_impl_t	*ap;
	ddi_acc_hdl_t	*hp;
	ddi_map_req_t	mr;
	pci_regspec_t	pci_reg;
	pci_regspec_t	*pci_rp;
	struct regspec	reg;
	pci_acc_cfblk_t	*cfp;
	int		retval;

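	/*
	 * The private copy is needed because the map type and the regspec
	 * pointer are rewritten below; the caller's request must be left
	 * untouched.
	 */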
	mr = *mp; /* Get private copy of request */
	mp = &mr;

	/*
	 * check for register number
	 */
	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
		pci_rp = &pci_reg;
		if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
			return (DDI_FAILURE);
		break;
	case DDI_MT_RNUMBER:
		rnumber = mp->map_obj.rnumber;
		/*
		 * Get ALL "reg" properties for dip and select the one of
		 * interest.  On x86, the "assigned-addresses" property is
		 * identical to the "reg" property, so there is no need to
		 * cross check the two to determine the physical address of
		 * the registers.
		 * This routine still performs some validity checks to
		 * make sure that everything is okay.
		 */
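		/*
		 * Each "reg" entry is a pci_regspec_t, i.e. five ints
		 * (phys_hi, phys_mid, phys_low, size_hi, size_low), so the
		 * length returned below (in ints) is divided by five to get
		 * the number of register sets; e.g. 15 ints describe three
		 * register sets.
		 */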
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
		    (uint_t *)&length) != DDI_PROP_SUCCESS)
			return (DDI_FAILURE);

		/*
		 * validate the register number.
		 */
		length /= (sizeof (pci_regspec_t) / sizeof (int));
		if (rnumber >= length) {
			ddi_prop_free(pci_rp);
			return (DDI_FAILURE);
		}

		/*
		 * copy the required entry.
		 */
		pci_reg = pci_rp[rnumber];

		/*
		 * free the memory allocated by ddi_prop_lookup_int_array
		 */
		ddi_prop_free(pci_rp);

		pci_rp = &pci_reg;
		if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
			return (DDI_FAILURE);
		mp->map_type = DDI_MT_REGSPEC;
		break;
	default:
		return (DDI_ME_INVAL);
	}

	space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;

	/*
	 * check for unmap and unlock of address space
	 */
	if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
		switch (space) {
		case PCI_ADDR_IO:
			reg.regspec_bustype = 1;
			break;

		case PCI_ADDR_CONFIG:
			/*
			 * Check for AMD's northbridges
			 *	AND
			 * for any PCI device.
			 *
			 * This is a workaround fix for
			 * AMD-8132's inability to handle MMCFG
			 * accesses on Galaxy's PE servers
			 *	AND
			 * to disable MMCFG for any PCI device.
			 *
			 * If a device is *not* found to have PCIe
			 * capability, then assume it is a PCI device.
			 */

			if (is_amd_northbridge(rdip) == 0 ||
			    (ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
			    DDI_PROP_DONTPASS, "pcie-capid-pointer",
			    PCI_CAP_NEXT_PTR_NULL) == PCI_CAP_NEXT_PTR_NULL)) {
				if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
				    mp->map_handlep->ah_acc.devacc_attr_access
				    != DDI_DEFAULT_ACC) {
					ndi_fmc_remove(rdip, ACC_HANDLE,
					    (void *)mp->map_handlep);
				}
				return (DDI_SUCCESS);
			}

			/* FALLTHROUGH */
		case PCI_ADDR_MEM64:
			/*
			 * MEM64 requires special treatment on map, to check
			 * that the device is below 4G.  On unmap, however,
			 * we can assume that everything is OK... the map
			 * must have succeeded.
			 */
			/* FALLTHROUGH */
		case PCI_ADDR_MEM32:
			reg.regspec_bustype = 0;
			break;

		default:
			return (DDI_FAILURE);
		}

		/*
		 * Adjust offset and length
		 * A non-zero length means override the one in the regspec.
		 */
		pci_rp->pci_phys_low += (uint_t)offset;
		if (len != 0)
			pci_rp->pci_size_low = len;

		reg.regspec_addr = pci_rp->pci_phys_low;
		reg.regspec_size = pci_rp->pci_size_low;

		mp->map_obj.rp = &reg;
		retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC) {
			ndi_fmc_remove(rdip, ACC_HANDLE,
			    (void *)mp->map_handlep);
		}
		return (retval);
	}

	/* check for user mapping request - not legal for Config */
	if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
		cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
		return (DDI_FAILURE);
	}

	/*
	 * Note that pci_fm_acc_setup() is called to serve two purposes:
	 * i) enable legacy PCI I/O style config space access
	 * ii) register with FMA
	 */
	if (space == PCI_ADDR_CONFIG) {
		/* Can't map config space without a handle */
		hp = (ddi_acc_hdl_t *)mp->map_handlep;
		if (hp == NULL)
			return (DDI_FAILURE);

		/* record the device address for future reference */
		cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
		cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
		cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
		cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

		*vaddrp = (caddr_t)offset;

		/*
		 * Check for AMD's northbridges, PCI devices and
		 * devices underneath a PCI bridge.  This is to set up
		 * I/O based config space access.
		 */
		if (is_amd_northbridge(rdip) == 0 ||
		    (ddi_prop_get_int(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL) ==
		    PCI_CAP_NEXT_PTR_NULL)) {
			int ret;

			if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
			    DDI_SUCCESS) {
				if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
				    mp->map_handlep->ah_acc.devacc_attr_access
				    != DDI_DEFAULT_ACC) {
					ndi_fmc_insert(rdip, ACC_HANDLE,
					    (void *)mp->map_handlep, NULL);
				}
			}
			return (ret);
		}

		pci_rp->pci_phys_low = ddi_prop_get_int64(DDI_DEV_T_ANY,
		    rdip, 0, "ecfga-base-address", 0);

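		/*
		 * Memory-mapped config access: each function has a 4 KB
		 * configuration window located at
		 *	base + (bus << 20) + (dev << 15) + (func << 12).
		 * For example, bus 5, device 2, function 1 would map at
		 * base + 0x511000 (illustrative values).
		 */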
		pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
		    (cfp->c_devnum) << 15 | (cfp->c_funcnum << 12));

		pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
	}

	length = pci_rp->pci_size_low;

	/*
	 * range check
	 */
	if ((offset >= length) || (len > length) || (offset + len > length))
		return (DDI_FAILURE);

	/*
	 * Adjust offset and length
	 * A non-zero length means override the one in the regspec.
	 */
	pci_rp->pci_phys_low += (uint_t)offset;
	if (len != 0)
		pci_rp->pci_size_low = len;

	/*
	 * Convert the PCI regspec into the generic regspec used by the
	 * parent root nexus driver.
	 */
	switch (space) {
	case PCI_ADDR_IO:
		reg.regspec_bustype = 1;
		break;
	case PCI_ADDR_CONFIG:
	case PCI_ADDR_MEM64:
		/*
		 * We can't handle 64-bit devices that are mapped above
		 * 4G or that are larger than 4G.
		 */
		if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
			return (DDI_FAILURE);
		/*
		 * Other than that, we can treat them as 32-bit mappings
		 */
		/* FALLTHROUGH */
	case PCI_ADDR_MEM32:
		reg.regspec_bustype = 0;
		break;
	default:
		return (DDI_FAILURE);
	}

	reg.regspec_addr = pci_rp->pci_phys_low;
	reg.regspec_size = pci_rp->pci_size_low;

	mp->map_obj.rp = &reg;
	retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
	if (retval == DDI_SUCCESS) {
		/*
		 * For config space, force gets to use the cautious access
		 * routines.  These will handle default and protected mode
		 * accesses too.
		 */
		if (space == PCI_ADDR_CONFIG) {
			ap = (ddi_acc_impl_t *)mp->map_handlep;
			ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
			ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
			ap->ahi_get8 = i_ddi_caut_get8;
			ap->ahi_get16 = i_ddi_caut_get16;
			ap->ahi_get32 = i_ddi_caut_get32;
			ap->ahi_get64 = i_ddi_caut_get64;
			ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
			ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
			ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
			ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
		}
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC) {
			ndi_fmc_insert(rdip, ACC_HANDLE,
			    (void *)mp->map_handlep, NULL);
		}
	}
	return (retval);
}


/*ARGSUSED*/
static int
npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	int		rn;
	int		totreg;
	uint_t		reglen;
	pci_regspec_t	*drv_regp;
	struct attachspec *asp;
	struct detachspec *dsp;
	pci_state_t	*pci_p = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (npe_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (npe_removechild((dev_info_t *)arg));

	case DDI_CTLOPS_SIDDEV:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);

		*(int *)result = 0;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
		    &reglen) != DDI_PROP_SUCCESS) {
			return (DDI_FAILURE);
		}

		totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
		if (ctlop == DDI_CTLOPS_NREGS)
			*(int *)result = totreg;
		else if (ctlop == DDI_CTLOPS_REGSIZE) {
			rn = *(int *)arg;
			if (rn >= totreg) {
				ddi_prop_free(drv_regp);
				return (DDI_FAILURE);
			}
			*(off_t *)result = drv_regp[rn].pci_size_low;
		}
		ddi_prop_free(drv_regp);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_POWER:
	{
		power_req_t	*reqp = (power_req_t *)arg;
		/*
		 * We currently understand reporting of PCI_PM_IDLESPEED
		 * capability. Everything else is passed up.
		 */
		if ((reqp->request_type == PMR_REPORT_PMCAP) &&
		    (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
			return (DDI_SUCCESS);

		break;
	}

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));

	/* X86 systems support PME wakeup from suspended state */
	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		asp = (struct attachspec *)arg;
		if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
			pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
			(void) pcie_postattach_child(rdip);
		}

		/* only do this for immediate children */
		if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
		    ddi_get_parent(rdip) == dip)
			if (pci_pre_resume(rdip) != DDI_SUCCESS) {
				/* Not good, better stop now. */
				cmn_err(CE_PANIC,
				    "Couldn't pre-resume device %p",
				    (void *) dip);
				/* NOTREACHED */
			}

		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		dsp = (struct detachspec *)arg;

		if (dsp->when == DDI_PRE)
			pf_fini(rdip, dsp->cmd);

		/* only do this for immediate children */
		if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
		    ddi_get_parent(rdip) == dip)
			if (pci_post_suspend(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);

		return (DDI_SUCCESS);

	default:
		break;
	}

	return (ddi_ctlops(dip, rdip, ctlop, arg, result));
}


/*
 * npe_intr_ops
 */
static int
npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
}


static int
npe_initchild(dev_info_t *child)
{
	char		name[80];
	pcie_bus_t	*bus_p;
	uint32_t	regs;
	ddi_acc_handle_t	cfg_hdl;

	/*
	 * Do not bind drivers to empty bridges.
	 * Fail above if the bridge is found to be hotplug capable.
	 */
	if (npe_disable_empty_bridges_workaround(child) == 1)
		return (DDI_FAILURE);

	if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
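	/*
	 * For example, a unit-address of "2" names device 2, function 0,
	 * while "2,1" names device 2, function 1 (illustrative values).
	 */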
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pci_common_name_child) ==
		    DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for DDIVS to run under PCI Express */
		if (pci_allow_pseudo_children) {
			/*
			 * If the "interrupts" property doesn't exist,
			 * this must be the ddivs no-intr case, so return
			 * DDI_SUCCESS instead of DDI_FAILURE.
			 */
			if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
			    DDI_PROP_DONTPASS, "interrupts", -1) == -1)
				return (DDI_SUCCESS);
			/*
			 * Create the ddi_parent_private_data for a pseudo
			 * child.
			 */
			pci_common_set_parent_private_data(child);
			return (DDI_SUCCESS);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_get_name(child), ddi_get_name_addr(child),
		    ddi_get_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1)
		pci_common_set_parent_private_data(child);
	else
		ddi_set_parent_data(child, NULL);

	/* Disable certain errors on PCIe drivers for x86 platforms */
	regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
	pcie_set_aer_uce_mask(regs);
	regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
	pcie_set_aer_ce_mask(regs);
	regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
	pcie_set_aer_suce_mask(regs);

	/*
	 * If URs are disabled, mask SERRs as well; otherwise the system
	 * will still be notified of URs.
	 */
	if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
		pcie_set_serr_mask(1);

	if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
		npe_ck804_fix_aer_ptr(cfg_hdl);
		npe_nvidia_error_mask(cfg_hdl);
		npe_intel_error_mask(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}

	bus_p = pcie_init_bus(child);
	if (bus_p) {
		uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
		uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
		uint16_t rev_id = bus_p->bus_rev_id;

		/* Disable AER for certain NVIDIA Chipsets */
		if ((vendor_id == NVIDIA_VENDOR_ID) &&
		    (device_id == NVIDIA_CK804_DEVICE_ID) &&
		    (rev_id < NVIDIA_CK804_AER_VALID_REVID))
			bus_p->bus_aer_off = 0;

		(void) pcie_initchild(child);
	}

	return (DDI_SUCCESS);
}


static int
npe_removechild(dev_info_t *dip)
{
	pcie_uninitchild(dip);

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}


/*
 * When this module was retrofitted for pci_tools, functions such as open,
 * close, and ioctl were pulled into it.  Previously, the functions in the
 * pcihp module were referenced directly; now they are called or referenced
 * through the pcihp cb_ops structure from functions in this module.
 */
static int
npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	return ((pcihp_get_cb_ops())->cb_open(devp, flags, otyp, credp));
}

static int
npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	return ((pcihp_get_cb_ops())->cb_close(dev, flags, otyp, credp));
}

static int
npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor = getminor(dev);
	int		instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(minor);
	pci_state_t	*pci_p = ddi_get_soft_state(npe_statep, instance);
	dev_info_t	*dip;

	if (pci_p == NULL)
		return (ENXIO);

	dip = pci_p->pci_dip;

	return (pci_common_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
}

static int
npe_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
	int flags, char *name, caddr_t valuep, int *lengthp)
{
	return ((pcihp_get_cb_ops())->cb_prop_op(dev, dip, prop_op, flags,
	    name, valuep, lengthp));
}

static int
npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	return (pcihp_info(dip, cmd, arg, result));
}

/*ARGSUSED*/
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
	pci_state_t  *pcip = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	ASSERT(ibc != NULL);
	*ibc = pcip->pci_fm_ibc;

	return (pcip->pci_fmcap);
}

/*ARGSUSED*/
static int
npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
{
	/*
	 * On current x86 systems, npe's callback does not get called for failed
	 * loads.  If in the future this feature is used, the fault PA should be
	 * logged in the derr->fme_bus_specific field.  The appropriate PCIe
	 * error handling code should be called and needs to be coordinated with
	 * safe access handling.
	 */

	return (DDI_FM_OK);
}
977