/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * SPARC Host to PCI Express nexus driver
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_subrdefs.h>
#include <sys/spl.h>
#include <sys/epm.h>
#include <sys/iommutsb.h>
#include "px_obj.h"
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/pci_tools.h>
#include "px_tools_ext.h"
#include <sys/pcie_pwr.h>
#include <sys/pci_cfgacc.h>

/*
 * function prototypes for dev ops routines:
 */
static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int px_enable_err_intr(px_t *px_p);
static void px_disable_err_intr(px_t *px_p);
static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int px_cb_attach(px_t *);
static int px_pwr_setup(dev_info_t *dip);
static void px_pwr_teardown(dev_info_t *dip);
static void px_set_mps(px_t *px_p);

extern void pci_cfgacc_acc(pci_cfgacc_req_t *);
extern int pcie_max_mps;
extern void (*pci_cfgacc_acc_p)(pci_cfgacc_req_t *);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,				/* (*bus_get_intrspec)(); obsolete */
	0,				/* (*bus_add_intrspec)(); obsolete */
	0,				/* (*bus_remove_intrspec)(); obsolete */
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,				/* (*bus_intr_ctl)(); obsolete */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	px_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	px_bus_enter,			/* (*bus_fm_access_enter)(); */
	px_bus_exit,			/* (*bus_fm_access_fini)(); */
	pcie_bus_power,			/* (*bus_power)(); */
	px_intr_ops,			/* (*bus_intr_op)(); */
	pcie_hp_common_ops		/* (*bus_hp_op)(); */
};

extern struct cb_ops px_cb_ops;

static struct dev_ops px_ops = {
	DEVO_REV,
	0,				/* refcnt */
	px_info,			/* getinfo */
	nulldev,			/* identify */
	0,				/* probe */
	px_attach,			/* attach */
	px_detach,			/* detach */
	nodev,				/* reset */
	&px_cb_ops,			/* driver operations */
	&px_bus_ops,			/* bus operations */
	nulldev,			/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
#if defined(sun4u)
	"Sun4u Host to PCIe nexus driver",	/* Name of module. */
#elif defined(sun4v)
	"Sun4v Host to PCIe nexus driver",	/* Name of module. */
#endif
	&px_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state */
void *px_state_p;

int px_force_intx_support = 1;

int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	minor_t	minor = getminor((dev_t)arg);
	int	instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
	px_t	*px_p = INST_TO_STATE(instance);
	int	ret = DDI_SUCCESS;

	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	case DDI_INFO_DEVT2DEVINFO:
		if (px_p == NULL) {
			ret = DDI_FAILURE;
			break;
		}

		*result = (void *)px_p->px_dip;
		break;
	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}

/* device driver entry points */
/*
 * attach entry point:
 */
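/*
 * The DDI_ATTACH case below builds state strictly in the order: soft
 * state, properties, device handle, interrupt block (ib), common block
 * (cb), MMU, MSIQ, MSI, PEC, DMA, FM, error interrupts, hotplug and the
 * pcitool node. The err_bad_* labels unwind in exactly the reverse
 * order, so a failure at any step tears down only what was successfully
 * set up before it.
 */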
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = 0;
	pcie_hp_regops_t regops;
	pcie_bus_t	*bus_p;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/* See pci_cfgacc.c */
		pci_cfgacc_acc_p = pci_cfgacc_acc;

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PCI_SOFT_STATE_CLOSED;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");

		/* Initialize px_dbg for high pil printing */
		px_dbg_attach(dip, &px_p->px_dbg_hdl);
		pcie_rc_init_bus(dip);

		/*
		 * Get key properties of the PCI bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/* Cache the BDF of the root port nexus */
		px_p->px_bdf = px_lib_get_bdf(px_p);

		/*
		 * Initialize the interrupt block. Note that this
		 * initializes error handling for the PEC as well.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma; /* nothing to uninitialize on DMA */

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma;

		/*
		 * All of the error handlers have been registered
		 * by now, so it's time to activate all the interrupts.
		 */
		if ((px_enable_err_intr(px_p)) != DDI_SUCCESS)
			goto err_bad_intr;

		if (px_lib_hotplug_init(dip, (void *)&regops) == DDI_SUCCESS) {
			pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

			bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
		}

		(void) px_set_mps(px_p);

		if (pcie_init(dip, (caddr_t)&regops) != DDI_SUCCESS)
			goto err_bad_hotplug;

		(void) pcie_hpintr_enable(dip);

		if (pxtool_init(dip) != DDI_SUCCESS)
			goto err_bad_pcitool_node;

		/*
		 * Power management setup. Even if it fails, attach will
		 * succeed, as this is an optional feature. Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed\n");
			pwr_common_teardown(dip);
		}

		/*
		 * add cpr callback
		 */
		px_cpr_add_callb(px_p);

		/*
		 * do fabric sync in case we don't need to wait for
		 * any bridge driver to be ready
		 */
		(void) px_lib_fabric_sync(dip);

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;

		/*
		 * Save the base address in bus_t for pci_cfgacc_xxx();
		 * this depends on the px structure being properly
		 * initialized.
		 */
		bus_p = PCIE_DIP2BUS(dip);
		bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip);

		/*
		 * Populate bus_t for all devices in this fabric after FMA
		 * is initialized, so that config access errors can
		 * trigger a panic.
		 */
		pcie_fab_init_bus(dip, PCIE_BUS_ALL);

		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_pcitool_node:
		(void) pcie_hpintr_disable(dip);
		(void) pcie_uninit(dip);
err_bad_hotplug:
		(void) px_lib_hotplug_uninit(dip);
		px_disable_err_intr(px_p);
err_bad_intr:
		px_fm_detach(px_p);
err_bad_dma:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
err_bad_cb:
		px_ib_detach(px_p);
err_bad_ib:
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n");
		}
err_bad_dev_init:
		px_free_props(px_p);
err_bad_px_prop:
		pcie_rc_fini_bus(dip);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p = INST_TO_STATE(instance);

		mutex_enter(&px_p->px_mutex);

		/* suspend might not have succeeded */
		if (px_p->px_state != PX_SUSPENDED) {
			DBG(DBG_ATTACH, px_p->px_dip,
			    "instance NOT suspended\n");
			ret = DDI_FAILURE;
			mutex_exit(&px_p->px_mutex);
			break;
		}

		px_msiq_resume(px_p);
		px_lib_resume(dip);
		(void) pcie_pwr_resume(dip);
		px_p->px_state = PX_ATTACHED;

		mutex_exit(&px_p->px_mutex);

		break;
	default:
		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}

/*
 * detach entry point:
 */
/*ARGSUSED*/
static int
px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int		ret;

	/*
	 * Make sure we are currently attached
	 */
	if (px_p->px_state != PX_ATTACHED) {
		DBG(DBG_DETACH, dip, "Instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&px_p->px_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DBG(DBG_DETACH, dip, "DDI_DETACH\n");

		/*
		 * remove cpr callback
		 */
		px_cpr_rem_callb(px_p);

		(void) pcie_hpintr_disable(dip);

		if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p))
			(void) px_lib_hotplug_uninit(dip);

		if (pcie_uninit(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}

		/* Destroy bus_t for the whole fabric */
		pcie_fab_fini_bus(dip, PCIE_BUS_ALL);

		/*
		 * things which used to be done in obj_destroy
		 * are now in-lined here.
		 */

		px_p->px_state = PX_DETACHED;

		pxtool_uninit(dip);

		px_disable_err_intr(px_p);
		px_fm_detach(px_p);
		px_pec_detach(px_p);
		px_pwr_teardown(dip);
		pwr_common_teardown(dip);
		px_msi_detach(px_p);
		px_msiq_detach(px_p);
		px_mmu_detach(px_p);
		px_ib_detach(px_p);
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n");
		}

		/*
		 * Free the px soft state structure and the rest of the
		 * resources it's using.
		 */
		px_free_props(px_p);
		pcie_rc_fini_bus(dip);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_exit(&px_p->px_mutex);
		mutex_destroy(&px_p->px_mutex);

		px_p->px_dev_hdl = 0;
		ddi_soft_state_free(px_state_p, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}
		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
			px_p->px_state = PX_SUSPENDED;
		mutex_exit(&px_p->px_mutex);

		return (ret);

	default:
		DBG(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&px_p->px_mutex);
		return (DDI_FAILURE);
	}
}

static int
px_enable_err_intr(px_t *px_p)
{
	/* Add FMA Callback handler for failed PIO Loads */
	px_fm_cb_enable(px_p);

	/* Add Common Block mondo handler */
	if (px_cb_add_intr(&px_p->px_cb_fault) != DDI_SUCCESS)
		goto cb_bad;

	/* Add PEU Block Mondo Handler */
	if (px_err_add_intr(&px_p->px_fault) != DDI_SUCCESS)
		goto peu_bad;

	/* Enable interrupt handler for PCIE Fabric Error Messages */
	if (px_pec_msg_add_intr(px_p) != DDI_SUCCESS)
		goto msg_bad;

	return (DDI_SUCCESS);

msg_bad:
	px_err_rem_intr(&px_p->px_fault);
peu_bad:
	px_cb_rem_intr(&px_p->px_cb_fault);
cb_bad:
	px_fm_cb_disable(px_p);

	return (DDI_FAILURE);
}

static void
px_disable_err_intr(px_t *px_p)
{
	px_pec_msg_rem_intr(px_p);
	px_err_rem_intr(&px_p->px_fault);
	px_cb_rem_intr(&px_p->px_cb_fault);
	px_fm_cb_disable(px_p);
}

int
px_cb_attach(px_t *px_p)
{
	px_fault_t	*fault_p = &px_p->px_cb_fault;
	dev_info_t	*dip = px_p->px_dip;
	sysino_t	sysino;

	if (px_lib_intr_devino_to_sysino(dip,
	    px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	fault_p->px_fh_dip = dip;
	fault_p->px_fh_sysino = sysino;
	fault_p->px_err_func = px_err_cb_intr;
	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC];

	return (DDI_SUCCESS);
}

/*
 * power management related initialization specific to px
 * called by px_attach()
 */
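/*
 * Context for the PME_TO_ACK handler set up below (from the PCIe spec,
 * not stated elsewhere in this file): before a link is taken to the
 * L2/L3 Ready state, the root port broadcasts a PME_Turn_Off message and
 * each downstream component answers with a PME_TO_Ack message.
 * px_pwr_setup() routes that ack message to an MSI queue so that
 * px_pmeq_intr() can wake any thread waiting on px_l23ready_cv for the
 * acknowledgment.
 */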
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t	*pwr_p;
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * Indicate support for LDI (Layered Driver Interface).
	 * Create the property, if it is not already there.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}
	/* No support for device PM. We are always at full power. */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, -1,
	    &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    "PME_TO_ACK intr\n");
		goto pwr_setup_err1;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
pwr_setup_err1:
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}

/*
 * undo whatever is done in px_pwr_setup. called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	if (PCIE_PMINFO(dip) == NULL || PCIE_NEXUS_PMINFO(dip) == NULL)
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = (msiqid_t)-1;

	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}

/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
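/*
 * For illustration (hypothetical leaf-driver code, not from this file):
 * the DDI_MT_RNUMBER case below is what a child reaches with a plain
 * register mapping call such as
 *
 *	ddi_acc_handle_t h;
 *	caddr_t addr;
 *	ddi_device_acc_attr_t attr = { DDI_DEVICE_ATTR_V0,
 *	    DDI_STRUCTURE_LE_ACC, DDI_STRICTORDER_ACC };
 *
 *	(void) ddi_regs_map_setup(rdip, 1, &addr, 0, 0, &attr, &h);
 *
 * where rnumber 1 indexes the child's "reg" property; px_map() relocates
 * that regspec through "assigned-addresses", translates it through
 * "ranges", and hands the result to ddi_map() to walk up the tree.
 */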
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t off, off_t len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCIe devices so that the following range check
		 * is done correctly, depending on the implementation
		 * of the pcieb bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE))
			return (DDI_ME_INVAL);
		/*
		 * If the following function returns DDI_FAILURE, no
		 * virtual config space access services are defined
		 * in this layer. Otherwise the mapping is provided
		 * right here and we return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services, or we are mapping
	 * a region of memory-mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set up access functions for FM access error capable
		 * drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)))
			px_fm_acc_setup(mp, rdip, rp);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
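/*
 * A NULL handlep makes the request advisory: the mapping resources are
 * checked and then released again through the freehandle path, which is
 * why the DVMA and PTP cases below jump to freehandle even on success
 * when handlep is NULL.
 */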
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
    ddi_dma_handle_t *handlep)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp;
	int	ret;

	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq);
	if (mp == NULL)
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = px_dma_type(px_p, dmareq, mp))
		goto freehandle;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto freehandle;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		ret = px_dvma_win(px_p, dmareq, mp);
		if (ret != 0 || handlep == NULL)
			goto freehandle;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					break;
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_PTP:
		ret = px_dma_physwin(px_p, dmareq, mp);
		if (ret != 0 || handlep == NULL)
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_BYPASS:
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		px_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t *mp;
	int rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	mp = px_dma_allocmp(dip, rdip, waitfp, arg);
	if (mp == NULL)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;		/* whole object - augmented later */
	*PX_DEV_ATTR(mp) = *attrp;	/* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	px_dma_freemp((ddi_dma_impl_t *)handle);

	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
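/*
 * For illustration (hypothetical leaf-driver code, not from this file):
 * the alloc and bind entry points above and below are what the DDI
 * framework invokes when a child does something like
 *
 *	ddi_dma_handle_t dh;
 *	ddi_dma_cookie_t dc;
 *	uint_t ccnt;
 *
 *	(void) ddi_dma_alloc_handle(rdip, &attr, DDI_DMA_SLEEP, NULL, &dh);
 *	(void) ddi_dma_addr_bind_handle(dh, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &dc, &ccnt);
 *
 * and then programs the device with dc.dmac_laddress and dc.dmac_size,
 * retrieving any further cookies with ddi_dma_nextcookie().
 */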
int
px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int	ret;

	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;

	if (ret = px_dma_type(px_p, dmareq, mp))
		goto err;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto err;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (ret = px_dvma_win(px_p, dmareq, mp))
			goto map_err;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					goto mapped;
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		mp->dmai_ncookies = 1;
		mp->dmai_curcookie = 1;
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		if (ret = px_dma_physwin(px_p, dmareq, mp))
			goto map_err;
		*ccountp = PX_WINLST(mp)->win_ncookies;
		*cookiep =
		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
		/*
		 * mp->dmai_ncookies and mp->dmai_curcookie are set by
		 * px_dma_physwin().
		 */
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
	    cookiep->dmac_address, cookiep->dmac_size);
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	/* insert dma handle into FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		mp->dmai_error.err_cf = px_err_dma_hdl_check;

	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	px_dma_freepfn(mp);
err:
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
	return (ret);
}


/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	mp->dmai_error.err_cf = NULL;

	/*
	 * If the handle is using the IOMMU, unload all of its IOMMU
	 * translations here.
	 */
	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		px_mmu_unmap_window(mmu_p, mp);
		px_dvma_unmap(mmu_p, mp);
		px_dma_freepfn(mp);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		px_dma_freewin(mp);
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (mmu_p->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&mmu_p->mmu_dvma_clid);
	}
	if (px_kmem_clid) {
		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
	mp->dmai_ncookies = 0;
	mp->dmai_curcookie = 0;

	return (DDI_SUCCESS);
}

/*
 * bus dma win entry point:
 */
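/*
 * For illustration (hypothetical leaf-driver code, not from this file):
 * when a bind returns DDI_DMA_PARTIAL_MAP, the child walks the windows
 * that px_dma_win() activates one at a time, e.g.
 *
 *	uint_t nwin, w, ccnt;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t dc;
 *
 *	(void) ddi_dma_numwin(dh, &nwin);
 *	for (w = 0; w < nwin; w++) {
 *		(void) ddi_dma_getwin(dh, w, &off, &len, &dc, &ccnt);
 *		transfer this window's cookies;
 *	}
 *
 * ddi_dma_numwin() and ddi_dma_getwin() resolve to mp->dmai_nwin and
 * the bus_dma_win op below.
 */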
int
px_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
	int		ret;

	DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	px_dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DBG(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (win != PX_DMA_CURWIN(mp)) {
			px_t *px_p = DIP_TO_STATE(dip);
			px_mmu_t *mmu_p = px_p->px_mmu_p;
			px_mmu_unmap_window(mmu_p, mp);

			/* map_window sets dmai_mapping/size/offset */
			if ((ret = px_mmu_map_window(mmu_p,
			    mp, win)) != DDI_SUCCESS)
				return (ret);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		mp->dmai_ncookies = 1;
		mp->dmai_curcookie = 1;
		break;
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		px_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++) {};
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		mp->dmai_ncookies = win_p->win_ncookies;
		mp->dmai_curcookie = 1;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef	DEBUG
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */

/*
 * bus dma control entry point:
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
    uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef	DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
		}
	default:
		break;
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
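/*
 * For illustration (hypothetical caller, not from this file): the generic
 * DDI helpers turn into these ctlops; for example, a child calling
 *
 *	off_t sz;
 *	(void) ddi_dev_regsize(rdip, 0, &sz);
 *
 * arrives here as DDI_CTLOPS_REGSIZE with arg pointing at the register
 * number and result at the size, which px_ctlops() answers from the
 * child's "reg" property via px_get_reg_set_size().
 */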
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				pcie_clear_errors(rdip);
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			if (as->cmd == DDI_ATTACH &&
			    as->result != DDI_SUCCESS) {
				/*
				 * Attach failed for the child device. The
				 * child driver may have made PM calls before
				 * the attach failed. pcie_pm_remove_child()
				 * should clean up PM state and holds (if any)
				 * associated with the child device.
				 */
				return (pcie_pm_remove_child(dip, rdip));
			}

			if (as->result == DDI_SUCCESS)
				pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd);

			(void) pcie_postattach_child(rdip);

			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		case DDI_PRE:
			pf_fini(rdip, ds->cmd);
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		if (ddi_get_parent(rdip) == dip)
			return (px_report_dev(rdip));

		(void) px_lib_fabric_sync(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = px_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = MMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
		return (px_lib_ctlops_poke(dip, rdip,
		    (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
		return (px_lib_ctlops_peek(dip, rdip,
		    (peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_POWER:
	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}

/* ARGSUSED */
int
px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int	intr_types, ret = DDI_SUCCESS;
	px_t	*px_p = DIP_TO_STATE(dip);

	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;

		if ((pci_msi_get_supported_type(rdip,
		    &intr_types)) == DDI_SUCCESS) {
			/*
			 * Double check supported interrupt types vs.
			 * what the host bridge supports.
			 */
			*(int *)result |= intr_types;
		}

		*(int *)result &=
		    (px_force_intx_support ?
		    (px_p->px_supp_intr_types | DDI_INTR_TYPE_FIXED) :
		    px_p->px_supp_intr_types);
		return (*(int *)result ? DDI_SUCCESS : DDI_FAILURE);
	}

	/*
	 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
	 * Return failure if interrupt type is not supported.
	 */
	switch (hdlp->ih_type) {
	case DDI_INTR_TYPE_FIXED:
		ret = px_intx_ops(dip, rdip, intr_op, hdlp, result);
		break;
	case DDI_INTR_TYPE_MSI:
	case DDI_INTR_TYPE_MSIX:
		ret = px_msix_ops(dip, rdip, intr_op, hdlp, result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

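/*
 * Negotiate the fabric Max Payload Size: read the root complex's MPS
 * capability, clamp it to the pcie_max_mps tunable, let
 * pcie_get_fabric_mps() lower it to the smallest value supported below
 * this root port, then program the result into the root complex and
 * record it in bus_mps.
 */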
static void
px_set_mps(px_t *px_p)
{
	dev_info_t	*dip;
	pcie_bus_t	*bus_p;
	int		max_supported;

	dip = px_p->px_dip;
	bus_p = PCIE_DIP2BUS(dip);

	bus_p->bus_mps = -1;

	if (pcie_root_port(dip) == DDI_FAILURE) {
		if (px_lib_get_root_complex_mps(px_p, dip,
		    &max_supported) < 0) {

			DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n");
			return;
		}

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n",
		    max_supported);

		if (pcie_max_mps < max_supported)
			max_supported = pcie_max_mps;

		(void) pcie_get_fabric_mps(dip, ddi_get_child(dip),
		    &max_supported);

		bus_p->bus_mps = max_supported;

		(void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps);

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n",
		    bus_p->bus_mps);
	}
}