1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/types.h>
26 #include <sys/kmem.h>
27 #include <sys/conf.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/sunndi.h>
31 #include <sys/fm/protocol.h>
32 #include <sys/fm/util.h>
33 #include <sys/modctl.h>
34 #include <sys/disp.h>
35 #include <sys/stat.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/vmem.h>
38 #include <sys/iommutsb.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ivintr.h>
41 #include <sys/byteorder.h>
42 #include <sys/spl.h>
43 #include <px_obj.h>
44 #include <sys/pcie_pwr.h>
45 #include "px_tools_var.h"
46 #include <px_regs.h>
47 #include <px_csr.h>
48 #include <sys/machsystm.h>
49 #include "px_lib4u.h"
50 #include "px_err.h"
51 #include "oberon_regs.h"
52 #include <sys/hotplug/pci/pcie_hp.h>
53
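/*
 * jbus_stst_order() is provided only by certain CPU modules, so it is
 * bound weakly here; px_lib_dma_sync() tests for its presence at runtime.
 */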
54 #pragma weak jbus_stst_order
55
56 extern void jbus_stst_order();
57
58 ulong_t px_mmu_dvma_end = 0xfffffffful;
59 uint_t px_ranges_phi_mask = 0xfffffffful;
60 uint64_t *px_oberon_ubc_scratch_regs;
61 uint64_t px_paddr_mask;
62
63 static int px_goto_l23ready(px_t *px_p);
64 static int px_goto_l0(px_t *px_p);
65 static int px_pre_pwron_check(px_t *px_p);
66 static uint32_t px_identity_init(px_t *px_p);
67 static boolean_t px_cpr_callb(void *arg, int code);
68 static uint_t px_cb_intr(caddr_t arg);
69
70 /*
71 * ACKNAK Latency Threshold Table.
72 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
73 */
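/*
 * Rows are indexed by maximum packet size, columns by link width; the
 * replay timer table below uses the same layout.
 */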
74 int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
75 {0xED, 0x49, 0x43, 0x30},
76 {0x1A0, 0x76, 0x6B, 0x48},
77 {0x22F, 0x9A, 0x56, 0x56},
78 {0x42F, 0x11A, 0x96, 0x96},
79 {0x82F, 0x21A, 0x116, 0x116},
80 {0x102F, 0x41A, 0x216, 0x216}
81 };
82
83 /*
84 * TxLink Replay Timer Latency Table
85  * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
86 */
87 int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
88 {0x379, 0x112, 0xFC, 0xB4},
89 {0x618, 0x1BA, 0x192, 0x10E},
90 {0x831, 0x242, 0x143, 0x143},
91 {0xFB1, 0x422, 0x233, 0x233},
92 {0x1EB0, 0x7E1, 0x412, 0x412},
93 {0x3CB0, 0xF61, 0x7D2, 0x7D2}
94 };
95 /*
96  * px_lib_map_regs
97 *
98 * This function is called from the attach routine to map the registers
99 * accessed by this driver.
100 *
101 * used by: px_attach()
102 *
103  * return value: DDI_SUCCESS on success, DDI_FAILURE on failure
104 */
105 int
106 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
107 {
108 ddi_device_acc_attr_t attr;
109 px_reg_bank_t reg_bank = PX_REG_CSR;
110
111 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
112 pxu_p, dip);
113
114 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
115 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
116 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
117
118 /*
119 * PCI CSR Base
120 */
121 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
122 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
123 goto fail;
124 }
125
126 reg_bank++;
127
128 /*
129 * XBUS CSR Base
130 */
131 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
132 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
133 goto fail;
134 }
135
136 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
137
138 done:
139 for (; reg_bank >= PX_REG_CSR; reg_bank--) {
140 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
141 reg_bank, pxu_p->px_address[reg_bank]);
142 }
143
144 return (DDI_SUCCESS);
145
146 fail:
147 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
148 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
149
150 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
151 pxu_p->px_address[reg_bank] = NULL;
152 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
153 }
154
155 return (DDI_FAILURE);
156 }
157
158 /*
159 * px_lib_unmap_regs:
160 *
161  * This routine unmaps the registers mapped by px_lib_map_regs().
162 *
163 * used by: px_detach(), and error conditions in px_attach()
164 *
165 * return value: none
166 */
167 void
168 px_lib_unmap_regs(pxu_t *pxu_p)
169 {
170 int i;
171
172 for (i = 0; i < PX_REG_MAX; i++) {
173 if (pxu_p->px_ac[i])
174 ddi_regs_map_free(&pxu_p->px_ac[i]);
175 }
176 }
177
178 int
179 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
180 {
181
182 caddr_t xbc_csr_base, csr_base;
183 px_dvma_range_prop_t px_dvma_range;
184 pxu_t *pxu_p;
185 uint8_t chip_mask;
186 px_t *px_p = DIP_TO_STATE(dip);
187 px_chip_type_t chip_type = px_identity_init(px_p);
188
189 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);
190
191 if (chip_type == PX_CHIP_UNIDENTIFIED) {
192 cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
193 NAMEINST(dip));
194 return (DDI_FAILURE);
195 }
196
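/* Derive the chip bit mask and the DMA physical-address mask from the chip type. */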
197 chip_mask = BITMASK(chip_type);
198 px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
199 MMU_OBERON_PADDR_MASK;
200
201 /*
202 * Allocate platform specific structure and link it to
203 * the px state structure.
204 */
205 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
206 pxu_p->chip_type = chip_type;
207 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
208 "portid", -1);
209
210 /* Map in the registers */
211 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
212 kmem_free(pxu_p, sizeof (pxu_t));
213
214 return (DDI_FAILURE);
215 }
216
217 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
218 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
219
220 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
221 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
222 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
223
224 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
225
226 /*
227 * Create "virtual-dma" property to support child devices
228  * that need to know the DVMA range.
229 */
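/*
 * Each 8-byte TTE in the TSB maps one MMU page, so the DVMA window below
 * covers (tsb_size / 8) pages and ends at px_mmu_dvma_end.
 */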
230 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
231 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
232 px_dvma_range.dvma_len = (uint32_t)
233 px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
234
235 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
236 "virtual-dma", (int *)&px_dvma_range,
237 sizeof (px_dvma_range_prop_t) / sizeof (int));
238 /*
239  * Initialize all Fire hardware-specific blocks.
240 */
241 hvio_cb_init(xbc_csr_base, pxu_p);
242 hvio_ib_init(csr_base, pxu_p);
243 hvio_pec_init(csr_base, pxu_p);
244 hvio_mmu_init(csr_base, pxu_p);
245
246 px_p->px_plat_p = (void *)pxu_p;
247
248 /*
249 * Initialize all the interrupt handlers
250 */
251 switch (PX_CHIP_TYPE(pxu_p)) {
252 case PX_CHIP_OBERON:
253 /*
254 		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
255 		 * Enable register to indicate the status of leaf reset.  We
256 		 * need to preserve the value of this bit and keep it in
257 		 * px_ilu_log_mask so the mask reflects the state of the bit.
258 */
259 if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
260 px_ilu_log_mask |= (1ull <<
261 ILU_ERROR_LOG_ENABLE_SPARE3);
262 else
263 px_ilu_log_mask &= ~(1ull <<
264 ILU_ERROR_LOG_ENABLE_SPARE3);
265
266 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
267 break;
268
269 case PX_CHIP_FIRE:
270 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
271 break;
272
273 default:
274 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
275 ddi_driver_name(dip), ddi_get_instance(dip));
276 return (DDI_FAILURE);
277 }
278
279 	/* Initialize device handle */
280 *dev_hdl = (devhandle_t)csr_base;
281
282 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
283
284 	/* Sun4u always supports fixed interrupts */
285 px_p->px_supp_intr_types |= DDI_INTR_TYPE_FIXED;
286
287 return (DDI_SUCCESS);
288 }
289
290 int
291 px_lib_dev_fini(dev_info_t *dip)
292 {
293 caddr_t csr_base;
294 uint8_t chip_mask;
295 px_t *px_p = DIP_TO_STATE(dip);
296 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
297
298 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
299
300 /*
301 * Deinitialize all the interrupt handlers
302 */
303 switch (PX_CHIP_TYPE(pxu_p)) {
304 case PX_CHIP_OBERON:
305 case PX_CHIP_FIRE:
306 chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
307 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
308 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
309 break;
310
311 default:
312 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
313 ddi_driver_name(dip), ddi_get_instance(dip));
314 return (DDI_FAILURE);
315 }
316
317 iommu_tsb_free(pxu_p->tsb_cookie);
318
319 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
320 kmem_free(px_p->px_plat_p, sizeof (pxu_t));
321 px_p->px_plat_p = NULL;
322 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma");
323
324 return (DDI_SUCCESS);
325 }
326
327 /*ARGSUSED*/
328 int
329 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
330 sysino_t *sysino)
331 {
332 px_t *px_p = DIP_TO_STATE(dip);
333 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
334 uint64_t ret;
335
336 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
337 "devino 0x%x\n", dip, devino);
338
339 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
340 pxu_p, devino, sysino)) != H_EOK) {
341 DBG(DBG_LIB_INT, dip,
342 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
343 return (DDI_FAILURE);
344 }
345
346 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
347 *sysino);
348
349 return (DDI_SUCCESS);
350 }
351
352 /*ARGSUSED*/
353 int
354 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
355 intr_valid_state_t *intr_valid_state)
356 {
357 uint64_t ret;
358
359 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
360 dip, sysino);
361
362 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
363 sysino, intr_valid_state)) != H_EOK) {
364 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
365 ret);
366 return (DDI_FAILURE);
367 }
368
369 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
370 *intr_valid_state);
371
372 return (DDI_SUCCESS);
373 }
374
375 /*ARGSUSED*/
376 int
377 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
378 intr_valid_state_t intr_valid_state)
379 {
380 uint64_t ret;
381
382 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
383 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
384
385 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
386 sysino, intr_valid_state)) != H_EOK) {
387 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
388 ret);
389 return (DDI_FAILURE);
390 }
391
392 return (DDI_SUCCESS);
393 }
394
395 /*ARGSUSED*/
396 int
397 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
398 intr_state_t *intr_state)
399 {
400 uint64_t ret;
401
402 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
403 dip, sysino);
404
405 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
406 sysino, intr_state)) != H_EOK) {
407 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
408 ret);
409 return (DDI_FAILURE);
410 }
411
412 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
413 *intr_state);
414
415 return (DDI_SUCCESS);
416 }
417
418 /*ARGSUSED*/
419 int
420 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
421 intr_state_t intr_state)
422 {
423 uint64_t ret;
424
425 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
426 "intr_state 0x%x\n", dip, sysino, intr_state);
427
428 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
429 sysino, intr_state)) != H_EOK) {
430 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
431 ret);
432 return (DDI_FAILURE);
433 }
434
435 return (DDI_SUCCESS);
436 }
437
438 /*ARGSUSED*/
439 int
440 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
441 {
442 px_t *px_p = DIP_TO_STATE(dip);
443 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
444 uint64_t ret;
445
446 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
447 dip, sysino);
448
449 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
450 sysino, cpuid)) != H_EOK) {
451 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
452 ret);
453 return (DDI_FAILURE);
454 }
455
456 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
457
458 return (DDI_SUCCESS);
459 }
460
461 /*ARGSUSED*/
462 int
463 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
464 {
465 px_t *px_p = DIP_TO_STATE(dip);
466 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
467 uint64_t ret;
468
469 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
470 "cpuid 0x%x\n", dip, sysino, cpuid);
471
472 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
473 sysino, cpuid)) != H_EOK) {
474 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
475 ret);
476 return (DDI_FAILURE);
477 }
478
479 return (DDI_SUCCESS);
480 }
481
482 /*ARGSUSED*/
483 int
484 px_lib_intr_reset(dev_info_t *dip)
485 {
486 devino_t ino;
487 sysino_t sysino;
488
489 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
490
491 /* Reset all Interrupts */
492 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
493 if (px_lib_intr_devino_to_sysino(dip, ino,
494 &sysino) != DDI_SUCCESS)
495 return (BF_FATAL);
496
497 if (px_lib_intr_setstate(dip, sysino,
498 INTR_IDLE_STATE) != DDI_SUCCESS)
499 return (BF_FATAL);
500 }
501
502 return (BF_NONE);
503 }
504
505 /*ARGSUSED*/
506 int
507 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
508 io_attributes_t attr, void *addr, size_t pfn_index, int flags)
509 {
510 px_t *px_p = DIP_TO_STATE(dip);
511 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
512 uint64_t ret;
513
514 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
515 "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n",
516 dip, tsbid, pages, attr, addr, pfn_index, flags);
517
518 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
519 attr, addr, pfn_index, flags)) != H_EOK) {
520 DBG(DBG_LIB_DMA, dip,
521 "px_lib_iommu_map failed, ret 0x%lx\n", ret);
522 return (DDI_FAILURE);
523 }
524
525 return (DDI_SUCCESS);
526 }
527
528 /*ARGSUSED*/
529 int
530 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
531 {
532 px_t *px_p = DIP_TO_STATE(dip);
533 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
534 uint64_t ret;
535
536 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
537 "pages 0x%x\n", dip, tsbid, pages);
538
539 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
540 != H_EOK) {
541 DBG(DBG_LIB_DMA, dip,
542 "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
543
544 return (DDI_FAILURE);
545 }
546
547 return (DDI_SUCCESS);
548 }
549
550 /*ARGSUSED*/
551 int
552 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
553 r_addr_t *r_addr_p)
554 {
555 px_t *px_p = DIP_TO_STATE(dip);
556 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
557 uint64_t ret;
558
559 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
560 dip, tsbid);
561
562 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
563 attr_p, r_addr_p)) != H_EOK) {
564 DBG(DBG_LIB_DMA, dip,
565 "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
566
567 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
568 }
569
570 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx "
571 "r_addr 0x%llx\n", *attr_p, *r_addr_p);
572
573 return (DDI_SUCCESS);
574 }
575
576 int
577 px_lib_iommu_detach(px_t *px_p)
578 {
579 /*
580 * Deallocate DVMA addr space that was reserved for OBP TTE's
581 * during Attach.
582 */
583 hvio_obptsb_detach(px_p);
584
585 return (DDI_SUCCESS);
586 }
587
588 /*
589  * Checks DMA attributes against the system bypass range.
590  * The bypass range is determined by the hardware; return its bounds so
591  * the common code can do generic checking against them.
592 */
593 /*ARGSUSED*/
594 int
595 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
596 uint64_t *lo_p, uint64_t *hi_p)
597 {
598 px_t *px_p = DIP_TO_STATE(dip);
599 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
600
601 *lo_p = hvio_get_bypass_base(pxu_p);
602 *hi_p = hvio_get_bypass_end(pxu_p);
603
604 return (DDI_SUCCESS);
605 }
606
607
608 /*ARGSUSED*/
609 int
610 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
611 io_addr_t *io_addr_p)
612 {
613 uint64_t ret;
614 px_t *px_p = DIP_TO_STATE(dip);
615 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
616
617 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
618 "attr 0x%llx\n", dip, ra, attr);
619
620 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
621 attr, io_addr_p)) != H_EOK) {
622 DBG(DBG_LIB_DMA, dip,
623 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
624 return (DDI_FAILURE);
625 }
626
627 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
628 *io_addr_p);
629
630 return (DDI_SUCCESS);
631 }
632
633 /*
634 * Returns any needed IO address bit(s) for relaxed ordering in IOMMU
635 * bypass mode.
636 */
637 uint64_t
638 px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr)
639 {
640 px_t *px_p = DIP_TO_STATE(dip);
641 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
642
643 if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) && (attr & PCI_MAP_ATTR_RO))
644 return (MMU_OBERON_BYPASS_RO | ioaddr);
645 else
646 return (ioaddr);
647 }
648
649 /*
650 * bus dma sync entry point.
651 */
652 /*ARGSUSED*/
653 int
654 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
655 off_t off, size_t len, uint_t cache_flags)
656 {
657 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
658 px_t *px_p = DIP_TO_STATE(dip);
659 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
660
661 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
662 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
663 dip, rdip, handle, off, len, cache_flags);
664
665 /*
666 * No flush needed for Oberon
667 */
668 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
669 return (DDI_SUCCESS);
670
671 /*
672 * jbus_stst_order is found only in certain cpu modules.
673 * Just return success if not present.
674 */
675 if (&jbus_stst_order == NULL)
676 return (DDI_SUCCESS);
677
678 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
679 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
680 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
681
682 return (DDI_FAILURE);
683 }
684
685 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
686 return (DDI_SUCCESS);
687
688 /*
689 * No flush needed when sending data from memory to device.
690 	 * Nothing to do to "sync" memory with what the device already sees.
691 */
692 if (!(mp->dmai_rflags & DDI_DMA_READ) ||
693 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
694 return (DDI_SUCCESS);
695
696 /*
697 * Perform necessary cpu workaround to ensure jbus ordering.
698 * CPU's internal "invalidate FIFOs" are flushed.
699 */
700
701 kpreempt_disable();
702 jbus_stst_order();
703 kpreempt_enable();
704 return (DDI_SUCCESS);
705 }
706
707 /*
708 * MSIQ Functions:
709 */
710 /*ARGSUSED*/
711 int
712 px_lib_msiq_init(dev_info_t *dip)
713 {
714 px_t *px_p = DIP_TO_STATE(dip);
715 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
716 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
717 px_dvma_addr_t pg_index;
718 size_t q_sz = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
719 size_t size;
720 int i, ret;
721
722 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
723
724 	/* must be aligned on a q_sz boundary (q_sz happens to be one 8K page) */
725 ASSERT(q_sz == 8 * 1024);
726
727 /*
728 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
729 * and then initialize the base address register.
730 *
731 * Allocate entries from Fire IOMMU so that the resulting address
732 * is properly aligned. Calculate the index of the first allocated
733 * entry. Note: The size of the mapping is assumed to be a multiple
734 * of the page size.
735 */
736 size = msiq_state_p->msiq_cnt * q_sz;
737
738 msiq_state_p->msiq_buf_p = kmem_zalloc(size, KM_SLEEP);
739
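	/* Carve the contiguous EQ buffer into per-queue base pointers. */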
740 for (i = 0; i < msiq_state_p->msiq_cnt; i++)
741 msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
742 ((caddr_t)msiq_state_p->msiq_buf_p + (i * q_sz));
743
744 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
745 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
746
747 if (pxu_p->msiq_mapped_p == NULL)
748 return (DDI_FAILURE);
749
750 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
751 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
752
753 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
754 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p,
755 0, MMU_MAP_BUF)) != DDI_SUCCESS) {
756 DBG(DBG_LIB_MSIQ, dip,
757 "px_lib_msiq_init: px_lib_iommu_map failed, "
758 "ret 0x%lx\n", ret);
759
760 (void) px_lib_msiq_fini(dip);
761 return (DDI_FAILURE);
762 }
763
764 if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip),
765 pxu_p)) != H_EOK) {
766 DBG(DBG_LIB_MSIQ, dip,
767 "hvio_msiq_init failed, ret 0x%lx\n", ret);
768
769 (void) px_lib_msiq_fini(dip);
770 return (DDI_FAILURE);
771 }
772
773 return (DDI_SUCCESS);
774 }
775
776 /*ARGSUSED*/
777 int
778 px_lib_msiq_fini(dev_info_t *dip)
779 {
780 px_t *px_p = DIP_TO_STATE(dip);
781 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
782 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
783 px_dvma_addr_t pg_index;
784 size_t size;
785
786 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
787
788 /*
789 * Unmap and free the EQ memory that had been mapped
790 * into the Fire IOMMU.
791 */
792 size = msiq_state_p->msiq_cnt *
793 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
794
795 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
796 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
797
798 (void) px_lib_iommu_demap(px_p->px_dip,
799 PCI_TSBID(0, pg_index), MMU_BTOP(size));
800
801 /* Free the entries from the Fire MMU */
802 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
803 (void *)pxu_p->msiq_mapped_p, size);
804
805 kmem_free(msiq_state_p->msiq_buf_p, msiq_state_p->msiq_cnt *
806 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t));
807
808 return (DDI_SUCCESS);
809 }
810
811 /*ARGSUSED*/
812 int
813 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
814 uint_t *msiq_rec_cnt_p)
815 {
816 px_t *px_p = DIP_TO_STATE(dip);
817 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
818 size_t msiq_size;
819
820 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
821 dip, msiq_id);
822
823 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
824 ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p +
825 (msiq_id * msiq_size));
826
827 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
828
829 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
830 ra_p, *msiq_rec_cnt_p);
831
832 return (DDI_SUCCESS);
833 }
834
835 /*ARGSUSED*/
836 int
837 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
838 pci_msiq_valid_state_t *msiq_valid_state)
839 {
840 uint64_t ret;
841
842 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
843 dip, msiq_id);
844
845 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
846 msiq_id, msiq_valid_state)) != H_EOK) {
847 DBG(DBG_LIB_MSIQ, dip,
848 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
849 return (DDI_FAILURE);
850 }
851
852 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
853 *msiq_valid_state);
854
855 return (DDI_SUCCESS);
856 }
857
858 /*ARGSUSED*/
859 int
860 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
861 pci_msiq_valid_state_t msiq_valid_state)
862 {
863 uint64_t ret;
864
865 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
866 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
867
868 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
869 msiq_id, msiq_valid_state)) != H_EOK) {
870 DBG(DBG_LIB_MSIQ, dip,
871 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
872 return (DDI_FAILURE);
873 }
874
875 return (DDI_SUCCESS);
876 }
877
878 /*ARGSUSED*/
879 int
880 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
881 pci_msiq_state_t *msiq_state)
882 {
883 uint64_t ret;
884
885 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
886 dip, msiq_id);
887
888 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
889 msiq_id, msiq_state)) != H_EOK) {
890 DBG(DBG_LIB_MSIQ, dip,
891 "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
892 return (DDI_FAILURE);
893 }
894
895 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
896 *msiq_state);
897
898 return (DDI_SUCCESS);
899 }
900
901 /*ARGSUSED*/
902 int
903 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
904 pci_msiq_state_t msiq_state)
905 {
906 uint64_t ret;
907
908 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
909 "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
910
911 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
912 msiq_id, msiq_state)) != H_EOK) {
913 DBG(DBG_LIB_MSIQ, dip,
914 "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
915 return (DDI_FAILURE);
916 }
917
918 return (DDI_SUCCESS);
919 }
920
921 /*ARGSUSED*/
922 int
923 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
924 msiqhead_t *msiq_head)
925 {
926 uint64_t ret;
927
928 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
929 dip, msiq_id);
930
931 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
932 msiq_id, msiq_head)) != H_EOK) {
933 DBG(DBG_LIB_MSIQ, dip,
934 "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
935 return (DDI_FAILURE);
936 }
937
938 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
939 *msiq_head);
940
941 return (DDI_SUCCESS);
942 }
943
944 /*ARGSUSED*/
945 int
946 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
947 msiqhead_t msiq_head)
948 {
949 uint64_t ret;
950
951 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
952 "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
953
954 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
955 msiq_id, msiq_head)) != H_EOK) {
956 DBG(DBG_LIB_MSIQ, dip,
957 "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
958 return (DDI_FAILURE);
959 }
960
961 return (DDI_SUCCESS);
962 }
963
964 /*ARGSUSED*/
965 int
966 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
967 msiqtail_t *msiq_tail)
968 {
969 uint64_t ret;
970
971 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
972 dip, msiq_id);
973
974 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
975 msiq_id, msiq_tail)) != H_EOK) {
976 DBG(DBG_LIB_MSIQ, dip,
977 "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
978 return (DDI_FAILURE);
979 }
980
981 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
982 *msiq_tail);
983
984 return (DDI_SUCCESS);
985 }
986
987 /*ARGSUSED*/
988 void
989 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
990 msiq_rec_t *msiq_rec_p)
991 {
992 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p;
993
994 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
995 dip, eq_rec_p);
996
997 if (!eq_rec_p->eq_rec_fmt_type) {
998 /* Set msiq_rec_type to zero */
999 msiq_rec_p->msiq_rec_type = 0;
1000
1001 return;
1002 }
1003
1004 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
1005 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
1006 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
1007 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
1008 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
1009 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
1010 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
1011 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
1012
1013 /*
1014 	 * Only the upper 4 bits of eq_rec_fmt_type are used
1015 * to identify the EQ record type.
1016 */
1017 switch (eq_rec_p->eq_rec_fmt_type >> 3) {
1018 case EQ_REC_MSI32:
1019 msiq_rec_p->msiq_rec_type = MSI32_REC;
1020
1021 msiq_rec_p->msiq_rec_data.msi.msi_data =
1022 eq_rec_p->eq_rec_data0;
1023 break;
1024 case EQ_REC_MSI64:
1025 msiq_rec_p->msiq_rec_type = MSI64_REC;
1026
1027 msiq_rec_p->msiq_rec_data.msi.msi_data =
1028 eq_rec_p->eq_rec_data0;
1029 break;
1030 case EQ_REC_MSG:
1031 msiq_rec_p->msiq_rec_type = MSG_REC;
1032
1033 msiq_rec_p->msiq_rec_data.msg.msg_route =
1034 eq_rec_p->eq_rec_fmt_type & 7;
1035 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
1036 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
1037 break;
1038 default:
1039 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
1040 "0x%x is an unknown EQ record type",
1041 ddi_driver_name(dip), ddi_get_instance(dip),
1042 (int)eq_rec_p->eq_rec_fmt_type);
1043 break;
1044 }
1045
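	/*
	 * Reassemble the MSI address: eq_rec_addr0 carries address bits
	 * [15:2] and eq_rec_addr1 carries the remaining upper bits.
	 */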
1046 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
1047 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
1048 (eq_rec_p->eq_rec_addr0 << 2));
1049 }
1050
1051 /*ARGSUSED*/
1052 void
1053 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
1054 {
1055 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p;
1056
1057 DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
1058 dip, eq_rec_p);
1059
1060 if (eq_rec_p->eq_rec_fmt_type) {
1061 /* Zero out eq_rec_fmt_type field */
1062 eq_rec_p->eq_rec_fmt_type = 0;
1063 }
1064 }
1065
1066 /*
1067 * MSI Functions:
1068 */
1069 /*ARGSUSED*/
1070 int
1071 px_lib_msi_init(dev_info_t *dip)
1072 {
1073 px_t *px_p = DIP_TO_STATE(dip);
1074 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state;
1075 uint64_t ret;
1076
1077 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
1078
1079 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
1080 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
1081 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n",
1082 ret);
1083 return (DDI_FAILURE);
1084 }
1085
1086 return (DDI_SUCCESS);
1087 }
1088
1089 /*ARGSUSED*/
1090 int
1091 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
1092 msiqid_t *msiq_id)
1093 {
1094 uint64_t ret;
1095
1096 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
1097 dip, msi_num);
1098
1099 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
1100 msi_num, msiq_id)) != H_EOK) {
1101 DBG(DBG_LIB_MSI, dip,
1102 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
1103 return (DDI_FAILURE);
1104 }
1105
1106 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
1107 *msiq_id);
1108
1109 return (DDI_SUCCESS);
1110 }
1111
1112 /*ARGSUSED*/
1113 int
1114 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
1115 msiqid_t msiq_id, msi_type_t msitype)
1116 {
1117 uint64_t ret;
1118
1119 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
1120 "msq_id 0x%x\n", dip, msi_num, msiq_id);
1121
1122 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
1123 msi_num, msiq_id)) != H_EOK) {
1124 DBG(DBG_LIB_MSI, dip,
1125 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
1126 return (DDI_FAILURE);
1127 }
1128
1129 return (DDI_SUCCESS);
1130 }
1131
1132 /*ARGSUSED*/
1133 int
1134 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
1135 pci_msi_valid_state_t *msi_valid_state)
1136 {
1137 uint64_t ret;
1138
1139 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
1140 dip, msi_num);
1141
1142 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
1143 msi_num, msi_valid_state)) != H_EOK) {
1144 DBG(DBG_LIB_MSI, dip,
1145 "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
1146 return (DDI_FAILURE);
1147 }
1148
1149 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
1150 *msi_valid_state);
1151
1152 return (DDI_SUCCESS);
1153 }
1154
1155 /*ARGSUSED*/
1156 int
1157 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
1158 pci_msi_valid_state_t msi_valid_state)
1159 {
1160 uint64_t ret;
1161
1162 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
1163 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
1164
1165 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
1166 msi_num, msi_valid_state)) != H_EOK) {
1167 DBG(DBG_LIB_MSI, dip,
1168 "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
1169 return (DDI_FAILURE);
1170 }
1171
1172 return (DDI_SUCCESS);
1173 }
1174
1175 /*ARGSUSED*/
1176 int
1177 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
1178 pci_msi_state_t *msi_state)
1179 {
1180 uint64_t ret;
1181
1182 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
1183 dip, msi_num);
1184
1185 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
1186 msi_num, msi_state)) != H_EOK) {
1187 DBG(DBG_LIB_MSI, dip,
1188 "hvio_msi_getstate failed, ret 0x%lx\n", ret);
1189 return (DDI_FAILURE);
1190 }
1191
1192 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
1193 *msi_state);
1194
1195 return (DDI_SUCCESS);
1196 }
1197
1198 /*ARGSUSED*/
1199 int
1200 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
1201 pci_msi_state_t msi_state)
1202 {
1203 uint64_t ret;
1204
1205 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
1206 "msi_state 0x%x\n", dip, msi_num, msi_state);
1207
1208 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
1209 msi_num, msi_state)) != H_EOK) {
1210 DBG(DBG_LIB_MSI, dip,
1211 "hvio_msi_setstate failed, ret 0x%lx\n", ret);
1212 return (DDI_FAILURE);
1213 }
1214
1215 return (DDI_SUCCESS);
1216 }
1217
1218 /*
1219 * MSG Functions:
1220 */
1221 /*ARGSUSED*/
1222 int
1223 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1224 msiqid_t *msiq_id)
1225 {
1226 uint64_t ret;
1227
1228 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
1229 dip, msg_type);
1230
1231 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
1232 msg_type, msiq_id)) != H_EOK) {
1233 DBG(DBG_LIB_MSG, dip,
1234 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
1235 return (DDI_FAILURE);
1236 }
1237
1238 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
1239 *msiq_id);
1240
1241 return (DDI_SUCCESS);
1242 }
1243
1244 /*ARGSUSED*/
1245 int
1246 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1247 msiqid_t msiq_id)
1248 {
1249 uint64_t ret;
1250
1251 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
1252 "msiq_id 0x%x\n", dip, msg_type, msiq_id);
1253
1254 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
1255 msg_type, msiq_id)) != H_EOK) {
1256 DBG(DBG_LIB_MSG, dip,
1257 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
1258 return (DDI_FAILURE);
1259 }
1260
1261 return (DDI_SUCCESS);
1262 }
1263
1264 /*ARGSUSED*/
1265 int
1266 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1267 pcie_msg_valid_state_t *msg_valid_state)
1268 {
1269 uint64_t ret;
1270
1271 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
1272 dip, msg_type);
1273
1274 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
1275 msg_valid_state)) != H_EOK) {
1276 DBG(DBG_LIB_MSG, dip,
1277 "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
1278 return (DDI_FAILURE);
1279 }
1280
1281 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
1282 *msg_valid_state);
1283
1284 return (DDI_SUCCESS);
1285 }
1286
1287 /*ARGSUSED*/
1288 int
1289 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1290 pcie_msg_valid_state_t msg_valid_state)
1291 {
1292 uint64_t ret;
1293
1294 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
1295 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
1296
1297 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
1298 msg_valid_state)) != H_EOK) {
1299 DBG(DBG_LIB_MSG, dip,
1300 "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
1301 return (DDI_FAILURE);
1302 }
1303
1304 return (DDI_SUCCESS);
1305 }
1306
1307 /*ARGSUSED*/
1308 void
1309 px_panic_domain(px_t *px_p, pcie_req_id_t bdf)
1310 {
1311 }
1312
1313 /*
1314 * Suspend/Resume Functions:
1315 * Currently unsupported by hypervisor
1316 */
1317 int
1318 px_lib_suspend(dev_info_t *dip)
1319 {
1320 px_t *px_p = DIP_TO_STATE(dip);
1321 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1322 px_cb_t *cb_p = PX2CB(px_p);
1323 devhandle_t dev_hdl, xbus_dev_hdl;
1324 uint64_t ret = H_EOK;
1325
1326 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
1327
1328 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1329 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1330
1331 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
1332 goto fail;
1333
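	/*
	 * The common block is shared by all px leaves attached to it;
	 * suspend it only when the last attached leaf is being suspended.
	 */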
1334 if (--cb_p->attachcnt == 0) {
1335 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
1336 if (ret != H_EOK)
1337 cb_p->attachcnt++;
1338 }
1339 pxu_p->cpr_flag = PX_ENTERED_CPR;
1340
1341 fail:
1342 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
1343 }
1344
1345 void
1346 px_lib_resume(dev_info_t *dip)
1347 {
1348 px_t *px_p = DIP_TO_STATE(dip);
1349 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1350 px_cb_t *cb_p = PX2CB(px_p);
1351 devhandle_t dev_hdl, xbus_dev_hdl;
1352 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC];
1353 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC];
1354
1355 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
1356
1357 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1358 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1359
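	/* Resume the shared common block only when the first leaf resumes. */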
1360 if (++cb_p->attachcnt == 1)
1361 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
1362
1363 hvio_resume(dev_hdl, pec_ino, pxu_p);
1364 }
1365
1366 /*
1367  * Generate a unique Oberon UBC ID based on the Logical System Board and
1368 * the IO Channel from the portid property field.
1369 */
1370 static uint64_t
1371 oberon_get_ubc_id(dev_info_t *dip)
1372 {
1373 px_t *px_p = DIP_TO_STATE(dip);
1374 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1375 uint64_t ubc_id;
1376
1377 /*
1378 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
1379 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
1380 */
1381 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
1382 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
1383 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
1384 << OBERON_UBC_ID_LSB));
1385
1386 return (ubc_id);
1387 }
1388
1389 /*
1390 * Oberon does not have a UBC scratch register, so alloc an array of scratch
1391 * registers when needed and use a unique UBC ID as an index. This code
1392  * could be simplified by using a pre-allocated array; the array is
1393  * allocated dynamically because it is only needed on Oberon.
1394 */
1395 static void
1396 oberon_set_cb(dev_info_t *dip, uint64_t val)
1397 {
1398 uint64_t ubc_id;
1399
1400 if (px_oberon_ubc_scratch_regs == NULL)
1401 px_oberon_ubc_scratch_regs =
1402 (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
1403 OBERON_UBC_ID_MAX, KM_SLEEP);
1404
1405 ubc_id = oberon_get_ubc_id(dip);
1406
1407 px_oberon_ubc_scratch_regs[ubc_id] = val;
1408
1409 /*
1410 * Check if any scratch registers are still in use. If all scratch
1411 * registers are currently set to zero, then deallocate the scratch
1412 * register array.
1413 */
1414 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
1415 if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
1416 return;
1417 }
1418
1419 /*
1420 * All scratch registers are set to zero so deallocate the scratch
1421 * register array and set the pointer to NULL.
1422 */
1423 kmem_free(px_oberon_ubc_scratch_regs,
1424 (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
1425
1426 px_oberon_ubc_scratch_regs = NULL;
1427 }
1428
1429 /*
1430 * Oberon does not have a UBC scratch register, so use an allocated array of
1431 * scratch registers and use the unique UBC ID as an index into that array.
1432 */
1433 static uint64_t
1434 oberon_get_cb(dev_info_t *dip)
1435 {
1436 uint64_t ubc_id;
1437
1438 if (px_oberon_ubc_scratch_regs == NULL)
1439 return (0);
1440
1441 ubc_id = oberon_get_ubc_id(dip);
1442
1443 return (px_oberon_ubc_scratch_regs[ubc_id]);
1444 }
1445
1446 /*
1447 * Misc Functions:
1448 * Currently unsupported by hypervisor
1449 */
1450 static uint64_t
1451 px_get_cb(dev_info_t *dip)
1452 {
1453 px_t *px_p = DIP_TO_STATE(dip);
1454 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1455
1456 /*
1457 * Oberon does not currently have Scratchpad registers.
1458 */
1459 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
1460 return (oberon_get_cb(dip));
1461
1462 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
1463 }
1464
1465 static void
1466 px_set_cb(dev_info_t *dip, uint64_t val)
1467 {
1468 px_t *px_p = DIP_TO_STATE(dip);
1469 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1470
1471 /*
1472 * Oberon does not currently have Scratchpad registers.
1473 */
1474 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1475 oberon_set_cb(dip, val);
1476 return;
1477 }
1478
1479 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
1480 }
1481
1482 /*ARGSUSED*/
1483 int
1484 px_lib_map_vconfig(dev_info_t *dip,
1485 ddi_map_req_t *mp, pci_config_offset_t off,
1486 pci_regspec_t *rp, caddr_t *addrp)
1487 {
1488 /*
1489 * No special config space access services in this layer.
1490 */
1491 return (DDI_FAILURE);
1492 }
1493
1494 void
1495 px_lib_map_attr_check(ddi_map_req_t *mp)
1496 {
1497 ddi_acc_hdl_t *hp = mp->map_handlep;
1498
1499 /* fire does not accept byte masks from PIO store merge */
1500 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1501 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1502 }
1503
1504 /* This function is called only by poke, caut put and pxtool poke. */
1505 void
1506 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr)
1507 {
1508 px_pec_t *pec_p = px_p->px_pec_p;
1509 dev_info_t *rpdip = px_p->px_dip;
1510 int rc_err, fab_err, i;
1511 int acctype = pec_p->pec_safeacc_type;
1512 ddi_fm_error_t derr;
1513 pci_ranges_t *ranges_p;
1514 int range_len;
1515 uint32_t addr_high, addr_low;
1516 pcie_req_id_t bdf = PCIE_INVALID_BDF;
1517
1518 /* Create the derr */
1519 bzero(&derr, sizeof (ddi_fm_error_t));
1520 derr.fme_version = DDI_FME_VERSION;
1521 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1522 derr.fme_flag = acctype;
1523
1524 if (acctype == DDI_FM_ERR_EXPECTED) {
1525 derr.fme_status = DDI_FM_NONFATAL;
1526 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
1527 }
1528
1529 if (px_fm_enter(px_p) != DDI_SUCCESS)
1530 return;
1531
1532 /* send ereport/handle/clear fire registers */
1533 rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL);
1534
1535 /* Figure out if this is a cfg or mem32 access */
1536 addr_high = (uint32_t)(addr >> 32);
1537 addr_low = (uint32_t)addr;
1538 range_len = px_p->px_ranges_length / sizeof (pci_ranges_t);
1539 i = 0;
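	/*
	 * A config-space address encodes the target BDF in bits [27:12] of
	 * the low address word (hence the shift below); for mem32 accesses
	 * the BDF is taken from the requesting device, when known.
	 */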
1540 for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
1541 if (ranges_p->parent_high == addr_high) {
1542 switch (ranges_p->child_high & PCI_ADDR_MASK) {
1543 case PCI_ADDR_CONFIG:
1544 bdf = (pcie_req_id_t)(addr_low >> 12);
1545 addr_low = 0;
1546 break;
1547 case PCI_ADDR_MEM32:
1548 if (rdip)
1549 bdf = PCI_GET_BDF(rdip);
1550 else
1551 bdf = PCIE_INVALID_BDF;
1552 break;
1553 }
1554 break;
1555 }
1556 }
1557
1558 (void) px_rp_en_q(px_p, bdf, addr_low, 0);
1559
1560 /*
1561 * XXX - Current code scans the fabric for all px_tool accesses.
1562 * In future, do not scan fabric for px_tool access to IO Root Nexus
1563 */
1564 fab_err = px_scan_fabric(px_p, rpdip, &derr);
1565
1566 px_err_panic(rc_err, PX_RC, fab_err, B_TRUE);
1567 px_fm_exit(px_p);
1568 px_err_panic(rc_err, PX_RC, fab_err, B_FALSE);
1569 }
1570
1571 #ifdef DEBUG
1572 int px_peekfault_cnt = 0;
1573 int px_pokefault_cnt = 0;
1574 #endif /* DEBUG */
1575
1576 /*ARGSUSED*/
1577 static int
1578 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
1579 peekpoke_ctlops_t *in_args)
1580 {
1581 px_t *px_p = DIP_TO_STATE(dip);
1582 px_pec_t *pec_p = px_p->px_pec_p;
1583 int err = DDI_SUCCESS;
1584 on_trap_data_t otd;
1585
1586 mutex_enter(&pec_p->pec_pokefault_mutex);
1587 pec_p->pec_ontrap_data = &otd;
1588 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
1589
1590 /* Set up protected environment. */
1591 if (!on_trap(&otd, OT_DATA_ACCESS)) {
1592 uintptr_t tramp = otd.ot_trampoline;
1593
1594 otd.ot_trampoline = (uintptr_t)&poke_fault;
1595 err = do_poke(in_args->size, (void *)in_args->dev_addr,
1596 (void *)in_args->host_addr);
1597 otd.ot_trampoline = tramp;
1598 } else
1599 err = DDI_FAILURE;
1600
1601 px_lib_clr_errs(px_p, rdip, in_args->dev_addr);
1602
1603 if (otd.ot_trap & OT_DATA_ACCESS)
1604 err = DDI_FAILURE;
1605
1606 /* Take down protected environment. */
1607 no_trap();
1608
1609 pec_p->pec_ontrap_data = NULL;
1610 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1611 mutex_exit(&pec_p->pec_pokefault_mutex);
1612
1613 #ifdef DEBUG
1614 if (err == DDI_FAILURE)
1615 px_pokefault_cnt++;
1616 #endif
1617 return (err);
1618 }
1619
1620 /*ARGSUSED*/
1621 static int
1622 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
1623 peekpoke_ctlops_t *cautacc_ctlops_arg)
1624 {
1625 size_t size = cautacc_ctlops_arg->size;
1626 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1627 uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1628 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1629 size_t repcount = cautacc_ctlops_arg->repcount;
1630 uint_t flags = cautacc_ctlops_arg->flags;
1631
1632 px_t *px_p = DIP_TO_STATE(dip);
1633 px_pec_t *pec_p = px_p->px_pec_p;
1634 int err = DDI_SUCCESS;
1635
1636 /*
1637 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1638 * mutex.
1639 */
1640 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1641
1642 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1643 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1644 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1645
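	/*
	 * Repeat the access repcount times.  With DDI_DEV_AUTOINCR the
	 * device address advances by size on each repetition; otherwise the
	 * same device location is written repeatedly.
	 */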
1646 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1647 for (; repcount; repcount--) {
1648 switch (size) {
1649
1650 case sizeof (uint8_t):
1651 i_ddi_put8(hp, (uint8_t *)dev_addr,
1652 *(uint8_t *)host_addr);
1653 break;
1654
1655 case sizeof (uint16_t):
1656 i_ddi_put16(hp, (uint16_t *)dev_addr,
1657 *(uint16_t *)host_addr);
1658 break;
1659
1660 case sizeof (uint32_t):
1661 i_ddi_put32(hp, (uint32_t *)dev_addr,
1662 *(uint32_t *)host_addr);
1663 break;
1664
1665 case sizeof (uint64_t):
1666 i_ddi_put64(hp, (uint64_t *)dev_addr,
1667 *(uint64_t *)host_addr);
1668 break;
1669 }
1670
1671 host_addr += size;
1672
1673 if (flags == DDI_DEV_AUTOINCR)
1674 dev_addr += size;
1675
1676 px_lib_clr_errs(px_p, rdip, dev_addr);
1677
1678 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
1679 err = DDI_FAILURE;
1680 #ifdef DEBUG
1681 px_pokefault_cnt++;
1682 #endif
1683 break;
1684 }
1685 }
1686 }
1687
1688 i_ddi_notrap((ddi_acc_handle_t)hp);
1689 pec_p->pec_ontrap_data = NULL;
1690 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1691 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1692 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1693
1694 return (err);
1695 }
1696
1697
1698 int
1699 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1700 peekpoke_ctlops_t *in_args)
1701 {
1702 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
1703 px_lib_do_poke(dip, rdip, in_args));
1704 }
1705
1706
1707 /*ARGSUSED*/
1708 static int
1709 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
1710 {
1711 px_t *px_p = DIP_TO_STATE(dip);
1712 px_pec_t *pec_p = px_p->px_pec_p;
1713 int err = DDI_SUCCESS;
1714 on_trap_data_t otd;
1715
1716 mutex_enter(&pec_p->pec_pokefault_mutex);
1717 if (px_fm_enter(px_p) != DDI_SUCCESS) {
1718 mutex_exit(&pec_p->pec_pokefault_mutex);
1719 return (DDI_FAILURE);
1720 }
1721 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
1722 px_fm_exit(px_p);
1723
1724 if (!on_trap(&otd, OT_DATA_ACCESS)) {
1725 uintptr_t tramp = otd.ot_trampoline;
1726
1727 otd.ot_trampoline = (uintptr_t)&peek_fault;
1728 err = do_peek(in_args->size, (void *)in_args->dev_addr,
1729 (void *)in_args->host_addr);
1730 otd.ot_trampoline = tramp;
1731 } else
1732 err = DDI_FAILURE;
1733
1734 no_trap();
1735 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1736 mutex_exit(&pec_p->pec_pokefault_mutex);
1737
1738 #ifdef DEBUG
1739 if (err == DDI_FAILURE)
1740 px_peekfault_cnt++;
1741 #endif
1742 return (err);
1743 }
1744
1745
1746 static int
1747 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
1748 {
1749 size_t size = cautacc_ctlops_arg->size;
1750 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1751 uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1752 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1753 size_t repcount = cautacc_ctlops_arg->repcount;
1754 uint_t flags = cautacc_ctlops_arg->flags;
1755
1756 px_t *px_p = DIP_TO_STATE(dip);
1757 px_pec_t *pec_p = px_p->px_pec_p;
1758 int err = DDI_SUCCESS;
1759
1760 /*
1761 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1762 * mutex.
1763 */
1764 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1765
1766 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1767 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1768 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1769
1770 if (repcount == 1) {
1771 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1772 i_ddi_caut_get(size, (void *)dev_addr,
1773 (void *)host_addr);
1774 } else {
1775 int i;
1776 uint8_t *ff_addr = (uint8_t *)host_addr;
1777 for (i = 0; i < size; i++)
1778 *ff_addr++ = 0xff;
1779
1780 err = DDI_FAILURE;
1781 #ifdef DEBUG
1782 px_peekfault_cnt++;
1783 #endif
1784 }
1785 } else {
1786 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1787 for (; repcount; repcount--) {
1788 i_ddi_caut_get(size, (void *)dev_addr,
1789 (void *)host_addr);
1790
1791 host_addr += size;
1792
1793 if (flags == DDI_DEV_AUTOINCR)
1794 dev_addr += size;
1795 }
1796 } else {
1797 err = DDI_FAILURE;
1798 #ifdef DEBUG
1799 px_peekfault_cnt++;
1800 #endif
1801 }
1802 }
1803
1804 i_ddi_notrap((ddi_acc_handle_t)hp);
1805 pec_p->pec_ontrap_data = NULL;
1806 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1807 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1808 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1809
1810 return (err);
1811 }
1812
1813 /*ARGSUSED*/
1814 int
1815 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
1816 peekpoke_ctlops_t *in_args, void *result)
1817 {
1818 result = (void *)in_args->host_addr;
1819 return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
1820 px_lib_do_peek(dip, in_args));
1821 }
1822
1823 /*
1824 * implements PPM interface
1825 */
1826 int
1827 px_lib_pmctl(int cmd, px_t *px_p)
1828 {
1829 ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
1830 switch (cmd) {
1831 case PPMREQ_PRE_PWR_OFF:
1832 /*
1833 * Currently there is no device power management for
1834 * the root complex (fire). When there is we need to make
1835 * sure that it is at full power before trying to send the
1836 * PME_Turn_Off message.
1837 */
1838 DBG(DBG_PWR, px_p->px_dip,
1839 "ioctl: request to send PME_Turn_Off\n");
1840 return (px_goto_l23ready(px_p));
1841
1842 case PPMREQ_PRE_PWR_ON:
1843 DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1844 return (px_pre_pwron_check(px_p));
1845
1846 case PPMREQ_POST_PWR_ON:
1847 DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1848 return (px_goto_l0(px_p));
1849
1850 default:
1851 return (DDI_FAILURE);
1852 }
1853 }
1854
1855 /*
1856 * sends PME_Turn_Off message to put the link in L2/L3 ready state.
1857 * called by px_ioctl.
1858 * returns DDI_SUCCESS or DDI_FAILURE
1859 * 1. Wait for link to be in L1 state (link status reg)
1860  * 2. write to PME_Turn_Off reg to broadcast
1861 * 3. set timeout
1862 * 4. If timeout, return failure.
1863 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
1864 */
1865 static int
1866 px_goto_l23ready(px_t *px_p)
1867 {
1868 pcie_pwr_t *pwr_p;
1869 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
1870 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1871 int ret = DDI_SUCCESS;
1872 clock_t end, timeleft;
1873 int mutex_held = 1;
1874
1875 /* If no PM info, return failure */
1876 if (PCIE_PMINFO(px_p->px_dip) == NULL ||
1877 (pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)) == NULL)
1878 return (DDI_FAILURE);
1879
1880 mutex_enter(&pwr_p->pwr_lock);
1881 mutex_enter(&px_p->px_l23ready_lock);
1882 	/* Clear the PME_To_ACK received flag */
1883 px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1884 /*
1885 * When P25 is the downstream device, Fire will go to the Detect
1886 * state after receiving PME_To_ACK, which causes a link down (LDN)
1887 * event. Inform FMA that this is expected. For all other cards
1888 * compliant with the PCI Express spec, the LDN happens when power
1889 * is re-applied. FMA code clears this flag after one instance of
1890 * LDN. Since there will not be an LDN event here for the spec
1891 * compliant cards, we need to clear the flag ourselves after
1892 * receiving PME_To_ACK.
1893 */
1894 px_p->px_pm_flags |= PX_LDN_EXPECTED;
1895 if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
1896 ret = DDI_FAILURE;
1897 goto l23ready_done;
1898 }
1899 px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
1900
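/*
* Wait for the PME_To_ACK interrupt handler (px_pmeq_intr) to set
* PX_PMETOACK_RECVD.  'end' is an absolute lbolt deadline, so premature
* wakeups simply re-enter cv_timedwait() until the deadline passes.
*/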
1901 end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1902 while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1903 timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1904 &px_p->px_l23ready_lock, end);
1905 /*
1906 * If cv_timedwait returns -1, one of the following is true:
1907 * 1) it timed out, or
1908 * 2) there was a premature wakeup, but by the time
1909 * cv_timedwait is called again end < lbolt, i.e.
1910 * end is in the past, or
1911 * 3) by the time we make the first cv_timedwait call,
1912 * end < lbolt is already true.
1913 */
1914 if (timeleft == -1)
1915 break;
1916 }
1917 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1918 /*
1919 * Either we timed out or the interrupt didn't get a
1920 * chance to grab the mutex and set the flag.
1921 * Release the mutex and delay for some time.
1922 * This will 1) give the interrupt a chance to
1923 * set the flag and 2) create a delay between two
1924 * consecutive requests.
1925 */
1926 mutex_exit(&px_p->px_l23ready_lock);
1927 delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1928 mutex_held = 0;
1929 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1930 ret = DDI_FAILURE;
1931 DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1932 " for PME_TO_ACK\n");
1933 }
1934 }
1935 px_p->px_pm_flags &=
1936 ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
1937
1938 l23ready_done:
1939 if (mutex_held)
1940 mutex_exit(&px_p->px_l23ready_lock);
1941 /*
1942 * Wait until the link is in L1 idle, if sending PME_Turn_Off
1943 * was successful.
1944 */
1945 if (ret == DDI_SUCCESS) {
1946 if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1947 DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1948 " even though we received PME_To_ACK.\n");
1949 /*
1950 * Workaround for a hardware bug in P25.
1951 * Due to this bug, the link state will be Detect
1952 * rather than L1 after the link has transitioned to
1953 * the L23Ready state. Since we cannot tell whether the
1954 * link is in L23Ready without Fire's state being
1955 * L1_idle, we simply delay here to make sure the link
1956 * has had enough time to transition to the L23Ready
1957 * state.
1958 */
1959 delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1960 }
1961 pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1962
1963 }
1964 mutex_exit(&pwr_p->pwr_lock);
1965 return (ret);
1966 }
1967
1968 /*
1969 * Message interrupt handler intended to be shared for both
1970 * PME and PME_To_ACK msg handling; currently it only handles
1971 * the PME_To_ACK message.
1972 */
1973 uint_t
1974 px_pmeq_intr(caddr_t arg)
1975 {
1976 px_t *px_p = (px_t *)arg;
1977
1978 DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1979 mutex_enter(&px_p->px_l23ready_lock);
1980 cv_broadcast(&px_p->px_l23ready_cv);
1981 if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1982 px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1983 } else {
1984 /*
1985 * This may be the second ack received. If so, we
1986 * should be receiving it during the wait4L1 stage.
1987 */
1988 px_p->px_pmetoack_ignored++;
1989 }
1990 mutex_exit(&px_p->px_l23ready_lock);
1991 return (DDI_INTR_CLAIMED);
1992 }
1993
1994 static int
1995 px_pre_pwron_check(px_t *px_p)
1996 {
1997 pcie_pwr_t *pwr_p;
1998
1999 /* If no PM info, return failure */
2000 if (PCIE_PMINFO(px_p->px_dip) == NULL ||
2001 (pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)) == NULL)
2002 return (DDI_FAILURE);
2003
2004 /*
2005 * For spec compliant downstream cards, link down
2006 * is expected when the device is powered on.
2007 */
2008 px_p->px_pm_flags |= PX_LDN_EXPECTED;
2009 return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
2010 }
2011
2012 static int
2013 px_goto_l0(px_t *px_p)
2014 {
2015 pcie_pwr_t *pwr_p;
2016 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2017 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2018 int ret = DDI_SUCCESS;
2019 uint64_t time_spent = 0;
2020
2021 /* If no PM info, return failure */
2022 if (PCIE_PMINFO(px_p->px_dip) == NULL ||
2023 (pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)) == NULL)
2024 return (DDI_FAILURE);
2025
2026 mutex_enter(&pwr_p->pwr_lock);
2027 /*
2028 * The following link retrain activity will cause LDN and LUP events.
2029 * Receiving an LDN prior to receiving an LUP is expected and is not an
2030 * error in this case. Receiving an LUP indicates the link is fully up
2031 * and can support powering up the downstream device; any further LDN
2032 * or LUP outside this context is an error.
2033 */
2034 px_p->px_lup_pending = 1;
2035 if (px_link_retrain(csr_base) != DDI_SUCCESS) {
2036 ret = DDI_FAILURE;
2037 goto l0_done;
2038 }
2039
2040 /* The LUP event takes on the order of 15 ms to occur */
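/*
* Busy-wait in px_lup_poll_interval steps, up to px_lup_poll_to total;
* px_lup_pending is expected to be cleared by the LUP message interrupt
* handler once the link comes back up.
*/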
2041 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
2042 time_spent += px_lup_poll_interval)
2043 drv_usecwait(px_lup_poll_interval);
2044 if (px_p->px_lup_pending)
2045 ret = DDI_FAILURE;
2046 l0_done:
2047 px_enable_detect_quiet(csr_base);
2048 if (ret == DDI_SUCCESS)
2049 pwr_p->pwr_link_lvl = PM_LEVEL_L0;
2050 mutex_exit(&pwr_p->pwr_lock);
2051 return (ret);
2052 }
2053
2054 /*
2055 * Extract the driver's binding name to identify which chip we're binding to.
2056 * Whenever a new bus bridge is created, the driver alias entry should be
2057 * added here to identify the device if needed. If a device isn't added,
2058 * the identity defaults to PX_CHIP_UNIDENTIFIED.
2059 */
2060 static uint32_t
2061 px_identity_init(px_t *px_p)
2062 {
2063 dev_info_t *dip = px_p->px_dip;
2064 char *name = ddi_binding_name(dip);
2065 uint32_t revision = 0;
2066
2067 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2068 "module-revision#", 0);
2069
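/*
* Only Fire parts at module revision FIRE_MOD_REV_20 or later are
* supported; older revisions are reported as PX_CHIP_UNIDENTIFIED below.
*/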
2070 /* Check for Fire driver binding name */
2071 if (strcmp(name, "pciex108e,80f0") == 0) {
2072 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
2073 "(FIRE), module-revision %d\n", NAMEINST(dip),
2074 revision);
2075
2076 return ((revision >= FIRE_MOD_REV_20) ?
2077 PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
2078 }
2079
2080 /* Check for Oberon driver binding name */
2081 if (strcmp(name, "pciex108e,80f8") == 0) {
2082 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
2083 "(OBERON), module-revision %d\n", NAMEINST(dip),
2084 revision);
2085
2086 return (PX_CHIP_OBERON);
2087 }
2088
2089 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
2090 ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
2091
2092 return (PX_CHIP_UNIDENTIFIED);
2093 }
2094
2095 int
2096 px_err_add_intr(px_fault_t *px_fault_p)
2097 {
2098 dev_info_t *dip = px_fault_p->px_fh_dip;
2099 px_t *px_p = DIP_TO_STATE(dip);
2100
2101 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
2102 (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
2103 NULL, NULL) == 0);
2104
2105 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
2106
2107 return (DDI_SUCCESS);
2108 }
2109
2110 void
2111 px_err_rem_intr(px_fault_t *px_fault_p)
2112 {
2113 dev_info_t *dip = px_fault_p->px_fh_dip;
2114 px_t *px_p = DIP_TO_STATE(dip);
2115
2116 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
2117 IB_INTR_WAIT);
2118
2119 VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2120 }
2121
2122 /*
2123 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
2124 */
2125 void
2126 px_cb_intr_redist(void *arg)
2127 {
2128 px_cb_t *cb_p = (px_cb_t *)arg;
2129 px_cb_list_t *pxl;
2130 px_t *pxp = NULL;
2131 px_fault_t *f_p = NULL;
2132 uint32_t new_cpuid;
2133 intr_valid_state_t enabled = 0;
2134
2135 mutex_enter(&cb_p->cb_mutex);
2136
2137 pxl = cb_p->pxl;
2138 if (pxl == NULL)
2139 goto cb_done;
2140
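/*
* Walk the CB list for the px instance whose CB fault sysino currently
* carries the CB interrupt, i.e. the current proxy.
*/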
2141 pxp = pxl->pxp;
2142 f_p = &pxp->px_cb_fault;
2143 for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) {
2144 pxl = pxl->next;
2145 pxp = pxl->pxp;
2146 f_p = &pxp->px_cb_fault;
2147 }
2148 if (pxl == NULL)
2149 goto cb_done;
2150
2151 new_cpuid = intr_dist_cpuid();
2152 if (new_cpuid == cb_p->cpuid)
2153 goto cb_done;
2154
2155 if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
2156 != DDI_SUCCESS) || !enabled) {
2157 DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
2158 "sysino(0x%x)\n", f_p->px_fh_sysino);
2159 goto cb_done;
2160 }
2161
2162 PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);
2163
2164 cb_p->cpuid = new_cpuid;
2165 cb_p->sysino = f_p->px_fh_sysino;
2166 PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2167
2168 cb_done:
2169 mutex_exit(&cb_p->cb_mutex);
2170 }
2171
2172 /*
2173 * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet
2174 * created, to always add the CB interrupt vector, but to enable it only once.
2175 */
2176 int
2177 px_cb_add_intr(px_fault_t *fault_p)
2178 {
2179 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip);
2180 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2181 px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
2182 px_cb_list_t *pxl, *pxl_new;
2183 boolean_t is_proxy = B_FALSE;
2184
2185 /* create cb */
2186 if (cb_p == NULL) {
2187 cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
2188
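/*
* Initialize cb_mutex as an interrupt mutex at FM_ERR_PIL so that it
* can be acquired from the CB interrupt handler (px_cb_intr).
*/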
2189 mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
2190 (void *) ipltospl(FM_ERR_PIL));
2191
2192 cb_p->px_cb_func = px_cb_intr;
2193 pxu_p->px_cb_p = cb_p;
2194 px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
2195
2196 /* px_lib_dev_init allows only FIRE and OBERON */
2197 px_err_reg_enable(
2198 (pxu_p->chip_type == PX_CHIP_FIRE) ?
2199 PX_ERR_JBC : PX_ERR_UBC,
2200 pxu_p->px_address[PX_REG_XBC]);
2201 } else
2202 pxu_p->px_cb_p = cb_p;
2203
2204 /* register cb interrupt */
2205 VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
2206 (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);
2207
2208
2209 /* update cb list */
2210 mutex_enter(&cb_p->cb_mutex);
2211 if (cb_p->pxl == NULL) {
2212 is_proxy = B_TRUE;
2213 pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2214 pxl->pxp = px_p;
2215 cb_p->pxl = pxl;
2216 cb_p->sysino = fault_p->px_fh_sysino;
2217 cb_p->cpuid = intr_dist_cpuid();
2218 } else {
2219 /*
2220 * Find the last pxl, or
2221 * stop short on encountering a redundant entry, or
2222 * both.
2223 */
2224 pxl = cb_p->pxl;
2225 for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {};
2226 ASSERT(pxl->pxp != px_p);
2227
2228 /* add to linked list */
2229 pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2230 pxl_new->pxp = px_p;
2231 pxl->next = pxl_new;
2232 }
2233 cb_p->attachcnt++;
2234 mutex_exit(&cb_p->cb_mutex);
2235
2236 if (is_proxy) {
2237 /* add to interrupt redistribution list */
2238 intr_dist_add(px_cb_intr_redist, cb_p);
2239
2240 /* enable cb hw interrupt */
2241 px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
2242 }
2243
2244 return (DDI_SUCCESS);
2245 }
2246
2247 /*
2248 * px_cb_rem_intr() - Called from detach(9E) to remove its CB
2249 * interrupt vector, to shift the proxy to the next available px,
2250 * or to disable the CB interrupt when it is the last one.
2251 */
2252 void
2253 px_cb_rem_intr(px_fault_t *fault_p)
2254 {
2255 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
2256 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2257 px_cb_t *cb_p = PX2CB(px_p);
2258 px_cb_list_t *pxl, *prev;
2259 px_fault_t *f_p;
2260
2261 ASSERT(cb_p->pxl);
2262
2263 /* find and remove this px, and update cb list */
2264 mutex_enter(&cb_p->cb_mutex);
2265
2266 pxl = cb_p->pxl;
2267 if (pxl->pxp == px_p) {
2268 cb_p->pxl = pxl->next;
2269 } else {
2270 prev = pxl;
2271 pxl = pxl->next;
2272 for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) {
2273 };
2274 if (pxl == NULL) {
2275 cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
2276 "in registered CB list.", (void *)px_p);
2277 mutex_exit(&cb_p->cb_mutex);
2278 return;
2279 }
2280 prev->next = pxl->next;
2281 }
2282 pxu_p->px_cb_p = NULL;
2283 cb_p->attachcnt--;
2284 kmem_free(pxl, sizeof (px_cb_list_t));
2285 mutex_exit(&cb_p->cb_mutex);
2286
2287 /* disable cb hw interrupt */
2288 if (fault_p->px_fh_sysino == cb_p->sysino)
2289 px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
2290 IB_INTR_WAIT);
2291
2292 /* if last px, remove from interrupt redistribution list */
2293 if (cb_p->pxl == NULL)
2294 intr_dist_rem(px_cb_intr_redist, cb_p);
2295
2296 /* de-register interrupt */
2297 VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2298
2299 /* if not last px, assign next px to manage cb */
2300 mutex_enter(&cb_p->cb_mutex);
2301 if (cb_p->pxl) {
2302 if (fault_p->px_fh_sysino == cb_p->sysino) {
2303 pxp = cb_p->pxl->pxp;
2304 f_p = &pxp->px_cb_fault;
2305 cb_p->sysino = f_p->px_fh_sysino;
2306
2307 PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2308 (void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
2309 INTR_IDLE_STATE);
2310 }
2311 mutex_exit(&cb_p->cb_mutex);
2312 return;
2313 }
2314
2315 /* clean up after the last px */
2316 mutex_exit(&cb_p->cb_mutex);
2317
2318 /* px_lib_dev_init allows only FIRE and OBERON */
2319 px_err_reg_disable(
2320 (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
2321 pxu_p->px_address[PX_REG_XBC]);
2322
2323 mutex_destroy(&cb_p->cb_mutex);
2324 px_set_cb(fault_p->px_fh_dip, 0ull);
2325 kmem_free(cb_p, sizeof (px_cb_t));
2326 }
2327
2328 /*
2329 * px_cb_intr() - sun4u only, CB interrupt dispatcher
2330 */
2331 uint_t
2332 px_cb_intr(caddr_t arg)
2333 {
2334 px_cb_t *cb_p = (px_cb_t *)arg;
2335 px_t *pxp;
2336 px_fault_t *f_p;
2337 int ret;
2338
2339 mutex_enter(&cb_p->cb_mutex);
2340
2341 if (cb_p->pxl == NULL) {
2342 mutex_exit(&cb_p->cb_mutex);
2343 return (DDI_INTR_UNCLAIMED);
2344 }
2345
2346 pxp = cb_p->pxl->pxp;
2347 f_p = &pxp->px_cb_fault;
2348
2349 ret = f_p->px_err_func((caddr_t)f_p);
2350
2351 mutex_exit(&cb_p->cb_mutex);
2352 return (ret);
2353 }
2354
2355 #ifdef FMA
2356 void
2357 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2358 {
2359 /* populate the rc_status by reading the registers - TBD */
2360 }
2361 #endif /* FMA */
2362
2363 /*
2364 * cpr callback
2365 *
2366 * disable fabric error msg interrupt prior to suspending
2367 * all device drivers; re-enable fabric error msg interrupt
2368 * after all devices are resumed.
2369 */
2370 static boolean_t
2371 px_cpr_callb(void *arg, int code)
2372 {
2373 px_t *px_p = (px_t *)arg;
2374 px_ib_t *ib_p = px_p->px_ib_p;
2375 px_pec_t *pec_p = px_p->px_pec_p;
2376 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2377 caddr_t csr_base;
2378 devino_t ce_ino, nf_ino, f_ino;
2379 px_ino_t *ce_ino_p, *nf_ino_p, *f_ino_p;
2380 uint64_t imu_log_enable, imu_intr_enable;
2381 uint64_t imu_log_mask, imu_intr_mask;
2382
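/*
* Look up the devinos backing the correctable, non-fatal and fatal
* PCIe message MSI queues, and build the IMU log/interrupt enable
* masks that gate those messages.
*/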
2383 ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2384 nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2385 f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2386 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2387
2388 imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2389 imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2390
2391 imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2392 BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2393 BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2394
2395 imu_intr_mask =
2396 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2397 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2398 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2399 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2400 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2401 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2402
2403 switch (code) {
2404 case CB_CODE_CPR_CHKPT:
2405 /* disable imu rbne on corr/nonfatal/fatal errors */
2406 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2407 imu_log_enable & (~imu_log_mask));
2408
2409 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2410 imu_intr_enable & (~imu_intr_mask));
2411
2412 /* disable CORR intr mapping */
2413 px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2414
2415 /* disable NON FATAL intr mapping */
2416 px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2417
2418 /* disable FATAL intr mapping */
2419 px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2420
2421 break;
2422
2423 case CB_CODE_CPR_RESUME:
2424 pxu_p->cpr_flag = PX_NOT_CPR;
2425 mutex_enter(&ib_p->ib_ino_lst_mutex);
2426
2427 ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2428 nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2429 f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2430
2431 /* enable CORR intr mapping */
2432 if (ce_ino_p)
2433 px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2434 else
2435 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2436 "reenable PCIe Correctable msg intr.\n");
2437
2438 /* enable NON FATAL intr mapping */
2439 if (nf_ino_p)
2440 px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2441 else
2442 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2443 "reenable PCIe Non Fatal msg intr.\n");
2444
2445 /* enable FATAL intr mapping */
2446 if (f_ino_p)
2447 px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2448 else
2449 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2450 "reenable PCIe Fatal msg intr.\n");
2451
2452 mutex_exit(&ib_p->ib_ino_lst_mutex);
2453
2454 /* re-enable the corr/nonfatal/fatal message-not-enabled errors */
2455 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2456 (imu_log_mask & px_imu_log_mask)));
2457 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2458 (imu_intr_mask & px_imu_intr_mask)));
2459
2460 break;
2461 }
2462
2463 return (B_TRUE);
2464 }
2465
2466 uint64_t
2467 px_get_rng_parent_hi_mask(px_t *px_p)
2468 {
2469 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2470 uint64_t mask;
2471
2472 switch (PX_CHIP_TYPE(pxu_p)) {
2473 case PX_CHIP_OBERON:
2474 mask = OBERON_RANGE_PROP_MASK;
2475 break;
2476 case PX_CHIP_FIRE:
2477 mask = PX_RANGE_PROP_MASK;
2478 break;
2479 default:
2480 mask = PX_RANGE_PROP_MASK;
2481 }
2482
2483 return (mask);
2484 }
2485
2486 /*
2487 * fetch the chip's range property value
2488 */
2489 uint64_t
2490 px_get_range_prop(px_t *px_p, pci_ranges_t *rp, int bank)
2491 {
2492 uint64_t mask, range_prop;
2493
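/*
* Combine the masked parent_high cell with parent_low to form the
* 64-bit parent (system) address of this range entry.
*/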
2494 mask = px_get_rng_parent_hi_mask(px_p);
2495 range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
2496 rp[bank].parent_low;
2497
2498 return (range_prop);
2499 }
2500
2501 /*
2502 * fetch the config space base addr of the root complex;
2503 * note this depends on the px structure being initialized
2504 */
2505 uint64_t
2506 px_lib_get_cfgacc_base(dev_info_t *dip)
2507 {
2508 int instance = DIP_TO_INST(dip);
2509 px_t *px_p = INST_TO_STATE(instance);
2510 pci_ranges_t *rp = px_p->px_ranges_p;
2511 int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2512
2513 /* Get Fire's Physical Base Address */
2514 return (px_get_range_prop(px_p, rp, bank));
2515 }
2516
2517 /*
2518 * add cpr callback
2519 */
2520 void
2521 px_cpr_add_callb(px_t *px_p)
2522 {
2523 px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2524 CB_CL_CPR_POST_USER, "px_cpr");
2525 }
2526
2527 /*
2528 * remove cpr callback
2529 */
2530 void
2531 px_cpr_rem_callb(px_t *px_p)
2532 {
2533 (void) callb_delete(px_p->px_cprcb_id);
2534 }
2535
2536 /*ARGSUSED*/
2537 static uint_t
2538 px_hp_intr(caddr_t arg1, caddr_t arg2)
2539 {
2540 px_t *px_p = (px_t *)arg1;
2541 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2542 int rval;
2543
2544 rval = pcie_intr(px_p->px_dip);
2545
2546 #ifdef DEBUG
2547 if (rval == DDI_INTR_UNCLAIMED)
2548 cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2549 ddi_driver_name(px_p->px_dip),
2550 ddi_get_instance(px_p->px_dip));
2551 #endif
2552
2553 /* Set the interrupt state to idle */
2554 if (px_lib_intr_setstate(px_p->px_dip,
2555 pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
2556 return (DDI_INTR_UNCLAIMED);
2557
2558 return (rval);
2559 }
2560
2561 int
2562 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2563 {
2564 px_t *px_p = DIP_TO_STATE(dip);
2565 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2566 uint64_t ret;
2567
2568 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2569 "hotplug-capable") == 0)
2570 return (DDI_FAILURE);
2571
2572 if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2573 if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2574 px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) !=
2575 DDI_SUCCESS) {
2576 #ifdef DEBUG
2577 cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2578 ddi_driver_name(px_p->px_dip),
2579 ddi_get_instance(px_p->px_dip));
2580 #endif
2581 return (DDI_FAILURE);
2582 }
2583
2584 VERIFY(add_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI,
2585 (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);
2586
2587 px_ib_intr_enable(px_p, intr_dist_cpuid(),
2588 px_p->px_inos[PX_INTR_HOTPLUG]);
2589 }
2590
2591 return (ret);
2592 }
2593
2594 void
2595 px_lib_hotplug_uninit(dev_info_t *dip)
2596 {
2597 if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2598 px_t *px_p = DIP_TO_STATE(dip);
2599 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2600
2601 px_ib_intr_disable(px_p->px_ib_p,
2602 px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT);
2603
2604 VERIFY(rem_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI) == 0);
2605 }
2606 }
2607
2608 /*
2609 * px_hp_intr_redist() - sun4u only, HP interrupt redistribution
2610 */
2611 void
2612 px_hp_intr_redist(px_t *px_p)
2613 {
2614 pcie_bus_t *bus_p = PCIE_DIP2BUS(px_p->px_dip);
2615
2616 if (px_p && PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
2617 px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
2618 px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE);
2619 }
2620 }
2621
2622 boolean_t
2623 px_lib_is_in_drain_state(px_t *px_p)
2624 {
2625 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2626 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2627 uint64_t drain_status;
2628
2629 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
2630 drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
2631 } else {
2632 drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
2633 }
2634
2635 return (drain_status);
2636 }
2637
2638 pcie_req_id_t
2639 px_lib_get_bdf(px_t *px_p)
2640 {
2641 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2642 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2643 pcie_req_id_t bdf;
2644
2645 bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);
2646
2647 return (bdf);
2648 }
2649
2650 /*ARGSUSED*/
2651 int
2652 px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
2653 {
2654 pxu_t *pxu_p;
2655 caddr_t csr_base;
2656
2657 pxu_p = (pxu_t *)px_p->px_plat_p;
2658
2659 if (pxu_p == NULL)
2660 return (DDI_FAILURE);
2661
2662 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2663
2664
2665 *mps = CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES) &
2666 TLU_DEVICE_CAPABILITIES_MPS_MASK;
2667
2668 return (DDI_SUCCESS);
2669 }
2670
2671 /*ARGSUSED*/
2672 int
2673 px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
2674 {
2675 pxu_t *pxu_p;
2676 caddr_t csr_base;
2677 uint64_t dev_ctrl;
2678 int link_width, val;
2679 px_chip_type_t chip_type = px_identity_init(px_p);
2680
2681 pxu_p = (pxu_t *)px_p->px_plat_p;
2682
2683 if (pxu_p == NULL)
2684 return (DDI_FAILURE);
2685
2686 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2687
2688 dev_ctrl = CSR_XR(csr_base, TLU_DEVICE_CONTROL);
2689 dev_ctrl |= (mps << TLU_DEVICE_CONTROL_MPS);
2690
2691 CSR_XS(csr_base, TLU_DEVICE_CONTROL, dev_ctrl);
2692
2693 link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
2694
2695 /*
2696 * Convert link_width to match timer array configuration.
2697 */
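/*
* Column mapping: x1 -> 0, x4 -> 1, x8 -> 2, x16 -> 3, indexing the
* LINK_WIDTH_ARR_SIZE columns of px_replay_timer_table and
* px_acknak_timer_table; unrecognized widths default to the x1 column.
*/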
2698 switch (link_width) {
2699 case 1:
2700 link_width = 0;
2701 break;
2702 case 4:
2703 link_width = 1;
2704 break;
2705 case 8:
2706 link_width = 2;
2707 break;
2708 case 16:
2709 link_width = 3;
2710 break;
2711 default:
2712 link_width = 0;
2713 }
2714
2715 val = px_replay_timer_table[mps][link_width];
2716 CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
2717
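/*
* The frequent-NAK (ACKNAK) latency threshold below is programmed on
* Fire only; for Oberon we are done once the replay timer is set.
*/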
2718 if (chip_type == PX_CHIP_OBERON)
2719 return (DDI_SUCCESS);
2720
2721 val = px_acknak_timer_table[mps][link_width];
2722 CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
2723
2724 return (DDI_SUCCESS);
2725 }
2726
2727 /*ARGSUSED*/
2728 int
2729 px_lib_fabric_sync(dev_info_t *dip)
2730 {
2731 /* a no-op on the sun4u platform */
2732 return (DDI_SUCCESS);
2733 }
2734