/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/ddi.h>
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/vmem.h>
#include <sys/intr.h>
#include <sys/ivintr.h>
#include <sys/errno.h>
#include <sys/hypervisor_api.h>
#include <sys/hsvc.h>
#include <px_obj.h>
#include <sys/machsystm.h>
#include <sys/sunndi.h>
#include <sys/pcie_impl.h>
#include "px_lib4v.h"
#include "px_err.h"
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgacc_4v.h>


/* mask for the ranges property in calculating the real PFN range */
uint_t px_ranges_phi_mask = ((1 << 28) - 1);

/*
 * Hypervisor VPCI services information for the px nexus driver.
 */
static uint64_t	px_vpci_maj_ver; /* Negotiated VPCI API major version */
static uint64_t	px_vpci_min_ver; /* Negotiated VPCI API minor version */
static uint_t px_vpci_users = 0; /* VPCI API users */
static hsvc_info_t px_hsvc_vpci = {
	HSVC_REV_1, NULL, HSVC_GROUP_VPCI, PX_VPCI_MAJOR_VER,
	PX_VPCI_MINOR_VER, "PX"
};

/*
 * Hypervisor SDIO services information for the px nexus driver.
 */
static uint64_t	px_sdio_min_ver; /* Negotiated SDIO API minor version */
static uint_t px_sdio_users = 0; /* SDIO API users */
static hsvc_info_t px_hsvc_sdio = {
	HSVC_REV_1, NULL, HSVC_GROUP_SDIO, PX_SDIO_MAJOR_VER,
	PX_SDIO_MINOR_VER, "PX"
};

/*
 * Hypervisor SDIO ERR services information for the px nexus driver.
 */
static uint64_t	px_sdio_err_min_ver; /* Negotiated SDIO ERR API */
					/* minor version */
static uint_t px_sdio_err_users = 0; /* SDIO ERR API users */
static hsvc_info_t px_hsvc_sdio_err = {
	HSVC_REV_1, NULL, HSVC_GROUP_SDIO_ERR, PX_SDIO_ERR_MAJOR_VER,
	PX_SDIO_ERR_MINOR_VER, "PX"
};

#define	CHILD_LOANED	"child_loaned"
static int px_lib_count_waiting_dev(dev_info_t *);

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	px_nexus_regspec_t	*rp;
	uint_t			reglen;
	int			ret;
	px_t			*px_p = DIP_TO_STATE(dip);
	uint64_t		mjrnum;
	uint64_t		mnrnum;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

	/*
	 * Check the HV interrupt group API version.
	 * This driver uses the old interrupt routines, which are supported
	 * by old firmware in the CORE API group and by newer firmware in
	 * the INTR API group. Support for these calls will be dropped
	 * once the INTR API group major version goes to 2.
	 */
	if ((hsvc_version(HSVC_GROUP_INTR, &mjrnum, &mnrnum) == 0) &&
	    (mjrnum > 1)) {
		cmn_err(CE_WARN, "px: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", mjrnum, mnrnum);
		return (ENOTSUP);
	}

	ret = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (uchar_t **)&rp, &reglen);
	if (ret != DDI_PROP_SUCCESS) {
		DBG(DBG_ATTACH, dip, "px_lib_dev_init failed ret=%d\n", ret);
		return (DDI_FAILURE);
	}

122
123 /*
124 * Initilize device handle. The device handle uniquely identifies
125 * a SUN4V device. It consists of the lower 28-bits of the hi-cell
126 * of the first entry of the SUN4V device's "reg" property as
127 * defined by the SUN4V Bus Binding to Open Firmware.
128 */
129 *dev_hdl = (devhandle_t)((rp->phys_addr >> 32) & DEVHDLE_MASK);
130 ddi_prop_free(rp);
131
	/*
	 * The hotplug implementation requires this property to be
	 * associated with any indirect PCI config access services.
	 */
	(void) ddi_prop_update_int(makedevice(ddi_driver_major(dip),
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR)), dip,
	    PCI_BUS_CONF_MAP_PROP, 1);

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	/*
	 * If a /pci node has a pci-intx-not-supported property, the fabric
	 * does not support fixed (INTx) interrupts.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pci-intx-not-supported")) {
		DBG(DBG_ATTACH, dip, "px_lib_dev_init: "
		    "pci-intx-not-supported is not found, dip=0x%p\n", dip);
		px_p->px_supp_intr_types |= DDI_INTR_TYPE_FIXED;
	}

	/*
	 * Negotiate the API version for VPCI hypervisor services.
	 */
	if (px_vpci_users == 0) {
		if ((ret = hsvc_register(&px_hsvc_vpci, &px_vpci_min_ver))
		    == 0) {
			px_vpci_maj_ver = px_hsvc_vpci.hsvc_major;
			goto hv_negotiation_complete;
		}
		/*
		 * Negotiation with the latest known VPCI hypervisor services
		 * failed. Fall back to version 1.0.
		 */
		px_hsvc_vpci.hsvc_major = PX_HSVC_MAJOR_VER_1;
		px_hsvc_vpci.hsvc_minor = PX_HSVC_MINOR_VER_0;

		if ((ret = hsvc_register(&px_hsvc_vpci, &px_vpci_min_ver))
		    == 0) {
			px_vpci_maj_ver = px_hsvc_vpci.hsvc_major;
			goto hv_negotiation_complete;
		}

		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
		    px_hsvc_vpci.hsvc_modname, px_hsvc_vpci.hsvc_group,
		    px_hsvc_vpci.hsvc_major, px_hsvc_vpci.hsvc_minor, ret);

		return (DDI_FAILURE);
	}
hv_negotiation_complete:

	px_vpci_users++;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: negotiated VPCI API version, "
	    "major 0x%lx minor 0x%lx\n", px_vpci_maj_ver,
	    px_vpci_min_ver);

	/*
	 * Negotiate the API version for SDIO hypervisor services.
	 */
	if ((px_sdio_users == 0) &&
	    ((ret = hsvc_register(&px_hsvc_sdio, &px_sdio_min_ver)) != 0)) {
		DBG(DBG_ATTACH, dip, "%s: cannot negotiate hypervisor "
		    "services group: 0x%lx major: 0x%lx minor: 0x%lx "
		    "errno: %d\n", px_hsvc_sdio.hsvc_modname,
		    px_hsvc_sdio.hsvc_group, px_hsvc_sdio.hsvc_major,
		    px_hsvc_sdio.hsvc_minor, ret);
	} else {
		px_sdio_users++;
		DBG(DBG_ATTACH, dip, "px_lib_dev_init: negotiated SDIO API "
		    "version, major 0x%lx minor 0x%lx\n",
		    px_hsvc_sdio.hsvc_major, px_sdio_min_ver);
	}

	/*
	 * Negotiate the API version for SDIO ERR hypervisor services.
	 */
	if ((px_sdio_err_users == 0) &&
	    ((ret = hsvc_register(&px_hsvc_sdio_err,
	    &px_sdio_err_min_ver)) != 0)) {
		DBG(DBG_ATTACH, dip, "%s: cannot negotiate SDIO ERR hypervisor "
		    "services group: 0x%lx major: 0x%lx minor: 0x%lx "
		    "errno: %d\n", px_hsvc_sdio_err.hsvc_modname,
		    px_hsvc_sdio_err.hsvc_group, px_hsvc_sdio_err.hsvc_major,
		    px_hsvc_sdio_err.hsvc_minor, ret);
	} else {
		px_sdio_err_users++;
		DBG(DBG_ATTACH, dip, "px_lib_dev_init: negotiated SDIO ERR API "
		    "version, major 0x%lx minor 0x%lx\n",
		    px_hsvc_sdio_err.hsvc_major, px_sdio_err_min_ver);
	}

	/*
	 * Find out how many devices we need to wait for under this RC
	 * before we issue the fabric sync hypercall.
	 */
	px_p->px_plat_p = (void *)(uintptr_t)px_lib_count_waiting_dev(dip);
	DBG(DBG_ATTACH, dip, "Found %d bridges that need waiting under "
	    "RC %p\n", (int)(uintptr_t)px_p->px_plat_p, dip);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_dev_fini(dev_info_t *dip)
{
	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	(void) ddi_prop_remove(makedevice(ddi_driver_major(dip),
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR)), dip,
	    PCI_BUS_CONF_MAP_PROP);

	if (--px_vpci_users == 0)
		(void) hsvc_unregister(&px_hsvc_vpci);

	if (--px_sdio_users == 0)
		(void) hsvc_unregister(&px_hsvc_sdio);

	if (--px_sdio_err_users == 0)
		(void) hsvc_unregister(&px_hsvc_sdio_err);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getvalid(sysino,
	    (int *)intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
	    *intr_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

	if ((ret = hvio_intr_setvalid(sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getstate(sysino, (int *)intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
	    *intr_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
	    "intr_state 0x%x\n", dip, sysino, intr_state);

	if ((ret = hvio_intr_setstate(sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_gettarget(sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_gettarget failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
	    "cpuid 0x%x\n", dip, sysino, cpuid);

	ret = hvio_intr_settarget(sysino, cpuid);
	if (ret == H_ECPUERROR) {
		cmn_err(CE_PANIC,
		    "px_lib_intr_settarget: hvio_intr_settarget failed, "
		    "ret = 0x%lx, cpuid = 0x%x, sysino = 0x%lx\n", ret,
		    cpuid, sysino);
	} else if (ret != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_settarget failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ino_t	*ino_p;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* Reset all Interrupts */
	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
		if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS) {
			/* Drop the ino list lock before bailing out. */
			mutex_exit(&ib_p->ib_ino_lst_mutex);
			return (BF_FATAL);
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t attr, void *addr, size_t pfn_index, int flags)
{
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	io_page_list_t	*pfns, *pfn_p;
	pages_t		ttes_mapped = 0;
	int		i, err = DDI_SUCCESS;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n",
	    dip, tsbid, pages, attr, addr, pfn_index, flags);

	if ((pfns = pfn_p = kmem_zalloc((pages * sizeof (io_page_list_t)),
	    KM_NOSLEEP)) == NULL) {
		DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: kmem_zalloc failed\n");
		return (DDI_FAILURE);
	}

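	/* Build the list of real page addresses the HV expects, one per TTE. */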
	for (i = 0; i < pages; i++)
		pfns[i] = MMU_PTOB(PX_ADDR2PFN(addr, pfn_index, flags, i));

	/*
	 * If the HV VPCI version is 2.0 or higher, pass the BDF, phantom
	 * function, and relaxed ordering attributes. Otherwise, pass only
	 * the read and write attributes.
	 */
	if ((px_vpci_maj_ver == PX_HSVC_MAJOR_VER_1) &&
	    (px_vpci_min_ver == PX_HSVC_MINOR_VER_0))
		attr = attr & (PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE);

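	/*
	 * The page list is virtually contiguous but may cross physical page
	 * boundaries, and only its starting RA is translated below; hand
	 * each hvio_iommu_map() call just the portion of the list that fits
	 * within a single physical page.
	 */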
	while ((ttes_mapped = pfn_p - pfns) < pages) {
		uintptr_t	ra = va_to_pa(pfn_p);
		pages_t		ttes2map;
		uint64_t	ret;

		ttes2map = (MMU_PAGE_SIZE - P2PHASE(ra, MMU_PAGE_SIZE)) >> 3;
		ra = MMU_PTOB(MMU_BTOP(ra));

		for (ttes2map = MIN(ttes2map, pages - ttes_mapped); ttes2map;
		    ttes2map -= ttes_mapped, pfn_p += ttes_mapped) {

			ttes_mapped = 0;
			if ((ret = hvio_iommu_map(DIP_TO_HANDLE(dip),
			    PCI_TSBID(tsb_num, tsb_index + (pfn_p - pfns)),
			    ttes2map, attr, (io_page_list_t *)(ra |
			    ((uintptr_t)pfn_p & MMU_PAGE_OFFSET)),
			    &ttes_mapped)) != H_EOK) {
				DBG(DBG_LIB_DMA, dip, "hvio_iommu_map failed "
				    "ret 0x%lx\n", ret);

				ttes_mapped = pfn_p - pfns;
				err = DDI_FAILURE;
				goto cleanup;
			}

			DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: tsb_num 0x%x "
			    "tsb_index 0x%lx ttes_to_map 0x%lx attr 0x%llx "
			    "ra 0x%p ttes_mapped 0x%x\n", tsb_num,
			    tsb_index + (pfn_p - pfns), ttes2map, attr,
			    ra | ((uintptr_t)pfn_p & MMU_PAGE_OFFSET),
			    ttes_mapped);
		}
	}

cleanup:
	if ((err == DDI_FAILURE) && ttes_mapped)
		(void) px_lib_iommu_demap(dip, tsbid, ttes_mapped);

	kmem_free(pfns, pages * sizeof (io_page_list_t));
	return (err);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	pages_t		ttes2demap, ttes_demapped = 0;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	for (ttes2demap = pages; ttes2demap;
	    ttes2demap -= ttes_demapped, tsb_index += ttes_demapped) {
		if ((ret = hvio_iommu_demap(DIP_TO_HANDLE(dip),
		    PCI_TSBID(tsb_num, tsb_index), ttes2demap,
		    &ttes_demapped)) != H_EOK) {
			DBG(DBG_LIB_DMA, dip, "hvio_iommu_demap failed, "
			    "ret 0x%lx\n", ret);

			return (DDI_FAILURE);
		}

		DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: tsb_num 0x%x "
		    "tsb_index 0x%lx ttes_to_demap 0x%lx ttes_demapped 0x%x\n",
		    tsb_num, tsb_index, ttes2demap, ttes_demapped);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
    r_addr_t *r_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), tsbid,
	    attr_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx "
	    "r_addr 0x%llx\n", *attr_p, *r_addr_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_detach(px_t *px_p)
{
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
	return (PX_RANGE_PROP_MASK);
}

/*
 * Check DMA attributes against the system bypass ranges.
 * A sun4v device must be capable of generating the entire 64-bit
 * address in order to perform bypass DMA.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
    uint64_t *lo_p, uint64_t *hi_p)
{
	if ((attr_p->dma_attr_addr_lo != 0ull) ||
	    (attr_p->dma_attr_addr_hi != UINT64_MAX)) {

		return (DDI_DMA_BADATTR);
	}

	*lo_p = 0ull;
	*hi_p = UINT64_MAX;

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
    io_addr_t *io_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%llx\n", dip, ra, attr);

	/*
	 * If the HV VPCI version is 2.0 or higher, pass the BDF, phantom
	 * function, and relaxed ordering attributes. Otherwise, pass only
	 * the read and write attributes.
	 */
	if ((px_vpci_maj_ver == PX_HSVC_MAJOR_VER_1) &&
	    (px_vpci_min_ver == PX_HSVC_MINOR_VER_0))
		attr &= PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE;

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
	    attr, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (ret == H_ENOTSUPPORTED ? DDI_ENOTSUP : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}

/*
 * Returns any needed IO address bit(s) for relaxed ordering in IOMMU
 * bypass mode.
 */
/* ARGSUSED */
uint64_t
px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr)
{
	return (ioaddr);
}

/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	uint64_t sync_dir;
	size_t bytes_synced;
	int end, idx;
	off_t pg_off;
	devhandle_t hdl = DIP_TO_HANDLE(dip); /* need to cache hdl */

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	if (!len)
		len = mp->dmai_size;

	if (mp->dmai_rflags & DDI_DMA_READ)
		sync_dir = HVIO_DMA_SYNC_DIR_FROM_DEV;
	else
		sync_dir = HVIO_DMA_SYNC_DIR_TO_DEV;

	off += mp->dmai_offset;
	pg_off = off & MMU_PAGEOFFSET;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: page offset %x size %x\n",
	    pg_off, len);

	/* sync on a page-by-page basis */
	end = MMU_BTOPR(off + len - 1);
	for (idx = MMU_BTOP(off); idx < end; idx++,
	    len -= bytes_synced, pg_off = 0) {
		size_t bytes_to_sync = MIN(len, MMU_PAGESIZE - pg_off);

		if (hvio_dma_sync(hdl, MMU_PTOB(PX_GET_MP_PFN(mp, idx)) +
		    pg_off, bytes_to_sync, sync_dir, &bytes_synced) != H_EOK)
			break;

		DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: Called hvio_dma_sync "
		    "ra = %p bytes to sync = %x bytes synced %x\n",
		    MMU_PTOB(PX_GET_MP_PFN(mp, idx)) + pg_off, bytes_to_sync,
		    bytes_synced);

		if (bytes_to_sync != bytes_synced)
			break;
	}

	return (len ? DDI_FAILURE : DDI_SUCCESS);
}


/*
 * MSIQ Functions:
 */

/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	r_addr_t	ra;
	size_t		msiq_size;
	uint_t		rec_cnt;
	int		i, err = DDI_SUCCESS;
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	/* sun4v requires all EQ allocations to be on a queue-size boundary */
	if ((msiq_state_p->msiq_buf_p = contig_mem_alloc_align(
	    msiq_state_p->msiq_cnt * msiq_size, msiq_size)) == NULL) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_msiq_init: Contig alloc failed\n");

		return (DDI_FAILURE);
	}

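	/*
	 * Carve the buffer into per-queue regions, configure each queue's
	 * RA with the HV, and read the configuration back to verify it.
	 */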
	for (i = 0; i < msiq_state_p->msiq_cnt; i++) {
		msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
		    ((caddr_t)msiq_state_p->msiq_buf_p + (i * msiq_size));

		ra = (r_addr_t)va_to_pa((caddr_t)msiq_state_p->msiq_buf_p +
		    (i * msiq_size));

		if ((ret = hvio_msiq_conf(DIP_TO_HANDLE(dip),
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    ra, msiq_state_p->msiq_rec_cnt)) != H_EOK) {
			DBG(DBG_LIB_MSIQ, dip,
			    "hvio_msiq_conf failed, ret 0x%lx\n", ret);
			err = DDI_FAILURE;
			break;
		}

		if ((err = px_lib_msiq_info(dip,
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    &ra, &rec_cnt)) != DDI_SUCCESS) {
			DBG(DBG_LIB_MSIQ, dip,
			    "px_lib_msiq_info failed, ret 0x%x\n", err);
			err = DDI_FAILURE;
			break;
		}

		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_msiq_init: ra 0x%p rec_cnt 0x%x\n", ra, rec_cnt);
	}

	return (err);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	if (msiq_state_p->msiq_buf_p != NULL)
		contig_mem_free(msiq_state_p->msiq_buf_p,
		    msiq_state_p->msiq_cnt * msiq_size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_info(DIP_TO_HANDLE(dip),
	    msiq_id, ra_p, msiq_rec_cnt_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_info failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
	    ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

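	/*
	 * The HV returns the head as a byte offset; convert it to a
	 * record index.
	 */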
	*msiq_head_p = (*msiq_head_p / sizeof (msiq_rec_t));

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

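	/* The HV expects a byte offset, so scale the record index back up. */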
	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head * sizeof (msiq_rec_t))) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

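	/* As with the head, convert the tail byte offset to a record index. */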
	*msiq_tail_p = (*msiq_tail_p / sizeof (msiq_rec_t));
	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
    msiq_rec_t *msiq_rec_p)
{
	msiq_rec_t	*curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p\n", dip);

	if (!curr_msiq_rec_p->msiq_rec_type) {
		/* No record is pending; hand back one marked invalid. */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	*msiq_rec_p = *curr_msiq_rec_p;
}

/*ARGSUSED*/
void
px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
{
	msiq_rec_t	*curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p\n", dip);

	/* Zero out the msiq_rec_type field to mark the record consumed. */
	curr_msiq_rec_p->msiq_rec_type = 0;
}

/*
 * MSI Functions:
 */

/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	/* Noop */
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id, msitype)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */

/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor and all functions are noops.
 */
/*ARGSUSED*/
int
px_lib_suspend(dev_info_t *dip)
{
	DBG(DBG_ATTACH, dip, "px_lib_suspend: Not supported\n");

	/* Not supported */
	return (DDI_FAILURE);
}

/*ARGSUSED*/
void
px_lib_resume(dev_info_t *dip)
{
	DBG(DBG_ATTACH, dip, "px_lib_resume: Not supported\n");

	/* Noop */
}

/*
 * Misc Functions:
 */
/*ARGSUSED*/
static int
px_lib_config_get(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t *data_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_CFG, dip, "px_lib_config_get: dip 0x%p, bdf 0x%llx "
	    "off 0x%x size 0x%x\n", dip, bdf, off, size);

	if ((ret = hvio_config_get(DIP_TO_HANDLE(dip), bdf, off,
	    size, data_p)) != H_EOK) {
		DBG(DBG_LIB_CFG, dip,
		    "hvio_config_get failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}
	DBG(DBG_LIB_CFG, dip, "px_config_get: data 0x%x\n", data_p->dw);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
px_lib_config_put(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t data)
{
	uint64_t	ret;

	DBG(DBG_LIB_CFG, dip, "px_lib_config_put: dip 0x%p, bdf 0x%llx "
	    "off 0x%x size 0x%x data 0x%llx\n", dip, bdf, off, size, data.qw);

	if ((ret = hvio_config_put(DIP_TO_HANDLE(dip), bdf, off,
	    size, data)) != H_EOK) {
		DBG(DBG_LIB_CFG, dip,
		    "hvio_config_put failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static uint32_t
px_pci_config_get(ddi_acc_impl_t *handle, uint32_t *addr, int size)
{
	px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
	    handle->ahi_common.ah_bus_private;
	pcie_bus_t *busp = NULL;
	dev_info_t *cdip = NULL;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	uint16_t off = (uint16_t)(uintptr_t)(addr - vaddr) & 0xfff;
	uint64_t rdata = 0;

	if (px_lib_config_get(px_pvt->dip, pci_dev_addr, off,
	    size, (pci_cfg_data_t *)&rdata) != DDI_SUCCESS)
		/* XXX update error kstats */
		return (0xffffffff);

	if ((cdip = pcie_find_dip_by_bdf(px_pvt->dip, pci_dev_addr >> 8)) !=
	    NULL)
		busp = PCIE_DIP2BUS(cdip);
	/*
	 * This can be called early, before busp or busp->bus_dom has
	 * been initialized, so check both before invoking
	 * PCIE_IS_ASSIGNED.
	 */
	if (busp && PCIE_BUS2DOM(busp) && PCIE_IS_ASSIGNED(busp)) {
		if (off == PCI_CONF_VENID && size == 2)
			rdata = busp->bus_dev_ven_id & 0xffff;
		else if (off == PCI_CONF_DEVID && size == 2)
			rdata = busp->bus_dev_ven_id >> 16;
		else if (off == PCI_CONF_VENID && size == 4)
			rdata = busp->bus_dev_ven_id;
	}
	return ((uint32_t)rdata);
}

static void
px_pci_config_put(ddi_acc_impl_t *handle, uint32_t *addr,
    int size, pci_cfg_data_t wdata)
{
	px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
	    handle->ahi_common.ah_bus_private;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	uint16_t off = (uint16_t)(uintptr_t)(addr - vaddr) & 0xfff;

	if (px_lib_config_put(px_pvt->dip, pci_dev_addr, off,
	    size, wdata) != DDI_SUCCESS) {
		/*EMPTY*/
		/* XXX update error kstats */
	}
}

static uint8_t
px_pci_config_get8(ddi_acc_impl_t *handle, uint8_t *addr)
{
	return ((uint8_t)px_pci_config_get(handle, (uint32_t *)addr, 1));
}

static uint16_t
px_pci_config_get16(ddi_acc_impl_t *handle, uint16_t *addr)
{
	return ((uint16_t)px_pci_config_get(handle, (uint32_t *)addr, 2));
}

static uint32_t
px_pci_config_get32(ddi_acc_impl_t *handle, uint32_t *addr)
{
	return ((uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4));
}

static uint64_t
px_pci_config_get64(ddi_acc_impl_t *handle, uint64_t *addr)
{
	uint32_t rdatah, rdatal;

	rdatal = (uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4);
	rdatah = (uint32_t)px_pci_config_get(handle,
	    (uint32_t *)((char *)addr+4), 4);
	return (((uint64_t)rdatah << 32) | rdatal);
}

static void
px_pci_config_put8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint8_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 1, wdata);
}

static void
px_pci_config_put16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint16_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 2, wdata);
}

static void
px_pci_config_put32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint32_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
}

static void
px_pci_config_put64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint32_t)(data & 0xffffffff);
	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
	wdata.qw = (uint32_t)((data >> 32) & 0xffffffff);
	px_pci_config_put(handle, (uint32_t *)((char *)addr+4), 4, wdata);
}

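/*
 * Function to rep read 8 bit data from PCI configuration space,
 * routed through the hypervisor.
 */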
static void
px_pci_config_rep_get8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get8(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get8(handle, dev_addr);
}

/*
 * Function to rep read 16 bit data from PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_get16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get16(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get16(handle, dev_addr);
}

/*
 * Function to rep read 32 bit data from PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_get32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get32(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get32(handle, dev_addr);
}

/*
 * Function to rep read 64 bit data from PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_get64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get64(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get64(handle, dev_addr);
}

/*
 * Function to rep write 8 bit data into PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_put8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put8(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put8(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 16 bit data into PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_put16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put16(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put16(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 32 bit data into PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_put32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put32(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put32(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 64 bit data into PCI configuration space,
 * routed through the hypervisor.
 */
static void
px_pci_config_rep_put64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put64(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put64(handle, dev_addr, *host_addr++);
}

/*
 * Provide a private access handle to route config access calls to the
 * hypervisor. Beware: Do all error checking for config space accesses
 * before calling this function, i.e. do error checking from the calling
 * function. Due to the lack of a meaningful error code in DDI, the
 * guaranteed return of DDI_SUCCESS from here keeps the generic code
 * readable and easier to organize.
 */
/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
	int fmcap;
	ndi_err_t *errp;
	on_trap_data_t *otp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uchar_t busnum;	/* bus number */
	uchar_t devnum;	/* device number */
	uchar_t funcnum; /* function number */
	px_config_acc_pvt_t *px_pvt;

	hp = (ddi_acc_hdl_t *)mp->map_handlep;
	ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* Check for mapping teardown operation */
	if ((mp->map_op == DDI_MO_UNMAP) ||
	    (mp->map_op == DDI_MO_UNLOCK)) {
		/* free up memory allocated for the private access handle. */
		px_pvt = (px_config_acc_pvt_t *)hp->ah_bus_private;
		kmem_free((void *)px_pvt, sizeof (px_config_acc_pvt_t));

		/* unmap operation of PCI IO/config space. */
		return (DDI_SUCCESS);
	}

	fmcap = ddi_fm_capable(dip);
	if (DDI_FM_ACC_ERR_CAP(fmcap)) {
		errp = ((ddi_acc_impl_t *)hp)->ahi_err;
		otp = (on_trap_data_t *)errp->err_ontrap;
		otp->ot_handle = (void *)(hp);
		otp->ot_prot = OT_DATA_ACCESS;
		errp->err_status = DDI_FM_OK;
		errp->err_expected = DDI_FM_ERR_UNEXPECTED;
		errp->err_cf = px_err_cfg_hdl_check;
	}

	ap->ahi_get8 = px_pci_config_get8;
	ap->ahi_get16 = px_pci_config_get16;
	ap->ahi_get32 = px_pci_config_get32;
	ap->ahi_get64 = px_pci_config_get64;
	ap->ahi_put8 = px_pci_config_put8;
	ap->ahi_put16 = px_pci_config_put16;
	ap->ahi_put32 = px_pci_config_put32;
	ap->ahi_put64 = px_pci_config_put64;
	ap->ahi_rep_get8 = px_pci_config_rep_get8;
	ap->ahi_rep_get16 = px_pci_config_rep_get16;
	ap->ahi_rep_get32 = px_pci_config_rep_get32;
	ap->ahi_rep_get64 = px_pci_config_rep_get64;
	ap->ahi_rep_put8 = px_pci_config_rep_put8;
	ap->ahi_rep_put16 = px_pci_config_rep_put16;
	ap->ahi_rep_put32 = px_pci_config_rep_put32;
	ap->ahi_rep_put64 = px_pci_config_rep_put64;

	/* Initialize to default check/notify functions */
	ap->ahi_fault = 0;
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;

	/* allocate memory for our private handle */
	px_pvt = (px_config_acc_pvt_t *)
	    kmem_zalloc(sizeof (px_config_acc_pvt_t), KM_SLEEP);
	hp->ah_bus_private = (void *)px_pvt;

	busnum = PCI_REG_BUS_G(rp->pci_phys_hi);
	devnum = PCI_REG_DEV_G(rp->pci_phys_hi);
	funcnum = PCI_REG_FUNC_G(rp->pci_phys_hi);

	/* set up private data for use during IO routines */

	/* addr needed by the HV APIs */
	px_pvt->raddr = busnum << 16 | devnum << 11 | funcnum << 8;
	/*
	 * Address that specifies the actual offset into the 256MB
	 * memory-mapped configuration space, 4K per device.
	 * The low 12 bits form the offset into the 4K config space.
	 * This address is only used during the IO routines to calculate
	 * the offset at which the transaction must be performed.
	 * Drivers bypassing DDI functions to access PCI config space will
	 * panic the system, since the following is a bogus virtual address.
	 */
	px_pvt->vaddr = busnum << 20 | devnum << 15 | funcnum << 12 | off;
	px_pvt->dip = dip;

	DBG(DBG_LIB_CFG, dip, "px_config_setup: raddr 0x%x, vaddr 0x%x\n",
	    px_pvt->raddr, px_pvt->vaddr);
	*addrp = (caddr_t)(uintptr_t)px_pvt->vaddr;
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
}

/*
 * px_lib_log_safeacc_err:
 * Imitate a cpu/mem trap call when a peek/poke fails.
 * This will initiate something similar to px_fm_callback.
 */
static void
px_lib_log_safeacc_err(px_t *px_p, ddi_acc_handle_t handle, int fme_flag,
    r_addr_t addr)
{
	uint32_t	addr_high, addr_low;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;
	pci_ranges_t	*ranges_p;
	int		range_len, i;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
	ddi_fm_error_t derr;

	if (px_fm_enter(px_p) != DDI_SUCCESS)
		return;

	derr.fme_status = DDI_FM_NONFATAL;
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_flag = fme_flag;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_acc_handle = handle;
	if (hp)
		hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	addr_high = (uint32_t)(addr >> 32);
	addr_low = (uint32_t)addr;

	/*
	 * Make sure this failed load came from this PCIe port. Check by
	 * matching the upper 32 bits of the address with the ranges property.
	 */
	range_len = px_p->px_ranges_length / sizeof (pci_ranges_t);
	i = 0;
	for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
		if (ranges_p->parent_high == addr_high) {
			switch (ranges_p->child_high & PCI_ADDR_MASK) {
			case PCI_ADDR_CONFIG:
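				/*
				 * Config space is mapped 4K per function;
				 * shifting the offset down 12 bits yields
				 * the BDF.
				 */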
				bdf = (pcie_req_id_t)(addr_low >> 12);
				break;
			default:
				bdf = PCIE_INVALID_BDF;
				break;
			}
			break;
		}
	}

	(void) px_rp_en_q(px_p, bdf, addr, 0);
	(void) px_scan_fabric(px_p, px_p->px_dip, &derr);
	px_fm_exit(px_p);
}


#ifdef DEBUG
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif /* DEBUG */

/*
 * Do a safe write to a device.
 *
 * When this function is given a handle (cautious access), all errors are
 * suppressed.
 *
 * When this function is not given a handle (poke), only Unsupported Request
 * and Completer Abort errors are suppressed.
 *
 * In all cases, all errors are returned in the function return status.
 */

int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

	size_t repcount = in_args->repcount;
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;

	int err = DDI_SUCCESS;
	uint64_t hvio_poke_status;
	uint32_t wrt_stat;

	r_addr_t ra;
	uint64_t pokeval;
	pcie_req_id_t bdf;

	ra = (r_addr_t)va_to_pa((void *)dev_addr);
	for (; repcount; repcount--) {

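		/* Fetch the poke value from the caller's buffer at its
		 * natural width. */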
1740 switch (size) {
1741 case sizeof (uint8_t):
1742 pokeval = *(uint8_t *)host_addr;
1743 break;
1744 case sizeof (uint16_t):
1745 pokeval = *(uint16_t *)host_addr;
1746 break;
1747 case sizeof (uint32_t):
1748 pokeval = *(uint32_t *)host_addr;
1749 break;
1750 case sizeof (uint64_t):
1751 pokeval = *(uint64_t *)host_addr;
1752 break;
1753 default:
1754 DBG(DBG_MAP, px_p->px_dip,
1755 "poke: invalid size %d passed\n", size);
1756 err = DDI_FAILURE;
1757 goto done;
1758 }
1759
		/*
		 * Look up the requester's BDF before taking any locks,
		 * so that a failure here cannot leak the serialization
		 * acquired below.
		 */
		if (pcie_get_bdf_from_dip(rdip, &bdf) != DDI_SUCCESS) {
			err = DDI_FAILURE;
			goto done;
		}

		/*
		 * Grab pokefault mutex since hypervisor does not guarantee
		 * poke serialization.
		 */
		if (hp) {
			i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
			pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
		} else {
			mutex_enter(&pec_p->pec_pokefault_mutex);
			pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
		}

		hvio_poke_status = hvio_poke(px_p->px_dev_hdl, ra, size,
		    pokeval, bdf << 8, &wrt_stat);

		if ((hvio_poke_status != H_EOK) || (wrt_stat != H_EOK)) {
			err = DDI_FAILURE;
#ifdef DEBUG
			px_pokefault_cnt++;
#endif
			/*
			 * For CAUTIOUS and POKE access, notify FMA to
			 * cleanup. Imitate a cpu/mem trap call like in sun4u.
			 */
			px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
			    (hp ? DDI_FM_ERR_EXPECTED :
			    DDI_FM_ERR_POKE), ra);

			pec_p->pec_ontrap_data = NULL;
			pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
			if (hp) {
				i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
				    (ddi_acc_handle_t)hp);
			} else {
				mutex_exit(&pec_p->pec_pokefault_mutex);
			}
			goto done;
		}

		pec_p->pec_ontrap_data = NULL;
		pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
		if (hp) {
			i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
		} else {
			mutex_exit(&pec_p->pec_pokefault_mutex);
		}

		host_addr += size;

		if (in_args->flags == DDI_DEV_AUTOINCR) {
			dev_addr += size;
			ra = (r_addr_t)va_to_pa((void *)dev_addr);
		}
	}

done:
	return (err);
}


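/*
 * Do a safe read from a device: the read-side counterpart of
 * px_lib_ctlops_poke() above. With a handle (cautious access) all
 * errors are suppressed; without one (peek), the host buffer is
 * filled with 0xff on failure. In all cases, errors are returned in
 * the function return status. Illustrative only, a leaf driver would
 * typically get here via the DDI safe-access routines ("regp" as in
 * the sketch above), e.g.:
 *
 *	int32_t v;
 *	if (ddi_peek32(rdip, (int32_t *)regp, &v) != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "peek faulted");
 */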
/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

	size_t repcount = in_args->repcount;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;

	r_addr_t ra;
	uint32_t read_status;
	uint64_t hvio_peek_status;
	uint64_t peekval;
	int err = DDI_SUCCESS;

	result = (void *)in_args->host_addr;

	ra = (r_addr_t)va_to_pa((void *)dev_addr);
	for (; repcount; repcount--) {

		/* Lock pokefault mutex so read doesn't mask a poke fault. */
		if (hp) {
			i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
			pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
		} else {
			mutex_enter(&pec_p->pec_pokefault_mutex);
			pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
		}

		hvio_peek_status = hvio_peek(px_p->px_dev_hdl, ra,
		    in_args->size, &read_status, &peekval);

		if ((hvio_peek_status != H_EOK) || (read_status != H_EOK)) {
			err = DDI_FAILURE;

			/*
			 * For CAUTIOUS and PEEK access, notify FMA to
			 * cleanup. Imitate a cpu/mem trap call like in sun4u.
			 */
			px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
			    (hp ? DDI_FM_ERR_EXPECTED :
			    DDI_FM_ERR_PEEK), ra);

			/* Stuff FFs in host addr if peek. */
			if (hp == NULL) {
				int i;
				uint8_t *ff_addr = (uint8_t *)host_addr;
				for (i = 0; i < in_args->size; i++)
					*ff_addr++ = 0xff;
			}
#ifdef DEBUG
			px_peekfault_cnt++;
#endif
			pec_p->pec_ontrap_data = NULL;
			pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
			if (hp) {
				i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
				    (ddi_acc_handle_t)hp);
			} else {
				mutex_exit(&pec_p->pec_pokefault_mutex);
			}
			goto done;
		}

		pec_p->pec_ontrap_data = NULL;
		pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
		if (hp) {
			i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
		} else {
			mutex_exit(&pec_p->pec_pokefault_mutex);
		}

		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)host_addr = (uint8_t)peekval;
			break;
		case sizeof (uint16_t):
			*(uint16_t *)host_addr = (uint16_t)peekval;
			break;
		case sizeof (uint32_t):
			*(uint32_t *)host_addr = (uint32_t)peekval;
			break;
		case sizeof (uint64_t):
			*(uint64_t *)host_addr = (uint64_t)peekval;
			break;
		default:
			DBG(DBG_MAP, px_p->px_dip,
			    "peek: invalid size %d passed\n",
			    in_args->size);
			err = DDI_FAILURE;
			goto done;
		}

		host_addr += in_args->size;

		if (in_args->flags == DDI_DEV_AUTOINCR) {
			dev_addr += in_args->size;
			ra = (r_addr_t)va_to_pa((void *)dev_addr);
		}
	}
done:
	return (err);
}


/* add interrupt vector */
int
px_err_add_intr(px_fault_t *px_fault_p)
{
	px_t *px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_err_add_intr: calling add_ivintr");

	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL,
	    (caddr_t)&px_fault_p->px_intr_payload[0]) == 0);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_err_add_intr: ib_intr_enable ");

	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

	return (DDI_SUCCESS);
}

/* remove interrupt vector */
void
px_err_rem_intr(px_fault_t *px_fault_p)
{
	px_t *px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
	    IB_INTR_WAIT);

	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
}

void
px_cb_intr_redist(void *arg)
{
	px_t *px_p = (px_t *)arg;
	px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
	    px_p->px_inos[PX_INTR_XBC], B_FALSE);
}

int
px_cb_add_intr(px_fault_t *f_p)
{
	px_t *px_p = DIP_TO_STATE(f_p->px_fh_dip);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_cb_add_intr: calling add_ivintr");

	VERIFY(add_ivintr(f_p->px_fh_sysino, PX_ERR_PIL,
	    (intrfunc)f_p->px_err_func, (caddr_t)f_p, NULL,
	    (caddr_t)&f_p->px_intr_payload[0]) == 0);

	intr_dist_add(px_cb_intr_redist, px_p);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_cb_add_intr: ib_intr_enable ");

	px_ib_intr_enable(px_p, intr_dist_cpuid(), f_p->px_intr_ino);

	return (DDI_SUCCESS);
}

void
px_cb_rem_intr(px_fault_t *f_p)
{
	intr_dist_rem(px_cb_intr_redist, DIP_TO_STATE(f_p->px_fh_dip));
	px_err_rem_intr(f_p);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	px_pec_err_t *err_pkt;

	err_pkt = (px_pec_err_t *)px_fault_p->px_intr_payload;

	/* initialise all the structure members */
	rc_status->status_valid = 0;

	if (err_pkt->pec_descr.P) {
		/* PCI Status Register */
		rc_status->pci_err_status = err_pkt->pci_err_status;
		rc_status->status_valid |= PCI_ERR_STATUS_VALID;
	}

	if (err_pkt->pec_descr.E) {
		/* PCIe Status Register */
		rc_status->pcie_err_status = err_pkt->pcie_err_status;
		rc_status->status_valid |= PCIE_ERR_STATUS_VALID;
	}

	if (err_pkt->pec_descr.U) {
		rc_status->ue_status = err_pkt->ue_reg_status;
		rc_status->status_valid |= UE_STATUS_VALID;
	}

	if (err_pkt->pec_descr.H) {
		rc_status->ue_hdr1 = err_pkt->hdr[0];
		rc_status->status_valid |= UE_HDR1_VALID;
	}

	if (err_pkt->pec_descr.I) {
		rc_status->ue_hdr2 = err_pkt->hdr[1];
		rc_status->status_valid |= UE_HDR2_VALID;
	}

	/* ue_fst_err_ptr - not available for sun4v?? */

	if (err_pkt->pec_descr.S) {
		rc_status->source_id = err_pkt->err_src_reg;
		rc_status->status_valid |= SOURCE_ID_VALID;
	}

	if (err_pkt->pec_descr.R) {
		rc_status->root_err_status = err_pkt->root_err_status;
		rc_status->status_valid |= CE_STATUS_VALID;
	}
}
#endif

/*ARGSUSED*/
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
uint_t
px_pmeq_intr(caddr_t arg)
{
	return (DDI_INTR_CLAIMED);
}

/*
 * Fetch the config space base address of the root complex.
 * Note that this depends on the px structure having been initialized.
 */
uint64_t
px_lib_get_cfgacc_base(dev_info_t *dip)
{
	int instance = DIP_TO_INST(dip);
	px_t *px_p = INST_TO_STATE(instance);

	return (px_p->px_dev_hdl);
}

void
px_panic_domain(px_t *px_p, pcie_req_id_t bdf)
{
	uint64_t ret;
	dev_info_t *dip = px_p->px_dip;

	DBG(DBG_ERR_INTR, dip, "px_panic_domain: handle 0x%lx, ino %d, "
	    "bdf<<8 0x%lx\n",
	    (uint64_t)DIP_TO_HANDLE(dip), px_p->px_cb_fault.px_intr_ino,
	    (pci_device_t)bdf << 8);
	if ((ret = pci_error_send(DIP_TO_HANDLE(dip),
	    px_p->px_cb_fault.px_intr_ino, (pci_device_t)bdf << 8)) != H_EOK) {
		DBG(DBG_ERR_INTR, dip, "pci_error_send failed, ret 0x%lx\n",
		    ret);
	} else
		DBG(DBG_ERR_INTR, dip, "pci_error_send worked\n");
}

/*ARGSUSED*/
int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	return (DDI_ENOTSUP);
}

/*ARGSUSED*/
void
px_lib_hotplug_uninit(dev_info_t *dip)
{
}

/*ARGSUSED*/
void
px_hp_intr_redist(px_t *px_p)
{
}

/* Dummy cpr add callback */
/*ARGSUSED*/
void
px_cpr_add_callb(px_t *px_p)
{
}

/* Dummy cpr rem callback */
/*ARGSUSED*/
void
px_cpr_rem_callb(px_t *px_p)
{
}

/*ARGSUSED*/
boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
	return (B_FALSE);
}

/*
 * There is currently no IOAPI to get the BDF of the PCIe root port nexus.
 * Assume it is 0x0000 until otherwise noted; for now, all sun4v platforms
 * program the BDF to 0x0000.
 */
/*ARGSUSED*/
pcie_req_id_t
px_lib_get_bdf(px_t *px_p)
{
	return (0x0000);
}

int
px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
{
	pci_device_t bdf = px_lib_get_bdf(px_p);

	if (hvio_get_rp_mps_cap(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
		return (DDI_SUCCESS);
	else
		return (DDI_FAILURE);
}

int
px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
{
	pci_device_t bdf = px_lib_get_bdf(px_p);

	if (hvio_set_rp_mps(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
		return (DDI_SUCCESS);
	else
		return (DDI_FAILURE);
}

static int
px_lib_do_count_waiting_dev(dev_info_t *dip, void *arg)
{
	int *count = (int *)arg;
	dev_info_t *cdip = ddi_get_child(dip);

	while (cdip != NULL) {
		/* check if this is an assigned device */
		if (ddi_prop_exists(DDI_DEV_T_NONE, cdip, DDI_PROP_DONTPASS,
		    "ddi-assigned")) {
			DBG(DBG_ATTACH, dip, "px_lib_do_count_waiting_dev: "
			    "Found an assigned dev %p, under bridge %p",
			    cdip, dip);

			/*
			 * Mark this bridge as one that must be waited
			 * for. CHILD_LOANED will be removed after the
			 * bridge reports its readiness back to the px
			 * driver.
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
			    CHILD_LOANED, 1) == DDI_PROP_SUCCESS)
				(*count)++;
			break;
		}
		cdip = ddi_get_next_sibling(cdip);
	}

	return (DDI_WALK_CONTINUE);
}

static int
px_lib_count_waiting_dev(dev_info_t *dip)
{
	int count = 0;

	/* No need to continue if this system is not SDIO capable */
	if (px_sdio_users == 0)
		return (0);

	/* see if px itself has assigned children */
	(void) px_lib_do_count_waiting_dev(dip, &count);

	/* scan devices under this px */
	ndi_devi_enter(dip);
	ddi_walk_devs(ddi_get_child(dip), px_lib_do_count_waiting_dev, &count);
	ndi_devi_exit(dip);
	return (count);
}

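/*
 * Overview of the handshake (for the reader's orientation):
 * px_lib_count_waiting_dev() above tags each bridge that has an
 * assigned ("loaned") child with the CHILD_LOANED property, and the
 * returned bridge count is stashed in px->px_plat_p (read back in
 * px_lib_fabric_sync() below). Each such bridge calls
 * px_lib_fabric_sync() to report readiness; once the count drops to
 * zero, the hypervisor is told via pci_iov_root_configured() that the
 * root complex is fully configured.
 */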
/* Called from the px/bridge driver directly to report its readiness */
int
px_lib_fabric_sync(dev_info_t *dip)
{
	px_t *px;
	dev_info_t *rcdip;
	int waitdev;

	/* No need to continue if this system is not SDIO capable */
	if (px_sdio_users == 0)
		return (DDI_SUCCESS);

	/* a valid bridge w/ an assigned dev under it? */
	if (ddi_prop_remove(DDI_DEV_T_NONE, dip, CHILD_LOANED) !=
	    DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/* find the RC dip */
	for (rcdip = dip; rcdip != NULL; rcdip = ddi_get_parent(rcdip)) {
		if (PCIE_DIP2BUS(rcdip) && PCIE_IS_RC(PCIE_DIP2BUS(rcdip)))
			break;
	}
	if ((rcdip == NULL) || ((px = (px_t *)DIP_TO_STATE(rcdip)) == NULL))
		return (DDI_FAILURE);

	/* are we ready? */
	waitdev = (int)(uintptr_t)px->px_plat_p;
	ASSERT(waitdev);
	DBG(DBG_CTLOPS, rcdip, "px_lib_fabric_sync: "
	    "Px/bridge %p is ready, %d left", rcdip, waitdev - 1);
	--waitdev;
	px->px_plat_p = (void *)(uintptr_t)waitdev;
	if (waitdev != 0)
		return (DDI_SUCCESS);

	/* notify hypervisor */
	DBG(DBG_CTLOPS, rcdip, "px_lib_fabric_sync: "
	    "Notifying HV that RC %p is ready users=%d", rcdip, px_sdio_users);

	if (pci_iov_root_configured(px->px_dev_hdl) != H_EOK)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}