1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
26 */
27
28 /*
29 * fcpci.c: Framework PCI fcode ops
30 */
31 #include <sys/types.h>
32 #include <sys/kmem.h>
33 #include <sys/systm.h>
34 #include <sys/pci.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/sunndi.h>
38 #include <sys/ddidmareq.h>
39 #include <sys/pci.h>
40 #include <sys/modctl.h>
41 #include <sys/ndi_impldefs.h>
42 #include <sys/fcode.h>
43 #include <sys/promif.h>
44 #include <sys/promimpl.h>
45 #include <sys/ddi_implfuncs.h>
46
47 #define PCI_NPT_bits (PCI_RELOCAT_B | PCI_PREFETCH_B | PCI_ALIAS_B)
48 #define PCI_BDF_bits (PCI_REG_BDFR_M & ~PCI_REG_REG_M)
49
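/*
 * Flag set in the local 'flags' word when fcpci_indirect_map() reports
 * that the child's config space is indirectly mapped; in that case the
 * probe below uses ddi_get32() through the mapped handle rather than
 * ddi_peek32().
 */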
50 #define PCICFG_CONF_INDIRECT_MAP 1
51
52 static int pfc_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
53 static int pfc_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
54 static int pfc_dma_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
55 static int pfc_dma_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
56 static int pfc_dma_sync(dev_info_t *, fco_handle_t, fc_ci_t *);
57 static int pfc_dma_cleanup(dev_info_t *, fco_handle_t, fc_ci_t *);
58
59 static int pfc_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
60 static int pfc_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
61 static int pfc_config_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
62 static int pfc_config_store(dev_info_t *, fco_handle_t, fc_ci_t *);
63
64 static int pfc_probe_address(dev_info_t *, fco_handle_t, fc_ci_t *);
65 static int pfc_probe_space(dev_info_t *, fco_handle_t, fc_ci_t *);
66
67 static int pfc_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
68 static int pfc_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
69 static int pfc_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
70 int prom_get_fcode_size(char *);
71 int prom_get_fcode(char *, char *);
72 int pfc_update_assigned_prop(dev_info_t *, pci_regspec_t *);
73 int pfc_remove_assigned_prop(dev_info_t *, pci_regspec_t *);
74 int pci_alloc_resource(dev_info_t *, pci_regspec_t);
75 int pci_free_resource(dev_info_t *, pci_regspec_t);
76 int pci_alloc_mem_chunk(dev_info_t *, uint64_t, uint64_t *, uint64_t *);
77 int pci_alloc_io_chunk(dev_info_t *, uint64_t, uint64_t *, uint64_t *);
78 static int fcpci_indirect_map(dev_info_t *);
79
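/*
 * Non-zero allows this module to be unloaded; _fini() returns EBUSY
 * while it remains zero.
 */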
80 int fcpci_unloadable;
81
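/*
 * DMA attributes used for FCode dma-map-in requests: single-cookie,
 * byte-aligned mappings confined below 4GB, since the device address
 * handed back to FCode is a single 32-bit cell.
 */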
82 static ddi_dma_attr_t fcpci_dma_attr = {
83 DMA_ATTR_V0, /* version number */
84 0x0, /* lowest usable address */
85 0xFFFFFFFFull, /* high DMA address range */
86 0xFFFFFFFFull, /* DMA counter register */
87 1, /* DMA address alignment */
88 1, /* DMA burstsizes */
89 1, /* min effective DMA size */
90 0xFFFFFFFFull, /* max DMA xfer size */
91 0xFFFFFFFFull, /* segment boundary */
92 1, /* s/g list length */
93 1, /* granularity of device */
94 0 /* DMA transfer flags */
95 };
96
97 #define	HIADDR(n) ((uint32_t)(((uint64_t)(n) & 0xFFFFFFFF00000000) >> 32))
98 #define	LOADDR(n) ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
99 #define LADDR(lo, hi) (((uint64_t)(hi) << 32) | (uint32_t)(lo))
100 #define PCI_4GIG_LIMIT 0xFFFFFFFFUL
101 #define PCI_MEMGRAN 0x100000
102 #define PCI_IOGRAN 0x1000
103
104
105 /*
106 * Module linkage information for the kernel.
107 */
108 static struct modlmisc modlmisc = {
109 &mod_miscops, "FCode pci bus functions"
110 };
111
112 static struct modlinkage modlinkage = {
113 MODREV_1, (void *)&modlmisc, NULL
114 };
115
116 int
117 _init(void)
118 {
119 return (mod_install(&modlinkage));
120 }
121
122 int
123 _fini(void)
124 {
125 if (fcpci_unloadable)
126 return (mod_remove(&modlinkage));
127 return (EBUSY);
128 }
129
130 int
131 _info(struct modinfo *modinfop)
132 {
133 return (mod_info(&modlinkage, modinfop));
134 }
135
136
137 struct pfc_ops_v {
138 char *svc_name;
139 fc_ops_t *f;
140 };
141
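/*
 * Dispatch table mapping FCode client-interface service names to the
 * local handlers below.  pci_fc_ops() consults this table when the
 * generic fc_ops() handler declines a request.
 */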
142 static struct pfc_ops_v pov[] = {
143 { "map-in", pfc_map_in},
144 { "map-out", pfc_map_out},
145 { "dma-map-in", pfc_dma_map_in},
146 { "dma-map-out", pfc_dma_map_out},
147 { "dma-sync", pfc_dma_sync},
148 { "rx@", pfc_register_fetch},
149 { "rl@", pfc_register_fetch},
150 { "rw@", pfc_register_fetch},
151 { "rb@", pfc_register_fetch},
152 { "rx!", pfc_register_store},
153 { "rl!", pfc_register_store},
154 { "rw!", pfc_register_store},
155 { "rb!", pfc_register_store},
156 { "config-l@", pfc_config_fetch},
157 { "config-w@", pfc_config_fetch},
158 { "config-b@", pfc_config_fetch},
159 { "config-l!", pfc_config_store},
160 { "config-w!", pfc_config_store},
161 { "config-b!", pfc_config_store},
162 { FC_PROBE_ADDRESS, pfc_probe_address},
163 { FC_PROBE_SPACE, pfc_probe_space},
164 { FC_SVC_EXIT, pfc_dma_cleanup},
165 { FC_CONFIG_CHILD, pfc_config_child},
166 { FC_GET_FCODE_SIZE, pfc_get_fcode_size},
167 { FC_GET_FCODE, pfc_get_fcode},
168 { NULL, NULL}
169 };
170
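/*
 * Ops in this table are also run locally after the generic fc_ops()
 * handler has already serviced them (see pci_fc_ops()).
 */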
171 static struct pfc_ops_v shared_pov[] = {
172 { FC_SVC_EXIT, pfc_dma_cleanup},
173 { NULL, NULL}
174 };
175
176 int pci_map_phys(dev_info_t *, pci_regspec_t *,
177 caddr_t *, ddi_device_acc_attr_t *, ddi_acc_handle_t *);
178
179 void pci_unmap_phys(ddi_acc_handle_t *, pci_regspec_t *);
180
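/*
 * Typical usage by a PCI nexus driver interpreting a child's FCode
 * (a minimal sketch; the interpreter setup is omitted and the name
 * 'bus_args' is illustrative):
 *
 *	fco_handle_t fh;
 *
 *	fh = pci_fc_ops_alloc_handle(ap, child, fcode, fcode_size,
 *	    unit_address, &bus_args);
 *	... the interpreter calls pci_fc_ops(ap, fh, cp) per request ...
 *	pci_fc_ops_free_handle(fh);
 */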
181 fco_handle_t
182 pci_fc_ops_alloc_handle(dev_info_t *ap, dev_info_t *child,
183 void *fcode, size_t fcode_size, char *unit_address,
184 struct pci_ops_bus_args *up)
185 {
186 fco_handle_t rp;
187 struct pci_ops_bus_args *bp = NULL;
188 phandle_t h;
189
190 rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
191 rp->next_handle = fc_ops_alloc_handle(ap, child, fcode, fcode_size,
192 unit_address, NULL);
193 rp->ap = ap;
194 rp->child = child;
195 rp->fcode = fcode;
196 rp->fcode_size = fcode_size;
197 if (unit_address) {
198 char *buf;
199
200 buf = kmem_zalloc(strlen(unit_address) + 1, KM_SLEEP);
201 (void) strcpy(buf, unit_address);
202 rp->unit_address = buf;
203 }
204
205 bp = kmem_zalloc(sizeof (struct pci_ops_bus_args), KM_SLEEP);
206 *bp = *up;
207 rp->bus_args = bp;
208
209 /*
210 * Add the child's nodeid to our table...
211 */
212 h = ddi_get_nodeid(rp->child);
213 fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
214
215 return (rp);
216 }
217
218 void
219 pci_fc_ops_free_handle(fco_handle_t rp)
220 {
221 struct pci_ops_bus_args *bp;
222 struct fc_resource *ip, *np;
223
224 ASSERT(rp);
225
226 if (rp->next_handle)
227 fc_ops_free_handle(rp->next_handle);
228 if (rp->unit_address)
229 kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
230 if ((bp = rp->bus_args) != NULL)
231 kmem_free(bp, sizeof (struct pci_ops_bus_args));
232
233 /*
234 * Release all the resources from the resource list
235 * XXX: We don't handle 'unknown' types, but we don't create them.
236 */
237 for (ip = rp->head; ip != NULL; ip = np) {
238 np = ip->next;
239 switch (ip->type) {
240 case RT_MAP:
241 FC_DEBUG1(1, CE_CONT, "pci_fc_ops_free: "
242 "pci_unmap_phys(%p)\n", ip->fc_map_handle);
243 pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
244 kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
245 break;
246 case RT_DMA:
247 /* DMA has to be freed up at exit time */
248 cmn_err(CE_CONT, "pfc_fc_ops_free: DMA seen!\n");
249 break;
250 default:
251 cmn_err(CE_CONT, "pci_fc_ops_free: "
252 "unknown resource type %d\n", ip->type);
253 break;
254 }
255 fc_rem_resource(rp, ip);
256 kmem_free(ip, sizeof (struct fc_resource));
257 }
258 kmem_free(rp, sizeof (struct fc_resource_list));
259 }
260
261 int
262 pci_fc_ops(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
263 {
264 struct pfc_ops_v *pv;
265 char *name = fc_cell2ptr(cp->svc_name);
266
267 ASSERT(rp);
268
269 /*
270 * First try the generic fc_ops. If the ops is a shared op,
271 * also call our local function.
272 */
273 if (fc_ops(ap, rp->next_handle, cp) == 0) {
274 for (pv = shared_pov; pv->svc_name != NULL; ++pv)
275 if (strcmp(pv->svc_name, name) == 0)
276 return (pv->f(ap, rp, cp));
277 return (0);
278 }
279
280 for (pv = pov; pv->svc_name != NULL; ++pv)
281 if (strcmp(pv->svc_name, name) == 0)
282 return (pv->f(ap, rp, cp));
283
284 FC_DEBUG1(9, CE_CONT, "pci_fc_ops: <%s> not serviced\n", name);
285
286 return (-1);
287 }
288
289 /*
290 * Create a dma mapping for a given user address.
291 */
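/*
 * Arguments (from FCode): arg 2 is the user virtual address, arg 1 the
 * length and arg 0 a cacheable flag (currently ignored).  The single
 * result is the 32-bit device (DMA) address.
 */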
292 static int
293 pfc_dma_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
294 {
295 ddi_dma_handle_t h;
296 int error;
297 caddr_t virt;
298 size_t len;
299 uint_t flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
300 struct fc_resource *ip;
301 ddi_dma_cookie_t c;
302 struct buf *bp;
303 uint_t ccnt;
304
305 if (fc_cell2int(cp->nargs) != 3)
306 return (fc_syntax_error(cp, "nargs must be 3"));
307
308 if (fc_cell2int(cp->nresults) < 1)
309 return (fc_syntax_error(cp, "nresults must be >= 1"));
310
311 /*
312 * XXX: It's not clear what we should do with a non-cacheable request
313 */
314 virt = fc_cell2ptr(fc_arg(cp, 2));
315 len = fc_cell2size(fc_arg(cp, 1));
316 #ifdef notdef
317 cacheable = fc_cell2int(fc_arg(cp, 0)); /* XXX: do what? */
318 #endif
319
320 FC_DEBUG2(6, CE_CONT, "pfc_dma_map_in: virt %p, len %d\n", virt, len);
321
322 /*
323 * Set up the address space for physio from userland
324 */
325 error = fc_physio_setup(&bp, virt, len);
326
327 if (error) {
328 FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: fc_physio_setup failed "
329 "error: %d virt: %p len %d\n", error, virt, len);
330 return (fc_priv_error(cp, "fc_physio_setup failed"));
331 }
332
333 FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: dma_map_in; bp = %p\n", bp);
334 error = fc_ddi_dma_alloc_handle(ap, &fcpci_dma_attr, DDI_DMA_SLEEP,
335 NULL, &h);
336 if (error != DDI_SUCCESS) {
337 FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
338 "error: %d virt: %p len %d\n", error, virt, len);
339 return (fc_priv_error(cp, "real dma-map-in failed"));
340 }
341
342 error = fc_ddi_dma_buf_bind_handle(h, bp, flags, DDI_DMA_SLEEP, NULL,
343 &c, &ccnt);
344 if ((error != DDI_DMA_MAPPED) || (ccnt != 1)) {
345 fc_ddi_dma_free_handle(&h);
346 FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
347 "error: %d virt: %p len %d\n", error, virt, len);
348 return (fc_priv_error(cp, "real dma-map-in failed"));
349 }
350
351 if (c.dmac_size < len) {
352 error = fc_ddi_dma_unbind_handle(h);
353 if (error != DDI_SUCCESS) {
354 return (fc_priv_error(cp, "ddi_dma_unbind error"));
355 }
356 fc_ddi_dma_free_handle(&h);
357 return (fc_priv_error(cp, "ddi_dma_buf_bind size < len"));
358 }
359
360 FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: returning devaddr %x\n",
361 c.dmac_address);
362
363 cp->nresults = fc_int2cell(1);
364 fc_result(cp, 0) = fc_uint32_t2cell(c.dmac_address); /* XXX size */
365
366 /*
367 * Now we have to log this resource saving the handle and buf header
368 */
369 ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
370 ip->type = RT_DMA;
371 ip->fc_dma_virt = virt;
372 ip->fc_dma_len = len;
373 ip->fc_dma_handle = h;
374 ip->fc_dma_devaddr = c.dmac_address;
375 ip->fc_dma_bp = bp;
376 fc_add_resource(rp, ip);
377
378 return (fc_success_op(ap, rp, cp));
379 }
380
381 static int
382 pfc_dma_sync(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
383 {
384 void *virt;
385 size_t len;
386 uint32_t devaddr;
387 int error;
388 struct fc_resource *ip;
389
390 if (fc_cell2int(cp->nargs) != 3)
391 return (fc_syntax_error(cp, "nargs must be 3"));
392
393 virt = fc_cell2ptr(fc_arg(cp, 2));
394 devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
395 len = fc_cell2size(fc_arg(cp, 0));
396
397 /*
398 * Find if this virt is 'within' a request we know about
399 */
400 fc_lock_resource_list(rp);
401 for (ip = rp->head; ip != NULL; ip = ip->next) {
402 if (ip->type != RT_DMA)
403 continue;
404 if (ip->fc_dma_devaddr != devaddr)
405 continue;
406 if (((char *)virt >= (char *)ip->fc_dma_virt) &&
407 (((char *)virt + len) <=
408 ((char *)ip->fc_dma_virt + ip->fc_dma_len)))
409 break;
410 }
411 fc_unlock_resource_list(rp);
412
413 if (ip == NULL)
414 return (fc_priv_error(cp, "request not within a "
415 "known dma mapping"));
416
417 /*
418 * We know about this request, so we trust it enough to sync it.
419 * Unfortunately, we don't know which direction, so we'll do
420 * both directions.
421 */
422
423 error = fc_ddi_dma_sync(ip->fc_dma_handle,
424 (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORCPU);
425 error |= fc_ddi_dma_sync(ip->fc_dma_handle,
426 (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORDEV);
427
428 if (error)
429 return (fc_priv_error(cp, "Call to ddi_dma_sync failed"));
430
431 cp->nresults = fc_int2cell(0);
432 return (fc_success_op(ap, rp, cp));
433 }
434
435 static int
436 pfc_dma_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
437 {
438 void *virt;
439 size_t len;
440 uint32_t devaddr;
441 struct fc_resource *ip;
442 int e;
443
444 if (fc_cell2int(cp->nargs) != 3)
445 return (fc_syntax_error(cp, "nargs must be 3"));
446
447 virt = fc_cell2ptr(fc_arg(cp, 2));
448 devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
449 len = fc_cell2size(fc_arg(cp, 0));
450
451 /*
452 * Find if this virt matches a request we know about
453 */
454 fc_lock_resource_list(rp);
455 for (ip = rp->head; ip != NULL; ip = ip->next) {
456 if (ip->type != RT_DMA)
457 continue;
458 if (ip->fc_dma_devaddr != devaddr)
459 continue;
460 if (ip->fc_dma_virt != virt)
461 continue;
462 if (len == ip->fc_dma_len)
463 break;
464 }
465 fc_unlock_resource_list(rp);
466
467 if (ip == NULL)
468 return (fc_priv_error(cp, "request doesn't match a "
469 "known dma mapping"));
470
471 /*
472 * ddi_dma_unbind_handle does an implied sync ...
473 */
474 e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
475 if (e != DDI_SUCCESS) {
476 cmn_err(CE_CONT, "pfc_dma_map_out: ddi_dma_unbind failed!\n");
477 }
478 fc_ddi_dma_free_handle(&ip->fc_dma_handle);
479
480 /*
481 * Tear down the physio mappings
482 */
483 fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
484
485 /*
486 * remove the resource from the list and release it.
487 */
488 fc_rem_resource(rp, ip);
489 kmem_free(ip, sizeof (struct fc_resource));
490
491 cp->nresults = fc_int2cell(0);
492 return (fc_success_op(ap, rp, cp));
493 }
494
495 static struct fc_resource *
496 next_dma_resource(fco_handle_t rp)
497 {
498 struct fc_resource *ip;
499
500 fc_lock_resource_list(rp);
501 for (ip = rp->head; ip != NULL; ip = ip->next)
502 if (ip->type == RT_DMA)
503 break;
504 fc_unlock_resource_list(rp);
505
506 return (ip);
507 }
508
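/*
 * Invoked for FC_SVC_EXIT: release every DMA mapping still outstanding
 * on this handle, i.e. any mapping the FCode never passed to
 * dma-map-out.
 */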
509 static int
510 pfc_dma_cleanup(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
511 {
512 struct fc_resource *ip;
513 int e;
514
515 while ((ip = next_dma_resource(rp)) != NULL) {
516
517 FC_DEBUG2(9, CE_CONT, "pfc_dma_cleanup: virt %x len %x\n",
518 ip->fc_dma_virt, ip->fc_dma_len);
519
520 /*
521 * Free the dma handle
522 */
523 e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
524 if (e != DDI_SUCCESS) {
525 cmn_err(CE_CONT, "pfc_dma_cleanup: "
526 "ddi_dma_unbind failed!\n");
527 }
528 fc_ddi_dma_free_handle(&ip->fc_dma_handle);
529
530 /*
531 * Tear down the userland mapping and free the buf header
532 */
533 fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
534
535 fc_rem_resource(rp, ip);
536 kmem_free(ip, sizeof (struct fc_resource));
537 }
538
539 cp->nresults = fc_int2cell(0);
540 return (fc_success_op(ap, rp, cp));
541 }
542
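/*
 * map-in: allocate the PCI resource described by args 1..3
 * (phys.hi, phys.mid, phys.lo) and arg 0 (len), map it, and return the
 * kernel virtual address of the mapping.
 */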
543 static int
544 pfc_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
545 {
546 size_t len;
547 int error;
548 caddr_t virt;
549 pci_regspec_t p, *ph;
550 struct fc_resource *ip;
551 ddi_device_acc_attr_t acc;
552 ddi_acc_handle_t h;
553
554 if (fc_cell2int(cp->nargs) != 4)
555 return (fc_syntax_error(cp, "nargs must be 4"));
556
557 if (fc_cell2int(cp->nresults) < 1)
558 return (fc_syntax_error(cp, "nresults must be >= 1"));
559
560 p.pci_size_hi = 0;
561 p.pci_size_low = len = fc_cell2size(fc_arg(cp, 0));
562
563 p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 1));
564 p.pci_phys_mid = fc_cell2uint(fc_arg(cp, 2));
565 p.pci_phys_low = fc_cell2uint(fc_arg(cp, 3));
566
567 acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
568
569 /*
570 * Fcode expects the bytes not to be swapped.
571 */
572 acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
573 acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
574
575 /*
576 * First we need to allocate the PCI resource.
577 */
578 error = pci_alloc_resource(rp->child, p);
579
580 if (error) {
581 return (fc_priv_error(cp, "pci map-in failed"));
582 }
583
584 error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
585
586 if (error) {
587 return (fc_priv_error(cp, "pci map-in failed"));
588 }
589
590 cp->nresults = fc_int2cell(1);
591 fc_result(cp, 0) = fc_ptr2cell(virt);
592
593 /*
594 * Log this resource ...
595 */
596 ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
597 ip->type = RT_MAP;
598 ip->fc_map_virt = virt;
599 ip->fc_map_len = len;
600 ip->fc_map_handle = h;
601 ph = kmem_zalloc(sizeof (pci_regspec_t), KM_SLEEP);
602 *ph = p;
603 ip->fc_regspec = ph; /* cache a copy of the reg spec */
604 fc_add_resource(rp, ip);
605
606 return (fc_success_op(ap, rp, cp));
607 }
608
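/*
 * map-out: release a mapping previously established by map-in; arg 1
 * is the virtual address and arg 0 the length.
 */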
609 static int
610 pfc_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
611 {
612 caddr_t virt;
613 size_t len;
614 struct fc_resource *ip;
615
616 if (fc_cell2int(cp->nargs) != 2)
617 return (fc_syntax_error(cp, "nargs must be 2"));
618
619 virt = fc_cell2ptr(fc_arg(cp, 1));
620
621 len = fc_cell2size(fc_arg(cp, 0));
622
623 /*
624 * Find if this request matches a mapping resource we set up.
625 */
626 fc_lock_resource_list(rp);
627 for (ip = rp->head; ip != NULL; ip = ip->next) {
628 if (ip->type != RT_MAP)
629 continue;
630 if (ip->fc_map_virt != virt)
631 continue;
632 if (ip->fc_map_len == len)
633 break;
634 }
635 fc_unlock_resource_list(rp);
636
637 if (ip == NULL)
638 return (fc_priv_error(cp, "request doesn't match a "
639 "known mapping"));
640
641 pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
642
643 kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
644
645 /*
646 * remove the resource from the list and release it.
647 */
648 fc_rem_resource(rp, ip);
649 kmem_free(ip, sizeof (struct fc_resource));
650
651 cp->nresults = fc_int2cell(0);
652 return (fc_success_op(ap, rp, cp));
653 }
654
655 static int
656 pfc_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
657 {
658 size_t len;
659 caddr_t virt;
660 int error;
661 uint64_t x;
662 uint32_t l;
663 uint16_t w;
664 uint8_t b;
665 char *name = fc_cell2ptr(cp->svc_name);
666 struct fc_resource *ip;
667
668 if (fc_cell2int(cp->nargs) != 1)
669 return (fc_syntax_error(cp, "nargs must be 1"));
670
671 if (fc_cell2int(cp->nresults) < 1)
672 return (fc_syntax_error(cp, "nresults must be >= 1"));
673
674 virt = fc_cell2ptr(fc_arg(cp, 0));
675
676 /*
677 * Determine the access width .. we can switch on the 2nd
678 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
679 */
680 switch (*(name + 1)) {
681 case 'x': len = sizeof (x); break;
682 case 'l': len = sizeof (l); break;
683 case 'w': len = sizeof (w); break;
684 case 'b': len = sizeof (b); break;
685 }
686
687 /*
688 * Check the alignment ...
689 */
690 if (((intptr_t)virt & (len - 1)) != 0)
691 return (fc_priv_error(cp, "unaligned access"));
692
693 /*
694 * Find if this virt is 'within' a request we know about
695 */
696 fc_lock_resource_list(rp);
697 for (ip = rp->head; ip != NULL; ip = ip->next) {
698 if (ip->type != RT_MAP)
699 continue;
700 if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
701 ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
702 break;
703 }
704 fc_unlock_resource_list(rp);
705
706 if (ip == NULL)
707 return (fc_priv_error(cp, "request not within a "
708 "known mapping"));
709
710 /*
711 * XXX: We need access handle versions of peek/poke to move
712 * beyond the prototype ... we assume that we have hardware
713 * byte swapping enabled for pci register access here which
714 * is a huge dependency on the current implementation.
715 */
716 switch (len) {
717 case sizeof (x):
718 error = ddi_peek64(rp->child, (int64_t *)virt, (int64_t *)&x);
719 break;
720 case sizeof (l):
721 error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&l);
722 break;
723 case sizeof (w):
724 error = ddi_peek16(rp->child, (int16_t *)virt, (int16_t *)&w);
725 break;
726 case sizeof (b):
727 error = ddi_peek8(rp->child, (int8_t *)virt, (int8_t *)&b);
728 break;
729 }
730
731 if (error) {
732 return (fc_priv_error(cp, "access error"));
733 }
734
735 cp->nresults = fc_int2cell(1);
736 switch (len) {
737 case sizeof (x): fc_result(cp, 0) = x; break;
738 case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
739 case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
740 case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
741 }
742 return (fc_success_op(ap, rp, cp));
743 }
744
745 static int
746 pfc_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
747 {
748 size_t len;
749 caddr_t virt;
750 int error;
751 uint64_t x;
752 uint32_t l;
753 uint16_t w;
754 uint8_t b;
755 char *name = fc_cell2ptr(cp->svc_name);
756 struct fc_resource *ip;
757
758 if (fc_cell2int(cp->nargs) != 2)
759 return (fc_syntax_error(cp, "nargs must be 2"));
760
761 virt = fc_cell2ptr(fc_arg(cp, 0));
762
763 /*
764 * Determine the access width .. we can switch on the 2nd
765 * character of the name which is "rx!", "rl!", "rw!" or "rb!"
766 */
767 switch (*(name + 1)) {
768 case 'x': len = sizeof (x); x = fc_arg(cp, 1); break;
769 case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
770 case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
771 case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
772 }
773
774 /*
775 * Check the alignment ...
776 */
777 if (((intptr_t)virt & (len - 1)) != 0)
778 return (fc_priv_error(cp, "unaligned access"));
779
780 /*
781 * Find if this virt is 'within' a request we know about
782 */
783 fc_lock_resource_list(rp);
784 for (ip = rp->head; ip != NULL; ip = ip->next) {
785 if (ip->type != RT_MAP)
786 continue;
787 if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
788 ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
789 break;
790 }
791 fc_unlock_resource_list(rp);
792
793 if (ip == NULL)
794 return (fc_priv_error(cp, "request not within a "
795 "known mapping"));
796
797 /*
798 * XXX: We need access handle versions of peek/poke to move
799 * beyond the prototype ... we assume that we have hardware
800 * byte swapping enabled for pci register access here which
801 * is a huge dependency on the current implementation.
802 */
803 switch (len) {
804 case sizeof (x):
805 error = ddi_poke64(rp->child, (int64_t *)virt, x);
806 break;
807 case sizeof (l):
808 error = ddi_poke32(rp->child, (int32_t *)virt, l);
809 break;
810 case sizeof (w):
811 error = ddi_poke16(rp->child, (int16_t *)virt, w);
812 break;
813 case sizeof (b):
814 error = ddi_poke8(rp->child, (int8_t *)virt, b);
815 break;
816 }
817
818 if (error) {
819 return (fc_priv_error(cp, "access error"));
820 }
821
822 cp->nresults = fc_int2cell(0);
823 return (fc_success_op(ap, rp, cp));
824 }
825
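/*
 * config-{l,w,b}@: read a 32/16/8-bit value from the child's PCI
 * configuration space.  arg 0 is the config address in phys.hi format.
 */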
826 static int
827 pfc_config_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
828 {
829 caddr_t virt, v;
830 int error, reg, flags = 0;
831 size_t len;
832 uint32_t l, tmp;
833 uint16_t w;
834 uint8_t b;
835 char *name = fc_cell2ptr(cp->svc_name);
836 pci_regspec_t p;
837 ddi_device_acc_attr_t acc;
838 ddi_acc_handle_t h;
839
840 if (fc_cell2int(cp->nargs) != 1)
841 return (fc_syntax_error(cp, "nargs must be 1"));
842
843 if (fc_cell2int(cp->nresults) < 1)
844 return (fc_syntax_error(cp, "nresults must be >= 1"));
845
846 /*
847 * Construct a config address pci reg property from the args.
848 * arg[0] is the configuration address.
849 */
850 p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
851 p.pci_phys_mid = p.pci_phys_low = 0;
852 p.pci_size_hi = p.pci_size_low = 0;
853
854 /*
855 * Verify that the address is a configuration space address
856 * ss must be zero.
857 */
858 if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
859 cmn_err(CE_CONT, "pfc_config_fetch: "
860 "invalid config addr: %x\n", p.pci_phys_hi);
861 return (fc_priv_error(cp, "non-config addr"));
862 }
863
864 /*
865 * Extract the register number from the config address and
866 * remove the register number from the physical address.
867 */
868
869 reg = (p.pci_phys_hi & PCI_REG_REG_M) |
870 (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
871
872 p.pci_phys_hi &= PCI_BDF_bits;
873
874 /*
875 * Determine the access width .. we can switch on the 8th
876 * character of the name which is "config-{l,w,b}@"
877 */
878 switch (*(name + 7)) {
879 case 'l': len = sizeof (l); break;
880 case 'w': len = sizeof (w); break;
881 case 'b': len = sizeof (b); break;
882 }
883
884 /*
885 * Verify that the access is properly aligned
886 */
887 if ((reg & (len - 1)) != 0)
888 return (fc_priv_error(cp, "unaligned access"));
889
890 /*
891 * Map in configuration space (temporarily)
892 */
893 acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
894 acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
895 acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
896
897 error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
898
899 if (error) {
900 return (fc_priv_error(cp, "pci config map-in failed"));
901 }
902
903 if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
904 flags |= PCICFG_CONF_INDIRECT_MAP;
905
906 if (flags & PCICFG_CONF_INDIRECT_MAP) {
907 tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
908 error = DDI_SUCCESS;
909 } else
910 error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
911
912 if (error == DDI_SUCCESS)
913 if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
914 error = DDI_FAILURE;
915 cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
916 }
917
918 if (error != DDI_SUCCESS) {
919 return (fc_priv_error(cp, "pci config fetch failed"));
920 }
921
922
923 /*
924 * XXX: We need access handle versions of peek/poke to move
925 * beyond the prototype ... we assume that we have hardware
926 * byte swapping enabled for pci register access here which
927 * is a huge dependency on the current implementation.
928 */
929 v = virt + reg;
930 switch (len) {
931 case sizeof (l):
932 l = (int32_t)ddi_get32(h, (uint32_t *)v);
933 break;
934 case sizeof (w):
935 w = (int16_t)ddi_get16(h, (uint16_t *)v);
936 break;
937 case sizeof (b):
938 b = (int8_t)ddi_get8(h, (uint8_t *)v);
939 break;
940 }
941
942 /*
943 * Remove the temporary config space mapping
944 */
945 pci_unmap_phys(&h, &p);
946
947 if (error) {
948 return (fc_priv_error(cp, "access error"));
949 }
950
951 cp->nresults = fc_int2cell(1);
952 switch (len) {
953 case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
954 case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
955 case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
956 }
957
958 return (fc_success_op(ap, rp, cp));
959 }
960
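/*
 * config-{l,w,b}!: write a 32/16/8-bit value to the child's PCI
 * configuration space.  arg 0 is the config address, arg 1 the data.
 */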
961 static int
962 pfc_config_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
963 {
964 caddr_t virt, v;
965 int error, reg, flags = 0;
966 size_t len;
967 uint32_t l, tmp;
968 uint16_t w;
969 uint8_t b;
970 char *name = fc_cell2ptr(cp->svc_name);
971 pci_regspec_t p;
972 ddi_device_acc_attr_t acc;
973 ddi_acc_handle_t h;
974
975 if (fc_cell2int(cp->nargs) != 2)
976 return (fc_syntax_error(cp, "nargs must be 2"));
977
978 /*
979 * Construct a config address pci reg property from the args.
980 * arg[0] is the configuration address. arg[1] is the data.
981 */
982 p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
983 p.pci_phys_mid = p.pci_phys_low = 0;
984 p.pci_size_hi = p.pci_size_low = 0;
985
986 /*
987 * Verify that the address is a configuration space address
988 * ss must be zero.
989 */
990 if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
991 cmn_err(CE_CONT, "pfc_config_store: "
992 "invalid config addr: %x\n", p.pci_phys_hi);
993 return (fc_priv_error(cp, "non-config addr"));
994 }
995
996 /*
997 * Extract the register number from the config address and
998 * remove the register number from the physical address.
999 */
1000 reg = (p.pci_phys_hi & PCI_REG_REG_M) |
1001 (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
1002
1003 p.pci_phys_hi &= PCI_BDF_bits;
1004
1005 /*
1006 * Determine the access width .. we can switch on the 8th
1007 * character of the name which is "config-{l,w,b}!"
1008 */
1009 switch (*(name + 7)) {
1010 case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
1011 case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
1012 case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
1013 }
1014
1015 /*
1016 * Verify that the access is properly aligned
1017 */
1018 if ((reg & (len - 1)) != 0)
1019 return (fc_priv_error(cp, "unaligned access"));
1020
1021 /*
1022 * Map in configuration space (temporarily)
1023 */
1024 acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1025 acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1026 acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1027
1028 error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
1029
1030 if (error) {
1031 return (fc_priv_error(cp, "pci config map-in failed"));
1032 }
1033
1034 if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
1035 flags |= PCICFG_CONF_INDIRECT_MAP;
1036
1037 if (flags & PCICFG_CONF_INDIRECT_MAP) {
1038 tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1039 error = DDI_SUCCESS;
1040 } else
1041 error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
1042
1043 if (error == DDI_SUCCESS)
1044 if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1045 error = DDI_FAILURE;
1046 cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
1047 }
1048
1049 if (error != DDI_SUCCESS) {
1050 return (fc_priv_error(cp, "pci config store failed"));
1051 }
1052
1053
1054 /*
1055 * XXX: We need access handle versions of peek/poke to move
1056 * beyond the prototype ... we assume that we have hardware
1057 * byte swapping enabled for pci register access here which
1058 * is a huge dependency on the current implementation.
1059 */
1060 v = virt + reg;
1061 switch (len) {
1062 case sizeof (l):
1063 ddi_put32(h, (uint32_t *)v, (uint32_t)l);
1064 break;
1065 case sizeof (w):
1066 ddi_put16(h, (uint16_t *)v, (uint16_t)w);
1067 break;
1068 case sizeof (b):
1069 ddi_put8(h, (uint8_t *)v, (uint8_t)b);
1070 break;
1071 }
1072
1073 /*
1074 * Remove the temporary config space mapping
1075 */
1076 pci_unmap_phys(&h, &p);
1077
1078 if (error) {
1079 return (fc_priv_error(cp, "access error"));
1080 }
1081
1082 cp->nresults = fc_int2cell(0);
1083 return (fc_success_op(ap, rp, cp));
1084 }
1085
1086
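/*
 * get-fcode: copy the FCode image named by arg 0 from the PROM into
 * the caller's buffer at arg 1 (of length arg 2).  The result is the
 * status returned by prom_get_fcode(), or 0 if a copy fails.
 */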
1087 static int
1088 pfc_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1089 {
1090 caddr_t name_virt, fcode_virt;
1091 char *name, *fcode;
1092 int fcode_len, status;
1093
1094 if (fc_cell2int(cp->nargs) != 3)
1095 return (fc_syntax_error(cp, "nargs must be 3"));
1096
1097 if (fc_cell2int(cp->nresults) < 1)
1098 return (fc_syntax_error(cp, "nresults must be >= 1"));
1099
1100 name_virt = fc_cell2ptr(fc_arg(cp, 0));
1101
1102 fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1103
1104 fcode_len = fc_cell2int(fc_arg(cp, 2));
1105
1106 name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1107
1108 if (copyinstr(fc_cell2ptr(name_virt), name,
1109 FC_SVC_NAME_LEN - 1, NULL)) {
1110 status = 0;
1111 } else {
1112
1113 fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1114
1115 if ((status = prom_get_fcode(name, fcode)) != 0) {
1116
1117 if (copyout((void *)fcode, (void *)fcode_virt,
1118 fcode_len)) {
1119 cmn_err(CE_WARN, "pfc_get_fcode: Unable "
1120 "to copy out fcode image\n");
1121 status = 0;
1122 }
1123 }
1124
1125 kmem_free(fcode, fcode_len);
1126 }
1127
1128 kmem_free(name, FC_SVC_NAME_LEN);
1129
1130 cp->nresults = fc_int2cell(1);
1131 fc_result(cp, 0) = status;
1132
1133 return (fc_success_op(ap, rp, cp));
1134 }
1135
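/*
 * get-fcode-size: return the size of the FCode image named by arg 0,
 * as reported by the PROM (0 if the name cannot be copied in).
 */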
1136 static int
1137 pfc_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1138 {
1139 caddr_t virt;
1140 char *name;
1141 int len;
1142
1143 if (fc_cell2int(cp->nargs) != 1)
1144 return (fc_syntax_error(cp, "nargs must be 1"));
1145
1146 if (fc_cell2int(cp->nresults) < 1)
1147 return (fc_syntax_error(cp, "nresults must be >= 1"));
1148
1149 virt = fc_cell2ptr(fc_arg(cp, 0));
1150
1151 name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1152
1153 if (copyinstr(fc_cell2ptr(virt), name,
1154 FC_SVC_NAME_LEN - 1, NULL)) {
1155 len = 0;
1156 } else {
1157 len = prom_get_fcode_size(name);
1158 }
1159
1160 kmem_free(name, FC_SVC_NAME_LEN);
1161
1162 cp->nresults = fc_int2cell(1);
1163 fc_result(cp, 0) = len;
1164
1165 return (fc_success_op(ap, rp, cp));
1166 }
1167
1168 /*
1169 * Return the physical probe address: phys.lo = 0 and phys.mid = 0.
 * The phys.hi config address is returned separately by probe-space.
1170 */
1171 static int
1172 pfc_probe_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1173 {
1174 if (fc_cell2int(cp->nargs) != 0)
1175 return (fc_syntax_error(cp, "nargs must be 0"));
1176
1177 if (fc_cell2int(cp->nresults) < 2)
1178 return (fc_syntax_error(cp, "nresults must be >= 2"));
1179
1180 cp->nresults = fc_int2cell(2);
1181 fc_result(cp, 1) = fc_int2cell(0); /* phys.lo */
1182 fc_result(cp, 0) = fc_int2cell(0); /* phys.mid */
1183
1184 return (fc_success_op(ap, rp, cp));
1185 }
1186
1187 /*
1188 * Return the phys.hi component of the probe address.
1189 */
1190 static int
1191 pfc_probe_space(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1192 {
1193 struct pci_ops_bus_args *ba = rp->bus_args;
1194
1195 ASSERT(ba);
1196
1197 if (fc_cell2int(cp->nargs) != 0)
1198 return (fc_syntax_error(cp, "nargs must be 0"));
1199
1200 if (fc_cell2int(cp->nresults) < 1)
1201 return (fc_syntax_error(cp, "nresults must be >= 1"));
1202
1203 cp->nresults = fc_int2cell(1);
1204 fc_result(cp, 0) = fc_uint32_t2cell(ba->config_address); /* phys.hi */
1205
1206 return (fc_success_op(ap, rp, cp));
1207 }
1208
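/*
 * config-child: return the phandle of the child node being configured.
 */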
1209 static int
1210 pfc_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1211 {
1212 fc_phandle_t h;
1213
1214 if (fc_cell2int(cp->nargs) != 0)
1215 return (fc_syntax_error(cp, "nargs must be 0"));
1216
1217 if (fc_cell2int(cp->nresults) < 1)
1218 return (fc_syntax_error(cp, "nresults must be >= 1"));
1219
1220 h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1221
1222 cp->nresults = fc_int2cell(1);
1223 fc_result(cp, 0) = fc_phandle2cell(h);
1224
1225 return (fc_success_op(ap, rp, cp));
1226 }
1227
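/*
 * pci_alloc_mem_chunk() and pci_alloc_io_chunk() allocate an aligned
 * chunk of PCI memory or I/O space below 4GB from the given nexus via
 * the NDI resource allocator.
 */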
1228 int
1229 pci_alloc_mem_chunk(dev_info_t *dip, uint64_t mem_align, uint64_t *mem_size,
1230 uint64_t *mem_answer)
1231 {
1232 ndi_ra_request_t req;
1233 int rval;
1234
1235 bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1236 req.ra_flags = NDI_RA_ALLOC_BOUNDED;
1237 req.ra_boundbase = 0;
1238 req.ra_boundlen = PCI_4GIG_LIMIT;
1239 req.ra_len = *mem_size;
1240 req.ra_align_mask = mem_align - 1;
1241
1242 rval = ndi_ra_alloc(dip, &req, mem_answer, mem_size,
1243 NDI_RA_TYPE_MEM, NDI_RA_PASS);
1244
1245 return (rval);
1246 }
1247 int
1248 pci_alloc_io_chunk(dev_info_t *dip, uint64_t io_align, uint64_t *io_size,
1249 uint64_t *io_answer)
1250 {
1251 ndi_ra_request_t req;
1252 int rval;
1253
1254 bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1255 req.ra_flags = (NDI_RA_ALLOC_BOUNDED | NDI_RA_ALLOC_PARTIAL_OK);
1256 req.ra_boundbase = 0;
1257 req.ra_boundlen = PCI_4GIG_LIMIT;
1258 req.ra_len = *io_size;
1259 req.ra_align_mask = io_align - 1;
1260
1261 rval = ndi_ra_alloc(dip, &req, io_answer, io_size,
1262 NDI_RA_TYPE_IO, NDI_RA_PASS);
1263
1264 return (rval);
1265 }
1266
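/*
 * Allocate and program the address range described by phys_spec for
 * the child: honor any existing "assigned-addresses" entry, probe the
 * device through a temporary config space mapping, allocate memory or
 * I/O space from the parent's pool, write the BAR and record the
 * result in "assigned-addresses".
 */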
1267 int
1268 pci_alloc_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1269 {
1270 uint64_t answer;
1271 uint64_t alen;
1272 int offset, tmp;
1273 pci_regspec_t config;
1274 caddr_t virt, v;
1275 ddi_device_acc_attr_t acc;
1276 ddi_acc_handle_t h;
1277 ndi_ra_request_t request;
1278 pci_regspec_t *assigned;
1279 int assigned_len, entries, i, l, flags = 0, error;
1280
1281 l = phys_spec.pci_size_low;
1282
1283 if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1284 DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&assigned,
1285 &assigned_len) == DDI_PROP_SUCCESS) {
1286
1287 entries = assigned_len / (sizeof (pci_regspec_t));
1288
1289 /*
1290 * Walk through the assigned-addresses entries. If there is
1291 * a match, there is no need to allocate the resource.
1292 */
1293 for (i = 0; i < entries; i++) {
1294 if (assigned[i].pci_phys_hi == phys_spec.pci_phys_hi) {
1295 if (assigned[i].pci_size_low >=
1296 phys_spec.pci_size_low) {
1297 kmem_free(assigned, assigned_len);
1298 return (0);
1299 }
1300 /*
1301 * Fcode wants to assign more than what
1302 * probe found.
1303 */
1304 (void) pci_free_resource(dip, assigned[i]);
1305 /*
1306 * Go on to allocate resources.
1307 */
1308 break;
1309 }
1310 /*
1311 * Check if Fcode wants to map using different
1312 * NPT bits.
1313 */
1314 if (PCI_REG_BDFR_G(assigned[i].pci_phys_hi) ==
1315 PCI_REG_BDFR_G(phys_spec.pci_phys_hi)) {
1316 /*
1317 * It is an error to change SS bits
1318 */
1319 if (PCI_REG_ADDR_G(assigned[i].pci_phys_hi) !=
1320 PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1321
1322 FC_DEBUG2(2, CE_WARN, "Fcode changing "
1323 "ss bits in reg %x -- %x",
1324 assigned[i].pci_phys_hi,
1325 phys_spec.pci_phys_hi);
1326 }
1327
1328 /*
1329 * Allocate enough
1330 */
1331 l = MAX(assigned[i].pci_size_low,
1332 phys_spec.pci_size_low);
1333
1334 phys_spec.pci_size_low = l;
1335
1336 (void) pci_free_resource(dip, assigned[i]);
1337 /*
1338 * Go on to allocate resources.
1339 */
1340 break;
1341 }
1342 }
1343 kmem_free(assigned, assigned_len);
1344 }
1345
1346 bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1347
1348 config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1349 config.pci_phys_hi &= ~PCI_REG_REG_M;
1350 config.pci_phys_mid = config.pci_phys_low = 0;
1351 config.pci_size_hi = config.pci_size_low = 0;
1352
1353 /*
1354 * Map in configuration space (temporarily)
1355 */
1356 acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1357 acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1358 acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1359
1360 if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1361 return (1);
1362 }
1363
1364 if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1365 flags |= PCICFG_CONF_INDIRECT_MAP;
1366
1367 if (flags & PCICFG_CONF_INDIRECT_MAP) {
1368 tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1369 error = DDI_SUCCESS;
1370 } else
1371 error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1372
1373 if (error == DDI_SUCCESS)
1374 if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1375 error = DDI_FAILURE;
1376 }
1377
1378 if (error != DDI_SUCCESS) {
1379 return (1);
1380 }
1381
1382 request.ra_flags |= NDI_RA_ALIGN_SIZE;
1383 request.ra_boundbase = 0;
1384 request.ra_boundlen = PCI_4GIG_LIMIT;
1385
1386 offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1387
1388 v = virt + offset;
1389
1390 if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1391 request.ra_len = l;
1392 request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1393
1394 /* allocate memory space from the allocator */
1395
1396 if (ndi_ra_alloc(ddi_get_parent(dip),
1397 &request, &answer, &alen, NDI_RA_TYPE_MEM,
1398 NDI_RA_PASS) != NDI_SUCCESS) {
1399 pci_unmap_phys(&h, &config);
1400 return (1);
1401 }
1402 FC_DEBUG3(1, CE_CONT, "ROM addr = [0x%x.%x] len [0x%x]\n",
1403 HIADDR(answer), LOADDR(answer), alen);
1404
1405 /* program the low word */
1406
1407 ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1408
1409 phys_spec.pci_phys_low = LOADDR(answer);
1410 phys_spec.pci_phys_mid = HIADDR(answer);
1411 } else {
1412 request.ra_len = l;
1413
1414 switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1415 case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1416 request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1417
1418 if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1419 /*
1420 * If it is a non relocatable address,
1421 * then specify the address we want.
1422 */
1423 request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1424 request.ra_addr = (uint64_t)LADDR(
1425 phys_spec.pci_phys_low,
1426 phys_spec.pci_phys_mid);
1427 }
1428
1429 /* allocate memory space from the allocator */
1430
1431 if (ndi_ra_alloc(ddi_get_parent(dip),
1432 &request, &answer, &alen, NDI_RA_TYPE_MEM,
1433 NDI_RA_PASS) != NDI_SUCCESS) {
1434 pci_unmap_phys(&h, &config);
1435 if (request.ra_flags == NDI_RA_ALLOC_SPECIFIED)
1436 cmn_err(CE_WARN, "Unable to allocate "
1437 "non relocatable address 0x%p\n",
1438 (void *) request.ra_addr);
1439 return (1);
1440 }
1441 FC_DEBUG3(1, CE_CONT,
1442 "64 addr = [0x%x.%x] len [0x%x]\n",
1443 HIADDR(answer),
1444 LOADDR(answer),
1445 alen);
1446
1447 /* program the low word */
1448
1449 ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1450
1451 /* program the high word with value zero */
1452 v += 4;
1453 ddi_put32(h, (uint32_t *)v, HIADDR(answer));
1454
1455 phys_spec.pci_phys_low = LOADDR(answer);
1456 phys_spec.pci_phys_mid = HIADDR(answer);
1457 /*
1458 * currently support 32b address space
1459 * assignments only.
1460 */
1461 phys_spec.pci_phys_hi ^= PCI_ADDR_MEM64 ^
1462 PCI_ADDR_MEM32;
1463
1464 break;
1465
1466 case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1467 request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1468
1469 if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1470 /*
1471 * If it is a non relocatable address,
1472 * then specify the address we want.
1473 */
1474 request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1475 request.ra_addr = (uint64_t)
1476 phys_spec.pci_phys_low;
1477 }
1478
1479 /* allocate memory space from the allocator */
1480
1481 if (ndi_ra_alloc(ddi_get_parent(dip),
1482 &request, &answer, &alen, NDI_RA_TYPE_MEM,
1483 NDI_RA_PASS) != NDI_SUCCESS) {
1484 pci_unmap_phys(&h, &config);
1485 if (request.ra_flags == NDI_RA_ALLOC_SPECIFIED)
1486 cmn_err(CE_WARN, "Unable to allocate "
1487 "non relocatable address 0x%p\n",
1488 (void *) request.ra_addr);
1489 return (1);
1490 }
1491
1492 FC_DEBUG3(1, CE_CONT,
1493 "32 addr = [0x%x.%x] len [0x%x]\n",
1494 HIADDR(answer),
1495 LOADDR(answer),
1496 alen);
1497
1498 /* program the low word */
1499
1500 ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1501
1502 phys_spec.pci_phys_low = LOADDR(answer);
1503
1504 break;
1505 case PCI_REG_ADDR_G(PCI_ADDR_IO):
1506 request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1507
1508 if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1509 /*
1510 * If it is a non relocatable address,
1511 * then specify the address we want.
1512 */
1513 request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1514 request.ra_addr = (uint64_t)
1515 phys_spec.pci_phys_low;
1516 }
1517
1518 /* allocate I/O space from the allocator */
1519
1520 if (ndi_ra_alloc(ddi_get_parent(dip),
1521 &request, &answer, &alen, NDI_RA_TYPE_IO,
1522 NDI_RA_PASS) != NDI_SUCCESS) {
1523 pci_unmap_phys(&h, &config);
1524 if (request.ra_flags ==
1525 NDI_RA_ALLOC_SPECIFIED)
1526 cmn_err(CE_WARN, "Unable to allocate "
1527 "non relocatable IO Space 0x%p\n",
1528 (void *) request.ra_addr);
1529 return (1);
1530 }
1531 FC_DEBUG3(1, CE_CONT,
1532 "I/O addr = [0x%x.%x] len [0x%x]\n",
1533 HIADDR(answer),
1534 LOADDR(answer),
1535 alen);
1536
1537 ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1538
1539 phys_spec.pci_phys_low = LOADDR(answer);
1540
1541 break;
1542 default:
1543 pci_unmap_phys(&h, &config);
1544 return (1);
1545 } /* switch */
1546 }
1547
1548 /*
1549 * Now that memory locations are assigned,
1550 * update the assigned address property.
1551 */
1552 if (pfc_update_assigned_prop(dip, &phys_spec)) {
1553 pci_unmap_phys(&h, &config);
1554 return (1);
1555 }
1556
1557 pci_unmap_phys(&h, &config);
1558
1559 return (0);
1560 }
1561
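/*
 * Undo pci_alloc_resource(): give the address range described by
 * phys_spec back to the parent's allocator and remove the matching
 * "assigned-addresses" entry.
 */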
1562 int
1563 pci_free_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1564 {
1565 int offset, tmp;
1566 pci_regspec_t config;
1567 caddr_t virt, v;
1568 ddi_device_acc_attr_t acc;
1569 ddi_acc_handle_t h;
1570 ndi_ra_request_t request;
1571 int l, error, flags = 0;
1572
1573 bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1574
1575 config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1576 config.pci_phys_hi &= ~PCI_REG_REG_M;
1577 config.pci_phys_mid = config.pci_phys_low = 0;
1578 config.pci_size_hi = config.pci_size_low = 0;
1579
1580 /*
1581 * Map in configuration space (temporarily)
1582 */
1583 acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1584 acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1585 acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1586
1587 if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1588 return (1);
1589 }
1590 if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1591 flags |= PCICFG_CONF_INDIRECT_MAP;
1592
1593 if (flags & PCICFG_CONF_INDIRECT_MAP) {
1594 tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1595 error = DDI_SUCCESS;
1596 } else
1597 error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1598
1599 if (error == DDI_SUCCESS)
1600 if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1601 error = DDI_FAILURE;
1602 }
1603 if (error != DDI_SUCCESS) {
1604 return (1);
1605 }
1606
1607
1608 offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1609
1610 v = virt + offset;
1611
1612 /*
1613 * Pick up the size to be freed. It may be different from
1614 * what probe finds.
1615 */
1616 l = phys_spec.pci_size_low;
1617
1618 if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1619 /* free memory back to the allocator */
1620 if (ndi_ra_free(ddi_get_parent(dip), phys_spec.pci_phys_low,
1621 l, NDI_RA_TYPE_MEM,
1622 NDI_RA_PASS) != NDI_SUCCESS) {
1623 pci_unmap_phys(&h, &config);
1624 return (1);
1625 }
1626
1627 /* Unmap the BAR by writing a zero */
1628
1629 ddi_put32(h, (uint32_t *)v, 0);
1630 } else {
1631 switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1632 case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1633 /* free memory back to the allocator */
1634 if (ndi_ra_free(ddi_get_parent(dip),
1635 LADDR(phys_spec.pci_phys_low,
1636 phys_spec.pci_phys_mid),
1637 l, NDI_RA_TYPE_MEM,
1638 NDI_RA_PASS) != NDI_SUCCESS) {
1639 pci_unmap_phys(&h, &config);
1640 return (1);
1641 }
1642
1643 break;
1644
1645 case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1646 /* free memory back to the allocator */
1647 if (ndi_ra_free(ddi_get_parent(dip),
1648 phys_spec.pci_phys_low,
1649 l, NDI_RA_TYPE_MEM,
1650 NDI_RA_PASS) != NDI_SUCCESS) {
1651 pci_unmap_phys(&h, &config);
1652 return (1);
1653 }
1654
1655 break;
1656 case PCI_REG_ADDR_G(PCI_ADDR_IO):
1657 /* free I/O space back to the allocator */
1658 if (ndi_ra_free(ddi_get_parent(dip),
1659 phys_spec.pci_phys_low,
1660 l, NDI_RA_TYPE_IO,
1661 NDI_RA_PASS) != NDI_SUCCESS) {
1662 pci_unmap_phys(&h, &config);
1663 return (1);
1664 }
1665 break;
1666 default:
1667 pci_unmap_phys(&h, &config);
1668 return (1);
1669 } /* switch */
1670 }
1671
1672 /*
1673 * Now that memory locations are assigned,
1674 * update the assigned address property.
1675 */
1676
1677 FC_DEBUG1(1, CE_CONT, "updating assigned-addresses for %x\n",
1678 phys_spec.pci_phys_hi);
1679
1680 if (pfc_remove_assigned_prop(dip, &phys_spec)) {
1681 pci_unmap_phys(&h, &config);
1682 return (1);
1683 }
1684
1685 pci_unmap_phys(&h, &config);
1686
1687 return (0);
1688 }
1689
1690
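/*
 * Map the given PCI regspec directly through ddi_map() using
 * DDI_MT_REGSPEC, so no "reg" property entry is required for the
 * range being mapped.
 */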
1691 int
1692 pci_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
1693 caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1694 ddi_acc_handle_t *handlep)
1695 {
1696 ddi_map_req_t mr;
1697 ddi_acc_hdl_t *hp;
1698 int result;
1699
1700 *handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1701 hp = impl_acc_hdl_get(*handlep);
1702 hp->ah_vers = VERS_ACCHDL;
1703 hp->ah_dip = dip;
1704 hp->ah_rnumber = 0;
1705 hp->ah_offset = 0;
1706 hp->ah_len = 0;
1707 hp->ah_acc = *accattrp;
1708
1709 mr.map_op = DDI_MO_MAP_LOCKED;
1710 mr.map_type = DDI_MT_REGSPEC;
1711 mr.map_obj.rp = (struct regspec *)phys_spec;
1712 mr.map_prot = PROT_READ | PROT_WRITE;
1713 mr.map_flags = DDI_MF_KERNEL_MAPPING;
1714 mr.map_handlep = hp;
1715 mr.map_vers = DDI_MAP_VERSION;
1716
1717 result = ddi_map(dip, &mr, 0, 0, addrp);
1718
1719 if (result != DDI_SUCCESS) {
1720 impl_acc_hdl_free(*handlep);
1721 *handlep = (ddi_acc_handle_t)NULL;
1722 } else {
1723 hp->ah_addr = *addrp;
1724 }
1725
1726 return (result);
1727 }
1728
1729 void
1730 pci_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
1731 {
1732 ddi_map_req_t mr;
1733 ddi_acc_hdl_t *hp;
1734
1735 hp = impl_acc_hdl_get(*handlep);
1736 ASSERT(hp);
1737
1738 mr.map_op = DDI_MO_UNMAP;
1739 mr.map_type = DDI_MT_REGSPEC;
1740 mr.map_obj.rp = (struct regspec *)ph;
1741 mr.map_prot = PROT_READ | PROT_WRITE;
1742 mr.map_flags = DDI_MF_KERNEL_MAPPING;
1743 mr.map_handlep = hp;
1744 mr.map_vers = DDI_MAP_VERSION;
1745
1746 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1747 hp->ah_len, &hp->ah_addr);
1748
1749 impl_acc_hdl_free(*handlep);
1750
1751
1752 *handlep = (ddi_acc_handle_t)NULL;
1753 }
1754
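/*
 * Append 'newone' to the child's "assigned-addresses" property,
 * creating the property if it does not yet exist.
 */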
1755 int
1756 pfc_update_assigned_prop(dev_info_t *dip, pci_regspec_t *newone)
1757 {
1758 int alen;
1759 pci_regspec_t *assigned;
1760 caddr_t newreg;
1761 uint_t status;
1762
1763 status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1764 "assigned-addresses", (caddr_t)&assigned, &alen);
1765 switch (status) {
1766 case DDI_PROP_SUCCESS:
1767 break;
1768 case DDI_PROP_NO_MEMORY:
1769 return (1);
1770 default:
1771 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1772 "assigned-addresses", (int *)newone,
1773 sizeof (*newone)/sizeof (int));
1774 return (0);
1775 }
1776
1777 /*
1778 * Allocate memory for the existing
1779 * assigned-addresses(s) plus one and then
1780 * build it.
1781 */
1782
1783 newreg = kmem_zalloc(alen+sizeof (*newone), KM_SLEEP);
1784
1785 bcopy(assigned, newreg, alen);
1786 bcopy(newone, newreg + alen, sizeof (*newone));
1787
1788 /*
1789 * Write out the new "assigned-addresses" spec
1790 */
1791 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1792 "assigned-addresses", (int *)newreg,
1793 (alen + sizeof (*newone))/sizeof (int));
1794
1795 kmem_free((caddr_t)newreg, alen+sizeof (*newone));
1796 kmem_free(assigned, alen);
1797
1798 return (0);
1799 }
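/*
 * Remove the entry matching 'oldone' (by phys.hi) from the child's
 * "assigned-addresses" property.
 */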
1800 int
1801 pfc_remove_assigned_prop(dev_info_t *dip, pci_regspec_t *oldone)
1802 {
1803 int alen, new_len, num_entries, i;
1804 pci_regspec_t *assigned;
1805 uint_t status;
1806
1807 status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1808 "assigned-addresses", (caddr_t)&assigned, &alen);
1809 switch (status) {
1810 case DDI_PROP_SUCCESS:
1811 break;
1812 case DDI_PROP_NO_MEMORY:
1813 return (1);
1814 default:
1815 return (0);
1816 }
1817
1818 num_entries = alen / sizeof (pci_regspec_t);
1819 new_len = alen - sizeof (pci_regspec_t);
1820
1821 /*
1822 * Search for the memory being removed.
1823 */
1824 for (i = 0; i < num_entries; i++) {
1825 if (assigned[i].pci_phys_hi == oldone->pci_phys_hi) {
1826 if (new_len == 0) {
1827 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1828 "assigned-addresses");
1829 break;
1830 }
1831 if ((new_len - (i * sizeof (pci_regspec_t)))
1832 == 0) {
1833 FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1834 "%x removed from property (last entry)\n",
1835 oldone->pci_phys_hi);
1836 } else {
1837 bcopy((void *)(assigned + i + 1),
1838 (void *)(assigned + i),
1839 (new_len - (i * sizeof (pci_regspec_t))));
1840
1841 FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1842 "%x removed from property\n",
1843 oldone->pci_phys_hi);
1844 }
1845 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
1846 dip, "assigned-addresses", (int *)assigned,
1847 (new_len/sizeof (int)));
1848
1849 break;
1850 }
1851 }
1852
1853 kmem_free(assigned, alen);
1854
1855 return (0);
1856 }
1857 /*
1858 * We recognize non-transparent bridge child nodes by the
1859 * following property. This is specific to this implementation only.
1860 * This property is specific to AP nodes only.
1861 */
1862 #define PCICFG_DEV_CONF_MAP_PROP "pci-parent-indirect"
1863
1864 /*
1865 * If a non transparent bridge drives a hotplug/hotswap bus, then
1866 * the following property must be defined for the node either by
1867 * the driver or the OBP.
1868 */
1869 #define PCICFG_BUS_CONF_MAP_PROP "pci-conf-indirect"
1870
1871 /*
1872 * This function is called only on SPARC platforms, where we may have
1873 * a mix of directly and indirectly mapped configuration space.
1874 */
1875 /*ARGSUSED*/
1876 static int
1877 fcpci_indirect_map(dev_info_t *dip)
1878 {
1879 int rc = DDI_FAILURE;
1880
1881 if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip), 0,
1882 PCICFG_DEV_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1883 rc = DDI_SUCCESS;
1884 else
1885 if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip),
1886 0, PCICFG_BUS_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1887 rc = DDI_SUCCESS;
1888
1889 return (rc);
1890 }
1891