xref: /illumos-gate/usr/src/uts/sun4/io/efcode/fcpci.c (revision 02e56f3f1bfc8d9977bafb8cb5202f576dcded27)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * fcpci.c: Framework PCI fcode ops
31  */
32 #include <sys/types.h>
33 #include <sys/kmem.h>
34 #include <sys/systm.h>
35 #include <sys/pci.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/sunndi.h>
39 #include <sys/ddidmareq.h>
40 #include <sys/pci.h>
41 #include <sys/modctl.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/fcode.h>
44 #include <sys/promif.h>
45 #include <sys/promimpl.h>
46 
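/*
 * PCI_NPT_bits collects the n (non-relocatable), p (prefetchable) and
 * t (alias) bits of a PCI phys.hi cell; config space accesses require
 * all three to be clear.  PCICFG_CONF_INDIRECT_MAP marks a child whose
 * config space must be reached through an indirect mapping (see
 * fcpci_indirect_map() below).
 */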
47 #define	PCI_NPT_bits	(PCI_RELOCAT_B | PCI_PREFETCH_B | PCI_ALIAS_B)
48 #define	PCICFG_CONF_INDIRECT_MAP	1
49 
50 static int pfc_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
51 static int pfc_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
52 static int pfc_dma_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
53 static int pfc_dma_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
54 static int pfc_dma_sync(dev_info_t *, fco_handle_t, fc_ci_t *);
55 static int pfc_dma_cleanup(dev_info_t *, fco_handle_t, fc_ci_t *);
56 
57 static int pfc_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
58 static int pfc_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
59 static int pfc_config_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
60 static int pfc_config_store(dev_info_t *, fco_handle_t, fc_ci_t *);
61 
62 static int pfc_probe_address(dev_info_t *, fco_handle_t, fc_ci_t *);
63 static int pfc_probe_space(dev_info_t *, fco_handle_t, fc_ci_t *);
64 
65 static int pfc_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
66 static int pfc_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
67 static int pfc_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
68 int prom_get_fcode_size(char *);
69 int prom_get_fcode(char *, char *);
70 int pfc_update_assigned_prop(dev_info_t *, pci_regspec_t *);
71 int pfc_remove_assigned_prop(dev_info_t *, pci_regspec_t *);
72 int pci_alloc_resource(dev_info_t *, pci_regspec_t);
73 int pci_free_resource(dev_info_t *, pci_regspec_t);
74 int pci_alloc_mem_chunk(dev_info_t *,  uint64_t, uint64_t *,  uint64_t *);
75 int pci_alloc_io_chunk(dev_info_t *,  uint64_t,  uint64_t *, uint64_t *);
76 static int fcpci_indirect_map(dev_info_t *);
77 
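/*
 * Tunables: fcpci_unloadable allows _fini() to unload the module;
 * no_advisory_dma, when set, skips the advisory ddi_dma_buf_setup()
 * probe performed in pfc_dma_map_in() before the real mapping.
 */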
78 int fcpci_unloadable;
79 int no_advisory_dma;
80 
81 #ifndef	lint
82 static char _depends_on[] = "misc/fcodem misc/busra";
83 #endif
84 
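/*
 * HIADDR/LOADDR split a 64-bit PCI address into its upper and lower
 * 32-bit words and LADDR reassembles them; PCI_MEMGRAN (1MB) and
 * PCI_IOGRAN (4KB) are the corresponding allocation granularities.
 */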
85 #define	HIADDR(n) ((uint32_t)(((uint64_t)(n) & 0xFFFFFFFF00000000)>> 32))
86 #define	LOADDR(n)((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
87 #define	LADDR(lo, hi)    (((uint64_t)(hi) << 32) | (uint32_t)(lo))
88 #define	PCI_4GIG_LIMIT 0xFFFFFFFFUL
89 #define	PCI_MEMGRAN 0x100000
90 #define	PCI_IOGRAN 0x1000
91 
92 
93 /*
94  * Module linkage information for the kernel.
95  */
96 static struct modlmisc modlmisc = {
97 	&mod_miscops, "FCode pci bus functions %I%"
98 };
99 
100 static struct modlinkage modlinkage = {
101 	MODREV_1, (void *)&modlmisc, NULL
102 };
103 
104 int
105 _init(void)
106 {
107 	return (mod_install(&modlinkage));
108 }
109 
110 int
111 _fini(void)
112 {
113 	if (fcpci_unloadable)
114 		return (mod_remove(&modlinkage));
115 	return (EBUSY);
116 }
117 
118 int
119 _info(struct modinfo *modinfop)
120 {
121 	return (mod_info(&modlinkage, modinfop));
122 }
123 
124 
125 struct pfc_ops_v {
126 	char *svc_name;
127 	fc_ops_t *f;
128 };
129 
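/*
 * Dispatch tables mapping Fcode client-interface service names to
 * handlers.  pci_fc_ops() consults pov[] when the generic fc_ops()
 * does not service a request; shared_pov[] lists services that run
 * both the generic and the PCI-specific handler (currently only
 * FC_SVC_EXIT, so leftover DMA mappings get cleaned up).
 */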
130 static struct pfc_ops_v pov[] = {
131 	{	"map-in",		pfc_map_in},
132 	{	"map-out",		pfc_map_out},
133 	{	"dma-map-in",		pfc_dma_map_in},
134 	{	"dma-map-out",		pfc_dma_map_out},
135 	{	"dma-sync",		pfc_dma_sync},
136 	{	"rx@",			pfc_register_fetch},
137 	{	"rl@",			pfc_register_fetch},
138 	{	"rw@",			pfc_register_fetch},
139 	{	"rb@",			pfc_register_fetch},
140 	{	"rx!",			pfc_register_store},
141 	{	"rl!",			pfc_register_store},
142 	{	"rw!",			pfc_register_store},
143 	{	"rb!",			pfc_register_store},
144 	{	"config-l@",		pfc_config_fetch},
145 	{	"config-w@",		pfc_config_fetch},
146 	{	"config-b@",		pfc_config_fetch},
147 	{	"config-l!",		pfc_config_store},
148 	{	"config-w!",		pfc_config_store},
149 	{	"config-b!",		pfc_config_store},
150 	{	FC_PROBE_ADDRESS,	pfc_probe_address},
151 	{	FC_PROBE_SPACE,		pfc_probe_space},
152 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
153 	{	FC_CONFIG_CHILD,	pfc_config_child},
154 	{	FC_GET_FCODE_SIZE,	pfc_get_fcode_size},
155 	{	FC_GET_FCODE,		pfc_get_fcode},
156 	{	NULL,			NULL}
157 };
158 
159 static struct pfc_ops_v shared_pov[] = {
160 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
161 	{	NULL,			NULL}
162 };
163 
164 int pci_map_phys(dev_info_t *, pci_regspec_t *,
165     caddr_t *, ddi_device_acc_attr_t *, ddi_acc_handle_t *);
166 
167 void pci_unmap_phys(ddi_acc_handle_t *, pci_regspec_t *);
168 
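/*
 * Allocate the per-interpretation handle for a child's Fcode.  It
 * records the attachment point, child dip, Fcode image and bus args,
 * chains a generic fc_ops handle, and seeds the phandle table with
 * the child's nodeid.
 */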
169 fco_handle_t
170 pci_fc_ops_alloc_handle(dev_info_t *ap, dev_info_t *child,
171     void *fcode, size_t fcode_size, char *unit_address,
172     struct pci_ops_bus_args *up)
173 {
174 	fco_handle_t rp;
175 	struct pci_ops_bus_args *bp = NULL;
176 	phandle_t h;
177 
178 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
179 	rp->next_handle = fc_ops_alloc_handle(ap, child, fcode, fcode_size,
180 	    unit_address, NULL);
181 	rp->ap = ap;
182 	rp->child = child;
183 	rp->fcode = fcode;
184 	rp->fcode_size = fcode_size;
185 	if (unit_address) {
186 		char *buf;
187 
188 		buf = kmem_zalloc(strlen(unit_address) + 1, KM_SLEEP);
189 		(void) strcpy(buf, unit_address);
190 		rp->unit_address = buf;
191 	}
192 
193 	bp = kmem_zalloc(sizeof (struct pci_ops_bus_args), KM_SLEEP);
194 	*bp = *up;
195 	rp->bus_args = bp;
196 
197 	/*
198 	 * Add the child's nodeid to our table...
199 	 */
200 	h = ddi_get_nodeid(rp->child);
201 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
202 
203 	return (rp);
204 }
205 
206 void
207 pci_fc_ops_free_handle(fco_handle_t rp)
208 {
209 	struct pci_ops_bus_args *bp;
210 	struct fc_resource *ip, *np;
211 
212 	ASSERT(rp);
213 
214 	if (rp->next_handle)
215 		fc_ops_free_handle(rp->next_handle);
216 	if (rp->unit_address)
217 		kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
218 	if ((bp = rp->bus_args) != NULL)
219 		kmem_free(bp, sizeof (struct pci_ops_bus_args));
220 
221 	/*
222 	 * Release all the resources from the resource list
223 	 * XXX: We don't handle 'unknown' types, but we don't create them.
224 	 */
225 	for (ip = rp->head; ip != NULL; ip = np) {
226 		np = ip->next;
227 		switch (ip->type) {
228 		case RT_MAP:
229 			FC_DEBUG1(1, CE_CONT, "pci_fc_ops_free: "
230 			    "pci_unmap_phys(%p)\n", ip->fc_map_handle);
231 			pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
232 			kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
233 			break;
234 		case RT_DMA:
235 			/* DMA has to be freed up at exit time */
236 			cmn_err(CE_CONT, "pci_fc_ops_free: DMA seen!\n");
237 			break;
238 		default:
239 			cmn_err(CE_CONT, "pci_fc_ops_free: "
240 			    "unknown resource type %d\n", ip->type);
241 			break;
242 		}
243 		fc_rem_resource(rp, ip);
244 		kmem_free(ip, sizeof (struct fc_resource));
245 	}
246 	kmem_free(rp, sizeof (struct fc_resource_list));
247 }
248 
249 int
250 pci_fc_ops(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
251 {
252 	struct pfc_ops_v *pv;
253 	char *name = fc_cell2ptr(cp->svc_name);
254 
255 	ASSERT(rp);
256 
257 	/*
258 	 * First try the generic fc_ops. If the op is a shared op,
259 	 * also call our local function.
260 	 */
261 	if (fc_ops(ap, rp->next_handle, cp) == 0) {
262 		for (pv = shared_pov; pv->svc_name != NULL; ++pv)
263 			if (strcmp(pv->svc_name, name) == 0)
264 				return (pv->f(ap, rp, cp));
265 		return (0);
266 	}
267 
268 	for (pv = pov; pv->svc_name != NULL; ++pv)
269 		if (strcmp(pv->svc_name, name) == 0)
270 			return (pv->f(ap, rp, cp));
271 
272 	FC_DEBUG1(9, CE_CONT, "pci_fc_ops: <%s> not serviced\n", name);
273 
274 	return (-1);
275 }
276 
277 /*
278  * Create a dma mapping for a given user address.
279  */
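/*
 * Assumed Fcode stack: ( virt len cacheable -- devaddr ).  Arg 2 is
 * the caller's virtual address, arg 1 the length and arg 0 the
 * cacheable flag (currently ignored); the single result is the device
 * address from the first DMA cookie.
 */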
280 static int
281 pfc_dma_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
282 {
283 	ddi_dma_handle_t h;
284 	int error;
285 	caddr_t virt;
286 	size_t len;
287 	uint_t flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
288 	struct fc_resource *ip;
289 	ddi_dma_cookie_t c;
290 	struct buf *bp;
291 
292 	if (fc_cell2int(cp->nargs) != 3)
293 		return (fc_syntax_error(cp, "nargs must be 3"));
294 
295 	if (fc_cell2int(cp->nresults) < 1)
296 		return (fc_syntax_error(cp, "nresults must be >= 1"));
297 
298 	/*
299 	 * XXX: It's not clear what we should do with a non-cacheable request
300 	 */
301 	virt = fc_cell2ptr(fc_arg(cp, 2));
302 	len = fc_cell2size(fc_arg(cp, 1));
303 #ifdef	notdef
304 	cacheable = fc_cell2int(fc_arg(cp, 0));	/* XXX: do what? */
305 #endif
306 
307 	FC_DEBUG2(6, CE_CONT, "pfc_dma_map_in: virt %p, len %d\n", virt, len);
308 
309 	/*
310 	 * Set up the address space for physio from userland
311 	 */
312 	error = fc_physio_setup(&bp, virt, len);
313 
314 	if (error)  {
315 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: fc_physio_setup failed "
316 		    "error: %d  virt: %p  len %d\n", error, virt, len);
317 		return (fc_priv_error(cp, "fc_physio_setup failed"));
318 	}
319 
320 	if (no_advisory_dma == 0) {
321 		/*
322 		 * First try the advisory call to see if the mapping is legal;
323 		 * for advisory DMA, don't pass in a ptr to a DMA handle.
324 		 */
325 		FC_DEBUG0(9, CE_CONT, "pfc_dma_map_in: advisory dma_map_in\n");
326 		error = ddi_dma_buf_setup(ap, bp, flags, DDI_DMA_SLEEP,
327 		    NULL, NULL, NULL);
328 
329 		if (error)  {
330 			FC_DEBUG3(9, CE_CONT, "pfc_dma_map_in: advisory "
331 			    "dma-map-in failed error: %d  virt: %p  len %d\n",
332 			    error, virt, len);
333 			return (fc_priv_error(cp, "advisory dma-map-in "
334 			    "failed"));
335 		}
336 	}
337 
338 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: dma_map_in; bp = %p\n", bp);
339 	error = ddi_dma_buf_setup(ap, bp, flags, DDI_DMA_SLEEP,
340 	    NULL, NULL, &h);
341 
342 	if (error)  {
343 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
344 		    "error: %d  virt: %p  len %d\n", error, virt, len);
345 		return (fc_priv_error(cp, "real dma-map-in failed"));
346 	}
347 
348 	/*
349 	 * Now that the resource is mapped in, we need the dma cookie
350 	 * so we can return it to the driver.
351 	 */
352 
353 	error = fc_ddi_dma_htoc(ap, h, 0, &c);
354 	if (error)  {
355 		(void) fc_ddi_dma_free(ap, h);
356 		return (fc_priv_error(cp, "ddi_dma_htoc failed"));
357 	}
358 
359 	if (c.dmac_size < len)  {
360 		(void) fc_ddi_dma_free(ap, h);
361 		return (fc_priv_error(cp, "ddi_dma_htoc size < len"));
362 	}
363 
364 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: returning devaddr %x\n",
365 		c.dmac_address);
366 
367 	cp->nresults = fc_int2cell(1);
368 	fc_result(cp, 0) = fc_uint32_t2cell(c.dmac_address);	/* XXX size */
369 
370 	/*
371 	 * Now we have to log this resource saving the handle and buf header
372 	 */
373 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
374 	ip->type = RT_DMA;
375 	ip->fc_dma_virt = virt;
376 	ip->fc_dma_len = len;
377 	ip->fc_dma_handle = h;
378 	ip->fc_dma_devaddr = c.dmac_address;
379 	ip->fc_dma_bp = bp;
380 	fc_add_resource(rp, ip);
381 
382 	return (fc_success_op(ap, rp, cp));
383 }
384 
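/*
 * dma-sync: sync a range of a mapping created by dma-map-in.  Since
 * the direction is unknown, the range is synced both FORCPU and
 * FORDEV.
 */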
385 static int
386 pfc_dma_sync(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
387 {
388 	void *virt;
389 	size_t len;
390 	uint32_t devaddr;
391 	int error;
392 	struct fc_resource *ip;
393 
394 	if (fc_cell2int(cp->nargs) != 3)
395 		return (fc_syntax_error(cp, "nargs must be 3"));
396 
397 	virt = fc_cell2ptr(fc_arg(cp, 2));
398 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
399 	len = fc_cell2size(fc_arg(cp, 0));
400 
401 	/*
402 	 * Find if this virt is 'within' a request we know about
403 	 */
404 	fc_lock_resource_list(rp);
405 	for (ip = rp->head; ip != NULL; ip = ip->next) {
406 		if (ip->type != RT_DMA)
407 			continue;
408 		if (ip->fc_dma_devaddr != devaddr)
409 			continue;
410 		if (((char *)virt >= (char *)ip->fc_dma_virt) &&
411 		    (((char *)virt + len) <=
412 		    ((char *)ip->fc_dma_virt + ip->fc_dma_len)))
413 			break;
414 	}
415 	fc_unlock_resource_list(rp);
416 
417 	if (ip == NULL)
418 		return (fc_priv_error(cp, "request not within a "
419 		    "known dma mapping"));
420 
421 	/*
422 	 * We know about this request, so we trust it enough to sync it.
423 	 * Unfortunately, we don't know which direction, so we'll do
424 	 * both directions.
425 	 */
426 
427 	error = fc_ddi_dma_sync(ip->fc_dma_handle,
428 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORCPU);
429 	error |= fc_ddi_dma_sync(ip->fc_dma_handle,
430 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORDEV);
431 
432 	if (error)
433 		return (fc_priv_error(cp, "Call to ddi_dma_sync failed"));
434 
435 	cp->nresults = fc_int2cell(0);
436 	return (fc_success_op(ap, rp, cp));
437 }
438 
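/*
 * dma-map-out: tear down a mapping created by dma-map-in.  The request
 * must exactly match a logged RT_DMA resource (virt, devaddr and len).
 */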
439 static int
440 pfc_dma_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
441 {
442 	void *virt;
443 	size_t len;
444 	uint32_t devaddr;
445 	struct fc_resource *ip;
446 
447 	if (fc_cell2int(cp->nargs) != 3)
448 		return (fc_syntax_error(cp, "nargs must be 3"));
449 
450 	virt = fc_cell2ptr(fc_arg(cp, 2));
451 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
452 	len = fc_cell2size(fc_arg(cp, 0));
453 
454 	/*
455 	 * Find if this virt matches a request we know about
456 	 */
457 	fc_lock_resource_list(rp);
458 	for (ip = rp->head; ip != NULL; ip = ip->next) {
459 		if (ip->type != RT_DMA)
460 			continue;
461 		if (ip->fc_dma_devaddr != devaddr)
462 			continue;
463 		if (ip->fc_dma_virt != virt)
464 			continue;
465 		if (len == ip->fc_dma_len)
466 			break;
467 	}
468 	fc_unlock_resource_list(rp);
469 
470 	if (ip == NULL)
471 		return (fc_priv_error(cp, "request doesn't match a "
472 		    "known dma mapping"));
473 
474 	/*
475 	 * ddi_dma_free does an implied sync ...
476 	 */
477 	if (fc_ddi_dma_free(ap, ip->fc_dma_handle))
478 		cmn_err(CE_CONT, "pfc_dma_map_out: ddi_dma_free failed!\n");
479 
480 	/*
481 	 * Tear down the physio mappings
482 	 */
483 	fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
484 
485 	/*
486 	 * remove the resource from the list and release it.
487 	 */
488 	fc_rem_resource(rp, ip);
489 	kmem_free(ip, sizeof (struct fc_resource));
490 
491 	cp->nresults = fc_int2cell(0);
492 	return (fc_success_op(ap, rp, cp));
493 }
494 
495 static struct fc_resource *
496 next_dma_resource(fco_handle_t rp)
497 {
498 	struct fc_resource *ip;
499 
500 	fc_lock_resource_list(rp);
501 	for (ip = rp->head; ip != NULL; ip = ip->next)
502 		if (ip->type == RT_DMA)
503 			break;
504 	fc_unlock_resource_list(rp);
505 
506 	return (ip);
507 }
508 
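/*
 * Called for FC_SVC_EXIT (see shared_pov[]): release any DMA mappings
 * the Fcode left behind, freeing the DMA handle, the physio mapping
 * and the resource entry for each.
 */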
509 static int
510 pfc_dma_cleanup(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
511 {
512 	struct fc_resource *ip;
513 
514 	while ((ip = next_dma_resource(rp)) != NULL) {
515 
516 		FC_DEBUG2(9, CE_CONT, "pfc_dma_cleanup: virt %x len %x\n",
517 			ip->fc_dma_virt, ip->fc_dma_len);
518 
519 		/*
520 		 * Free the dma handle
521 		 */
522 		if (fc_ddi_dma_free(ap, ip->fc_dma_handle))
523 			cmn_err(CE_CONT, "pfc_dma_cleanup: "
524 			    "ddi_dma_free failed!\n");
525 
526 		/*
527 		 * Tear down the userland mapping and free the buf header
528 		 */
529 		fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
530 
531 		fc_rem_resource(rp, ip);
532 		kmem_free(ip, sizeof (struct fc_resource));
533 	}
534 
535 	cp->nresults = fc_int2cell(0);
536 	return (fc_success_op(ap, rp, cp));
537 }
538 
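/*
 * map-in: map a region described by a PCI reg spec.  Arg 0 is the
 * size and args 1-3 are phys.hi/mid/low (assumed 1275 map-in
 * semantics); the result is the kernel virtual address.  The PCI
 * resource is allocated first, then mapped with pci_map_phys().
 */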
539 static int
540 pfc_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
541 {
542 	size_t len;
543 	int error;
544 	caddr_t virt;
545 	pci_regspec_t p, *ph;
546 	struct fc_resource *ip;
547 	ddi_device_acc_attr_t acc;
548 	ddi_acc_handle_t h;
549 
550 	if (fc_cell2int(cp->nargs) != 4)
551 		return (fc_syntax_error(cp, "nargs must be 4"));
552 
553 	if (fc_cell2int(cp->nresults) < 1)
554 		return (fc_syntax_error(cp, "nresults must be >= 1"));
555 
556 	p.pci_size_hi = 0;
557 	p.pci_size_low = len = fc_cell2size(fc_arg(cp, 0));
558 
559 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 1));
560 	p.pci_phys_mid = fc_cell2uint(fc_arg(cp, 2));
561 	p.pci_phys_low = fc_cell2uint(fc_arg(cp, 3));
562 
563 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
564 
565 	/*
566 	 * Fcode expects the bytes not to be swapped.
567 	 */
568 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
569 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
570 
571 	/*
572 	 * First we need to allocate the PCI resource.
573 	 */
574 	error = pci_alloc_resource(rp->child, p);
575 
576 	if (error)  {
577 		return (fc_priv_error(cp, "pci map-in failed"));
578 	}
579 
580 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
581 
582 	if (error)  {
583 		return (fc_priv_error(cp, "pci map-in failed"));
584 	}
585 
586 	cp->nresults = fc_int2cell(1);
587 	fc_result(cp, 0) = fc_ptr2cell(virt);
588 
589 	/*
590 	 * Log this resource ...
591 	 */
592 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
593 	ip->type = RT_MAP;
594 	ip->fc_map_virt = virt;
595 	ip->fc_map_len = len;
596 	ip->fc_map_handle = h;
597 	ph = kmem_zalloc(sizeof (pci_regspec_t), KM_SLEEP);
598 	*ph = p;
599 	ip->fc_regspec = ph;	/* cache a copy of the reg spec */
600 	fc_add_resource(rp, ip);
601 
602 	return (fc_success_op(ap, rp, cp));
603 }
604 
605 static int
606 pfc_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
607 {
608 	caddr_t virt;
609 	size_t len;
610 	struct fc_resource *ip;
611 
612 	if (fc_cell2int(cp->nargs) != 2)
613 		return (fc_syntax_error(cp, "nargs must be 2"));
614 
615 	virt = fc_cell2ptr(fc_arg(cp, 1));
616 
617 	len = fc_cell2size(fc_arg(cp, 0));
618 
619 	/*
620 	 * Find if this request matches a mapping resource we set up.
621 	 */
622 	fc_lock_resource_list(rp);
623 	for (ip = rp->head; ip != NULL; ip = ip->next) {
624 		if (ip->type != RT_MAP)
625 			continue;
626 		if (ip->fc_map_virt != virt)
627 			continue;
628 		if (ip->fc_map_len == len)
629 			break;
630 	}
631 	fc_unlock_resource_list(rp);
632 
633 	if (ip == NULL)
634 		return (fc_priv_error(cp, "request doesn't match a "
635 		    "known mapping"));
636 
637 	pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
638 
639 	kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
640 
641 	/*
642 	 * remove the resource from the list and release it.
643 	 */
644 	fc_rem_resource(rp, ip);
645 	kmem_free(ip, sizeof (struct fc_resource));
646 
647 	cp->nresults = fc_int2cell(0);
648 	return (fc_success_op(ap, rp, cp));
649 }
650 
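/*
 * rx@/rl@/rw@/rb@: read a 64/32/16/8-bit register.  The virtual
 * address must be aligned and must fall within a mapping previously
 * established by map-in; the access uses ddi_peek so a fault is
 * caught rather than taken.
 */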
651 static int
652 pfc_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
653 {
654 	size_t len;
655 	caddr_t virt;
656 	int error;
657 	uint64_t x;
658 	uint32_t l;
659 	uint16_t w;
660 	uint8_t b;
661 	char *name = fc_cell2ptr(cp->svc_name);
662 	struct fc_resource *ip;
663 
664 	if (fc_cell2int(cp->nargs) != 1)
665 		return (fc_syntax_error(cp, "nargs must be 1"));
666 
667 	if (fc_cell2int(cp->nresults) < 1)
668 		return (fc_syntax_error(cp, "nresults must be >= 1"));
669 
670 	virt = fc_cell2ptr(fc_arg(cp, 0));
671 
672 	/*
673 	 * Determine the access width .. we can switch on the 2nd
674 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
675 	 */
676 	switch (*(name + 1)) {
677 	case 'x':	len = sizeof (x); break;
678 	case 'l':	len = sizeof (l); break;
679 	case 'w':	len = sizeof (w); break;
680 	case 'b':	len = sizeof (b); break;
681 	}
682 
683 	/*
684 	 * Check the alignment ...
685 	 */
686 	if (((intptr_t)virt & (len - 1)) != 0)
687 		return (fc_priv_error(cp, "unaligned access"));
688 
689 	/*
690 	 * Find if this virt is 'within' a request we know about
691 	 */
692 	fc_lock_resource_list(rp);
693 	for (ip = rp->head; ip != NULL; ip = ip->next) {
694 		if (ip->type != RT_MAP)
695 			continue;
696 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
697 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
698 			break;
699 	}
700 	fc_unlock_resource_list(rp);
701 
702 	if (ip == NULL)
703 		return (fc_priv_error(cp, "request not within a "
704 		    "known mapping"));
705 
706 	/*
707 	 * XXX: We need access handle versions of peek/poke to move
708 	 * beyond the prototype ... we assume that we have hardware
709 	 * byte swapping enabled for pci register access here which
710 	 * is a huge dependency on the current implementation.
711 	 */
712 	switch (len) {
713 	case sizeof (x):
714 		error = ddi_peek64(rp->child, (int64_t *)virt, (int64_t *)&x);
715 		break;
716 	case sizeof (l):
717 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&l);
718 		break;
719 	case sizeof (w):
720 		error = ddi_peek16(rp->child, (int16_t *)virt, (int16_t *)&w);
721 		break;
722 	case sizeof (b):
723 		error = ddi_peek8(rp->child, (int8_t *)virt, (int8_t *)&b);
724 		break;
725 	}
726 
727 	if (error) {
728 		return (fc_priv_error(cp, "access error"));
729 	}
730 
731 	cp->nresults = fc_int2cell(1);
732 	switch (len) {
733 	case sizeof (x): fc_result(cp, 0) = x; break;
734 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
735 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
736 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
737 	}
738 	return (fc_success_op(ap, rp, cp));
739 }
740 
741 static int
742 pfc_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
743 {
744 	size_t len;
745 	caddr_t virt;
746 	int error;
747 	uint64_t x;
748 	uint32_t l;
749 	uint16_t w;
750 	uint8_t b;
751 	char *name = fc_cell2ptr(cp->svc_name);
752 	struct fc_resource *ip;
753 
754 	if (fc_cell2int(cp->nargs) != 2)
755 		return (fc_syntax_error(cp, "nargs must be 2"));
756 
757 	virt = fc_cell2ptr(fc_arg(cp, 0));
758 
759 	/*
760 	 * Determine the access width .. we can switch on the 2nd
761 	 * character of the name which is "rx!", "rl!", "rw!" or "rb!"
762 	 */
763 	switch (*(name + 1)) {
764 	case 'x': len = sizeof (x); x = fc_arg(cp, 1); break;
765 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
766 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
767 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
768 	}
769 
770 	/*
771 	 * Check the alignment ...
772 	 */
773 	if (((intptr_t)virt & (len - 1)) != 0)
774 		return (fc_priv_error(cp, "unaligned access"));
775 
776 	/*
777 	 * Find if this virt is 'within' a request we know about
778 	 */
779 	fc_lock_resource_list(rp);
780 	for (ip = rp->head; ip != NULL; ip = ip->next) {
781 		if (ip->type != RT_MAP)
782 			continue;
783 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
784 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
785 			break;
786 	}
787 	fc_unlock_resource_list(rp);
788 
789 	if (ip == NULL)
790 		return (fc_priv_error(cp, "request not within a "
791 		    "known mapping"));
792 
793 	/*
794 	 * XXX: We need access handle versions of peek/poke to move
795 	 * beyond the prototype ... we assume that we have hardware
796 	 * byte swapping enabled for pci register access here which
797 	 * is a huge dependency on the current implementation.
798 	 */
799 	switch (len) {
800 	case sizeof (x):
801 		error = ddi_poke64(rp->child, (int64_t *)virt, x);
802 		break;
803 	case sizeof (l):
804 		error = ddi_poke32(rp->child, (int32_t *)virt, l);
805 		break;
806 	case sizeof (w):
807 		error = ddi_poke16(rp->child, (int16_t *)virt, w);
808 		break;
809 	case sizeof (b):
810 		error = ddi_poke8(rp->child, (int8_t *)virt, b);
811 		break;
812 	}
813 
814 	if (error) {
815 		return (fc_priv_error(cp, "access error"));
816 	}
817 
818 	cp->nresults = fc_int2cell(0);
819 	return (fc_success_op(ap, rp, cp));
820 }
821 
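/*
 * config-l@/config-w@/config-b@: read PCI configuration space.  The
 * config address (phys.hi) supplies the register offset; config space
 * is mapped temporarily, probed at offset 0 to make sure the device
 * responds, read, and unmapped again.
 */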
822 static int
823 pfc_config_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
824 {
825 	caddr_t virt, v;
826 	int error, reg, flags = 0;
827 	size_t len;
828 	uint32_t l, tmp;
829 	uint16_t w;
830 	uint8_t b;
831 	char *name = fc_cell2ptr(cp->svc_name);
832 	pci_regspec_t p;
833 	ddi_device_acc_attr_t acc;
834 	ddi_acc_handle_t h;
835 
836 	if (fc_cell2int(cp->nargs) != 1)
837 		return (fc_syntax_error(cp, "nargs must be 1"));
838 
839 	if (fc_cell2int(cp->nresults) < 1)
840 		return (fc_syntax_error(cp, "nresults must be >= 1"));
841 
842 	/*
843 	 * Construct a config address pci reg property from the args.
844 	 * arg[0] is the configuration address.
845 	 */
846 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
847 	p.pci_phys_mid = p.pci_phys_low = 0;
848 	p.pci_size_hi = p.pci_size_low = 0;
849 
850 	/*
851 	 * Verify that the address is a configuration space address:
852 	 * ss must be zero, and the n, p and t bits must be zero.
853 	 */
854 	if (((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) ||
855 	    ((p.pci_phys_hi & PCI_NPT_bits) != 0)) {
856 		cmn_err(CE_CONT, "pfc_config_fetch: "
857 		    "invalid config addr: %x\n", p.pci_phys_hi);
858 		return (fc_priv_error(cp, "non-config addr"));
859 	}
860 
861 	/*
862 	 * Extract the register number from the config address and
863 	 * remove the register number from the physical address.
864 	 */
865 	reg = p.pci_phys_hi & PCI_REG_REG_M;
866 	p.pci_phys_hi &= ~PCI_REG_REG_M;
867 
868 	/*
869 	 * Determine the access width .. we can switch on the 8th
870 	 * character of the name which is "config-{l,w,b}@"
871 	 */
872 	switch (*(name + 7)) {
873 	case 'l':	len = sizeof (l); break;
874 	case 'w':	len = sizeof (w); break;
875 	case 'b':	len = sizeof (b); break;
876 	}
877 
878 	/*
879 	 * Verify that the access is properly aligned
880 	 */
881 	if ((reg & (len - 1)) != 0)
882 		return (fc_priv_error(cp, "unaligned access"));
883 
884 	/*
885 	 * Map in configuration space (temporarily)
886 	 */
887 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
888 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
889 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
890 
891 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
892 
893 	if (error)  {
894 		return (fc_priv_error(cp, "pci config map-in failed"));
895 	}
896 
897 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
898 		flags |= PCICFG_CONF_INDIRECT_MAP;
899 
900 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
901 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
902 		error = DDI_SUCCESS;
903 	} else
904 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
905 
906 	if (error == DDI_SUCCESS)
907 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
908 			error = DDI_FAILURE;
909 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
910 		}
911 
912 	if (error != DDI_SUCCESS) {
913 		return (fc_priv_error(cp, "pci config fetch failed"));
914 	}
915 
916 
917 	/*
918 	 * XXX: We need access handle versions of peek/poke to move
919 	 * beyond the prototype ... we assume that we have hardware
920 	 * byte swapping enabled for pci register access here which
921 	 * is a huge dependency on the current implementation.
922 	 */
923 	v = virt + reg;
924 	switch (len) {
925 	case sizeof (l):
926 		l = (int32_t)ddi_get32(h, (uint32_t *)v);
927 		break;
928 	case sizeof (w):
929 		w = (int16_t)ddi_get16(h, (uint16_t *)v);
930 		break;
931 	case sizeof (b):
932 		b = (int8_t)ddi_get8(h, (uint8_t *)v);
933 		break;
934 	}
935 
936 	/*
937 	 * Remove the temporary config space mapping
938 	 */
939 	pci_unmap_phys(&h, &p);
940 
941 	if (error) {
942 		return (fc_priv_error(cp, "access error"));
943 	}
944 
945 	cp->nresults = fc_int2cell(1);
946 	switch (len) {
947 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
948 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
949 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
950 	}
951 
952 	return (fc_success_op(ap, rp, cp));
953 }
954 
955 static int
956 pfc_config_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
957 {
958 	caddr_t virt, v;
959 	int error, reg, flags = 0;
960 	size_t len;
961 	uint32_t l, tmp;
962 	uint16_t w;
963 	uint8_t b;
964 	char *name = fc_cell2ptr(cp->svc_name);
965 	pci_regspec_t p;
966 	ddi_device_acc_attr_t acc;
967 	ddi_acc_handle_t h;
968 
969 	if (fc_cell2int(cp->nargs) != 2)
970 		return (fc_syntax_error(cp, "nargs must be 2"));
971 
972 	/*
973 	 * Construct a config address pci reg property from the args.
974 	 * arg[0] is the configuration address. arg[1] is the data.
975 	 */
976 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
977 	p.pci_phys_mid = p.pci_phys_low = 0;
978 	p.pci_size_hi = p.pci_size_low = 0;
979 
980 	/*
981 	 * Verify that the address is a configuration space address:
982 	 * ss must be zero, and the n, p and t bits must be zero.
983 	 */
984 	if (((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) ||
985 	    ((p.pci_phys_hi & PCI_NPT_bits) != 0)) {
986 		cmn_err(CE_CONT, "pfc_config_store: "
987 		    "invalid config addr: %x\n", p.pci_phys_hi);
988 		return (fc_priv_error(cp, "non-config addr"));
989 	}
990 
991 	/*
992 	 * Extract the register number from the config address and
993 	 * remove the register number from the physical address.
994 	 */
995 	reg = p.pci_phys_hi & PCI_REG_REG_M;
996 	p.pci_phys_hi &= ~PCI_REG_REG_M;
997 
998 	/*
999 	 * Determine the access width .. we can switch on the 8th
1000 	 * character of the name which is "config-{l,w,b}!"
1001 	 */
1002 	switch (*(name + 7)) {
1003 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
1004 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
1005 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
1006 	}
1007 
1008 	/*
1009 	 * Verify that the access is properly aligned
1010 	 */
1011 	if ((reg & (len - 1)) != 0)
1012 		return (fc_priv_error(cp, "unaligned access"));
1013 
1014 	/*
1015 	 * Map in configuration space (temporarily)
1016 	 */
1017 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1018 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1019 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1020 
1021 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
1022 
1023 	if (error)  {
1024 		return (fc_priv_error(cp, "pci config map-in failed"));
1025 	}
1026 
1027 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
1028 		flags |= PCICFG_CONF_INDIRECT_MAP;
1029 
1030 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1031 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1032 		error = DDI_SUCCESS;
1033 	} else
1034 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
1035 
1036 	if (error == DDI_SUCCESS)
1037 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1038 			error = DDI_FAILURE;
1039 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
1040 		}
1041 
1042 	if (error != DDI_SUCCESS) {
1043 		return (fc_priv_error(cp, "pci config store failed"));
1044 	}
1045 
1046 
1047 	/*
1048 	 * XXX: We need access handle versions of peek/poke to move
1049 	 * beyond the prototype ... we assume that we have hardware
1050 	 * byte swapping enabled for pci register access here which
1051 	 * is a huge dependency on the current implementation.
1052 	 */
1053 	v = virt + reg;
1054 	switch (len) {
1055 	case sizeof (l):
1056 		ddi_put32(h, (uint32_t *)v, (uint32_t)l);
1057 		break;
1058 	case sizeof (w):
1059 		ddi_put16(h, (uint16_t *)v, (uint16_t)w);
1060 		break;
1061 	case sizeof (b):
1062 		ddi_put8(h, (uint8_t *)v, (uint8_t)b);
1063 		break;
1064 	}
1065 
1066 	/*
1067 	 * Remove the temporary config space mapping
1068 	 */
1069 	pci_unmap_phys(&h, &p);
1070 
1071 	if (error) {
1072 		return (fc_priv_error(cp, "access error"));
1073 	}
1074 
1075 	cp->nresults = fc_int2cell(0);
1076 	return (fc_success_op(ap, rp, cp));
1077 }
1078 
1079 
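/*
 * pfc_get_fcode and pfc_get_fcode_size copy a named Fcode image, and
 * its size, out of the firmware for the caller: the name is copied in,
 * prom_get_fcode{_size}() is called, and the image is copied back out
 * to the caller's buffer.
 */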
1080 static int
1081 pfc_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1082 {
1083 	caddr_t name_virt, fcode_virt;
1084 	char *name, *fcode;
1085 	int fcode_len, status;
1086 
1087 	if (fc_cell2int(cp->nargs) != 3)
1088 		return (fc_syntax_error(cp, "nargs must be 3"));
1089 
1090 	if (fc_cell2int(cp->nresults) < 1)
1091 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1092 
1093 	name_virt = fc_cell2ptr(fc_arg(cp, 0));
1094 
1095 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1096 
1097 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1098 
1099 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1100 
1101 	if (copyinstr(fc_cell2ptr(name_virt), name,
1102 	    FC_SVC_NAME_LEN - 1, NULL))  {
1103 		status = 0;
1104 	} else {
1105 
1106 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1107 
1108 		if ((status = prom_get_fcode(name, fcode)) != 0) {
1109 
1110 			if (copyout((void *)fcode, (void *)fcode_virt,
1111 			    fcode_len)) {
1112 				cmn_err(CE_WARN, " pfc_get_fcode: Unable "
1113 				    "to copy out fcode image\n");
1114 				status = 0;
1115 			}
1116 		}
1117 
1118 		kmem_free(fcode, fcode_len);
1119 	}
1120 
1121 	kmem_free(name, FC_SVC_NAME_LEN);
1122 
1123 	cp->nresults = fc_int2cell(1);
1124 	fc_result(cp, 0) = status;
1125 
1126 	return (fc_success_op(ap, rp, cp));
1127 }
1128 
1129 static int
1130 pfc_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1131 {
1132 	caddr_t virt;
1133 	char *name;
1134 	int len;
1135 
1136 	if (fc_cell2int(cp->nargs) != 1)
1137 		return (fc_syntax_error(cp, "nargs must be 1"));
1138 
1139 	if (fc_cell2int(cp->nresults) < 1)
1140 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1141 
1142 	virt = fc_cell2ptr(fc_arg(cp, 0));
1143 
1144 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1145 
1146 	if (copyinstr(fc_cell2ptr(virt), name,
1147 	    FC_SVC_NAME_LEN - 1, NULL))  {
1148 		len = 0;
1149 	} else {
1150 		len = prom_get_fcode_size(name);
1151 	}
1152 
1153 	kmem_free(name, FC_SVC_NAME_LEN);
1154 
1155 	cp->nresults = fc_int2cell(1);
1156 	fc_result(cp, 0) = len;
1157 
1158 	return (fc_success_op(ap, rp, cp));
1159 }
1160 
1161 /*
1162  * Return the physical probe address: phys.lo=0, phys.mid=0; phys.hi
1163  * is the config address returned by probe-space.
1164  */
1164 static int
1165 pfc_probe_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1166 {
1167 	if (fc_cell2int(cp->nargs) != 0)
1168 		return (fc_syntax_error(cp, "nargs must be 0"));
1169 
1170 	if (fc_cell2int(cp->nresults) < 2)
1171 		return (fc_syntax_error(cp, "nresults must be >= 2"));
1172 
1173 	cp->nresults = fc_int2cell(2);
1174 	fc_result(cp, 1) = fc_int2cell(0);	/* phys.lo */
1175 	fc_result(cp, 0) = fc_int2cell(0);	/* phys.mid */
1176 
1177 	return (fc_success_op(ap, rp, cp));
1178 }
1179 
1180 /*
1181  * Return the phys.hi component of the probe address.
1182  */
1183 static int
1184 pfc_probe_space(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1185 {
1186 	struct pci_ops_bus_args *ba = rp->bus_args;
1187 
1188 	ASSERT(ba);
1189 
1190 	if (fc_cell2int(cp->nargs) != 0)
1191 		return (fc_syntax_error(cp, "nargs must be 0"));
1192 
1193 	if (fc_cell2int(cp->nresults) < 1)
1194 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1195 
1196 	cp->nresults = fc_int2cell(1);
1197 	fc_result(cp, 0) = fc_uint32_t2cell(ba->config_address); /* phys.hi */
1198 
1199 	return (fc_success_op(ap, rp, cp));
1200 }
1201 
1202 static int
1203 pfc_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1204 {
1205 	fc_phandle_t h;
1206 
1207 	if (fc_cell2int(cp->nargs) != 0)
1208 		return (fc_syntax_error(cp, "nargs must be 0"));
1209 
1210 	if (fc_cell2int(cp->nresults) < 1)
1211 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1212 
1213 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1214 
1215 	cp->nresults = fc_int2cell(1);
1216 	fc_result(cp, 0) = fc_phandle2cell(h);
1217 
1218 	return (fc_success_op(ap, rp, cp));
1219 }
1220 
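/*
 * Carve a chunk of PCI memory or I/O space out of the parent's ndi_ra
 * resource maps, bounded below 4GB and aligned as requested.
 */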
1221 int
1222 pci_alloc_mem_chunk(dev_info_t *dip, uint64_t mem_align, uint64_t *mem_size,
1223     uint64_t *mem_answer)
1224 {
1225 	ndi_ra_request_t req;
1226 	int rval;
1227 
1228 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1229 	req.ra_flags = NDI_RA_ALLOC_BOUNDED;
1230 	req.ra_boundbase = 0;
1231 	req.ra_boundlen = PCI_4GIG_LIMIT;
1232 	req.ra_len = *mem_size;
1233 	req.ra_align_mask = mem_align - 1;
1234 
1235 	rval = ndi_ra_alloc(dip, &req, mem_answer, mem_size,
1236 	    NDI_RA_TYPE_MEM, NDI_RA_PASS);
1237 
1238 	return (rval);
1239 }
1240 int
1241 pci_alloc_io_chunk(dev_info_t *dip, uint64_t io_align, uint64_t *io_size,
1242     uint64_t *io_answer)
1243 {
1244 	ndi_ra_request_t req;
1245 	int rval;
1246 
1247 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1248 	req.ra_flags = (NDI_RA_ALLOC_BOUNDED | NDI_RA_ALLOC_PARTIAL_OK);
1249 	req.ra_boundbase = 0;
1250 	req.ra_boundlen = PCI_4GIG_LIMIT;
1251 	req.ra_len = *io_size;
1252 	req.ra_align_mask = io_align - 1;
1253 
1254 	rval = ndi_ra_alloc(dip, &req, io_answer, io_size,
1255 	    NDI_RA_TYPE_IO, NDI_RA_PASS);
1256 
1257 	return (rval);
1258 }
1259 
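/*
 * Allocate (and program) the address space behind one reg entry: if a
 * matching assigned-addresses entry already covers it, nothing is
 * done; otherwise the BAR (or expansion ROM base) is programmed with
 * space obtained from the parent's ndi_ra maps and the
 * assigned-addresses property is updated.
 */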
1260 int
1261 pci_alloc_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1262 {
1263 	uint64_t answer;
1264 	uint64_t alen;
1265 	int offset, tmp;
1266 	pci_regspec_t config;
1267 	caddr_t virt, v;
1268 	ddi_device_acc_attr_t acc;
1269 	ddi_acc_handle_t h;
1270 	ndi_ra_request_t request;
1271 	pci_regspec_t *assigned;
1272 	int assigned_len, entries, i, l, flags = 0, error;
1273 
1274 	l = phys_spec.pci_size_low;
1275 
1276 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1277 	    DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&assigned,
1278 	    &assigned_len) == DDI_PROP_SUCCESS) {
1279 
1280 		entries = assigned_len / (sizeof (pci_regspec_t));
1281 
1282 		/*
1283 		 * Walk through the assigned-addresses entries. If there is
1284 		 * a match, there is no need to allocate the resource.
1285 		 */
1286 		for (i = 0; i < entries; i++) {
1287 			if (assigned[i].pci_phys_hi == phys_spec.pci_phys_hi) {
1288 				if (assigned[i].pci_size_low >=
1289 				    phys_spec.pci_size_low) {
1290 					kmem_free(assigned, assigned_len);
1291 					return (0);
1292 				}
1293 				/*
1294 				 * Fcode wants to assign more than what
1295 				 * probe found.
1296 				 */
1297 				(void) pci_free_resource(dip, assigned[i]);
1298 				/*
1299 				 * Go on to allocate resources.
1300 				 */
1301 				break;
1302 			}
1303 			/*
1304 			 * Check if Fcode wants to map using different
1305 			 * NPT bits.
1306 			 */
1307 			if (PCI_REG_BDFR_G(assigned[i].pci_phys_hi) ==
1308 			    PCI_REG_BDFR_G(phys_spec.pci_phys_hi)) {
1309 				/*
1310 				 * It is an error to change SS bits
1311 				 */
1312 				if (PCI_REG_ADDR_G(assigned[i].pci_phys_hi) !=
1313 				    PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1314 
1315 					cmn_err(CE_WARN, "Fcode changing ss "
1316 					    "bits in reg %x -- %x",
1317 					    assigned[i].pci_phys_hi,
1318 					    phys_spec.pci_phys_hi);
1319 
1320 					kmem_free(assigned, assigned_len);
1321 					return (1);
1322 				}
1323 
1324 				/*
1325 				 * Allocate enough
1326 				 */
1327 				l = MAX(assigned[i].pci_size_low,
1328 				    phys_spec.pci_size_low);
1329 
1330 				(void) pci_free_resource(dip, assigned[i]);
1331 				/*
1332 				 * Go on to allocate resources.
1333 				 */
1334 				break;
1335 			}
1336 		}
1337 		kmem_free(assigned, assigned_len);
1338 	}
1339 
1340 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1341 
1342 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1343 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1344 	config.pci_phys_mid = config.pci_phys_low = 0;
1345 	config.pci_size_hi = config.pci_size_low = 0;
1346 
1347 	/*
1348 	 * Map in configuration space (temporarily)
1349 	 */
1350 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1351 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1352 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1353 
1354 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1355 		return (1);
1356 	}
1357 
1358 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1359 		flags |= PCICFG_CONF_INDIRECT_MAP;
1360 
1361 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1362 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1363 		error = DDI_SUCCESS;
1364 	} else
1365 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1366 
1367 	if (error == DDI_SUCCESS)
1368 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1369 			error = DDI_FAILURE;
1370 		}
1371 
1372 	if (error != DDI_SUCCESS) {
1373 		return (1);
1374 	}
1375 
1376 	request.ra_flags |= NDI_RA_ALIGN_SIZE;
1377 	request.ra_boundbase = 0;
1378 	request.ra_boundlen = PCI_4GIG_LIMIT;
1379 
1380 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1381 
1382 	v = virt + offset;
1383 
1384 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1385 		request.ra_len = l;
1386 		request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1387 
1388 		/* allocate memory space from the allocator */
1389 
1390 		if (ndi_ra_alloc(ddi_get_parent(dip),
1391 			&request, &answer, &alen,
1392 			NDI_RA_TYPE_MEM, NDI_RA_PASS)
1393 					!= NDI_SUCCESS) {
1394 			pci_unmap_phys(&h, &config);
1395 			return (1);
1396 		}
1397 		FC_DEBUG3(1, CE_CONT, "ROM addr = [0x%x.%x] len [0x%x]\n",
1398 			HIADDR(answer),
1399 			LOADDR(answer),
1400 			alen);
1401 
1402 		/* program the low word */
1403 
1404 		ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1405 
1406 		phys_spec.pci_phys_low = LOADDR(answer);
1407 		phys_spec.pci_phys_mid = HIADDR(answer);
1408 	} else {
1409 		request.ra_len = l;
1410 
1411 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1412 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1413 			request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1414 
1415 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1416 				/*
1417 				 * If it is a non relocatable address,
1418 				 * then specify the address we want.
1419 				 */
1420 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1421 				request.ra_addr = (uint64_t)LADDR(
1422 				    phys_spec.pci_phys_low,
1423 				    phys_spec.pci_phys_mid);
1424 			}
1425 
1426 			/* allocate memory space from the allocator */
1427 
1428 			if (ndi_ra_alloc(ddi_get_parent(dip),
1429 				&request, &answer, &alen,
1430 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1431 						!= NDI_SUCCESS) {
1432 				pci_unmap_phys(&h, &config);
1433 				if (request.ra_flags ==
1434 				    NDI_RA_ALLOC_SPECIFIED)
1435 					cmn_err(CE_WARN, "Unable to allocate "
1436 					    "non relocatable address 0x%p\n",
1437 					    (void *) request.ra_addr);
1438 				return (1);
1439 			}
1440 			FC_DEBUG3(1, CE_CONT,
1441 			    "64 addr = [0x%x.%x] len [0x%x]\n",
1442 			    HIADDR(answer),
1443 			    LOADDR(answer),
1444 			    alen);
1445 
1446 			/* program the low word */
1447 
1448 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1449 
1450 			/* program the high word */
1451 			v += 4;
1452 			ddi_put32(h, (uint32_t *)v, HIADDR(answer));
1453 
1454 			phys_spec.pci_phys_low = LOADDR(answer);
1455 			phys_spec.pci_phys_mid = HIADDR(answer);
1456 
1457 			break;
1458 
1459 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1460 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1461 
1462 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1463 				/*
1464 				 * If it is a non relocatable address,
1465 				 * then specify the address we want.
1466 				 */
1467 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1468 				request.ra_addr = (uint64_t)
1469 				    phys_spec.pci_phys_low;
1470 			}
1471 
1472 			/* allocate memory space from the allocator */
1473 
1474 			if (ndi_ra_alloc(ddi_get_parent(dip),
1475 				&request, &answer, &alen,
1476 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1477 						!= NDI_SUCCESS) {
1478 				pci_unmap_phys(&h, &config);
1479 				if (request.ra_flags ==
1480 				    NDI_RA_ALLOC_SPECIFIED)
1481 					cmn_err(CE_WARN, "Unable to allocate "
1482 					    "non relocatable address 0x%p\n",
1483 					    (void *) request.ra_addr);
1484 				return (1);
1485 			}
1486 
1487 			FC_DEBUG3(1, CE_CONT,
1488 			    "32 addr = [0x%x.%x] len [0x%x]\n",
1489 			    HIADDR(answer),
1490 			    LOADDR(answer),
1491 			    alen);
1492 
1493 			/* program the low word */
1494 
1495 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1496 
1497 			phys_spec.pci_phys_low = LOADDR(answer);
1498 
1499 			break;
1500 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1501 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1502 
1503 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1504 				/*
1505 				 * If it is a non relocatable address,
1506 				 * then specify the address we want.
1507 				 */
1508 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1509 				request.ra_addr = (uint64_t)
1510 				    phys_spec.pci_phys_low;
1511 			}
1512 
1513 			/* allocate I/O space from the allocator */
1514 
1515 			if (ndi_ra_alloc(ddi_get_parent(dip),
1516 				&request, &answer, &alen,
1517 				NDI_RA_TYPE_IO, NDI_RA_PASS)
1518 						!= NDI_SUCCESS) {
1519 				pci_unmap_phys(&h, &config);
1520 				if (request.ra_flags ==
1521 				    NDI_RA_ALLOC_SPECIFIED)
1522 					cmn_err(CE_WARN, "Unable to allocate "
1523 					    "non relocatable IO Space 0x%p\n",
1524 					    (void *) request.ra_addr);
1525 				return (1);
1526 			}
1527 			FC_DEBUG3(1, CE_CONT,
1528 			    "I/O addr = [0x%x.%x] len [0x%x]\n",
1529 			    HIADDR(answer),
1530 			    LOADDR(answer),
1531 			    alen);
1532 
1533 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1534 
1535 			phys_spec.pci_phys_low = LOADDR(answer);
1536 
1537 			break;
1538 		default:
1539 			pci_unmap_phys(&h, &config);
1540 			return (1);
1541 		} /* switch */
1542 	}
1543 
1544 	/*
1545 	 * Now that memory locations are assigned,
1546 	 * update the assigned address property.
1547 	 */
1548 	if (pfc_update_assigned_prop(dip, &phys_spec)) {
1549 		pci_unmap_phys(&h, &config);
1550 		return (1);
1551 	}
1552 
1553 	pci_unmap_phys(&h, &config);
1554 
1555 	return (0);
1556 }
1557 
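/*
 * Undo pci_alloc_resource(): give the space behind one reg entry back
 * to the parent's ndi_ra maps and drop the corresponding
 * assigned-addresses entry.
 */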
1558 int
1559 pci_free_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1560 {
1561 	int offset, tmp;
1562 	pci_regspec_t config;
1563 	caddr_t virt, v;
1564 	ddi_device_acc_attr_t acc;
1565 	ddi_acc_handle_t h;
1566 	ndi_ra_request_t request;
1567 	int l, error, flags = 0;
1568 
1569 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1570 
1571 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1572 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1573 	config.pci_phys_mid = config.pci_phys_low = 0;
1574 	config.pci_size_hi = config.pci_size_low = 0;
1575 
1576 	/*
1577 	 * Map in configuration space (temporarily)
1578 	 */
1579 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1580 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1581 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1582 
1583 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1584 		return (1);
1585 	}
1586 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1587 		flags |= PCICFG_CONF_INDIRECT_MAP;
1588 
1589 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1590 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1591 		error = DDI_SUCCESS;
1592 	} else
1593 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1594 
1595 	if (error == DDI_SUCCESS)
1596 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1597 			error = DDI_FAILURE;
1598 		}
1599 	if (error != DDI_SUCCESS) {
1600 		return (1);
1601 	}
1602 
1603 
1604 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1605 
1606 	v = virt + offset;
1607 
1608 	/*
1609 	 * Pick up the size to be freed. It may be different from
1610 	 * what probe finds.
1611 	 */
1612 	l = phys_spec.pci_size_low;
1613 
1614 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1615 		/* free memory back to the allocator */
1616 		if (ndi_ra_free(ddi_get_parent(dip), phys_spec.pci_phys_low,
1617 		    l, NDI_RA_TYPE_MEM,
1618 		    NDI_RA_PASS) != NDI_SUCCESS) {
1619 			pci_unmap_phys(&h, &config);
1620 			return (1);
1621 		}
1622 
1623 		/* Unmap the BAR by writing a zero */
1624 
1625 		ddi_put32(h, (uint32_t *)v, 0);
1626 	} else {
1627 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1628 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1629 			/* free memory back to the allocator */
1630 			if (ndi_ra_free(ddi_get_parent(dip),
1631 			    LADDR(phys_spec.pci_phys_low,
1632 			    phys_spec.pci_phys_mid),
1633 			    l, NDI_RA_TYPE_MEM,
1634 			    NDI_RA_PASS) != NDI_SUCCESS) {
1635 				pci_unmap_phys(&h, &config);
1636 				return (1);
1637 			}
1638 
1639 			break;
1640 
1641 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1642 			/* free memory back to the allocator */
1643 			if (ndi_ra_free(ddi_get_parent(dip),
1644 			    phys_spec.pci_phys_low,
1645 			    l, NDI_RA_TYPE_MEM,
1646 			    NDI_RA_PASS) != NDI_SUCCESS) {
1647 				pci_unmap_phys(&h, &config);
1648 				return (1);
1649 			}
1650 
1651 			break;
1652 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1653 			/* free I/O space back to the allocator */
1654 			if (ndi_ra_free(ddi_get_parent(dip),
1655 			    phys_spec.pci_phys_low,
1656 			    l, NDI_RA_TYPE_IO,
1657 			    NDI_RA_PASS) != NDI_SUCCESS) {
1658 				pci_unmap_phys(&h, &config);
1659 				return (1);
1660 			}
1661 			break;
1662 		default:
1663 			pci_unmap_phys(&h, &config);
1664 			return (1);
1665 		} /* switch */
1666 	}
1667 
1668 	/*
1669 	 * Now that the resource has been freed,
1670 	 * remove it from the assigned-addresses property.
1671 	 */
1672 
1673 	FC_DEBUG1(1, CE_CONT, "removing assigned-addresses entry for %x\n",
1674 	    phys_spec.pci_phys_hi);
1675 
1676 	if (pfc_remove_assigned_prop(dip, &phys_spec)) {
1677 		pci_unmap_phys(&h, &config);
1678 		return (1);
1679 	}
1680 
1681 	pci_unmap_phys(&h, &config);
1682 
1683 	return (0);
1684 }
1685 
1686 
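/*
 * Map a pci_regspec_t with a freshly allocated access handle via
 * ddi_map(); pci_unmap_phys() below undoes the mapping and frees the
 * handle.
 */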
1687 int
1688 pci_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
1689 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1690 	ddi_acc_handle_t *handlep)
1691 {
1692 	ddi_map_req_t mr;
1693 	ddi_acc_hdl_t *hp;
1694 	int result;
1695 
1696 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1697 	hp = impl_acc_hdl_get(*handlep);
1698 	hp->ah_vers = VERS_ACCHDL;
1699 	hp->ah_dip = dip;
1700 	hp->ah_rnumber = 0;
1701 	hp->ah_offset = 0;
1702 	hp->ah_len = 0;
1703 	hp->ah_acc = *accattrp;
1704 
1705 	mr.map_op = DDI_MO_MAP_LOCKED;
1706 	mr.map_type = DDI_MT_REGSPEC;
1707 	mr.map_obj.rp = (struct regspec *)phys_spec;
1708 	mr.map_prot = PROT_READ | PROT_WRITE;
1709 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1710 	mr.map_handlep = hp;
1711 	mr.map_vers = DDI_MAP_VERSION;
1712 
1713 	result = ddi_map(dip, &mr, 0, 0, addrp);
1714 
1715 	if (result != DDI_SUCCESS) {
1716 		impl_acc_hdl_free(*handlep);
1717 		*handlep = (ddi_acc_handle_t)NULL;
1718 	} else {
1719 		hp->ah_addr = *addrp;
1720 	}
1721 
1722 	return (result);
1723 }
1724 
1725 void
1726 pci_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
1727 {
1728 	ddi_map_req_t mr;
1729 	ddi_acc_hdl_t *hp;
1730 
1731 	hp = impl_acc_hdl_get(*handlep);
1732 	ASSERT(hp);
1733 
1734 	mr.map_op = DDI_MO_UNMAP;
1735 	mr.map_type = DDI_MT_REGSPEC;
1736 	mr.map_obj.rp = (struct regspec *)ph;
1737 	mr.map_prot = PROT_READ | PROT_WRITE;
1738 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1739 	mr.map_handlep = hp;
1740 	mr.map_vers = DDI_MAP_VERSION;
1741 
1742 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1743 		hp->ah_len, &hp->ah_addr);
1744 
1745 	impl_acc_hdl_free(*handlep);
1746 
1747 
1748 	*handlep = (ddi_acc_handle_t)NULL;
1749 }
1750 
1751 int
1752 pfc_update_assigned_prop(dev_info_t *dip, pci_regspec_t *newone)
1753 {
1754 	int		alen;
1755 	pci_regspec_t	*assigned;
1756 	caddr_t		newreg;
1757 	uint_t		status;
1758 
1759 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1760 		"assigned-addresses", (caddr_t)&assigned, &alen);
1761 	switch (status) {
1762 		case DDI_PROP_SUCCESS:
1763 		break;
1764 		case DDI_PROP_NO_MEMORY:
1765 			return (1);
1766 		default:
1767 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1768 			"assigned-addresses", (int *)newone,
1769 				sizeof (*newone)/sizeof (int));
1770 			return (0);
1771 	}
1772 
1773 	/*
1774 	 * Allocate memory for the existing assigned-addresses
1775 	 * entries plus one more, and then build the new
1776 	 * property.
1777 	 */
1778 
1779 	newreg = kmem_zalloc(alen+sizeof (*newone), KM_SLEEP);
1780 
1781 	bcopy(assigned, newreg, alen);
1782 	bcopy(newone, newreg + alen, sizeof (*newone));
1783 
1784 	/*
1785 	 * Write out the new "assigned-addresses" spec
1786 	 */
1787 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1788 		"assigned-addresses", (int *)newreg,
1789 		(alen + sizeof (*newone))/sizeof (int));
1790 
1791 	kmem_free((caddr_t)newreg, alen+sizeof (*newone));
1792 
1793 	return (0);
1794 }
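
/*
 * Remove the entry matching oldone's phys.hi from the child's
 * "assigned-addresses" property, deleting the property outright if it
 * becomes empty.
 */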
1795 int
1796 pfc_remove_assigned_prop(dev_info_t *dip, pci_regspec_t *oldone)
1797 {
1798 	int		alen, new_len, num_entries, i;
1799 	pci_regspec_t	*assigned;
1800 	uint_t		status;
1801 
1802 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1803 		"assigned-addresses", (caddr_t)&assigned, &alen);
1804 	switch (status) {
1805 		case DDI_PROP_SUCCESS:
1806 		break;
1807 		case DDI_PROP_NO_MEMORY:
1808 			return (1);
1809 		default:
1810 			return (0);
1811 	}
1812 
1813 	num_entries = alen / sizeof (pci_regspec_t);
1814 	new_len = alen - sizeof (pci_regspec_t);
1815 
1816 	/*
1817 	 * Search for the memory being removed.
1818 	 */
1819 	for (i = 0; i < num_entries; i++) {
1820 		if (assigned[i].pci_phys_hi == oldone->pci_phys_hi) {
1821 			if (new_len == 0) {
1822 				(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1823 				    "assigned-addresses");
1824 				break;
1825 			}
1826 			if ((new_len - (i * sizeof (pci_regspec_t)))
1827 			    == 0) {
1828 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1829 				    "%x removed from property (last entry)\n",
1830 				    oldone->pci_phys_hi);
1831 			} else {
1832 				bcopy((void *)(assigned + i + 1),
1833 				    (void *)(assigned + i),
1834 				    (new_len - (i * sizeof (pci_regspec_t))));
1835 
1836 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1837 				    "%x removed from property\n",
1838 				    oldone->pci_phys_hi);
1839 			}
1840 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
1841 			    dip, "assigned-addresses", (int *)assigned,
1842 			    (new_len/sizeof (int)));
1843 
1844 			break;
1845 		}
1846 	}
1847 
1848 	return (0);
1849 }
1850 /*
1851  * We recognize the non-transparent bridge child nodes with the
1852  * following property. This is specific to this implementation only.
1853  * This property is specific to AP nodes only.
1854  */
1855 #define	PCICFG_DEV_CONF_MAP_PROP		"pci-parent-indirect"
1856 
1857 /*
1858  * If a non-transparent bridge drives a hotplug/hotswap bus, then
1859  * the following property must be defined for the node either by
1860  * the driver or the OBP.
1861  */
1862 #define	PCICFG_BUS_CONF_MAP_PROP		"pci-conf-indirect"
1863 
1864 /*
1865  * This function is called only for SPARC platforms, where we may have
1866  * a mix of directly and indirectly mapped configuration space.
1867  */
1868 /*ARGSUSED*/
1869 static int
1870 fcpci_indirect_map(dev_info_t *dip)
1871 {
1872 	int rc = DDI_FAILURE;
1873 
1874 	if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip), DDI_PROP_DONTPASS,
1875 			PCICFG_DEV_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1876 		rc = DDI_SUCCESS;
1877 	else
1878 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip),
1879 				DDI_PROP_DONTPASS, PCICFG_BUS_CONF_MAP_PROP,
1880 				DDI_FAILURE) != DDI_FAILURE)
1881 			rc = DDI_SUCCESS;
1882 
1883 	return (rc);
1884 }
1885