xref: /titanic_41/usr/src/uts/sun4/io/efcode/fcpci.c (revision 7d2d870ed78c1c0b10f15787cf9a400bb0b28fda)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * fcpci.c: Framework PCI fcode ops
29  */
30 #include <sys/types.h>
31 #include <sys/kmem.h>
32 #include <sys/systm.h>
33 #include <sys/pci.h>
34 #include <sys/ddi.h>
35 #include <sys/sunddi.h>
36 #include <sys/sunndi.h>
37 #include <sys/ddidmareq.h>
38 #include <sys/pci.h>
39 #include <sys/modctl.h>
40 #include <sys/ndi_impldefs.h>
41 #include <sys/fcode.h>
42 #include <sys/promif.h>
43 #include <sys/promimpl.h>
44 #include <sys/ddi_implfuncs.h>
45 
46 #define	PCI_NPT_bits		(PCI_RELOCAT_B | PCI_PREFETCH_B | PCI_ALIAS_B)
47 #define	PCI_BDF_bits		(PCI_REG_BDFR_M & ~PCI_REG_REG_M)
48 
49 #define	PCICFG_CONF_INDIRECT_MAP	1
50 
51 static int pfc_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
52 static int pfc_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
53 static int pfc_dma_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
54 static int pfc_dma_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
55 static int pfc_dma_sync(dev_info_t *, fco_handle_t, fc_ci_t *);
56 static int pfc_dma_cleanup(dev_info_t *, fco_handle_t, fc_ci_t *);
57 
58 static int pfc_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
59 static int pfc_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
60 static int pfc_config_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
61 static int pfc_config_store(dev_info_t *, fco_handle_t, fc_ci_t *);
62 
63 static int pfc_probe_address(dev_info_t *, fco_handle_t, fc_ci_t *);
64 static int pfc_probe_space(dev_info_t *, fco_handle_t, fc_ci_t *);
65 
66 static int pfc_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
67 static int pfc_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
68 static int pfc_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
69 int prom_get_fcode_size(char *);
70 int prom_get_fcode(char *, char *);
71 int pfc_update_assigned_prop(dev_info_t *, pci_regspec_t *);
72 int pfc_remove_assigned_prop(dev_info_t *, pci_regspec_t *);
73 int pci_alloc_resource(dev_info_t *, pci_regspec_t);
74 int pci_free_resource(dev_info_t *, pci_regspec_t);
75 int pci_alloc_mem_chunk(dev_info_t *,  uint64_t, uint64_t *,  uint64_t *);
76 int pci_alloc_io_chunk(dev_info_t *,  uint64_t,  uint64_t *, uint64_t *);
77 static int fcpci_indirect_map(dev_info_t *);
78 
79 int fcpci_unloadable;
80 
81 static ddi_dma_attr_t fcpci_dma_attr = {
82 	DMA_ATTR_V0,	/* version number */
83 	0x0,		/* lowest usable address */
84 	0xFFFFFFFFull,	/* high DMA address range */
85 	0xFFFFFFFFull,	/* DMA counter register */
86 	1,		/* DMA address alignment */
87 	1,		/* DMA burstsizes */
88 	1,		/* min effective DMA size */
89 	0xFFFFFFFFull,	/* max DMA xfer size */
90 	0xFFFFFFFFull,	/* segment boundary */
91 	1,		 /* s/g list length */
92 	1,		/* granularity of device */
93 	0		/* DMA transfer flags */
94 };
95 
96 #ifndef	lint
97 char _depends_on[] = "misc/fcodem misc/busra";
98 #endif
99 
100 #define	HIADDR(n) ((uint32_t)(((uint64_t)(n) & 0xFFFFFFFF00000000) >> 32))
101 #define	LOADDR(n) ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
102 #define	LADDR(lo, hi)    (((uint64_t)(hi) << 32) | (uint32_t)(lo))
103 #define	PCI_4GIG_LIMIT 0xFFFFFFFFUL
104 #define	PCI_MEMGRAN 0x100000
105 #define	PCI_IOGRAN 0x1000
106 
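/*
 * Illustrative sketch, kept under "notdef" so it is never compiled: how the
 * HIADDR/LOADDR/LADDR macros above compose.  A 64-bit PCI address is split
 * into the two 32-bit cells carried in "assigned-addresses" entries and is
 * recombined with LADDR.  The function and variable names are hypothetical.
 */
#ifdef	notdef
static void
fcpci_addr_macro_example(void)
{
	uint64_t pa = 0x00000001F0000000ull;	/* example 64-bit address */
	uint32_t hi = HIADDR(pa);		/* 0x00000001 */
	uint32_t lo = LOADDR(pa);		/* 0xF0000000 */

	ASSERT(LADDR(lo, hi) == pa);		/* lossless round trip */
}
#endif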
107 
108 /*
109  * Module linkage information for the kernel.
110  */
111 static struct modlmisc modlmisc = {
112 	&mod_miscops, "FCode pci bus functions"
113 };
114 
115 static struct modlinkage modlinkage = {
116 	MODREV_1, (void *)&modlmisc, NULL
117 };
118 
119 int
120 _init(void)
121 {
122 	return (mod_install(&modlinkage));
123 }
124 
125 int
126 _fini(void)
127 {
128 	if (fcpci_unloadable)
129 		return (mod_remove(&modlinkage));
130 	return (EBUSY);
131 }
132 
133 int
134 _info(struct modinfo *modinfop)
135 {
136 	return (mod_info(&modlinkage, modinfop));
137 }
138 
139 
140 struct pfc_ops_v {
141 	char *svc_name;
142 	fc_ops_t *f;
143 };
144 
145 static struct pfc_ops_v pov[] = {
146 	{	"map-in",		pfc_map_in},
147 	{	"map-out",		pfc_map_out},
148 	{	"dma-map-in",		pfc_dma_map_in},
149 	{	"dma-map-out",		pfc_dma_map_out},
150 	{	"dma-sync",		pfc_dma_sync},
151 	{	"rx@",			pfc_register_fetch},
152 	{	"rl@",			pfc_register_fetch},
153 	{	"rw@",			pfc_register_fetch},
154 	{	"rb@",			pfc_register_fetch},
155 	{	"rx!",			pfc_register_store},
156 	{	"rl!",			pfc_register_store},
157 	{	"rw!",			pfc_register_store},
158 	{	"rb!",			pfc_register_store},
159 	{	"config-l@",		pfc_config_fetch},
160 	{	"config-w@",		pfc_config_fetch},
161 	{	"config-b@",		pfc_config_fetch},
162 	{	"config-l!",		pfc_config_store},
163 	{	"config-w!",		pfc_config_store},
164 	{	"config-b!",		pfc_config_store},
165 	{	FC_PROBE_ADDRESS,	pfc_probe_address},
166 	{	FC_PROBE_SPACE,		pfc_probe_space},
167 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
168 	{	FC_CONFIG_CHILD,	pfc_config_child},
169 	{	FC_GET_FCODE_SIZE,	pfc_get_fcode_size},
170 	{	FC_GET_FCODE,		pfc_get_fcode},
171 	{	NULL,			NULL}
172 };
173 
174 static struct pfc_ops_v shared_pov[] = {
175 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
176 	{	NULL,			NULL}
177 };
178 
179 int pci_map_phys(dev_info_t *, pci_regspec_t *,
180     caddr_t *, ddi_device_acc_attr_t *, ddi_acc_handle_t *);
181 
182 void pci_unmap_phys(ddi_acc_handle_t *, pci_regspec_t *);
183 
184 fco_handle_t
185 pci_fc_ops_alloc_handle(dev_info_t *ap, dev_info_t *child,
186     void *fcode, size_t fcode_size, char *unit_address,
187     struct pci_ops_bus_args *up)
188 {
189 	fco_handle_t rp;
190 	struct pci_ops_bus_args *bp = NULL;
191 	phandle_t h;
192 
193 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
194 	rp->next_handle = fc_ops_alloc_handle(ap, child, fcode, fcode_size,
195 	    unit_address, NULL);
196 	rp->ap = ap;
197 	rp->child = child;
198 	rp->fcode = fcode;
199 	rp->fcode_size = fcode_size;
200 	if (unit_address) {
201 		char *buf;
202 
203 		buf = kmem_zalloc(strlen(unit_address) + 1, KM_SLEEP);
204 		(void) strcpy(buf, unit_address);
205 		rp->unit_address = buf;
206 	}
207 
208 	bp = kmem_zalloc(sizeof (struct pci_ops_bus_args), KM_SLEEP);
209 	*bp = *up;
210 	rp->bus_args = bp;
211 
212 	/*
213 	 * Add the child's nodeid to our table...
214 	 */
215 	h = ddi_get_nodeid(rp->child);
216 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
217 
218 	return (rp);
219 }
220 
221 void
222 pci_fc_ops_free_handle(fco_handle_t rp)
223 {
224 	struct pci_ops_bus_args *bp;
225 	struct fc_resource *ip, *np;
226 
227 	ASSERT(rp);
228 
229 	if (rp->next_handle)
230 		fc_ops_free_handle(rp->next_handle);
231 	if (rp->unit_address)
232 		kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
233 	if ((bp = rp->bus_args) != NULL)
234 		kmem_free(bp, sizeof (struct pci_ops_bus_args));
235 
236 	/*
237 	 * Release all the resources from the resource list
238 	 * XXX: We don't handle 'unknown' types, but we don't create them.
239 	 */
240 	for (ip = rp->head; ip != NULL; ip = np) {
241 		np = ip->next;
242 		switch (ip->type) {
243 		case RT_MAP:
244 			FC_DEBUG1(1, CE_CONT, "pci_fc_ops_free: "
245 			    "pci_unmap_phys(%p)\n", ip->fc_map_handle);
246 			pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
247 			kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
248 			break;
249 		case RT_DMA:
250 			/* DMA has to be freed up at exit time */
251 			cmn_err(CE_CONT, "pci_fc_ops_free: DMA seen!\n");
252 			break;
253 		default:
254 			cmn_err(CE_CONT, "pci_fc_ops_free: "
255 			    "unknown resource type %d\n", ip->type);
256 			break;
257 		}
258 		fc_rem_resource(rp, ip);
259 		kmem_free(ip, sizeof (struct fc_resource));
260 	}
261 	kmem_free(rp, sizeof (struct fc_resource_list));
262 }
263 
264 int
265 pci_fc_ops(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
266 {
267 	struct pfc_ops_v *pv;
268 	char *name = fc_cell2ptr(cp->svc_name);
269 
270 	ASSERT(rp);
271 
272 	/*
273 	 * First try the generic fc_ops. If the ops is a shared op,
274 	 * also call our local function.
275 	 */
276 	if (fc_ops(ap, rp->next_handle, cp) == 0) {
277 		for (pv = shared_pov; pv->svc_name != NULL; ++pv)
278 			if (strcmp(pv->svc_name, name) == 0)
279 				return (pv->f(ap, rp, cp));
280 		return (0);
281 	}
282 
283 	for (pv = pov; pv->svc_name != NULL; ++pv)
284 		if (strcmp(pv->svc_name, name) == 0)
285 			return (pv->f(ap, rp, cp));
286 
287 	FC_DEBUG1(9, CE_CONT, "pci_fc_ops: <%s> not serviced\n", name);
288 
289 	return (-1);
290 }
291 
292 /*
293  * Create a dma mapping for a given user address.
294  */
295 static int
296 pfc_dma_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
297 {
298 	ddi_dma_handle_t h;
299 	int error;
300 	caddr_t virt;
301 	size_t len;
302 	uint_t flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
303 	struct fc_resource *ip;
304 	ddi_dma_cookie_t c;
305 	struct buf *bp;
306 	uint_t ccnt;
307 
308 	if (fc_cell2int(cp->nargs) != 3)
309 		return (fc_syntax_error(cp, "nargs must be 3"));
310 
311 	if (fc_cell2int(cp->nresults) < 1)
312 		return (fc_syntax_error(cp, "nresults must be >= 1"));
313 
314 	/*
315 	 * XXX: It's not clear what we should do with a non-cacheable request
316 	 */
317 	virt = fc_cell2ptr(fc_arg(cp, 2));
318 	len = fc_cell2size(fc_arg(cp, 1));
319 #ifdef	notdef
320 	cacheable = fc_cell2int(fc_arg(cp, 0));	/* XXX: do what? */
321 #endif
322 
323 	FC_DEBUG2(6, CE_CONT, "pfc_dma_map_in: virt %p, len %d\n", virt, len);
324 
325 	/*
326 	 * Set up the address space for physio from userland
327 	 */
328 	error = fc_physio_setup(&bp, virt, len);
329 
330 	if (error)  {
331 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: fc_physio_setup failed "
332 		    "error: %d  virt: %p  len %d\n", error, virt, len);
333 		return (fc_priv_error(cp, "fc_physio_setup failed"));
334 	}
335 
336 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: dma_map_in; bp = %p\n", bp);
337 	error = fc_ddi_dma_alloc_handle(ap, &fcpci_dma_attr, DDI_DMA_SLEEP,
338 	    NULL, &h);
339 	if (error != DDI_SUCCESS)  {
340 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
341 		    "error: %d  virt: %p  len %d\n", error, virt, len);
342 		return (fc_priv_error(cp, "real dma-map-in failed"));
343 	}
344 
345 	error = fc_ddi_dma_buf_bind_handle(h, bp, flags, DDI_DMA_SLEEP, NULL,
346 	    &c, &ccnt);
347 	if ((error != DDI_DMA_MAPPED) || (ccnt != 1)) {
348 		fc_ddi_dma_free_handle(&h);
349 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
350 		    "error: %d  virt: %p  len %d\n", error, virt, len);
351 		return (fc_priv_error(cp, "real dma-map-in failed"));
352 	}
353 
354 	if (c.dmac_size < len)  {
355 		error = fc_ddi_dma_unbind_handle(h);
356 		if (error != DDI_SUCCESS) {
357 			return (fc_priv_error(cp, "ddi_dma_unbind error"));
358 		}
359 		fc_ddi_dma_free_handle(&h);
360 		return (fc_priv_error(cp, "ddi_dma_buf_bind size < len"));
361 	}
362 
363 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: returning devaddr %x\n",
364 	    c.dmac_address);
365 
366 	cp->nresults = fc_int2cell(1);
367 	fc_result(cp, 0) = fc_uint32_t2cell(c.dmac_address);	/* XXX size */
368 
369 	/*
370 	 * Now we have to log this resource saving the handle and buf header
371 	 */
372 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
373 	ip->type = RT_DMA;
374 	ip->fc_dma_virt = virt;
375 	ip->fc_dma_len = len;
376 	ip->fc_dma_handle = h;
377 	ip->fc_dma_devaddr = c.dmac_address;
378 	ip->fc_dma_bp = bp;
379 	fc_add_resource(rp, ip);
380 
381 	return (fc_success_op(ap, rp, cp));
382 }
383 
384 static int
385 pfc_dma_sync(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
386 {
387 	void *virt;
388 	size_t len;
389 	uint32_t devaddr;
390 	int error;
391 	struct fc_resource *ip;
392 
393 	if (fc_cell2int(cp->nargs) != 3)
394 		return (fc_syntax_error(cp, "nargs must be 3"));
395 
396 	virt = fc_cell2ptr(fc_arg(cp, 2));
397 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
398 	len = fc_cell2size(fc_arg(cp, 0));
399 
400 	/*
401 	 * Find if this virt is 'within' a request we know about
402 	 */
403 	fc_lock_resource_list(rp);
404 	for (ip = rp->head; ip != NULL; ip = ip->next) {
405 		if (ip->type != RT_DMA)
406 			continue;
407 		if (ip->fc_dma_devaddr != devaddr)
408 			continue;
409 		if (((char *)virt >= (char *)ip->fc_dma_virt) &&
410 		    (((char *)virt + len) <=
411 		    ((char *)ip->fc_dma_virt + ip->fc_dma_len)))
412 			break;
413 	}
414 	fc_unlock_resource_list(rp);
415 
416 	if (ip == NULL)
417 		return (fc_priv_error(cp, "request not within a "
418 		    "known dma mapping"));
419 
420 	/*
421 	 * We know about this request, so we trust it enough to sync it.
422 	 * Unfortunately, we don't know which direction, so we'll do
423 	 * both directions.
424 	 */
425 
426 	error = fc_ddi_dma_sync(ip->fc_dma_handle,
427 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORCPU);
428 	error |= fc_ddi_dma_sync(ip->fc_dma_handle,
429 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORDEV);
430 
431 	if (error)
432 		return (fc_priv_error(cp, "Call to ddi_dma_sync failed"));
433 
434 	cp->nresults = fc_int2cell(0);
435 	return (fc_success_op(ap, rp, cp));
436 }
437 
438 static int
439 pfc_dma_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
440 {
441 	void *virt;
442 	size_t len;
443 	uint32_t devaddr;
444 	struct fc_resource *ip;
445 	int e;
446 
447 	if (fc_cell2int(cp->nargs) != 3)
448 		return (fc_syntax_error(cp, "nargs must be 3"));
449 
450 	virt = fc_cell2ptr(fc_arg(cp, 2));
451 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
452 	len = fc_cell2size(fc_arg(cp, 0));
453 
454 	/*
455 	 * Find if this virt matches a request we know about
456 	 */
457 	fc_lock_resource_list(rp);
458 	for (ip = rp->head; ip != NULL; ip = ip->next) {
459 		if (ip->type != RT_DMA)
460 			continue;
461 		if (ip->fc_dma_devaddr != devaddr)
462 			continue;
463 		if (ip->fc_dma_virt != virt)
464 			continue;
465 		if (len == ip->fc_dma_len)
466 			break;
467 	}
468 	fc_unlock_resource_list(rp);
469 
470 	if (ip == NULL)
471 		return (fc_priv_error(cp, "request doesn't match a "
472 		    "known dma mapping"));
473 
474 	/*
475 	 * ddi_dma_unbind_handle does an implied sync ...
476 	 */
477 	e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
478 	if (e != DDI_SUCCESS) {
479 		cmn_err(CE_CONT, "pfc_dma_map_out: ddi_dma_unbind failed!\n");
480 	}
481 	fc_ddi_dma_free_handle(&ip->fc_dma_handle);
482 
483 	/*
484 	 * Tear down the physio mappings
485 	 */
486 	fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
487 
488 	/*
489 	 * remove the resource from the list and release it.
490 	 */
491 	fc_rem_resource(rp, ip);
492 	kmem_free(ip, sizeof (struct fc_resource));
493 
494 	cp->nresults = fc_int2cell(0);
495 	return (fc_success_op(ap, rp, cp));
496 }
497 
498 static struct fc_resource *
499 next_dma_resource(fco_handle_t rp)
500 {
501 	struct fc_resource *ip;
502 
503 	fc_lock_resource_list(rp);
504 	for (ip = rp->head; ip != NULL; ip = ip->next)
505 		if (ip->type == RT_DMA)
506 			break;
507 	fc_unlock_resource_list(rp);
508 
509 	return (ip);
510 }
511 
512 static int
513 pfc_dma_cleanup(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
514 {
515 	struct fc_resource *ip;
516 	int e;
517 
518 	while ((ip = next_dma_resource(rp)) != NULL) {
519 
520 		FC_DEBUG2(9, CE_CONT, "pfc_dma_cleanup: virt %x len %x\n",
521 		    ip->fc_dma_virt, ip->fc_dma_len);
522 
523 		/*
524 		 * Free the dma handle
525 		 */
526 		e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
527 		if (e != DDI_SUCCESS) {
528 			cmn_err(CE_CONT, "pfc_dma_cleanup: "
529 			    "ddi_dma_unbind failed!\n");
530 		}
531 		fc_ddi_dma_free_handle(&ip->fc_dma_handle);
532 
533 		/*
534 		 * Tear down the userland mapping and free the buf header
535 		 */
536 		fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
537 
538 		fc_rem_resource(rp, ip);
539 		kmem_free(ip, sizeof (struct fc_resource));
540 	}
541 
542 	cp->nresults = fc_int2cell(0);
543 	return (fc_success_op(ap, rp, cp));
544 }
545 
546 static int
547 pfc_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
548 {
549 	size_t len;
550 	int error;
551 	caddr_t virt;
552 	pci_regspec_t p, *ph;
553 	struct fc_resource *ip;
554 	ddi_device_acc_attr_t acc;
555 	ddi_acc_handle_t h;
556 
557 	if (fc_cell2int(cp->nargs) != 4)
558 		return (fc_syntax_error(cp, "nargs must be 4"));
559 
560 	if (fc_cell2int(cp->nresults) < 1)
561 		return (fc_syntax_error(cp, "nresults must be >= 1"));
562 
563 	p.pci_size_hi = 0;
564 	p.pci_size_low = len = fc_cell2size(fc_arg(cp, 0));
565 
566 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 1));
567 	p.pci_phys_mid = fc_cell2uint(fc_arg(cp, 2));
568 	p.pci_phys_low = fc_cell2uint(fc_arg(cp, 3));
569 
570 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
571 
572 	/*
573 	 * Fcode expects the bytes not to be swapped.
574 	 */
575 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
576 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
577 
578 	/*
579 	 * First We need to allocate the PCI Resource.
580 	 */
581 	error = pci_alloc_resource(rp->child, p);
582 
583 	if (error)  {
584 		return (fc_priv_error(cp, "pci map-in failed"));
585 	}
586 
587 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
588 
589 	if (error)  {
590 		return (fc_priv_error(cp, "pci map-in failed"));
591 	}
592 
593 	cp->nresults = fc_int2cell(1);
594 	fc_result(cp, 0) = fc_ptr2cell(virt);
595 
596 	/*
597 	 * Log this resource ...
598 	 */
599 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
600 	ip->type = RT_MAP;
601 	ip->fc_map_virt = virt;
602 	ip->fc_map_len = len;
603 	ip->fc_map_handle = h;
604 	ph = kmem_zalloc(sizeof (pci_regspec_t), KM_SLEEP);
605 	*ph = p;
606 	ip->fc_regspec = ph;	/* cache a copy of the reg spec */
607 	fc_add_resource(rp, ip);
608 
609 	return (fc_success_op(ap, rp, cp));
610 }
611 
612 static int
613 pfc_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
614 {
615 	caddr_t virt;
616 	size_t len;
617 	struct fc_resource *ip;
618 
619 	if (fc_cell2int(cp->nargs) != 2)
620 		return (fc_syntax_error(cp, "nargs must be 2"));
621 
622 	virt = fc_cell2ptr(fc_arg(cp, 1));
623 
624 	len = fc_cell2size(fc_arg(cp, 0));
625 
626 	/*
627 	 * Find if this request matches a mapping resource we set up.
628 	 */
629 	fc_lock_resource_list(rp);
630 	for (ip = rp->head; ip != NULL; ip = ip->next) {
631 		if (ip->type != RT_MAP)
632 			continue;
633 		if (ip->fc_map_virt != virt)
634 			continue;
635 		if (ip->fc_map_len == len)
636 			break;
637 	}
638 	fc_unlock_resource_list(rp);
639 
640 	if (ip == NULL)
641 		return (fc_priv_error(cp, "request doesn't match a "
642 		    "known mapping"));
643 
644 	pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
645 
646 	kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
647 
648 	/*
649 	 * remove the resource from the list and release it.
650 	 */
651 	fc_rem_resource(rp, ip);
652 	kmem_free(ip, sizeof (struct fc_resource));
653 
654 	cp->nresults = fc_int2cell(0);
655 	return (fc_success_op(ap, rp, cp));
656 }
657 
658 static int
659 pfc_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
660 {
661 	size_t len;
662 	caddr_t virt;
663 	int error;
664 	uint64_t x;
665 	uint32_t l;
666 	uint16_t w;
667 	uint8_t b;
668 	char *name = fc_cell2ptr(cp->svc_name);
669 	struct fc_resource *ip;
670 
671 	if (fc_cell2int(cp->nargs) != 1)
672 		return (fc_syntax_error(cp, "nargs must be 1"));
673 
674 	if (fc_cell2int(cp->nresults) < 1)
675 		return (fc_syntax_error(cp, "nresults must be >= 1"));
676 
677 	virt = fc_cell2ptr(fc_arg(cp, 0));
678 
679 	/*
680 	 * Determine the access width .. we can switch on the 2nd
681 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
682 	 */
683 	switch (*(name + 1)) {
684 	case 'x':	len = sizeof (x); break;
685 	case 'l':	len = sizeof (l); break;
686 	case 'w':	len = sizeof (w); break;
687 	case 'b':	len = sizeof (b); break;
688 	}
689 
690 	/*
691 	 * Check the alignment ...
692 	 */
693 	if (((intptr_t)virt & (len - 1)) != 0)
694 		return (fc_priv_error(cp, "unaligned access"));
695 
696 	/*
697 	 * Find if this virt is 'within' a request we know about
698 	 */
699 	fc_lock_resource_list(rp);
700 	for (ip = rp->head; ip != NULL; ip = ip->next) {
701 		if (ip->type != RT_MAP)
702 			continue;
703 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
704 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
705 			break;
706 	}
707 	fc_unlock_resource_list(rp);
708 
709 	if (ip == NULL)
710 		return (fc_priv_error(cp, "request not within a "
711 		    "known mapping"));
712 
713 	/*
714 	 * XXX: We need access handle versions of peek/poke to move
715 	 * beyond the prototype ... we assume that we have hardware
716 	 * byte swapping enabled for pci register access here which
717 	 * is a huge dependency on the current implementation.
718 	 */
719 	switch (len) {
720 	case sizeof (x):
721 		error = ddi_peek64(rp->child, (int64_t *)virt, (int64_t *)&x);
722 		break;
723 	case sizeof (l):
724 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&l);
725 		break;
726 	case sizeof (w):
727 		error = ddi_peek16(rp->child, (int16_t *)virt, (int16_t *)&w);
728 		break;
729 	case sizeof (b):
730 		error = ddi_peek8(rp->child, (int8_t *)virt, (int8_t *)&b);
731 		break;
732 	}
733 
734 	if (error) {
735 		return (fc_priv_error(cp, "access error"));
736 	}
737 
738 	cp->nresults = fc_int2cell(1);
739 	switch (len) {
740 	case sizeof (x): fc_result(cp, 0) = x; break;
741 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
742 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
743 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
744 	}
745 	return (fc_success_op(ap, rp, cp));
746 }
747 
748 static int
749 pfc_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
750 {
751 	size_t len;
752 	caddr_t virt;
753 	int error;
754 	uint64_t x;
755 	uint32_t l;
756 	uint16_t w;
757 	uint8_t b;
758 	char *name = fc_cell2ptr(cp->svc_name);
759 	struct fc_resource *ip;
760 
761 	if (fc_cell2int(cp->nargs) != 2)
762 		return (fc_syntax_error(cp, "nargs must be 2"));
763 
764 	virt = fc_cell2ptr(fc_arg(cp, 0));
765 
766 	/*
767 	 * Determine the access width .. we can switch on the 2nd
768 	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
769 	 */
770 	switch (*(name + 1)) {
771 	case 'x': len = sizeof (x); x = fc_arg(cp, 1); break;
772 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
773 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
774 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
775 	}
776 
777 	/*
778 	 * Check the alignment ...
779 	 */
780 	if (((intptr_t)virt & (len - 1)) != 0)
781 		return (fc_priv_error(cp, "unaligned access"));
782 
783 	/*
784 	 * Find if this virt is 'within' a request we know about
785 	 */
786 	fc_lock_resource_list(rp);
787 	for (ip = rp->head; ip != NULL; ip = ip->next) {
788 		if (ip->type != RT_MAP)
789 			continue;
790 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
791 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
792 			break;
793 	}
794 	fc_unlock_resource_list(rp);
795 
796 	if (ip == NULL)
797 		return (fc_priv_error(cp, "request not within a "
798 		    "known mapping"));
799 
800 	/*
801 	 * XXX: We need access handle versions of peek/poke to move
802 	 * beyond the prototype ... we assume that we have hardware
803 	 * byte swapping enabled for pci register access here which
804 	 * is a huge dependency on the current implementation.
805 	 */
806 	switch (len) {
807 	case sizeof (x):
808 		error = ddi_poke64(rp->child, (int64_t *)virt, x);
809 		break;
810 	case sizeof (l):
811 		error = ddi_poke32(rp->child, (int32_t *)virt, l);
812 		break;
813 	case sizeof (w):
814 		error = ddi_poke16(rp->child, (int16_t *)virt, w);
815 		break;
816 	case sizeof (b):
817 		error = ddi_poke8(rp->child, (int8_t *)virt, b);
818 		break;
819 	}
820 
821 	if (error) {
822 		return (fc_priv_error(cp, "access error"));
823 	}
824 
825 	cp->nresults = fc_int2cell(0);
826 	return (fc_success_op(ap, rp, cp));
827 }
828 
829 static int
830 pfc_config_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
831 {
832 	caddr_t virt, v;
833 	int error, reg, flags = 0;
834 	size_t len;
835 	uint32_t l, tmp;
836 	uint16_t w;
837 	uint8_t b;
838 	char *name = fc_cell2ptr(cp->svc_name);
839 	pci_regspec_t p;
840 	ddi_device_acc_attr_t acc;
841 	ddi_acc_handle_t h;
842 
843 	if (fc_cell2int(cp->nargs) != 1)
844 		return (fc_syntax_error(cp, "nargs must be 1"));
845 
846 	if (fc_cell2int(cp->nresults) < 1)
847 		return (fc_syntax_error(cp, "nresults must be >= 1"));
848 
849 	/*
850 	 * Construct a config address pci reg property from the args.
851 	 * arg[0] is the configuration address.
852 	 */
853 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
854 	p.pci_phys_mid = p.pci_phys_low = 0;
855 	p.pci_size_hi = p.pci_size_low = 0;
856 
857 	/*
858 	 * Verify that the address is a configuration space address;
859 	 * the ss bits must be zero.
860 	 */
861 	if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
862 		cmn_err(CE_CONT, "pfc_config_fetch: "
863 		    "invalid config addr: %x\n", p.pci_phys_hi);
864 		return (fc_priv_error(cp, "non-config addr"));
865 	}
866 
867 	/*
868 	 * Extract the register number from the config address and
869 	 * remove the register number from the physical address.
870 	 */
871 
872 	reg = (p.pci_phys_hi & PCI_REG_REG_M) |
873 	    (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
874 
875 	p.pci_phys_hi &= PCI_BDF_bits;
876 
877 	/*
878 	 * Determine the access width .. we can switch on the 8th
879 	 * character of the name which is "config-{l,w,b}@"
880 	 */
881 	switch (*(name + 7)) {
882 	case 'l':	len = sizeof (l); break;
883 	case 'w':	len = sizeof (w); break;
884 	case 'b':	len = sizeof (b); break;
885 	}
886 
887 	/*
888 	 * Verify that the access is properly aligned
889 	 */
890 	if ((reg & (len - 1)) != 0)
891 		return (fc_priv_error(cp, "unaligned access"));
892 
893 	/*
894 	 * Map in configuration space (temporarily)
895 	 */
896 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
897 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
898 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
899 
900 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
901 
902 	if (error)  {
903 		return (fc_priv_error(cp, "pci config map-in failed"));
904 	}
905 
906 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
907 		flags |= PCICFG_CONF_INDIRECT_MAP;
908 
909 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
910 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
911 		error = DDI_SUCCESS;
912 	} else
913 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
914 
915 	if (error == DDI_SUCCESS)
916 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
917 			error = DDI_FAILURE;
918 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
919 		}
920 
921 	if (error != DDI_SUCCESS) {
922 		return (fc_priv_error(cp, "pci config fetch failed"));
923 	}
924 
925 
926 	/*
927 	 * XXX: We need access handle versions of peek/poke to move
928 	 * beyond the prototype ... we assume that we have hardware
929 	 * byte swapping enabled for pci register access here which
930 	 * is a huge dependency on the current implementation.
931 	 */
932 	v = virt + reg;
933 	switch (len) {
934 	case sizeof (l):
935 		l = (int32_t)ddi_get32(h, (uint32_t *)v);
936 		break;
937 	case sizeof (w):
938 		w = (int16_t)ddi_get16(h, (uint16_t *)v);
939 		break;
940 	case sizeof (b):
941 		b = (int8_t)ddi_get8(h, (uint8_t *)v);
942 		break;
943 	}
944 
945 	/*
946 	 * Remove the temporary config space mapping
947 	 */
948 	pci_unmap_phys(&h, &p);
949 
950 	if (error) {
951 		return (fc_priv_error(cp, "access error"));
952 	}
953 
954 	cp->nresults = fc_int2cell(1);
955 	switch (len) {
956 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
957 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
958 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
959 	}
960 
961 	return (fc_success_op(ap, rp, cp));
962 }
963 
964 static int
965 pfc_config_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
966 {
967 	caddr_t virt, v;
968 	int error, reg, flags = 0;
969 	size_t len;
970 	uint32_t l, tmp;
971 	uint16_t w;
972 	uint8_t b;
973 	char *name = fc_cell2ptr(cp->svc_name);
974 	pci_regspec_t p;
975 	ddi_device_acc_attr_t acc;
976 	ddi_acc_handle_t h;
977 
978 	if (fc_cell2int(cp->nargs) != 2)
979 		return (fc_syntax_error(cp, "nargs must be 2"));
980 
981 	/*
982 	 * Construct a config address pci reg property from the args.
983 	 * arg[0] is the configuration address. arg[1] is the data.
984 	 */
985 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
986 	p.pci_phys_mid = p.pci_phys_low = 0;
987 	p.pci_size_hi = p.pci_size_low = 0;
988 
989 	/*
990 	 * Verify that the address is a configuration space address;
991 	 * the ss bits must be zero.
992 	 */
993 	if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
994 		cmn_err(CE_CONT, "pfc_config_store: "
995 		    "invalid config addr: %x\n", p.pci_phys_hi);
996 		return (fc_priv_error(cp, "non-config addr"));
997 	}
998 
999 	/*
1000 	 * Extract the register number from the config address and
1001 	 * remove the register number from the physical address.
1002 	 */
1003 	reg = (p.pci_phys_hi & PCI_REG_REG_M) |
1004 	    (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
1005 
1006 	p.pci_phys_hi &= PCI_BDF_bits;
1007 
1008 	/*
1009 	 * Determine the access width .. we can switch on the 8th
1010 	 * character of the name which is "config-{l,w,b}!"
1011 	 */
1012 	switch (*(name + 7)) {
1013 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
1014 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
1015 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
1016 	}
1017 
1018 	/*
1019 	 * Verify that the access is properly aligned
1020 	 */
1021 	if ((reg & (len - 1)) != 0)
1022 		return (fc_priv_error(cp, "unaligned access"));
1023 
1024 	/*
1025 	 * Map in configuration space (temporarily)
1026 	 */
1027 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1028 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1029 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1030 
1031 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
1032 
1033 	if (error)  {
1034 		return (fc_priv_error(cp, "pci config map-in failed"));
1035 	}
1036 
1037 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
1038 		flags |= PCICFG_CONF_INDIRECT_MAP;
1039 
1040 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1041 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1042 		error = DDI_SUCCESS;
1043 	} else
1044 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
1045 
1046 	if (error == DDI_SUCCESS)
1047 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1048 			error = DDI_FAILURE;
1049 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
1050 		}
1051 
1052 	if (error != DDI_SUCCESS) {
1053 		return (fc_priv_error(cp, "pci config store failed"));
1054 	}
1055 
1056 
1057 	/*
1058 	 * XXX: We need access handle versions of peek/poke to move
1059 	 * beyond the prototype ... we assume that we have hardware
1060 	 * byte swapping enabled for pci register access here which
1061 	 * is a huge dependency on the current implementation.
1062 	 */
1063 	v = virt + reg;
1064 	switch (len) {
1065 	case sizeof (l):
1066 		ddi_put32(h, (uint32_t *)v, (uint32_t)l);
1067 		break;
1068 	case sizeof (w):
1069 		ddi_put16(h, (uint16_t *)v, (uint16_t)w);
1070 		break;
1071 	case sizeof (b):
1072 		ddi_put8(h, (uint8_t *)v, (uint8_t)b);
1073 		break;
1074 	}
1075 
1076 	/*
1077 	 * Remove the temporary config space mapping
1078 	 */
1079 	pci_unmap_phys(&h, &p);
1080 
1081 	if (error) {
1082 		return (fc_priv_error(cp, "access error"));
1083 	}
1084 
1085 	cp->nresults = fc_int2cell(0);
1086 	return (fc_success_op(ap, rp, cp));
1087 }
1088 
1089 
1090 static int
1091 pfc_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1092 {
1093 	caddr_t name_virt, fcode_virt;
1094 	char *name, *fcode;
1095 	int fcode_len, status;
1096 
1097 	if (fc_cell2int(cp->nargs) != 3)
1098 		return (fc_syntax_error(cp, "nargs must be 3"));
1099 
1100 	if (fc_cell2int(cp->nresults) < 1)
1101 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1102 
1103 	name_virt = fc_cell2ptr(fc_arg(cp, 0));
1104 
1105 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1106 
1107 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1108 
1109 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1110 
1111 	if (copyinstr(fc_cell2ptr(name_virt), name,
1112 	    FC_SVC_NAME_LEN - 1, NULL))  {
1113 		status = 0;
1114 	} else {
1115 
1116 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1117 
1118 		if ((status = prom_get_fcode(name, fcode)) != 0) {
1119 
1120 			if (copyout((void *)fcode, (void *)fcode_virt,
1121 			    fcode_len)) {
1122 				cmn_err(CE_WARN, "pfc_get_fcode: Unable "
1123 				    "to copy out fcode image\n");
1124 				status = 0;
1125 			}
1126 		}
1127 
1128 		kmem_free(fcode, fcode_len);
1129 	}
1130 
1131 	kmem_free(name, FC_SVC_NAME_LEN);
1132 
1133 	cp->nresults = fc_int2cell(1);
1134 	fc_result(cp, 0) = status;
1135 
1136 	return (fc_success_op(ap, rp, cp));
1137 }
1138 
1139 static int
1140 pfc_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1141 {
1142 	caddr_t virt;
1143 	char *name;
1144 	int len;
1145 
1146 	if (fc_cell2int(cp->nargs) != 1)
1147 		return (fc_syntax_error(cp, "nargs must be 1"));
1148 
1149 	if (fc_cell2int(cp->nresults) < 1)
1150 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1151 
1152 	virt = fc_cell2ptr(fc_arg(cp, 0));
1153 
1154 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1155 
1156 	if (copyinstr(fc_cell2ptr(virt), name,
1157 	    FC_SVC_NAME_LEN - 1, NULL))  {
1158 		len = 0;
1159 	} else {
1160 		len = prom_get_fcode_size(name);
1161 	}
1162 
1163 	kmem_free(name, FC_SVC_NAME_LEN);
1164 
1165 	cp->nresults = fc_int2cell(1);
1166 	fc_result(cp, 0) = len;
1167 
1168 	return (fc_success_op(ap, rp, cp));
1169 }
1170 
1171 /*
1172  * Return the physical probe address: lo = 0, mid = 0, hi = config addr
1173  */
1174 static int
1175 pfc_probe_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1176 {
1177 	if (fc_cell2int(cp->nargs) != 0)
1178 		return (fc_syntax_error(cp, "nargs must be 0"));
1179 
1180 	if (fc_cell2int(cp->nresults) < 2)
1181 		return (fc_syntax_error(cp, "nresults must be >= 2"));
1182 
1183 	cp->nresults = fc_int2cell(2);
1184 	fc_result(cp, 1) = fc_int2cell(0);	/* phys.lo */
1185 	fc_result(cp, 0) = fc_int2cell(0);	/* phys.mid */
1186 
1187 	return (fc_success_op(ap, rp, cp));
1188 }
1189 
1190 /*
1191  * Return the phys.hi component of the probe address.
1192  */
1193 static int
1194 pfc_probe_space(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1195 {
1196 	struct pci_ops_bus_args *ba = rp->bus_args;
1197 
1198 	ASSERT(ba);
1199 
1200 	if (fc_cell2int(cp->nargs) != 0)
1201 		return (fc_syntax_error(cp, "nargs must be 0"));
1202 
1203 	if (fc_cell2int(cp->nresults) < 1)
1204 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1205 
1206 	cp->nresults = fc_int2cell(1);
1207 	fc_result(cp, 0) = fc_uint32_t2cell(ba->config_address); /* phys.hi */
1208 
1209 	return (fc_success_op(ap, rp, cp));
1210 }
1211 
1212 static int
1213 pfc_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1214 {
1215 	fc_phandle_t h;
1216 
1217 	if (fc_cell2int(cp->nargs) != 0)
1218 		return (fc_syntax_error(cp, "nargs must be 0"));
1219 
1220 	if (fc_cell2int(cp->nresults) < 1)
1221 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1222 
1223 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1224 
1225 	cp->nresults = fc_int2cell(1);
1226 	fc_result(cp, 0) = fc_phandle2cell(h);
1227 
1228 	return (fc_success_op(ap, rp, cp));
1229 }
1230 
1231 int
1232 pci_alloc_mem_chunk(dev_info_t *dip, uint64_t mem_align, uint64_t *mem_size,
1233     uint64_t *mem_answer)
1234 {
1235 	ndi_ra_request_t req;
1236 	int rval;
1237 
1238 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1239 	req.ra_flags = NDI_RA_ALLOC_BOUNDED;
1240 	req.ra_boundbase = 0;
1241 	req.ra_boundlen = PCI_4GIG_LIMIT;
1242 	req.ra_len = *mem_size;
1243 	req.ra_align_mask = mem_align - 1;
1244 
1245 	rval = ndi_ra_alloc(dip, &req, mem_answer, mem_size,
1246 	    NDI_RA_TYPE_MEM, NDI_RA_PASS);
1247 
1248 	return (rval);
1249 }
1250 int
1251 pci_alloc_io_chunk(dev_info_t *dip, uint64_t io_align, uint64_t *io_size,
1252     uint64_t *io_answer)
1253 {
1254 	ndi_ra_request_t req;
1255 	int rval;
1256 
1257 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1258 	req.ra_flags = (NDI_RA_ALLOC_BOUNDED | NDI_RA_ALLOC_PARTIAL_OK);
1259 	req.ra_boundbase = 0;
1260 	req.ra_boundlen = PCI_4GIG_LIMIT;
1261 	req.ra_len = *io_size;
1262 	req.ra_align_mask = io_align - 1;
1263 
1264 	rval = ndi_ra_alloc(dip, &req, io_answer, io_size,
1265 	    NDI_RA_TYPE_IO, NDI_RA_PASS);
1266 
1267 	return (rval);
1268 }
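
/*
 * Hedged usage sketch, kept under "notdef" so it is never compiled: how a
 * caller might carve out a naturally aligned memory chunk and an I/O chunk
 * with the two helpers above.  The dip and the requested sizes are
 * hypothetical; real callers size the request from the device's BARs.
 */
#ifdef	notdef
static void
fcpci_chunk_alloc_example(dev_info_t *dip)
{
	uint64_t mem_size = PCI_MEMGRAN, mem_base;
	uint64_t io_size = PCI_IOGRAN, io_base;

	/* 1MB-aligned memory allocation, bounded below 4GB */
	if (pci_alloc_mem_chunk(dip, PCI_MEMGRAN, &mem_size, &mem_base) ==
	    NDI_SUCCESS)
		FC_DEBUG3(1, CE_CONT, "mem chunk [0x%x.%x] len [0x%x]\n",
		    HIADDR(mem_base), LOADDR(mem_base), mem_size);

	/* 4KB-aligned I/O allocation, partial allocations allowed */
	if (pci_alloc_io_chunk(dip, PCI_IOGRAN, &io_size, &io_base) ==
	    NDI_SUCCESS)
		FC_DEBUG3(1, CE_CONT, "io chunk [0x%x.%x] len [0x%x]\n",
		    HIADDR(io_base), LOADDR(io_base), io_size);
}
#endif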
1269 
1270 int
1271 pci_alloc_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1272 {
1273 	uint64_t answer;
1274 	uint64_t alen;
1275 	int offset, tmp;
1276 	pci_regspec_t config;
1277 	caddr_t virt, v;
1278 	ddi_device_acc_attr_t acc;
1279 	ddi_acc_handle_t h;
1280 	ndi_ra_request_t request;
1281 	pci_regspec_t *assigned;
1282 	int assigned_len, entries, i, l, flags = 0, error;
1283 
1284 	l = phys_spec.pci_size_low;
1285 
1286 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1287 	    DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&assigned,
1288 	    &assigned_len) == DDI_PROP_SUCCESS) {
1289 
1290 		entries = assigned_len / (sizeof (pci_regspec_t));
1291 
1292 		/*
1293 		 * Walk through the assigned-addresses entries. If there is
1294 		 * a match, there is no need to allocate the resource.
1295 		 */
1296 		for (i = 0; i < entries; i++) {
1297 			if (assigned[i].pci_phys_hi == phys_spec.pci_phys_hi) {
1298 				if (assigned[i].pci_size_low >=
1299 				    phys_spec.pci_size_low) {
1300 					kmem_free(assigned, assigned_len);
1301 					return (0);
1302 				}
1303 				/*
1304 				 * Fcode wants to assign more than what
1305 				 * probe found.
1306 				 */
1307 				(void) pci_free_resource(dip, assigned[i]);
1308 				/*
1309 				 * Go on to allocate resources.
1310 				 */
1311 				break;
1312 			}
1313 			/*
1314 			 * Check if Fcode wants to map using different
1315 			 * NPT bits.
1316 			 */
1317 			if (PCI_REG_BDFR_G(assigned[i].pci_phys_hi) ==
1318 			    PCI_REG_BDFR_G(phys_spec.pci_phys_hi)) {
1319 				/*
1320 				 * It is an error to change SS bits
1321 				 */
1322 				if (PCI_REG_ADDR_G(assigned[i].pci_phys_hi) !=
1323 				    PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1324 
1325 					FC_DEBUG2(2, CE_WARN, "Fcode changing "
1326 					    "ss bits in reg %x -- %x",
1327 					    assigned[i].pci_phys_hi,
1328 					    phys_spec.pci_phys_hi);
1329 				}
1330 
1331 				/*
1332 				 * Allocate enough
1333 				 */
1334 				l = MAX(assigned[i].pci_size_low,
1335 				    phys_spec.pci_size_low);
1336 
1337 				phys_spec.pci_size_low = l;
1338 
1339 				(void) pci_free_resource(dip, assigned[i]);
1340 				/*
1341 				 * Go on to allocate resources.
1342 				 */
1343 				break;
1344 			}
1345 		}
1346 		kmem_free(assigned, assigned_len);
1347 	}
1348 
1349 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1350 
1351 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1352 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1353 	config.pci_phys_mid = config.pci_phys_low = 0;
1354 	config.pci_size_hi = config.pci_size_low = 0;
1355 
1356 	/*
1357 	 * Map in configuration space (temporarily)
1358 	 */
1359 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1360 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1361 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1362 
1363 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1364 		return (1);
1365 	}
1366 
1367 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1368 		flags |= PCICFG_CONF_INDIRECT_MAP;
1369 
1370 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1371 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1372 		error = DDI_SUCCESS;
1373 	} else
1374 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1375 
1376 	if (error == DDI_SUCCESS)
1377 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1378 			error = DDI_FAILURE;
1379 		}
1380 
1381 	if (error != DDI_SUCCESS) {
1382 		return (1);
1383 	}
1384 
1385 	request.ra_flags |= NDI_RA_ALIGN_SIZE;
1386 	request.ra_boundbase = 0;
1387 	request.ra_boundlen = PCI_4GIG_LIMIT;
1388 
1389 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1390 
1391 	v = virt + offset;
1392 
1393 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1394 		request.ra_len = l;
1395 		request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1396 
1397 		/* allocate memory space from the allocator */
1398 
1399 		if (ndi_ra_alloc(ddi_get_parent(dip),
1400 		    &request, &answer, &alen, NDI_RA_TYPE_MEM,
1401 		    NDI_RA_PASS) != NDI_SUCCESS) {
1402 			pci_unmap_phys(&h, &config);
1403 			return (1);
1404 		}
1405 		FC_DEBUG3(1, CE_CONT, "ROM addr = [0x%x.%x] len [0x%x]\n",
1406 		    HIADDR(answer), LOADDR(answer), alen);
1407 
1408 		/* program the low word */
1409 
1410 		ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1411 
1412 		phys_spec.pci_phys_low = LOADDR(answer);
1413 		phys_spec.pci_phys_mid = HIADDR(answer);
1414 	} else {
1415 		request.ra_len = l;
1416 
1417 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1418 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1419 			request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1420 
1421 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1422 				/*
1423 				 * If it is a non relocatable address,
1424 				 * then specify the address we want.
1425 				 */
1426 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1427 				request.ra_addr = (uint64_t)LADDR(
1428 				    phys_spec.pci_phys_low,
1429 				    phys_spec.pci_phys_mid);
1430 			}
1431 
1432 			/* allocate memory space from the allocator */
1433 
1434 			if (ndi_ra_alloc(ddi_get_parent(dip),
1435 			    &request, &answer, &alen, NDI_RA_TYPE_MEM,
1436 			    NDI_RA_PASS) != NDI_SUCCESS) {
1437 				pci_unmap_phys(&h, &config);
1438 				if (request.ra_flags == NDI_RA_ALLOC_SPECIFIED)
1439 					cmn_err(CE_WARN, "Unable to allocate "
1440 					    "non relocatable address 0x%p\n",
1441 					    (void *) request.ra_addr);
1442 				return (1);
1443 			}
1444 			FC_DEBUG3(1, CE_CONT,
1445 			    "64 addr = [0x%x.%x] len [0x%x]\n",
1446 			    HIADDR(answer),
1447 			    LOADDR(answer),
1448 			    alen);
1449 
1450 			/* program the low word */
1451 
1452 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1453 
1454 			/* program the high word */
1455 			v += 4;
1456 			ddi_put32(h, (uint32_t *)v, HIADDR(answer));
1457 
1458 			phys_spec.pci_phys_low = LOADDR(answer);
1459 			phys_spec.pci_phys_mid = HIADDR(answer);
1460 			/*
1461 			 * currently support 32b address space
1462 			 * assignments only.
1463 			 */
1464 			phys_spec.pci_phys_hi ^= PCI_ADDR_MEM64 ^
1465 			    PCI_ADDR_MEM32;
1466 
1467 			break;
1468 
1469 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1470 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1471 
1472 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1473 				/*
1474 				 * If it is a non relocatable address,
1475 				 * then specify the address we want.
1476 				 */
1477 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1478 				request.ra_addr = (uint64_t)
1479 				    phys_spec.pci_phys_low;
1480 			}
1481 
1482 			/* allocate memory space from the allocator */
1483 
1484 			if (ndi_ra_alloc(ddi_get_parent(dip),
1485 			    &request, &answer, &alen, NDI_RA_TYPE_MEM,
1486 			    NDI_RA_PASS) != NDI_SUCCESS) {
1487 				pci_unmap_phys(&h, &config);
1488 				if (request.ra_flags == NDI_RA_ALLOC_SPECIFIED)
1489 					cmn_err(CE_WARN, "Unable to allocate "
1490 					    "non relocatable address 0x%p\n",
1491 					    (void *) request.ra_addr);
1492 				return (1);
1493 			}
1494 
1495 			FC_DEBUG3(1, CE_CONT,
1496 			    "32 addr = [0x%x.%x] len [0x%x]\n",
1497 			    HIADDR(answer),
1498 			    LOADDR(answer),
1499 			    alen);
1500 
1501 			/* program the low word */
1502 
1503 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1504 
1505 			phys_spec.pci_phys_low = LOADDR(answer);
1506 
1507 			break;
1508 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1509 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1510 
1511 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1512 				/*
1513 				 * If it is a non relocatable address,
1514 				 * then specify the address we want.
1515 				 */
1516 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1517 				request.ra_addr = (uint64_t)
1518 				    phys_spec.pci_phys_low;
1519 			}
1520 
1521 			/* allocate I/O space from the allocator */
1522 
1523 			if (ndi_ra_alloc(ddi_get_parent(dip),
1524 			    &request, &answer, &alen, NDI_RA_TYPE_IO,
1525 			    NDI_RA_PASS) != NDI_SUCCESS) {
1526 				pci_unmap_phys(&h, &config);
1527 				if (request.ra_flags ==
1528 				    NDI_RA_ALLOC_SPECIFIED)
1529 					cmn_err(CE_WARN, "Unable to allocate "
1530 					    "non relocatable IO Space 0x%p\n",
1531 					    (void *) request.ra_addr);
1532 				return (1);
1533 			}
1534 			FC_DEBUG3(1, CE_CONT,
1535 			    "I/O addr = [0x%x.%x] len [0x%x]\n",
1536 			    HIADDR(answer),
1537 			    LOADDR(answer),
1538 			    alen);
1539 
1540 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1541 
1542 			phys_spec.pci_phys_low = LOADDR(answer);
1543 
1544 			break;
1545 		default:
1546 			pci_unmap_phys(&h, &config);
1547 			return (1);
1548 		} /* switch */
1549 	}
1550 
1551 	/*
1552 	 * Now that memory locations are assigned,
1553 	 * update the assigned address property.
1554 	 */
1555 	if (pfc_update_assigned_prop(dip, &phys_spec)) {
1556 		pci_unmap_phys(&h, &config);
1557 		return (1);
1558 	}
1559 
1560 	pci_unmap_phys(&h, &config);
1561 
1562 	return (0);
1563 }
1564 
1565 int
1566 pci_free_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1567 {
1568 	int offset, tmp;
1569 	pci_regspec_t config;
1570 	caddr_t virt, v;
1571 	ddi_device_acc_attr_t acc;
1572 	ddi_acc_handle_t h;
1573 	ndi_ra_request_t request;
1574 	int l, error, flags = 0;
1575 
1576 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1577 
1578 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1579 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1580 	config.pci_phys_mid = config.pci_phys_low = 0;
1581 	config.pci_size_hi = config.pci_size_low = 0;
1582 
1583 	/*
1584 	 * Map in configuration space (temporarily)
1585 	 */
1586 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1587 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1588 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1589 
1590 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1591 		return (1);
1592 	}
1593 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1594 		flags |= PCICFG_CONF_INDIRECT_MAP;
1595 
1596 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1597 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1598 		error = DDI_SUCCESS;
1599 	} else
1600 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1601 
1602 	if (error == DDI_SUCCESS)
1603 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1604 			error = DDI_FAILURE;
1605 		}
1606 	if (error != DDI_SUCCESS) {
1607 		return (1);
1608 	}
1609 
1610 
1611 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1612 
1613 	v = virt + offset;
1614 
1615 	/*
1616 	 * Pick up the size to be freed. It may be different from
1617 	 * what probe finds.
1618 	 */
1619 	l = phys_spec.pci_size_low;
1620 
1621 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1622 		/* free memory back to the allocator */
1623 		if (ndi_ra_free(ddi_get_parent(dip), phys_spec.pci_phys_low,
1624 		    l, NDI_RA_TYPE_MEM,
1625 		    NDI_RA_PASS) != NDI_SUCCESS) {
1626 			pci_unmap_phys(&h, &config);
1627 			return (1);
1628 		}
1629 
1630 		/* Unmap the BAR by writing a zero */
1631 
1632 		ddi_put32(h, (uint32_t *)v, 0);
1633 	} else {
1634 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1635 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1636 			/* free memory back to the allocator */
1637 			if (ndi_ra_free(ddi_get_parent(dip),
1638 			    LADDR(phys_spec.pci_phys_low,
1639 			    phys_spec.pci_phys_mid),
1640 			    l, NDI_RA_TYPE_MEM,
1641 			    NDI_RA_PASS) != NDI_SUCCESS) {
1642 				pci_unmap_phys(&h, &config);
1643 				return (1);
1644 			}
1645 
1646 			break;
1647 
1648 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1649 			/* free memory back to the allocator */
1650 			if (ndi_ra_free(ddi_get_parent(dip),
1651 			    phys_spec.pci_phys_low,
1652 			    l, NDI_RA_TYPE_MEM,
1653 			    NDI_RA_PASS) != NDI_SUCCESS) {
1654 				pci_unmap_phys(&h, &config);
1655 				return (1);
1656 			}
1657 
1658 			break;
1659 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1660 			/* free I/O space back to the allocator */
1661 			if (ndi_ra_free(ddi_get_parent(dip),
1662 			    phys_spec.pci_phys_low,
1663 			    l, NDI_RA_TYPE_IO,
1664 			    NDI_RA_PASS) != NDI_SUCCESS) {
1665 				pci_unmap_phys(&h, &config);
1666 				return (1);
1667 			}
1668 			break;
1669 		default:
1670 			pci_unmap_phys(&h, &config);
1671 			return (1);
1672 		} /* switch */
1673 	}
1674 
1675 	/*
1676 	 * Now that the resource has been freed,
1677 	 * update the assigned address property.
1678 	 */
1679 
1680 	FC_DEBUG1(1, CE_CONT, "updating assigned-addresses for %x\n",
1681 	    phys_spec.pci_phys_hi);
1682 
1683 	if (pfc_remove_assigned_prop(dip, &phys_spec)) {
1684 		pci_unmap_phys(&h, &config);
1685 		return (1);
1686 	}
1687 
1688 	pci_unmap_phys(&h, &config);
1689 
1690 	return (0);
1691 }
1692 
1693 
1694 int
1695 pci_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
1696 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1697 	ddi_acc_handle_t *handlep)
1698 {
1699 	ddi_map_req_t mr;
1700 	ddi_acc_hdl_t *hp;
1701 	int result;
1702 
1703 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1704 	hp = impl_acc_hdl_get(*handlep);
1705 	hp->ah_vers = VERS_ACCHDL;
1706 	hp->ah_dip = dip;
1707 	hp->ah_rnumber = 0;
1708 	hp->ah_offset = 0;
1709 	hp->ah_len = 0;
1710 	hp->ah_acc = *accattrp;
1711 
1712 	mr.map_op = DDI_MO_MAP_LOCKED;
1713 	mr.map_type = DDI_MT_REGSPEC;
1714 	mr.map_obj.rp = (struct regspec *)phys_spec;
1715 	mr.map_prot = PROT_READ | PROT_WRITE;
1716 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1717 	mr.map_handlep = hp;
1718 	mr.map_vers = DDI_MAP_VERSION;
1719 
1720 	result = ddi_map(dip, &mr, 0, 0, addrp);
1721 
1722 	if (result != DDI_SUCCESS) {
1723 		impl_acc_hdl_free(*handlep);
1724 		*handlep = (ddi_acc_handle_t)NULL;
1725 	} else {
1726 		hp->ah_addr = *addrp;
1727 	}
1728 
1729 	return (result);
1730 }
1731 
1732 void
1733 pci_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
1734 {
1735 	ddi_map_req_t mr;
1736 	ddi_acc_hdl_t *hp;
1737 
1738 	hp = impl_acc_hdl_get(*handlep);
1739 	ASSERT(hp);
1740 
1741 	mr.map_op = DDI_MO_UNMAP;
1742 	mr.map_type = DDI_MT_REGSPEC;
1743 	mr.map_obj.rp = (struct regspec *)ph;
1744 	mr.map_prot = PROT_READ | PROT_WRITE;
1745 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1746 	mr.map_handlep = hp;
1747 	mr.map_vers = DDI_MAP_VERSION;
1748 
1749 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1750 	    hp->ah_len, &hp->ah_addr);
1751 
1752 	impl_acc_hdl_free(*handlep);
1753 
1754 
1755 	*handlep = (ddi_acc_handle_t)NULL;
1756 }
1757 
1758 int
1759 pfc_update_assigned_prop(dev_info_t *dip, pci_regspec_t *newone)
1760 {
1761 	int		alen;
1762 	pci_regspec_t	*assigned;
1763 	caddr_t		newreg;
1764 	uint_t		status;
1765 
1766 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1767 	    "assigned-addresses", (caddr_t)&assigned, &alen);
1768 	switch (status) {
1769 		case DDI_PROP_SUCCESS:
1770 		break;
1771 		case DDI_PROP_NO_MEMORY:
1772 			return (1);
1773 		default:
1774 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1775 			    "assigned-addresses", (int *)newone,
1776 			    sizeof (*newone)/sizeof (int));
1777 			return (0);
1778 	}
1779 
1780 	/*
1781 	 * Allocate memory for the existing
1782 	 * assigned-addresses(s) plus one and then
1783 	 * build it.
1784 	 */
1785 
1786 	newreg = kmem_zalloc(alen+sizeof (*newone), KM_SLEEP);
1787 
1788 	bcopy(assigned, newreg, alen);
1789 	bcopy(newone, newreg + alen, sizeof (*newone));
1790 
1791 	/*
1792 	 * Write out the new "assigned-addresses" spec
1793 	 */
1794 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1795 	    "assigned-addresses", (int *)newreg,
1796 	    (alen + sizeof (*newone))/sizeof (int));
1797 
1798 	kmem_free((caddr_t)newreg, alen+sizeof (*newone));
1799 	kmem_free(assigned, alen);
1800 
1801 	return (0);
1802 }
1803 int
1804 pfc_remove_assigned_prop(dev_info_t *dip, pci_regspec_t *oldone)
1805 {
1806 	int		alen, new_len, num_entries, i;
1807 	pci_regspec_t	*assigned;
1808 	uint_t		status;
1809 
1810 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1811 	    "assigned-addresses", (caddr_t)&assigned, &alen);
1812 	switch (status) {
1813 		case DDI_PROP_SUCCESS:
1814 		break;
1815 		case DDI_PROP_NO_MEMORY:
1816 			return (1);
1817 		default:
1818 			return (0);
1819 	}
1820 
1821 	num_entries = alen / sizeof (pci_regspec_t);
1822 	new_len = alen - sizeof (pci_regspec_t);
1823 
1824 	/*
1825 	 * Search for the memory being removed.
1826 	 */
1827 	for (i = 0; i < num_entries; i++) {
1828 		if (assigned[i].pci_phys_hi == oldone->pci_phys_hi) {
1829 			if (new_len == 0) {
1830 				(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1831 				    "assigned-addresses");
1832 				break;
1833 			}
1834 			if ((new_len - (i * sizeof (pci_regspec_t)))
1835 			    == 0) {
1836 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1837 				    "%x removed from property (last entry)\n",
1838 				    oldone->pci_phys_hi);
1839 			} else {
1840 				bcopy((void *)(assigned + i + 1),
1841 				    (void *)(assigned + i),
1842 				    (new_len - (i * sizeof (pci_regspec_t))));
1843 
1844 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1845 				    "%x removed from property\n",
1846 				    oldone->pci_phys_hi);
1847 			}
1848 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
1849 			    dip, "assigned-addresses", (int *)assigned,
1850 			    (new_len/sizeof (int)));
1851 
1852 			break;
1853 		}
1854 	}
1855 
1856 	kmem_free(assigned, alen);
1857 
1858 	return (0);
1859 }
1860 /*
1861  * we recognize the non-transparent bridge child nodes with the
1862  * following property. This is specific to this implementation only.
1863  * This property is specific to AP nodes only.
1864  */
1865 #define	PCICFG_DEV_CONF_MAP_PROP		"pci-parent-indirect"
1866 
1867 /*
1868  * If a non-transparent bridge drives a hotplug/hotswap bus, then
1869  * the following property must be defined for the node either by
1870  * the driver or the OBP.
1871  */
1872 #define	PCICFG_BUS_CONF_MAP_PROP		"pci-conf-indirect"
1873 
1874 /*
1875  * this function is called only for SPARC platforms, where we may have
1876  * a mix of directly and indirectly mapped configuration space.
1877  */
1878 /*ARGSUSED*/
1879 static int
1880 fcpci_indirect_map(dev_info_t *dip)
1881 {
1882 	int rc = DDI_FAILURE;
1883 
1884 	if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip), 0,
1885 	    PCICFG_DEV_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1886 		rc = DDI_SUCCESS;
1887 	else
1888 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip),
1889 		    0, PCICFG_BUS_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1890 			rc = DDI_SUCCESS;
1891 
1892 	return (rc);
1893 }
1894