xref: /titanic_41/usr/src/uts/sun4/io/efcode/fcpci.c (revision 6528affb110ab8cf8b4464874b4a07f3f937475d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * fcpci.c: Framework PCI fcode ops
31  */
32 #include <sys/types.h>
33 #include <sys/kmem.h>
34 #include <sys/systm.h>
35 #include <sys/pci.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/sunndi.h>
39 #include <sys/ddidmareq.h>
40 #include <sys/pci.h>
41 #include <sys/modctl.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/fcode.h>
44 #include <sys/promif.h>
45 #include <sys/promimpl.h>
46 #include <sys/ddi_implfuncs.h>
47 
48 #define	PCI_NPT_bits		(PCI_RELOCAT_B | PCI_PREFETCH_B | PCI_ALIAS_B)
49 #define	PCI_BDF_bits		(PCI_REG_BDFR_M & ~PCI_REG_REG_M)
50 
51 #define	PCICFG_CONF_INDIRECT_MAP	1
52 
53 static int pfc_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
54 static int pfc_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
55 static int pfc_dma_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
56 static int pfc_dma_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
57 static int pfc_dma_sync(dev_info_t *, fco_handle_t, fc_ci_t *);
58 static int pfc_dma_cleanup(dev_info_t *, fco_handle_t, fc_ci_t *);
59 
60 static int pfc_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
61 static int pfc_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
62 static int pfc_config_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
63 static int pfc_config_store(dev_info_t *, fco_handle_t, fc_ci_t *);
64 
65 static int pfc_probe_address(dev_info_t *, fco_handle_t, fc_ci_t *);
66 static int pfc_probe_space(dev_info_t *, fco_handle_t, fc_ci_t *);
67 
68 static int pfc_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
69 static int pfc_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
70 static int pfc_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
71 int prom_get_fcode_size(char *);
72 int prom_get_fcode(char *, char *);
73 int pfc_update_assigned_prop(dev_info_t *, pci_regspec_t *);
74 int pfc_remove_assigned_prop(dev_info_t *, pci_regspec_t *);
75 int pci_alloc_resource(dev_info_t *, pci_regspec_t);
76 int pci_free_resource(dev_info_t *, pci_regspec_t);
77 int pci_alloc_mem_chunk(dev_info_t *,  uint64_t, uint64_t *,  uint64_t *);
78 int pci_alloc_io_chunk(dev_info_t *,  uint64_t,  uint64_t *, uint64_t *);
79 static int fcpci_indirect_map(dev_info_t *);
80 
81 int fcpci_unloadable;
82 
83 static ddi_dma_attr_t fcpci_dma_attr = {
84 	DMA_ATTR_V0,	/* version number */
85 	0x0,		/* lowest usable address */
86 	0xFFFFFFFFull,	/* high DMA address range */
87 	0xFFFFFFFFull,	/* DMA counter register */
88 	1,		/* DMA address alignment */
89 	1,		/* DMA burstsizes */
90 	1,		/* min effective DMA size */
91 	0xFFFFFFFFull,	/* max DMA xfer size */
92 	0xFFFFFFFFull,	/* segment boundary */
93 	1,		 /* s/g list length */
94 	1,		/* granularity of device */
95 	0		/* DMA transfer flags */
96 };
97 
98 #ifndef	lint
99 char _depends_on[] = "misc/fcodem misc/busra";
100 #endif
101 
102 #define	HIADDR(n) ((uint32_t)(((uint64_t)(n) & 0xFFFFFFFF00000000)>> 32))
103 #define	LOADDR(n) ((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
104 #define	LADDR(lo, hi)    (((uint64_t)(hi) << 32) | (uint32_t)(lo))
105 #define	PCI_4GIG_LIMIT 0xFFFFFFFFUL
106 #define	PCI_MEMGRAN 0x100000
107 #define	PCI_IOGRAN 0x1000
108 
109 
110 /*
111  * Module linkage information for the kernel.
112  */
113 static struct modlmisc modlmisc = {
114 	&mod_miscops, "FCode pci bus functions %I%"
115 };
116 
117 static struct modlinkage modlinkage = {
118 	MODREV_1, (void *)&modlmisc, NULL
119 };
120 
121 int
122 _init(void)
123 {
124 	return (mod_install(&modlinkage));
125 }
126 
127 int
128 _fini(void)
129 {
130 	if (fcpci_unloadable)
131 		return (mod_remove(&modlinkage));
132 	return (EBUSY);
133 }
134 
135 int
136 _info(struct modinfo *modinfop)
137 {
138 	return (mod_info(&modlinkage, modinfop));
139 }
140 
141 
142 struct pfc_ops_v {
143 	char *svc_name;
144 	fc_ops_t *f;
145 };
146 
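/*
 * Tables mapping Fcode client interface service names to their local
 * handlers.  Entries in shared_pov are services that are also run
 * locally after the generic fc_ops handler has serviced them.
 */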
147 static struct pfc_ops_v pov[] = {
148 	{	"map-in",		pfc_map_in},
149 	{	"map-out",		pfc_map_out},
150 	{	"dma-map-in",		pfc_dma_map_in},
151 	{	"dma-map-out",		pfc_dma_map_out},
152 	{	"dma-sync",		pfc_dma_sync},
153 	{	"rx@",			pfc_register_fetch},
154 	{	"rl@",			pfc_register_fetch},
155 	{	"rw@",			pfc_register_fetch},
156 	{	"rb@",			pfc_register_fetch},
157 	{	"rx!",			pfc_register_store},
158 	{	"rl!",			pfc_register_store},
159 	{	"rw!",			pfc_register_store},
160 	{	"rb!",			pfc_register_store},
161 	{	"config-l@",		pfc_config_fetch},
162 	{	"config-w@",		pfc_config_fetch},
163 	{	"config-b@",		pfc_config_fetch},
164 	{	"config-l!",		pfc_config_store},
165 	{	"config-w!",		pfc_config_store},
166 	{	"config-b!",		pfc_config_store},
167 	{	FC_PROBE_ADDRESS,	pfc_probe_address},
168 	{	FC_PROBE_SPACE,		pfc_probe_space},
169 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
170 	{	FC_CONFIG_CHILD,	pfc_config_child},
171 	{	FC_GET_FCODE_SIZE,	pfc_get_fcode_size},
172 	{	FC_GET_FCODE,		pfc_get_fcode},
173 	{	NULL,			NULL}
174 };
175 
176 static struct pfc_ops_v shared_pov[] = {
177 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
178 	{	NULL,			NULL}
179 };
180 
181 int pci_map_phys(dev_info_t *, pci_regspec_t *,
182     caddr_t *, ddi_device_acc_attr_t *, ddi_acc_handle_t *);
183 
184 void pci_unmap_phys(ddi_acc_handle_t *, pci_regspec_t *);
185 
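/*
 * Allocate and initialize the Fcode ops handle for a PCI child:
 * chain the generic fc_ops handle, record the attachment point, the
 * child, the Fcode image and the unit address, copy the bus args and
 * enter the child's nodeid in the dip <-> phandle table.
 */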
186 fco_handle_t
187 pci_fc_ops_alloc_handle(dev_info_t *ap, dev_info_t *child,
188     void *fcode, size_t fcode_size, char *unit_address,
189     struct pci_ops_bus_args *up)
190 {
191 	fco_handle_t rp;
192 	struct pci_ops_bus_args *bp = NULL;
193 	phandle_t h;
194 
195 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
196 	rp->next_handle = fc_ops_alloc_handle(ap, child, fcode, fcode_size,
197 	    unit_address, NULL);
198 	rp->ap = ap;
199 	rp->child = child;
200 	rp->fcode = fcode;
201 	rp->fcode_size = fcode_size;
202 	if (unit_address) {
203 		char *buf;
204 
205 		buf = kmem_zalloc(strlen(unit_address) + 1, KM_SLEEP);
206 		(void) strcpy(buf, unit_address);
207 		rp->unit_address = buf;
208 	}
209 
210 	bp = kmem_zalloc(sizeof (struct pci_ops_bus_args), KM_SLEEP);
211 	*bp = *up;
212 	rp->bus_args = bp;
213 
214 	/*
215 	 * Add the child's nodeid to our table...
216 	 */
217 	h = ddi_get_nodeid(rp->child);
218 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
219 
220 	return (rp);
221 }
222 
223 void
224 pci_fc_ops_free_handle(fco_handle_t rp)
225 {
226 	struct pci_ops_bus_args *bp;
227 	struct fc_resource *ip, *np;
228 
229 	ASSERT(rp);
230 
231 	if (rp->next_handle)
232 		fc_ops_free_handle(rp->next_handle);
233 	if (rp->unit_address)
234 		kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
235 	if ((bp = rp->bus_args) != NULL)
236 		kmem_free(bp, sizeof (struct pci_ops_bus_args));
237 
238 	/*
239 	 * Release all the resources from the resource list
240 	 * XXX: We don't handle 'unknown' types, but we don't create them.
241 	 */
242 	for (ip = rp->head; ip != NULL; ip = np) {
243 		np = ip->next;
244 		switch (ip->type) {
245 		case RT_MAP:
246 			FC_DEBUG1(1, CE_CONT, "pci_fc_ops_free: "
247 			    "pci_unmap_phys(%p)\n", ip->fc_map_handle);
248 			pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
249 			kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
250 			break;
251 		case RT_DMA:
252 			/* DMA has to be freed up at exit time */
253 			cmn_err(CE_CONT, "pci_fc_ops_free: DMA seen!\n");
254 			break;
255 		default:
256 			cmn_err(CE_CONT, "pci_fc_ops_free: "
257 			    "unknown resource type %d\n", ip->type);
258 			break;
259 		}
260 		fc_rem_resource(rp, ip);
261 		kmem_free(ip, sizeof (struct fc_resource));
262 	}
263 	kmem_free(rp, sizeof (struct fc_resource_list));
264 }
265 
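/*
 * Entry point for servicing an Fcode request on behalf of a PCI
 * child.  The generic fc_ops are tried first; anything they don't
 * handle is dispatched through the pov[] table above.
 */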
266 int
267 pci_fc_ops(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
268 {
269 	struct pfc_ops_v *pv;
270 	char *name = fc_cell2ptr(cp->svc_name);
271 
272 	ASSERT(rp);
273 
274 	/*
275 	 * First try the generic fc_ops. If the ops is a shared op,
276 	 * also call our local function.
277 	 */
278 	if (fc_ops(ap, rp->next_handle, cp) == 0) {
279 		for (pv = shared_pov; pv->svc_name != NULL; ++pv)
280 			if (strcmp(pv->svc_name, name) == 0)
281 				return (pv->f(ap, rp, cp));
282 		return (0);
283 	}
284 
285 	for (pv = pov; pv->svc_name != NULL; ++pv)
286 		if (strcmp(pv->svc_name, name) == 0)
287 			return (pv->f(ap, rp, cp));
288 
289 	FC_DEBUG1(9, CE_CONT, "pci_fc_ops: <%s> not serviced\n", name);
290 
291 	return (-1);
292 }
293 
294 /*
295  * Create a dma mapping for a given user address.
296  */
297 static int
298 pfc_dma_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
299 {
300 	ddi_dma_handle_t h;
301 	int error;
302 	caddr_t virt;
303 	size_t len;
304 	uint_t flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
305 	struct fc_resource *ip;
306 	ddi_dma_cookie_t c;
307 	struct buf *bp;
308 	uint_t ccnt;
309 
310 	if (fc_cell2int(cp->nargs) != 3)
311 		return (fc_syntax_error(cp, "nargs must be 3"));
312 
313 	if (fc_cell2int(cp->nresults) < 1)
314 		return (fc_syntax_error(cp, "nresults must be >= 1"));
315 
316 	/*
317 	 * XXX: It's not clear what we should do with a non-cacheable request
318 	 */
319 	virt = fc_cell2ptr(fc_arg(cp, 2));
320 	len = fc_cell2size(fc_arg(cp, 1));
321 #ifdef	notdef
322 	cacheable = fc_cell2int(fc_arg(cp, 0));	/* XXX: do what? */
323 #endif
324 
325 	FC_DEBUG2(6, CE_CONT, "pfc_dma_map_in: virt %p, len %d\n", virt, len);
326 
327 	/*
328 	 * Set up the address space for physio from userland
329 	 */
330 	error = fc_physio_setup(&bp, virt, len);
331 
332 	if (error)  {
333 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: fc_physio_setup failed "
334 		    "error: %d  virt: %p  len %d\n", error, virt, len);
335 		return (fc_priv_error(cp, "fc_physio_setup failed"));
336 	}
337 
338 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: dma_map_in; bp = %p\n", bp);
339 	error = fc_ddi_dma_alloc_handle(ap, &fcpci_dma_attr, DDI_DMA_SLEEP,
340 	    NULL, &h);
341 	if (error != DDI_SUCCESS)  {
342 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
343 		    "error: %d  virt: %p  len %d\n", error, virt, len);
344 		return (fc_priv_error(cp, "real dma-map-in failed"));
345 	}
346 
347 	error = fc_ddi_dma_buf_bind_handle(h, bp, flags, DDI_DMA_SLEEP, NULL,
348 	    &c, &ccnt);
349 	if ((error != DDI_DMA_MAPPED) || (ccnt != 1)) {
350 		fc_ddi_dma_free_handle(&h);
351 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
352 		    "error: %d  virt: %p  len %d\n", error, virt, len);
353 		return (fc_priv_error(cp, "real dma-map-in failed"));
354 	}
355 
356 	if (c.dmac_size < len)  {
357 		error = fc_ddi_dma_unbind_handle(h);
358 		if (error != DDI_SUCCESS) {
359 			return (fc_priv_error(cp, "ddi_dma_unbind error"));
360 		}
361 		fc_ddi_dma_free_handle(&h);
362 		return (fc_priv_error(cp, "ddi_dma_buf_bind size < len"));
363 	}
364 
365 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: returning devaddr %x\n",
366 		c.dmac_address);
367 
368 	cp->nresults = fc_int2cell(1);
369 	fc_result(cp, 0) = fc_uint32_t2cell(c.dmac_address);	/* XXX size */
370 
371 	/*
372 	 * Now we have to log this resource saving the handle and buf header
373 	 */
374 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
375 	ip->type = RT_DMA;
376 	ip->fc_dma_virt = virt;
377 	ip->fc_dma_len = len;
378 	ip->fc_dma_handle = h;
379 	ip->fc_dma_devaddr = c.dmac_address;
380 	ip->fc_dma_bp = bp;
381 	fc_add_resource(rp, ip);
382 
383 	return (fc_success_op(ap, rp, cp));
384 }
385 
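/*
 * "dma-sync" service: locate the logged DMA mapping that contains the
 * given range and sync it for both the CPU and the device.
 */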
386 static int
387 pfc_dma_sync(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
388 {
389 	void *virt;
390 	size_t len;
391 	uint32_t devaddr;
392 	int error;
393 	struct fc_resource *ip;
394 
395 	if (fc_cell2int(cp->nargs) != 3)
396 		return (fc_syntax_error(cp, "nargs must be 3"));
397 
398 	virt = fc_cell2ptr(fc_arg(cp, 2));
399 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
400 	len = fc_cell2size(fc_arg(cp, 0));
401 
402 	/*
403 	 * Find if this virt is 'within' a request we know about
404 	 */
405 	fc_lock_resource_list(rp);
406 	for (ip = rp->head; ip != NULL; ip = ip->next) {
407 		if (ip->type != RT_DMA)
408 			continue;
409 		if (ip->fc_dma_devaddr != devaddr)
410 			continue;
411 		if (((char *)virt >= (char *)ip->fc_dma_virt) &&
412 		    (((char *)virt + len) <=
413 		    ((char *)ip->fc_dma_virt + ip->fc_dma_len)))
414 			break;
415 	}
416 	fc_unlock_resource_list(rp);
417 
418 	if (ip == NULL)
419 		return (fc_priv_error(cp, "request not within a "
420 		    "known dma mapping"));
421 
422 	/*
423 	 * We know about this request, so we trust it enough to sync it.
424 	 * Unfortunately, we don't know which direction, so we'll do
425 	 * both directions.
426 	 */
427 
428 	error = fc_ddi_dma_sync(ip->fc_dma_handle,
429 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORCPU);
430 	error |= fc_ddi_dma_sync(ip->fc_dma_handle,
431 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORDEV);
432 
433 	if (error)
434 		return (fc_priv_error(cp, "Call to ddi_dma_sync failed"));
435 
436 	cp->nresults = fc_int2cell(0);
437 	return (fc_success_op(ap, rp, cp));
438 }
439 
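/*
 * "dma-map-out" service: find the DMA mapping logged by dma-map-in,
 * unbind and free its handle, tear down the physio setup and drop
 * the resource from our list.
 */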
440 static int
441 pfc_dma_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
442 {
443 	void *virt;
444 	size_t len;
445 	uint32_t devaddr;
446 	struct fc_resource *ip;
447 	int e;
448 
449 	if (fc_cell2int(cp->nargs) != 3)
450 		return (fc_syntax_error(cp, "nargs must be 3"));
451 
452 	virt = fc_cell2ptr(fc_arg(cp, 2));
453 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
454 	len = fc_cell2size(fc_arg(cp, 0));
455 
456 	/*
457 	 * Find if this virt matches a request we know about
458 	 */
459 	fc_lock_resource_list(rp);
460 	for (ip = rp->head; ip != NULL; ip = ip->next) {
461 		if (ip->type != RT_DMA)
462 			continue;
463 		if (ip->fc_dma_devaddr != devaddr)
464 			continue;
465 		if (ip->fc_dma_virt != virt)
466 			continue;
467 		if (len == ip->fc_dma_len)
468 			break;
469 	}
470 	fc_unlock_resource_list(rp);
471 
472 	if (ip == NULL)
473 		return (fc_priv_error(cp, "request doesn't match a "
474 		    "known dma mapping"));
475 
476 	/*
477 	 * ddi_dma_unbind_handle does an implied sync ...
478 	 */
479 	e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
480 	if (e != DDI_SUCCESS) {
481 		cmn_err(CE_CONT, "pfc_dma_map_out: ddi_dma_unbind failed!\n");
482 	}
483 	fc_ddi_dma_free_handle(&ip->fc_dma_handle);
484 
485 	/*
486 	 * Tear down the physio mappings
487 	 */
488 	fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
489 
490 	/*
491 	 * remove the resource from the list and release it.
492 	 */
493 	fc_rem_resource(rp, ip);
494 	kmem_free(ip, sizeof (struct fc_resource));
495 
496 	cp->nresults = fc_int2cell(0);
497 	return (fc_success_op(ap, rp, cp));
498 }
499 
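/*
 * Return the first DMA resource still logged on the handle's
 * resource list, or NULL if none remain.
 */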
500 static struct fc_resource *
501 next_dma_resource(fco_handle_t rp)
502 {
503 	struct fc_resource *ip;
504 
505 	fc_lock_resource_list(rp);
506 	for (ip = rp->head; ip != NULL; ip = ip->next)
507 		if (ip->type == RT_DMA)
508 			break;
509 	fc_unlock_resource_list(rp);
510 
511 	return (ip);
512 }
513 
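/*
 * FC_SVC_EXIT handler: release any DMA mappings the Fcode left
 * behind, unbinding each handle and freeing its physio setup.
 */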
514 static int
515 pfc_dma_cleanup(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
516 {
517 	struct fc_resource *ip;
518 	int e;
519 
520 	while ((ip = next_dma_resource(rp)) != NULL) {
521 
522 		FC_DEBUG2(9, CE_CONT, "pfc_dma_cleanup: virt %x len %x\n",
523 			ip->fc_dma_virt, ip->fc_dma_len);
524 
525 		/*
526 		 * Free the dma handle
527 		 */
528 		e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
529 		if (e != DDI_SUCCESS) {
530 			cmn_err(CE_CONT, "pfc_dma_cleanup: "
531 			    "ddi_dma_unbind failed!\n");
532 		}
533 		fc_ddi_dma_free_handle(&ip->fc_dma_handle);
534 
535 		/*
536 		 * Tear down the userland mapping and free the buf header
537 		 */
538 		fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
539 
540 		fc_rem_resource(rp, ip);
541 		kmem_free(ip, sizeof (struct fc_resource));
542 	}
543 
544 	cp->nresults = fc_int2cell(0);
545 	return (fc_success_op(ap, rp, cp));
546 }
547 
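/*
 * "map-in" service: build a pci reg spec from the arguments, allocate
 * the PCI resource, map it in, return the virtual address and log the
 * mapping so it can be torn down later.
 */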
548 static int
549 pfc_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
550 {
551 	size_t len;
552 	int error;
553 	caddr_t virt;
554 	pci_regspec_t p, *ph;
555 	struct fc_resource *ip;
556 	ddi_device_acc_attr_t acc;
557 	ddi_acc_handle_t h;
558 
559 	if (fc_cell2int(cp->nargs) != 4)
560 		return (fc_syntax_error(cp, "nargs must be 4"));
561 
562 	if (fc_cell2int(cp->nresults) < 1)
563 		return (fc_syntax_error(cp, "nresults must be >= 1"));
564 
565 	p.pci_size_hi = 0;
566 	p.pci_size_low = len = fc_cell2size(fc_arg(cp, 0));
567 
568 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 1));
569 	p.pci_phys_mid = fc_cell2uint(fc_arg(cp, 2));
570 	p.pci_phys_low = fc_cell2uint(fc_arg(cp, 3));
571 
572 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
573 
574 	/*
575 	 * Fcode expects that the bytes are not swapped.
576 	 */
577 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
578 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
579 
580 	/*
581 	 * First we need to allocate the PCI resource.
582 	 */
583 	error = pci_alloc_resource(rp->child, p);
584 
585 	if (error)  {
586 		return (fc_priv_error(cp, "pci map-in failed"));
587 	}
588 
589 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
590 
591 	if (error)  {
592 		return (fc_priv_error(cp, "pci map-in failed"));
593 	}
594 
595 	cp->nresults = fc_int2cell(1);
596 	fc_result(cp, 0) = fc_ptr2cell(virt);
597 
598 	/*
599 	 * Log this resource ...
600 	 */
601 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
602 	ip->type = RT_MAP;
603 	ip->fc_map_virt = virt;
604 	ip->fc_map_len = len;
605 	ip->fc_map_handle = h;
606 	ph = kmem_zalloc(sizeof (pci_regspec_t), KM_SLEEP);
607 	*ph = p;
608 	ip->fc_regspec = ph;	/* cache a copy of the reg spec */
609 	fc_add_resource(rp, ip);
610 
611 	return (fc_success_op(ap, rp, cp));
612 }
613 
614 static int
615 pfc_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
616 {
617 	caddr_t virt;
618 	size_t len;
619 	struct fc_resource *ip;
620 
621 	if (fc_cell2int(cp->nargs) != 2)
622 		return (fc_syntax_error(cp, "nargs must be 2"));
623 
624 	virt = fc_cell2ptr(fc_arg(cp, 1));
625 
626 	len = fc_cell2size(fc_arg(cp, 0));
627 
628 	/*
629 	 * Find if this request matches a mapping resource we set up.
630 	 */
631 	fc_lock_resource_list(rp);
632 	for (ip = rp->head; ip != NULL; ip = ip->next) {
633 		if (ip->type != RT_MAP)
634 			continue;
635 		if (ip->fc_map_virt != virt)
636 			continue;
637 		if (ip->fc_map_len == len)
638 			break;
639 	}
640 	fc_unlock_resource_list(rp);
641 
642 	if (ip == NULL)
643 		return (fc_priv_error(cp, "request doesn't match a "
644 		    "known mapping"));
645 
646 	pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
647 
648 	kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
649 
650 	/*
651 	 * remove the resource from the list and release it.
652 	 */
653 	fc_rem_resource(rp, ip);
654 	kmem_free(ip, sizeof (struct fc_resource));
655 
656 	cp->nresults = fc_int2cell(0);
657 	return (fc_success_op(ap, rp, cp));
658 }
659 
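/*
 * rx@/rl@/rw@/rb@ services: fetch a 64/32/16/8-bit register after
 * checking alignment and that the address lies within a mapping we
 * set up.
 */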
660 static int
661 pfc_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
662 {
663 	size_t len;
664 	caddr_t virt;
665 	int error;
666 	uint64_t x;
667 	uint32_t l;
668 	uint16_t w;
669 	uint8_t b;
670 	char *name = fc_cell2ptr(cp->svc_name);
671 	struct fc_resource *ip;
672 
673 	if (fc_cell2int(cp->nargs) != 1)
674 		return (fc_syntax_error(cp, "nargs must be 1"));
675 
676 	if (fc_cell2int(cp->nresults) < 1)
677 		return (fc_syntax_error(cp, "nresults must be >= 1"));
678 
679 	virt = fc_cell2ptr(fc_arg(cp, 0));
680 
681 	/*
682 	 * Determine the access width .. we can switch on the 2nd
683 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
684 	 */
685 	switch (*(name + 1)) {
686 	case 'x':	len = sizeof (x); break;
687 	case 'l':	len = sizeof (l); break;
688 	case 'w':	len = sizeof (w); break;
689 	case 'b':	len = sizeof (b); break;
690 	}
691 
692 	/*
693 	 * Check the alignment ...
694 	 */
695 	if (((intptr_t)virt & (len - 1)) != 0)
696 		return (fc_priv_error(cp, "unaligned access"));
697 
698 	/*
699 	 * Find if this virt is 'within' a request we know about
700 	 */
701 	fc_lock_resource_list(rp);
702 	for (ip = rp->head; ip != NULL; ip = ip->next) {
703 		if (ip->type != RT_MAP)
704 			continue;
705 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
706 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
707 			break;
708 	}
709 	fc_unlock_resource_list(rp);
710 
711 	if (ip == NULL)
712 		return (fc_priv_error(cp, "request not within a "
713 		    "known mapping"));
714 
715 	/*
716 	 * XXX: We need access handle versions of peek/poke to move
717 	 * beyond the prototype ... we assume that we have hardware
718 	 * byte swapping enabled for pci register access here which
719 	 * is a huge dependency on the current implementation.
720 	 */
721 	switch (len) {
722 	case sizeof (x):
723 		error = ddi_peek64(rp->child, (int64_t *)virt, (int64_t *)&x);
724 		break;
725 	case sizeof (l):
726 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&l);
727 		break;
728 	case sizeof (w):
729 		error = ddi_peek16(rp->child, (int16_t *)virt, (int16_t *)&w);
730 		break;
731 	case sizeof (b):
732 		error = ddi_peek8(rp->child, (int8_t *)virt, (int8_t *)&b);
733 		break;
734 	}
735 
736 	if (error) {
737 		return (fc_priv_error(cp, "access error"));
738 	}
739 
740 	cp->nresults = fc_int2cell(1);
741 	switch (len) {
742 	case sizeof (x): fc_result(cp, 0) = x; break;
743 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
744 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
745 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
746 	}
747 	return (fc_success_op(ap, rp, cp));
748 }
749 
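/*
 * rx!/rl!/rw!/rb! services: store a 64/32/16/8-bit register after the
 * same alignment and mapping checks as the fetch case.
 */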
750 static int
751 pfc_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
752 {
753 	size_t len;
754 	caddr_t virt;
755 	int error;
756 	uint64_t x;
757 	uint32_t l;
758 	uint16_t w;
759 	uint8_t b;
760 	char *name = fc_cell2ptr(cp->svc_name);
761 	struct fc_resource *ip;
762 
763 	if (fc_cell2int(cp->nargs) != 2)
764 		return (fc_syntax_error(cp, "nargs must be 2"));
765 
766 	virt = fc_cell2ptr(fc_arg(cp, 0));
767 
768 	/*
769 	 * Determine the access width .. we can switch on the 2nd
770 	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
771 	 */
772 	switch (*(name + 1)) {
773 	case 'x': len = sizeof (x); x = fc_arg(cp, 1); break;
774 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
775 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
776 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
777 	}
778 
779 	/*
780 	 * Check the alignment ...
781 	 */
782 	if (((intptr_t)virt & (len - 1)) != 0)
783 		return (fc_priv_error(cp, "unaligned access"));
784 
785 	/*
786 	 * Find if this virt is 'within' a request we know about
787 	 */
788 	fc_lock_resource_list(rp);
789 	for (ip = rp->head; ip != NULL; ip = ip->next) {
790 		if (ip->type != RT_MAP)
791 			continue;
792 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
793 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
794 			break;
795 	}
796 	fc_unlock_resource_list(rp);
797 
798 	if (ip == NULL)
799 		return (fc_priv_error(cp, "request not within a "
800 		    "known mapping"));
801 
802 	/*
803 	 * XXX: We need access handle versions of peek/poke to move
804 	 * beyond the prototype ... we assume that we have hardware
805 	 * byte swapping enabled for pci register access here which
806 	 * is a huge dependency on the current implementation.
807 	 */
808 	switch (len) {
809 	case sizeof (x):
810 		error = ddi_poke64(rp->child, (int64_t *)virt, x);
811 		break;
812 	case sizeof (l):
813 		error = ddi_poke32(rp->child, (int32_t *)virt, l);
814 		break;
815 	case sizeof (w):
816 		error = ddi_poke16(rp->child, (int16_t *)virt, w);
817 		break;
818 	case sizeof (b):
819 		error = ddi_poke8(rp->child, (int8_t *)virt, b);
820 		break;
821 	}
822 
823 	if (error) {
824 		return (fc_priv_error(cp, "access error"));
825 	}
826 
827 	cp->nresults = fc_int2cell(0);
828 	return (fc_success_op(ap, rp, cp));
829 }
830 
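/*
 * config-{l,w,b}@ services: read PCI configuration space.  The config
 * address is split into its BDF and register offset, the function's
 * config space is mapped in temporarily, probed for presence, read
 * and then unmapped.
 */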
831 static int
832 pfc_config_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
833 {
834 	caddr_t virt, v;
835 	int error, reg, flags = 0;
836 	size_t len;
837 	uint32_t l, tmp;
838 	uint16_t w;
839 	uint8_t b;
840 	char *name = fc_cell2ptr(cp->svc_name);
841 	pci_regspec_t p;
842 	ddi_device_acc_attr_t acc;
843 	ddi_acc_handle_t h;
844 
845 	if (fc_cell2int(cp->nargs) != 1)
846 		return (fc_syntax_error(cp, "nargs must be 1"));
847 
848 	if (fc_cell2int(cp->nresults) < 1)
849 		return (fc_syntax_error(cp, "nresults must be >= 1"));
850 
851 	/*
852 	 * Construct a config address pci reg property from the args.
853 	 * arg[0] is the configuration address.
854 	 */
855 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
856 	p.pci_phys_mid = p.pci_phys_low = 0;
857 	p.pci_size_hi = p.pci_size_low = 0;
858 
859 	/*
860 	 * Verify that the address is a configuration space address
861 	 * ss must be zero.
862 	 */
863 	if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
864 		cmn_err(CE_CONT, "pfc_config_fetch: "
865 		    "invalid config addr: %x\n", p.pci_phys_hi);
866 		return (fc_priv_error(cp, "non-config addr"));
867 	}
868 
869 	/*
870 	 * Extract the register number from the config address and
871 	 * remove the register number from the physical address.
872 	 */
873 
874 	reg = (p.pci_phys_hi & PCI_REG_REG_M) |
875 	    (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
876 
877 	p.pci_phys_hi &= PCI_BDF_bits;
878 
879 	/*
880 	 * Determine the access width .. we can switch on the 8th
881 	 * character of the name which is "config-{l,w,b}@"
882 	 */
883 	switch (*(name + 7)) {
884 	case 'l':	len = sizeof (l); break;
885 	case 'w':	len = sizeof (w); break;
886 	case 'b':	len = sizeof (b); break;
887 	}
888 
889 	/*
890 	 * Verify that the access is properly aligned
891 	 */
892 	if ((reg & (len - 1)) != 0)
893 		return (fc_priv_error(cp, "unaligned access"));
894 
895 	/*
896 	 * Map in configuration space (temporarily)
897 	 */
898 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
899 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
900 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
901 
902 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
903 
904 	if (error)  {
905 		return (fc_priv_error(cp, "pci config map-in failed"));
906 	}
907 
908 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
909 		flags |= PCICFG_CONF_INDIRECT_MAP;
910 
911 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
912 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
913 		error = DDI_SUCCESS;
914 	} else
915 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
916 
917 	if (error == DDI_SUCCESS)
918 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
919 			error = DDI_FAILURE;
920 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
921 		}
922 
923 	if (error != DDI_SUCCESS) {
924 		return (fc_priv_error(cp, "pci config fetch failed"));
925 	}
926 
927 
928 	/*
929 	 * XXX: We need access handle versions of peek/poke to move
930 	 * beyond the prototype ... we assume that we have hardware
931 	 * byte swapping enabled for pci register access here which
932 	 * is a huge dependency on the current implementation.
933 	 */
934 	v = virt + reg;
935 	switch (len) {
936 	case sizeof (l):
937 		l = (int32_t)ddi_get32(h, (uint32_t *)v);
938 		break;
939 	case sizeof (w):
940 		w = (int16_t)ddi_get16(h, (uint16_t *)v);
941 		break;
942 	case sizeof (b):
943 		b = (int8_t)ddi_get8(h, (uint8_t *)v);
944 		break;
945 	}
946 
947 	/*
948 	 * Remove the temporary config space mapping
949 	 */
950 	pci_unmap_phys(&h, &p);
951 
952 	if (error) {
953 		return (fc_priv_error(cp, "access error"));
954 	}
955 
956 	cp->nresults = fc_int2cell(1);
957 	switch (len) {
958 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
959 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
960 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
961 	}
962 
963 	return (fc_success_op(ap, rp, cp));
964 }
965 
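/*
 * config-{l,w,b}! services: write PCI configuration space, using the
 * same temporary mapping and presence probe as the fetch case.
 */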
966 static int
967 pfc_config_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
968 {
969 	caddr_t virt, v;
970 	int error, reg, flags = 0;
971 	size_t len;
972 	uint32_t l, tmp;
973 	uint16_t w;
974 	uint8_t b;
975 	char *name = fc_cell2ptr(cp->svc_name);
976 	pci_regspec_t p;
977 	ddi_device_acc_attr_t acc;
978 	ddi_acc_handle_t h;
979 
980 	if (fc_cell2int(cp->nargs) != 2)
981 		return (fc_syntax_error(cp, "nargs must be 2"));
982 
983 	/*
984 	 * Construct a config address pci reg property from the args.
985 	 * arg[0] is the configuration address. arg[1] is the data.
986 	 */
987 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
988 	p.pci_phys_mid = p.pci_phys_low = 0;
989 	p.pci_size_hi = p.pci_size_low = 0;
990 
991 	/*
992 	 * Verify that the address is a configuration space address
993 	 * ss must be zero.
994 	 */
995 	if ((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) {
996 		cmn_err(CE_CONT, "pfc_config_store: "
997 		    "invalid config addr: %x\n", p.pci_phys_hi);
998 		return (fc_priv_error(cp, "non-config addr"));
999 	}
1000 
1001 	/*
1002 	 * Extract the register number from the config address and
1003 	 * remove the register number from the physical address.
1004 	 */
1005 	reg = (p.pci_phys_hi & PCI_REG_REG_M) |
1006 	    (((p.pci_phys_hi & PCI_REG_EXTREG_M) >> PCI_REG_EXTREG_SHIFT) << 8);
1007 
1008 	p.pci_phys_hi &= PCI_BDF_bits;
1009 
1010 	/*
1011 	 * Determine the access width .. we can switch on the 8th
1012 	 * character of the name which is "config-{l,w,b}!"
1013 	 */
1014 	switch (*(name + 7)) {
1015 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
1016 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
1017 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
1018 	}
1019 
1020 	/*
1021 	 * Verify that the access is properly aligned
1022 	 */
1023 	if ((reg & (len - 1)) != 0)
1024 		return (fc_priv_error(cp, "unaligned access"));
1025 
1026 	/*
1027 	 * Map in configuration space (temporarily)
1028 	 */
1029 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1030 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1031 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1032 
1033 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
1034 
1035 	if (error)  {
1036 		return (fc_priv_error(cp, "pci config map-in failed"));
1037 	}
1038 
1039 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
1040 		flags |= PCICFG_CONF_INDIRECT_MAP;
1041 
1042 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1043 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1044 		error = DDI_SUCCESS;
1045 	} else
1046 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
1047 
1048 	if (error == DDI_SUCCESS)
1049 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1050 			error = DDI_FAILURE;
1051 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
1052 		}
1053 
1054 	if (error != DDI_SUCCESS) {
1055 		return (fc_priv_error(cp, "pci config store failed"));
1056 	}
1057 
1058 
1059 	/*
1060 	 * XXX: We need access handle versions of peek/poke to move
1061 	 * beyond the prototype ... we assume that we have hardware
1062 	 * byte swapping enabled for pci register access here which
1063 	 * is a huge dependency on the current implementation.
1064 	 */
1065 	v = virt + reg;
1066 	switch (len) {
1067 	case sizeof (l):
1068 		ddi_put32(h, (uint32_t *)v, (uint32_t)l);
1069 		break;
1070 	case sizeof (w):
1071 		ddi_put16(h, (uint16_t *)v, (uint16_t)w);
1072 		break;
1073 	case sizeof (b):
1074 		ddi_put8(h, (uint8_t *)v, (uint8_t)b);
1075 		break;
1076 	}
1077 
1078 	/*
1079 	 * Remove the temporary config space mapping
1080 	 */
1081 	pci_unmap_phys(&h, &p);
1082 
1083 	if (error) {
1084 		return (fc_priv_error(cp, "access error"));
1085 	}
1086 
1087 	cp->nresults = fc_int2cell(0);
1088 	return (fc_success_op(ap, rp, cp));
1089 }
1090 
1091 
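/*
 * FC_GET_FCODE service: copy in the name of the requested Fcode
 * image, retrieve the image from the PROM and copy it out to the
 * caller's buffer.
 */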
1092 static int
1093 pfc_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1094 {
1095 	caddr_t name_virt, fcode_virt;
1096 	char *name, *fcode;
1097 	int fcode_len, status;
1098 
1099 	if (fc_cell2int(cp->nargs) != 3)
1100 		return (fc_syntax_error(cp, "nargs must be 3"));
1101 
1102 	if (fc_cell2int(cp->nresults) < 1)
1103 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1104 
1105 	name_virt = fc_cell2ptr(fc_arg(cp, 0));
1106 
1107 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1108 
1109 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1110 
1111 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1112 
1113 	if (copyinstr(fc_cell2ptr(name_virt), name,
1114 	    FC_SVC_NAME_LEN - 1, NULL))  {
1115 		status = 0;
1116 	} else {
1117 
1118 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1119 
1120 		if ((status = prom_get_fcode(name, fcode)) != 0) {
1121 
1122 			if (copyout((void *)fcode, (void *)fcode_virt,
1123 			    fcode_len)) {
1124 				cmn_err(CE_WARN, "pfc_get_fcode: Unable "
1125 				    "to copy out fcode image\n");
1126 				status = 0;
1127 			}
1128 		}
1129 
1130 		kmem_free(fcode, fcode_len);
1131 	}
1132 
1133 	kmem_free(name, FC_SVC_NAME_LEN);
1134 
1135 	cp->nresults = fc_int2cell(1);
1136 	fc_result(cp, 0) = status;
1137 
1138 	return (fc_success_op(ap, rp, cp));
1139 }
1140 
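/*
 * FC_GET_FCODE_SIZE service: copy in the image name and return the
 * size of the named Fcode image held by the PROM.
 */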
1141 static int
1142 pfc_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1143 {
1144 	caddr_t virt;
1145 	char *name;
1146 	int len;
1147 
1148 	if (fc_cell2int(cp->nargs) != 1)
1149 		return (fc_syntax_error(cp, "nargs must be 1"));
1150 
1151 	if (fc_cell2int(cp->nresults) < 1)
1152 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1153 
1154 	virt = fc_cell2ptr(fc_arg(cp, 0));
1155 
1156 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1157 
1158 	if (copyinstr(fc_cell2ptr(virt), name,
1159 	    FC_SVC_NAME_LEN - 1, NULL))  {
1160 		len = 0;
1161 	} else {
1162 		len = prom_get_fcode_size(name);
1163 	}
1164 
1165 	kmem_free(name, FC_SVC_NAME_LEN);
1166 
1167 	cp->nresults = fc_int2cell(1);
1168 	fc_result(cp, 0) = len;
1169 
1170 	return (fc_success_op(ap, rp, cp));
1171 }
1172 
1173 /*
1174  * Return the physical probe address: lo=0, mid=0; the hi (config addr)
1174  * cell is returned separately by the probe-space service below.
1175  */
1176 static int
1177 pfc_probe_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1178 {
1179 	if (fc_cell2int(cp->nargs) != 0)
1180 		return (fc_syntax_error(cp, "nargs must be 0"));
1181 
1182 	if (fc_cell2int(cp->nresults) < 2)
1183 		return (fc_syntax_error(cp, "nresults must be >= 2"));
1184 
1185 	cp->nresults = fc_int2cell(2);
1186 	fc_result(cp, 1) = fc_int2cell(0);	/* phys.lo */
1187 	fc_result(cp, 0) = fc_int2cell(0);	/* phys.mid */
1188 
1189 	return (fc_success_op(ap, rp, cp));
1190 }
1191 
1192 /*
1193  * Return the phys.hi component of the probe address.
1194  */
1195 static int
1196 pfc_probe_space(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1197 {
1198 	struct pci_ops_bus_args *ba = rp->bus_args;
1199 
1200 	ASSERT(ba);
1201 
1202 	if (fc_cell2int(cp->nargs) != 0)
1203 		return (fc_syntax_error(cp, "nargs must be 0"));
1204 
1205 	if (fc_cell2int(cp->nresults) < 1)
1206 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1207 
1208 	cp->nresults = fc_int2cell(1);
1209 	fc_result(cp, 0) = fc_uint32_t2cell(ba->config_address); /* phys.hi */
1210 
1211 	return (fc_success_op(ap, rp, cp));
1212 }
1213 
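/*
 * FC_CONFIG_CHILD service: return the phandle of the child being
 * configured.
 */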
1214 static int
1215 pfc_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1216 {
1217 	fc_phandle_t h;
1218 
1219 	if (fc_cell2int(cp->nargs) != 0)
1220 		return (fc_syntax_error(cp, "nargs must be 0"));
1221 
1222 	if (fc_cell2int(cp->nresults) < 1)
1223 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1224 
1225 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1226 
1227 	cp->nresults = fc_int2cell(1);
1228 	fc_result(cp, 0) = fc_phandle2cell(h);
1229 
1230 	return (fc_success_op(ap, rp, cp));
1231 }
1232 
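/*
 * Allocate a chunk of PCI memory space below 4GB from the NDI
 * resource allocator, with the requested size and alignment.
 */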
1233 int
1234 pci_alloc_mem_chunk(dev_info_t *dip, uint64_t mem_align, uint64_t *mem_size,
1235     uint64_t *mem_answer)
1236 {
1237 	ndi_ra_request_t req;
1238 	int rval;
1239 
1240 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1241 	req.ra_flags = NDI_RA_ALLOC_BOUNDED;
1242 	req.ra_boundbase = 0;
1243 	req.ra_boundlen = PCI_4GIG_LIMIT;
1244 	req.ra_len = *mem_size;
1245 	req.ra_align_mask = mem_align - 1;
1246 
1247 	rval = ndi_ra_alloc(dip, &req, mem_answer, mem_size,
1248 	    NDI_RA_TYPE_MEM, NDI_RA_PASS);
1249 
1250 	return (rval);
1251 }
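/*
 * Allocate a chunk of PCI I/O space below 4GB; a partial allocation
 * is acceptable to the caller.
 */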
1252 int
1253 pci_alloc_io_chunk(dev_info_t *dip, uint64_t io_align, uint64_t *io_size,
1254     uint64_t *io_answer)
1255 {
1256 	ndi_ra_request_t req;
1257 	int rval;
1258 
1259 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1260 	req.ra_flags = (NDI_RA_ALLOC_BOUNDED | NDI_RA_ALLOC_PARTIAL_OK);
1261 	req.ra_boundbase = 0;
1262 	req.ra_boundlen = PCI_4GIG_LIMIT;
1263 	req.ra_len = *io_size;
1264 	req.ra_align_mask = io_align - 1;
1265 
1266 	rval = ndi_ra_alloc(dip, &req, io_answer, io_size,
1267 	    NDI_RA_TYPE_IO, NDI_RA_PASS);
1268 
1269 	return (rval);
1270 }
1271 
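/*
 * Allocate PCI memory or I/O space for the given reg spec, program
 * the corresponding base address register and record the result in
 * the device's "assigned-addresses" property.  Returns 0 on success,
 * 1 on failure.
 */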
1272 int
1273 pci_alloc_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1274 {
1275 	uint64_t answer;
1276 	uint64_t alen;
1277 	int offset, tmp;
1278 	pci_regspec_t config;
1279 	caddr_t virt, v;
1280 	ddi_device_acc_attr_t acc;
1281 	ddi_acc_handle_t h;
1282 	ndi_ra_request_t request;
1283 	pci_regspec_t *assigned;
1284 	int assigned_len, entries, i, l, flags = 0, error;
1285 
1286 	l = phys_spec.pci_size_low;
1287 
1288 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1289 	    DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&assigned,
1290 	    &assigned_len) == DDI_PROP_SUCCESS) {
1291 
1292 		entries = assigned_len / (sizeof (pci_regspec_t));
1293 
1294 		/*
1295 		 * Walk through the assigned-addresses entries. If there is
1296 		 * a match, there is no need to allocate the resource.
1297 		 */
1298 		for (i = 0; i < entries; i++) {
1299 			if (assigned[i].pci_phys_hi == phys_spec.pci_phys_hi) {
1300 				if (assigned[i].pci_size_low >=
1301 				    phys_spec.pci_size_low) {
1302 					kmem_free(assigned, assigned_len);
1303 					return (0);
1304 				}
1305 				/*
1306 				 * Fcode wants to assign more than what
1307 				 * probe found.
1308 				 */
1309 				(void) pci_free_resource(dip, assigned[i]);
1310 				/*
1311 				 * Go on to allocate resources.
1312 				 */
1313 				break;
1314 			}
1315 			/*
1316 			 * Check if Fcode wants to map using different
1317 			 * NPT bits.
1318 			 */
1319 			if (PCI_REG_BDFR_G(assigned[i].pci_phys_hi) ==
1320 			    PCI_REG_BDFR_G(phys_spec.pci_phys_hi)) {
1321 				/*
1322 				 * It is an error to change SS bits
1323 				 */
1324 				if (PCI_REG_ADDR_G(assigned[i].pci_phys_hi) !=
1325 				    PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1326 
1327 					FC_DEBUG2(2, CE_WARN, "Fcode changing "
1328 					    "ss bits in reg %x -- %x",
1329 					    assigned[i].pci_phys_hi,
1330 					    phys_spec.pci_phys_hi);
1331 				}
1332 
1333 				/*
1334 				 * Allocate enough
1335 				 */
1336 				l = MAX(assigned[i].pci_size_low,
1337 				    phys_spec.pci_size_low);
1338 
1339 				(void) pci_free_resource(dip, assigned[i]);
1340 				/*
1341 				 * Go on to allocate resources.
1342 				 */
1343 				break;
1344 			}
1345 		}
1346 		kmem_free(assigned, assigned_len);
1347 	}
1348 
1349 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1350 
1351 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1352 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1353 	config.pci_phys_mid = config.pci_phys_low = 0;
1354 	config.pci_size_hi = config.pci_size_low = 0;
1355 
1356 	/*
1357 	 * Map in configuration space (temporarily)
1358 	 */
1359 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1360 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1361 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1362 
1363 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1364 		return (1);
1365 	}
1366 
1367 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1368 		flags |= PCICFG_CONF_INDIRECT_MAP;
1369 
1370 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1371 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1372 		error = DDI_SUCCESS;
1373 	} else
1374 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1375 
1376 	if (error == DDI_SUCCESS)
1377 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1378 			error = DDI_FAILURE;
1379 		}
1380 
1381 	if (error != DDI_SUCCESS) {
1382 		return (1);
1383 	}
1384 
1385 	request.ra_flags |= NDI_RA_ALIGN_SIZE;
1386 	request.ra_boundbase = 0;
1387 	request.ra_boundlen = PCI_4GIG_LIMIT;
1388 
1389 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1390 
1391 	v = virt + offset;
1392 
1393 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1394 		request.ra_len = l;
1395 		request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1396 
1397 		/* allocate memory space from the allocator */
1398 
1399 		if (ndi_ra_alloc(ddi_get_parent(dip),
1400 			&request, &answer, &alen,
1401 			NDI_RA_TYPE_MEM, NDI_RA_PASS)
1402 					!= NDI_SUCCESS) {
1403 			pci_unmap_phys(&h, &config);
1404 			return (1);
1405 		}
1406 		FC_DEBUG3(1, CE_CONT, "ROM addr = [0x%x.%x] len [0x%x]\n",
1407 			HIADDR(answer),
1408 			LOADDR(answer),
1409 			alen);
1410 
1411 		/* program the low word */
1412 
1413 		ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1414 
1415 		phys_spec.pci_phys_low = LOADDR(answer);
1416 		phys_spec.pci_phys_mid = HIADDR(answer);
1417 	} else {
1418 		request.ra_len = l;
1419 
1420 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1421 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1422 			request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1423 
1424 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1425 				/*
1426 				 * If it is a non relocatable address,
1427 				 * then specify the address we want.
1428 				 */
1429 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1430 				request.ra_addr = (uint64_t)LADDR(
1431 				    phys_spec.pci_phys_low,
1432 				    phys_spec.pci_phys_mid);
1433 			}
1434 
1435 			/* allocate memory space from the allocator */
1436 
1437 			if (ndi_ra_alloc(ddi_get_parent(dip),
1438 				&request, &answer, &alen,
1439 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1440 						!= NDI_SUCCESS) {
1441 				pci_unmap_phys(&h, &config);
1442 				if (request.ra_flags ==
1443 				    NDI_RA_ALLOC_SPECIFIED)
1444 					cmn_err(CE_WARN, "Unable to allocate "
1445 					    "non relocatable address 0x%p\n",
1446 					    (void *) request.ra_addr);
1447 				return (1);
1448 			}
1449 			FC_DEBUG3(1, CE_CONT,
1450 			    "64 addr = [0x%x.%x] len [0x%x]\n",
1451 			    HIADDR(answer),
1452 			    LOADDR(answer),
1453 			    alen);
1454 
1455 			/* program the low word */
1456 
1457 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1458 
1459 			/* program the high word with value zero */
1460 			/* program the high word */
1461 			ddi_put32(h, (uint32_t *)v, HIADDR(answer));
1462 
1463 			phys_spec.pci_phys_low = LOADDR(answer);
1464 			phys_spec.pci_phys_mid = HIADDR(answer);
1465 			/*
1466 			 * currently support 32b address space
1467 			 * assignments only.
1468 			 */
1469 			phys_spec.pci_phys_hi ^= PCI_ADDR_MEM64 ^
1470 							PCI_ADDR_MEM32;
1471 
1472 			break;
1473 
1474 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1475 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1476 
1477 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1478 				/*
1479 				 * If it is a non relocatable address,
1480 				 * then specify the address we want.
1481 				 */
1482 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1483 				request.ra_addr = (uint64_t)
1484 				    phys_spec.pci_phys_low;
1485 			}
1486 
1487 			/* allocate memory space from the allocator */
1488 
1489 			if (ndi_ra_alloc(ddi_get_parent(dip),
1490 				&request, &answer, &alen,
1491 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1492 						!= NDI_SUCCESS) {
1493 				pci_unmap_phys(&h, &config);
1494 				if (request.ra_flags ==
1495 				    NDI_RA_ALLOC_SPECIFIED)
1496 					cmn_err(CE_WARN, "Unable to allocate "
1497 					    "non relocatable address 0x%p\n",
1498 					    (void *) request.ra_addr);
1499 				return (1);
1500 			}
1501 
1502 			FC_DEBUG3(1, CE_CONT,
1503 			    "32 addr = [0x%x.%x] len [0x%x]\n",
1504 			    HIADDR(answer),
1505 			    LOADDR(answer),
1506 			    alen);
1507 
1508 			/* program the low word */
1509 
1510 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1511 
1512 			phys_spec.pci_phys_low = LOADDR(answer);
1513 
1514 			break;
1515 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1516 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1517 
1518 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1519 				/*
1520 				 * If it is a non relocatable address,
1521 				 * then specify the address we want.
1522 				 */
1523 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1524 				request.ra_addr = (uint64_t)
1525 				    phys_spec.pci_phys_low;
1526 			}
1527 
1528 			/* allocate I/O space from the allocator */
1529 
1530 			if (ndi_ra_alloc(ddi_get_parent(dip),
1531 				&request, &answer, &alen,
1532 				NDI_RA_TYPE_IO, NDI_RA_PASS)
1533 						!= NDI_SUCCESS) {
1534 				pci_unmap_phys(&h, &config);
1535 				if (request.ra_flags ==
1536 				    NDI_RA_ALLOC_SPECIFIED)
1537 					cmn_err(CE_WARN, "Unable to allocate "
1538 					    "non relocatable IO Space 0x%p\n",
1539 					    (void *) request.ra_addr);
1540 				return (1);
1541 			}
1542 			FC_DEBUG3(1, CE_CONT,
1543 			    "I/O addr = [0x%x.%x] len [0x%x]\n",
1544 			    HIADDR(answer),
1545 			    LOADDR(answer),
1546 			    alen);
1547 
1548 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1549 
1550 			phys_spec.pci_phys_low = LOADDR(answer);
1551 
1552 			break;
1553 		default:
1554 			pci_unmap_phys(&h, &config);
1555 			return (1);
1556 		} /* switch */
1557 	}
1558 
1559 	/*
1560 	 * Now that memory locations are assigned,
1561 	 * update the assigned address property.
1562 	 */
1563 	if (pfc_update_assigned_prop(dip, &phys_spec)) {
1564 		pci_unmap_phys(&h, &config);
1565 		return (1);
1566 	}
1567 
1568 	pci_unmap_phys(&h, &config);
1569 
1570 	return (0);
1571 }
1572 
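/*
 * Release the space described by the reg spec back to the NDI
 * allocator and remove the matching "assigned-addresses" entry.
 * Returns 0 on success, 1 on failure.
 */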
1573 int
1574 pci_free_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1575 {
1576 	int offset, tmp;
1577 	pci_regspec_t config;
1578 	caddr_t virt, v;
1579 	ddi_device_acc_attr_t acc;
1580 	ddi_acc_handle_t h;
1581 	ndi_ra_request_t request;
1582 	int l, error, flags = 0;
1583 
1584 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1585 
1586 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1587 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1588 	config.pci_phys_mid = config.pci_phys_low = 0;
1589 	config.pci_size_hi = config.pci_size_low = 0;
1590 
1591 	/*
1592 	 * Map in configuration space (temporarily)
1593 	 */
1594 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1595 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1596 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1597 
1598 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1599 		return (1);
1600 	}
1601 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1602 		flags |= PCICFG_CONF_INDIRECT_MAP;
1603 
1604 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1605 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1606 		error = DDI_SUCCESS;
1607 	} else
1608 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1609 
1610 	if (error == DDI_SUCCESS)
1611 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1612 			error = DDI_FAILURE;
1613 		}
1614 	if (error != DDI_SUCCESS) {
1615 		return (1);
1616 	}
1617 
1618 
1619 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1620 
1621 	v = virt + offset;
1622 
1623 	/*
1624 	 * Pick up the size to be freed. It may be different from
1625 	 * what probe finds.
1626 	 */
1627 	l = phys_spec.pci_size_low;
1628 
1629 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1630 		/* free memory back to the allocator */
1631 		if (ndi_ra_free(ddi_get_parent(dip), phys_spec.pci_phys_low,
1632 		    l, NDI_RA_TYPE_MEM,
1633 		    NDI_RA_PASS) != NDI_SUCCESS) {
1634 			pci_unmap_phys(&h, &config);
1635 			return (1);
1636 		}
1637 
1638 		/* Unmap the BAR by writing a zero */
1639 
1640 		ddi_put32(h, (uint32_t *)v, 0);
1641 	} else {
1642 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1643 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1644 			/* free memory back to the allocator */
1645 			if (ndi_ra_free(ddi_get_parent(dip),
1646 			    LADDR(phys_spec.pci_phys_low,
1647 			    phys_spec.pci_phys_mid),
1648 			    l, NDI_RA_TYPE_MEM,
1649 			    NDI_RA_PASS) != NDI_SUCCESS) {
1650 				pci_unmap_phys(&h, &config);
1651 				return (1);
1652 			}
1653 
1654 			break;
1655 
1656 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1657 			/* free memory back to the allocator */
1658 			if (ndi_ra_free(ddi_get_parent(dip),
1659 			    phys_spec.pci_phys_low,
1660 			    l, NDI_RA_TYPE_MEM,
1661 			    NDI_RA_PASS) != NDI_SUCCESS) {
1662 				pci_unmap_phys(&h, &config);
1663 				return (1);
1664 			}
1665 
1666 			break;
1667 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1668 			/* free I/O space back to the allocator */
1669 			if (ndi_ra_free(ddi_get_parent(dip),
1670 			    phys_spec.pci_phys_low,
1671 			    l, NDI_RA_TYPE_IO,
1672 			    NDI_RA_PASS) != NDI_SUCCESS) {
1673 				pci_unmap_phys(&h, &config);
1674 				return (1);
1675 			}
1676 			break;
1677 		default:
1678 			pci_unmap_phys(&h, &config);
1679 			return (1);
1680 		} /* switch */
1681 	}
1682 
1683 	/*
1684 	 * Now that the resources have been freed,
1685 	 * update the assigned address property.
1686 	 */
1687 
1688 	FC_DEBUG1(1, CE_CONT, "updating assigned-addresses for %x\n",
1689 	    phys_spec.pci_phys_hi);
1690 
1691 	if (pfc_remove_assigned_prop(dip, &phys_spec)) {
1692 		pci_unmap_phys(&h, &config);
1693 		return (1);
1694 	}
1695 
1696 	pci_unmap_phys(&h, &config);
1697 
1698 	return (0);
1699 }
1700 
1701 
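/*
 * Map the given PCI physical reg spec into kernel virtual space and
 * return an access handle for it, by driving ddi_map() directly with
 * a regspec-type mapping request.
 */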
1702 int
1703 pci_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
1704 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1705 	ddi_acc_handle_t *handlep)
1706 {
1707 	ddi_map_req_t mr;
1708 	ddi_acc_hdl_t *hp;
1709 	int result;
1710 
1711 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1712 	hp = impl_acc_hdl_get(*handlep);
1713 	hp->ah_vers = VERS_ACCHDL;
1714 	hp->ah_dip = dip;
1715 	hp->ah_rnumber = 0;
1716 	hp->ah_offset = 0;
1717 	hp->ah_len = 0;
1718 	hp->ah_acc = *accattrp;
1719 
1720 	mr.map_op = DDI_MO_MAP_LOCKED;
1721 	mr.map_type = DDI_MT_REGSPEC;
1722 	mr.map_obj.rp = (struct regspec *)phys_spec;
1723 	mr.map_prot = PROT_READ | PROT_WRITE;
1724 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1725 	mr.map_handlep = hp;
1726 	mr.map_vers = DDI_MAP_VERSION;
1727 
1728 	result = ddi_map(dip, &mr, 0, 0, addrp);
1729 
1730 	if (result != DDI_SUCCESS) {
1731 		impl_acc_hdl_free(*handlep);
1732 		*handlep = (ddi_acc_handle_t)NULL;
1733 	} else {
1734 		hp->ah_addr = *addrp;
1735 	}
1736 
1737 	return (result);
1738 }
1739 
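/*
 * Undo a mapping established by pci_map_phys() and free the access
 * handle.
 */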
1740 void
1741 pci_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
1742 {
1743 	ddi_map_req_t mr;
1744 	ddi_acc_hdl_t *hp;
1745 
1746 	hp = impl_acc_hdl_get(*handlep);
1747 	ASSERT(hp);
1748 
1749 	mr.map_op = DDI_MO_UNMAP;
1750 	mr.map_type = DDI_MT_REGSPEC;
1751 	mr.map_obj.rp = (struct regspec *)ph;
1752 	mr.map_prot = PROT_READ | PROT_WRITE;
1753 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1754 	mr.map_handlep = hp;
1755 	mr.map_vers = DDI_MAP_VERSION;
1756 
1757 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1758 		hp->ah_len, &hp->ah_addr);
1759 
1760 	impl_acc_hdl_free(*handlep);
1761 
1762 
1763 	*handlep = (ddi_acc_handle_t)NULL;
1764 }
1765 
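/*
 * Append the new reg spec to the device's "assigned-addresses"
 * property, creating the property if it does not yet exist.
 */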
1766 int
1767 pfc_update_assigned_prop(dev_info_t *dip, pci_regspec_t *newone)
1768 {
1769 	int		alen;
1770 	pci_regspec_t	*assigned;
1771 	caddr_t		newreg;
1772 	uint_t		status;
1773 
1774 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1775 		"assigned-addresses", (caddr_t)&assigned, &alen);
1776 	switch (status) {
1777 		case DDI_PROP_SUCCESS:
1778 		break;
1779 		case DDI_PROP_NO_MEMORY:
1780 			return (1);
1781 		default:
1782 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1783 			"assigned-addresses", (int *)newone,
1784 				sizeof (*newone)/sizeof (int));
1785 			return (0);
1786 	}
1787 
1788 	/*
1789 	 * Allocate memory for the existing
1790 	 * assigned-addresses(s) plus one and then
1791 	 * build it.
1792 	 */
1793 
1794 	newreg = kmem_zalloc(alen+sizeof (*newone), KM_SLEEP);
1795 
1796 	bcopy(assigned, newreg, alen);
1797 	bcopy(newone, newreg + alen, sizeof (*newone));
1798 
1799 	/*
1800 	 * Write out the new "assigned-addresses" spec
1801 	 */
1802 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1803 		"assigned-addresses", (int *)newreg,
1804 		(alen + sizeof (*newone))/sizeof (int));
1805 
1806 	kmem_free((caddr_t)newreg, alen+sizeof (*newone));
1807 
1808 	return (0);
1809 }
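/*
 * Remove the entry matching oldone->pci_phys_hi from the device's
 * "assigned-addresses" property, deleting the property entirely if it
 * was the only entry.
 */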
1810 int
1811 pfc_remove_assigned_prop(dev_info_t *dip, pci_regspec_t *oldone)
1812 {
1813 	int		alen, new_len, num_entries, i;
1814 	pci_regspec_t	*assigned;
1815 	uint_t		status;
1816 
1817 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1818 		"assigned-addresses", (caddr_t)&assigned, &alen);
1819 	switch (status) {
1820 		case DDI_PROP_SUCCESS:
1821 		break;
1822 		case DDI_PROP_NO_MEMORY:
1823 			return (1);
1824 		default:
1825 			return (0);
1826 	}
1827 
1828 	num_entries = alen / sizeof (pci_regspec_t);
1829 	new_len = alen - sizeof (pci_regspec_t);
1830 
1831 	/*
1832 	 * Search for the memory being removed.
1833 	 */
1834 	for (i = 0; i < num_entries; i++) {
1835 		if (assigned[i].pci_phys_hi == oldone->pci_phys_hi) {
1836 			if (new_len == 0) {
1837 				(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1838 				    "assigned-addresses");
1839 				break;
1840 			}
1841 			if ((new_len - (i * sizeof (pci_regspec_t)))
1842 			    == 0) {
1843 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1844 				    "%x removed from property (last entry)\n",
1845 				    oldone->pci_phys_hi);
1846 			} else {
1847 				bcopy((void *)(assigned + i + 1),
1848 				    (void *)(assigned + i),
1849 				    (new_len - (i * sizeof (pci_regspec_t))));
1850 
1851 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1852 				    "%x removed from property\n",
1853 				    oldone->pci_phys_hi);
1854 			}
1855 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
1856 			    dip, "assigned-addresses", (int *)assigned,
1857 			    (new_len/sizeof (int)));
1858 
1859 			break;
1860 		}
1861 	}
1862 
1863 	return (0);
1864 }
1865 /*
1866  * We recognize non-transparent bridge child nodes by the following
1867  * property.  It is specific to this implementation and applies to
1868  * AP nodes only.
1869  */
1870 #define	PCICFG_DEV_CONF_MAP_PROP		"pci-parent-indirect"
1871 
1872 /*
1873  * If a non-transparent bridge drives a hotplug/hotswap bus, then
1874  * the following property must be defined for the node either by
1875  * the driver or the OBP.
1876  */
1877 #define	PCICFG_BUS_CONF_MAP_PROP		"pci-conf-indirect"
1878 
1879 /*
1880  * This function is called only for SPARC platforms, where we may have
1881  * a mix of directly and indirectly mapped configuration space.
1882  */
1883 /*ARGSUSED*/
1884 static int
1885 fcpci_indirect_map(dev_info_t *dip)
1886 {
1887 	int rc = DDI_FAILURE;
1888 
1889 	if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip), 0,
1890 			PCICFG_DEV_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1891 		rc = DDI_SUCCESS;
1892 	else
1893 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip),
1894 				0, PCICFG_BUS_CONF_MAP_PROP,
1895 				DDI_FAILURE) != DDI_FAILURE)
1896 			rc = DDI_SUCCESS;
1897 
1898 	return (rc);
1899 }
1900