xref: /titanic_44/usr/src/uts/sun4/io/efcode/fcpci.c (revision 0400f3c0732a5d19212a4bdd9fedd71f69d3cd51)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * fcpci.c: Framework PCI fcode ops
31  */
32 #include <sys/types.h>
33 #include <sys/kmem.h>
34 #include <sys/systm.h>
35 #include <sys/pci.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/sunndi.h>
39 #include <sys/ddidmareq.h>
40 #include <sys/pci.h>
41 #include <sys/modctl.h>
42 #include <sys/ndi_impldefs.h>
43 #include <sys/fcode.h>
44 #include <sys/promif.h>
45 #include <sys/promimpl.h>
46 #include <sys/ddi_implfuncs.h>
47 
48 #define	PCI_NPT_bits	(PCI_RELOCAT_B | PCI_PREFETCH_B | PCI_ALIAS_B)
49 #define	PCICFG_CONF_INDIRECT_MAP	1
50 
51 static int pfc_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
52 static int pfc_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
53 static int pfc_dma_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
54 static int pfc_dma_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
55 static int pfc_dma_sync(dev_info_t *, fco_handle_t, fc_ci_t *);
56 static int pfc_dma_cleanup(dev_info_t *, fco_handle_t, fc_ci_t *);
57 
58 static int pfc_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
59 static int pfc_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
60 static int pfc_config_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
61 static int pfc_config_store(dev_info_t *, fco_handle_t, fc_ci_t *);
62 
63 static int pfc_probe_address(dev_info_t *, fco_handle_t, fc_ci_t *);
64 static int pfc_probe_space(dev_info_t *, fco_handle_t, fc_ci_t *);
65 
66 static int pfc_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
67 static int pfc_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
68 static int pfc_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
69 int prom_get_fcode_size(char *);
70 int prom_get_fcode(char *, char *);
71 int pfc_update_assigned_prop(dev_info_t *, pci_regspec_t *);
72 int pfc_remove_assigned_prop(dev_info_t *, pci_regspec_t *);
73 int pci_alloc_resource(dev_info_t *, pci_regspec_t);
74 int pci_free_resource(dev_info_t *, pci_regspec_t);
75 int pci_alloc_mem_chunk(dev_info_t *,  uint64_t, uint64_t *,  uint64_t *);
76 int pci_alloc_io_chunk(dev_info_t *,  uint64_t,  uint64_t *, uint64_t *);
77 static int fcpci_indirect_map(dev_info_t *);
78 
79 int fcpci_unloadable;
80 
81 #ifndef	lint
82 static char _depends_on[] = "misc/fcodem misc/busra";
83 #endif
84 
85 #define	HIADDR(n) ((uint32_t)(((uint64_t)(n) & 0xFFFFFFFF00000000)>> 32))
86 #define	LOADDR(n)((uint32_t)((uint64_t)(n) & 0x00000000FFFFFFFF))
87 #define	LADDR(lo, hi)    (((uint64_t)(hi) << 32) | (uint32_t)(lo))
88 #define	PCI_4GIG_LIMIT 0xFFFFFFFFUL
89 #define	PCI_MEMGRAN 0x100000
90 #define	PCI_IOGRAN 0x1000
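/*
 * Illustrative example (not used by the code): for the 64-bit address
 * 0x123456789, HIADDR() yields 0x1, LOADDR() yields 0x23456789, and
 * LADDR(0x23456789, 0x1) reassembles the original 0x123456789.
 */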
91 
92 
93 /*
94  * Module linkage information for the kernel.
95  */
96 static struct modlmisc modlmisc = {
97 	&mod_miscops, "FCode pci bus functions %I%"
98 };
99 
100 static struct modlinkage modlinkage = {
101 	MODREV_1, (void *)&modlmisc, NULL
102 };
103 
104 int
105 _init(void)
106 {
107 	return (mod_install(&modlinkage));
108 }
109 
110 int
111 _fini(void)
112 {
113 	if (fcpci_unloadable)
114 		return (mod_remove(&modlinkage));
115 	return (EBUSY);
116 }
117 
118 int
119 _info(struct modinfo *modinfop)
120 {
121 	return (mod_info(&modlinkage, modinfop));
122 }
123 
124 
125 struct pfc_ops_v {
126 	char *svc_name;
127 	fc_ops_t *f;
128 };
129 
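/*
 * Dispatch tables mapping Fcode client-interface service names to the
 * handlers below.  pov[] covers the services implemented here; shared_pov[]
 * lists services that must also be run locally even after the generic
 * fc_ops() handler has serviced them (see pci_fc_ops()).
 */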
130 static struct pfc_ops_v pov[] = {
131 	{	"map-in",		pfc_map_in},
132 	{	"map-out",		pfc_map_out},
133 	{	"dma-map-in",		pfc_dma_map_in},
134 	{	"dma-map-out",		pfc_dma_map_out},
135 	{	"dma-sync",		pfc_dma_sync},
136 	{	"rx@",			pfc_register_fetch},
137 	{	"rl@",			pfc_register_fetch},
138 	{	"rw@",			pfc_register_fetch},
139 	{	"rb@",			pfc_register_fetch},
140 	{	"rx!",			pfc_register_store},
141 	{	"rl!",			pfc_register_store},
142 	{	"rw!",			pfc_register_store},
143 	{	"rb!",			pfc_register_store},
144 	{	"config-l@",		pfc_config_fetch},
145 	{	"config-w@",		pfc_config_fetch},
146 	{	"config-b@",		pfc_config_fetch},
147 	{	"config-l!",		pfc_config_store},
148 	{	"config-w!",		pfc_config_store},
149 	{	"config-b!",		pfc_config_store},
150 	{	FC_PROBE_ADDRESS,	pfc_probe_address},
151 	{	FC_PROBE_SPACE,		pfc_probe_space},
152 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
153 	{	FC_CONFIG_CHILD,	pfc_config_child},
154 	{	FC_GET_FCODE_SIZE,	pfc_get_fcode_size},
155 	{	FC_GET_FCODE,		pfc_get_fcode},
156 	{	NULL,			NULL}
157 };
158 
159 static struct pfc_ops_v shared_pov[] = {
160 	{	FC_SVC_EXIT,		pfc_dma_cleanup},
161 	{	NULL,			NULL}
162 };
163 
164 int pci_map_phys(dev_info_t *, pci_regspec_t *,
165     caddr_t *, ddi_device_acc_attr_t *, ddi_acc_handle_t *);
166 
167 void pci_unmap_phys(ddi_acc_handle_t *, pci_regspec_t *);
168 
169 fco_handle_t
170 pci_fc_ops_alloc_handle(dev_info_t *ap, dev_info_t *child,
171     void *fcode, size_t fcode_size, char *unit_address,
172     struct pci_ops_bus_args *up)
173 {
174 	fco_handle_t rp;
175 	struct pci_ops_bus_args *bp = NULL;
176 	phandle_t h;
177 
178 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
179 	rp->next_handle = fc_ops_alloc_handle(ap, child, fcode, fcode_size,
180 	    unit_address, NULL);
181 	rp->ap = ap;
182 	rp->child = child;
183 	rp->fcode = fcode;
184 	rp->fcode_size = fcode_size;
185 	if (unit_address) {
186 		char *buf;
187 
188 		buf = kmem_zalloc(strlen(unit_address) + 1, KM_SLEEP);
189 		(void) strcpy(buf, unit_address);
190 		rp->unit_address = buf;
191 	}
192 
193 	bp = kmem_zalloc(sizeof (struct pci_ops_bus_args), KM_SLEEP);
194 	*bp = *up;
195 	rp->bus_args = bp;
196 
197 	/*
198 	 * Add the child's nodeid to our table...
199 	 */
200 	h = ddi_get_nodeid(rp->child);
201 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
202 
203 	return (rp);
204 }
205 
206 void
207 pci_fc_ops_free_handle(fco_handle_t rp)
208 {
209 	struct pci_ops_bus_args *bp;
210 	struct fc_resource *ip, *np;
211 
212 	ASSERT(rp);
213 
214 	if (rp->next_handle)
215 		fc_ops_free_handle(rp->next_handle);
216 	if (rp->unit_address)
217 		kmem_free(rp->unit_address, strlen(rp->unit_address) + 1);
218 	if ((bp = rp->bus_args) != NULL)
219 		kmem_free(bp, sizeof (struct pci_ops_bus_args));
220 
221 	/*
222 	 * Release all the resources from the resource list
223 	 * XXX: We don't handle 'unknown' types, but we don't create them.
224 	 */
225 	for (ip = rp->head; ip != NULL; ip = np) {
226 		np = ip->next;
227 		switch (ip->type) {
228 		case RT_MAP:
229 			FC_DEBUG1(1, CE_CONT, "pci_fc_ops_free: "
230 			    "pci_unmap_phys(%p)\n", ip->fc_map_handle);
231 			pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
232 			kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
233 			break;
234 		case RT_DMA:
235 			/* DMA has to be freed up at exit time */
236 			cmn_err(CE_CONT, "pci_fc_ops_free: DMA seen!\n");
237 			break;
238 		default:
239 			cmn_err(CE_CONT, "pci_fc_ops_free: "
240 			    "unknown resource type %d\n", ip->type);
241 			break;
242 		}
243 		fc_rem_resource(rp, ip);
244 		kmem_free(ip, sizeof (struct fc_resource));
245 	}
246 	kmem_free(rp, sizeof (struct fc_resource_list));
247 }
248 
249 int
250 pci_fc_ops(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
251 {
252 	struct pfc_ops_v *pv;
253 	char *name = fc_cell2ptr(cp->svc_name);
254 
255 	ASSERT(rp);
256 
257 	/*
258 	 * First try the generic fc_ops. If the ops is a shared op,
259 	 * also call our local function.
260 	 */
261 	if (fc_ops(ap, rp->next_handle, cp) == 0) {
262 		for (pv = shared_pov; pv->svc_name != NULL; ++pv)
263 			if (strcmp(pv->svc_name, name) == 0)
264 				return (pv->f(ap, rp, cp));
265 		return (0);
266 	}
267 
268 	for (pv = pov; pv->svc_name != NULL; ++pv)
269 		if (strcmp(pv->svc_name, name) == 0)
270 			return (pv->f(ap, rp, cp));
271 
272 	FC_DEBUG1(9, CE_CONT, "pci_fc_ops: <%s> not serviced\n", name);
273 
274 	return (-1);
275 }
276 
277 /*
278  * Create a dma mapping for a given user address.
279  */
280 static int
281 pfc_dma_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
282 {
283 	ddi_dma_handle_t h;
284 	int error;
285 	caddr_t virt;
286 	size_t len;
287 	uint_t flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
288 	struct fc_resource *ip;
289 	ddi_dma_cookie_t c;
290 	struct buf *bp;
291 	ddi_dma_attr_t attr;
292 	uint_t ccnt;
293 
294 	if (fc_cell2int(cp->nargs) != 3)
295 		return (fc_syntax_error(cp, "nargs must be 3"));
296 
297 	if (fc_cell2int(cp->nresults) < 1)
298 		return (fc_syntax_error(cp, "nresults must be >= 1"));
299 
300 	/*
301 	 * XXX: It's not clear what we should do with a non-cacheable request
302 	 */
303 	virt = fc_cell2ptr(fc_arg(cp, 2));
304 	len = fc_cell2size(fc_arg(cp, 1));
305 #ifdef	notdef
306 	cacheable = fc_cell2int(fc_arg(cp, 0));	/* XXX: do what? */
307 #endif
308 
309 	FC_DEBUG2(6, CE_CONT, "pfc_dma_map_in: virt %p, len %d\n", virt, len);
310 
311 	/*
312 	 * Set up the address space for physio from userland
313 	 */
314 	error = fc_physio_setup(&bp, virt, len);
315 
316 	if (error)  {
317 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: fc_physio_setup failed "
318 		    "error: %d  virt: %p  len %d\n", error, virt, len);
319 		return (fc_priv_error(cp, "fc_physio_setup failed"));
320 	}
321 
322 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: dma_map_in; bp = %p\n", bp);
323 	error = fc_ddi_dma_alloc_handle(ap, &attr, DDI_DMA_SLEEP, NULL, &h);
324 	if (error != DDI_SUCCESS)  {
325 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
326 		    "error: %d  virt: %p  len %d\n", error, virt, len);
327 		return (fc_priv_error(cp, "real dma-map-in failed"));
328 	}
329 
330 	error = fc_ddi_dma_buf_bind_handle(h, bp, flags, DDI_DMA_SLEEP, NULL,
331 	    &c, &ccnt);
332 	if ((error != DDI_DMA_MAPPED) || (ccnt != 1)) {
333 		fc_ddi_dma_free_handle(&h);
334 		FC_DEBUG3(1, CE_CONT, "pfc_dma_map_in: real dma-map-in failed "
335 		    "error: %d  virt: %p  len %d\n", error, virt, len);
336 		return (fc_priv_error(cp, "real dma-map-in failed"));
337 	}
338 
339 	if (c.dmac_size < len)  {
340 		error = fc_ddi_dma_unbind_handle(h);
341 		if (error != DDI_SUCCESS) {
342 			return (fc_priv_error(cp, "ddi_dma_unbind error"));
343 		}
344 		fc_ddi_dma_free_handle(&h);
345 		return (fc_priv_error(cp, "ddi_dma_buf_bind size < len"));
346 	}
347 
348 	FC_DEBUG1(9, CE_CONT, "pfc_dma_map_in: returning devaddr %x\n",
349 		c.dmac_address);
350 
351 	cp->nresults = fc_int2cell(1);
352 	fc_result(cp, 0) = fc_uint32_t2cell(c.dmac_address);	/* XXX size */
353 
354 	/*
355 	 * Now we have to log this resource saving the handle and buf header
356 	 */
357 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
358 	ip->type = RT_DMA;
359 	ip->fc_dma_virt = virt;
360 	ip->fc_dma_len = len;
361 	ip->fc_dma_handle = h;
362 	ip->fc_dma_devaddr = c.dmac_address;
363 	ip->fc_dma_bp = bp;
364 	fc_add_resource(rp, ip);
365 
366 	return (fc_success_op(ap, rp, cp));
367 }
368 
369 static int
370 pfc_dma_sync(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
371 {
372 	void *virt;
373 	size_t len;
374 	uint32_t devaddr;
375 	int error;
376 	struct fc_resource *ip;
377 
378 	if (fc_cell2int(cp->nargs) != 3)
379 		return (fc_syntax_error(cp, "nargs must be 3"));
380 
381 	virt = fc_cell2ptr(fc_arg(cp, 2));
382 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
383 	len = fc_cell2size(fc_arg(cp, 0));
384 
385 	/*
386 	 * Find if this virt is 'within' a request we know about
387 	 */
388 	fc_lock_resource_list(rp);
389 	for (ip = rp->head; ip != NULL; ip = ip->next) {
390 		if (ip->type != RT_DMA)
391 			continue;
392 		if (ip->fc_dma_devaddr != devaddr)
393 			continue;
394 		if (((char *)virt >= (char *)ip->fc_dma_virt) &&
395 		    (((char *)virt + len) <=
396 		    ((char *)ip->fc_dma_virt + ip->fc_dma_len)))
397 			break;
398 	}
399 	fc_unlock_resource_list(rp);
400 
401 	if (ip == NULL)
402 		return (fc_priv_error(cp, "request not within a "
403 		    "known dma mapping"));
404 
405 	/*
406 	 * We know about this request, so we trust it enough to sync it.
407 	 * Unfortunately, we don't know which direction, so we'll do
408 	 * both directions.
409 	 */
410 
411 	error = fc_ddi_dma_sync(ip->fc_dma_handle,
412 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORCPU);
413 	error |= fc_ddi_dma_sync(ip->fc_dma_handle,
414 	    (char *)virt - (char *)ip->fc_dma_virt, len, DDI_DMA_SYNC_FORDEV);
415 
416 	if (error)
417 		return (fc_priv_error(cp, "Call to ddi_dma_sync failed"));
418 
419 	cp->nresults = fc_int2cell(0);
420 	return (fc_success_op(ap, rp, cp));
421 }
422 
423 static int
424 pfc_dma_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
425 {
426 	void *virt;
427 	size_t len;
428 	uint32_t devaddr;
429 	struct fc_resource *ip;
430 	int e;
431 
432 	if (fc_cell2int(cp->nargs) != 3)
433 		return (fc_syntax_error(cp, "nargs must be 3"));
434 
435 	virt = fc_cell2ptr(fc_arg(cp, 2));
436 	devaddr = fc_cell2uint32_t(fc_arg(cp, 1));
437 	len = fc_cell2size(fc_arg(cp, 0));
438 
439 	/*
440 	 * Find if this virt matches a request we know about
441 	 */
442 	fc_lock_resource_list(rp);
443 	for (ip = rp->head; ip != NULL; ip = ip->next) {
444 		if (ip->type != RT_DMA)
445 			continue;
446 		if (ip->fc_dma_devaddr != devaddr)
447 			continue;
448 		if (ip->fc_dma_virt != virt)
449 			continue;
450 		if (len == ip->fc_dma_len)
451 			break;
452 	}
453 	fc_unlock_resource_list(rp);
454 
455 	if (ip == NULL)
456 		return (fc_priv_error(cp, "request doesn't match a "
457 		    "known dma mapping"));
458 
459 	/*
460 	 * ddi_dma_unbind_handle does an implied sync ...
461 	 */
462 	e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
463 	if (e != DDI_SUCCESS) {
464 		cmn_err(CE_CONT, "pfc_dma_map_out: ddi_dma_unbind failed!\n");
465 	}
466 	fc_ddi_dma_free_handle(&ip->fc_dma_handle);
467 
468 	/*
469 	 * Tear down the physio mappings
470 	 */
471 	fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
472 
473 	/*
474 	 * remove the resource from the list and release it.
475 	 */
476 	fc_rem_resource(rp, ip);
477 	kmem_free(ip, sizeof (struct fc_resource));
478 
479 	cp->nresults = fc_int2cell(0);
480 	return (fc_success_op(ap, rp, cp));
481 }
482 
483 static struct fc_resource *
484 next_dma_resource(fco_handle_t rp)
485 {
486 	struct fc_resource *ip;
487 
488 	fc_lock_resource_list(rp);
489 	for (ip = rp->head; ip != NULL; ip = ip->next)
490 		if (ip->type == RT_DMA)
491 			break;
492 	fc_unlock_resource_list(rp);
493 
494 	return (ip);
495 }
496 
497 static int
498 pfc_dma_cleanup(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
499 {
500 	struct fc_resource *ip;
501 	int e;
502 
503 	while ((ip = next_dma_resource(rp)) != NULL) {
504 
505 		FC_DEBUG2(9, CE_CONT, "pfc_dma_cleanup: virt %x len %x\n",
506 			ip->fc_dma_virt, ip->fc_dma_len);
507 
508 		/*
509 		 * Free the dma handle
510 		 */
511 		e = fc_ddi_dma_unbind_handle(ip->fc_dma_handle);
512 		if (e != DDI_SUCCESS) {
513 			cmn_err(CE_CONT, "pfc_dma_cleanup: "
514 			    "ddi_dma_unbind failed!\n");
515 		}
516 		fc_ddi_dma_free_handle(&ip->fc_dma_handle);
517 
518 		/*
519 		 * Tear down the userland mapping and free the buf header
520 		 */
521 		fc_physio_free(&ip->fc_dma_bp, ip->fc_dma_virt, ip->fc_dma_len);
522 
523 		fc_rem_resource(rp, ip);
524 		kmem_free(ip, sizeof (struct fc_resource));
525 	}
526 
527 	cp->nresults = fc_int2cell(0);
528 	return (fc_success_op(ap, rp, cp));
529 }
530 
531 static int
532 pfc_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
533 {
534 	size_t len;
535 	int error;
536 	caddr_t virt;
537 	pci_regspec_t p, *ph;
538 	struct fc_resource *ip;
539 	ddi_device_acc_attr_t acc;
540 	ddi_acc_handle_t h;
541 
542 	if (fc_cell2int(cp->nargs) != 4)
543 		return (fc_syntax_error(cp, "nargs must be 4"));
544 
545 	if (fc_cell2int(cp->nresults) < 1)
546 		return (fc_syntax_error(cp, "nresults must be >= 1"));
547 
548 	p.pci_size_hi = 0;
549 	p.pci_size_low = len = fc_cell2size(fc_arg(cp, 0));
550 
551 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 1));
552 	p.pci_phys_mid = fc_cell2uint(fc_arg(cp, 2));
553 	p.pci_phys_low = fc_cell2uint(fc_arg(cp, 3));
554 
555 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
556 
557 	/*
558 	 * Fcode expects that the bytes are not swapped.
559 	 */
560 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
561 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
562 
563 	/*
564 	 * First we need to allocate the PCI resource.
565 	 */
566 	error = pci_alloc_resource(rp->child, p);
567 
568 	if (error)  {
569 		return (fc_priv_error(cp, "pci map-in failed"));
570 	}
571 
572 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
573 
574 	if (error)  {
575 		return (fc_priv_error(cp, "pci map-in failed"));
576 	}
577 
578 	cp->nresults = fc_int2cell(1);
579 	fc_result(cp, 0) = fc_ptr2cell(virt);
580 
581 	/*
582 	 * Log this resource ...
583 	 */
584 	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
585 	ip->type = RT_MAP;
586 	ip->fc_map_virt = virt;
587 	ip->fc_map_len = len;
588 	ip->fc_map_handle = h;
589 	ph = kmem_zalloc(sizeof (pci_regspec_t), KM_SLEEP);
590 	*ph = p;
591 	ip->fc_regspec = ph;	/* cache a copy of the reg spec */
592 	fc_add_resource(rp, ip);
593 
594 	return (fc_success_op(ap, rp, cp));
595 }
596 
597 static int
598 pfc_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
599 {
600 	caddr_t virt;
601 	size_t len;
602 	struct fc_resource *ip;
603 
604 	if (fc_cell2int(cp->nargs) != 2)
605 		return (fc_syntax_error(cp, "nargs must be 2"));
606 
607 	virt = fc_cell2ptr(fc_arg(cp, 1));
608 
609 	len = fc_cell2size(fc_arg(cp, 0));
610 
611 	/*
612 	 * Find if this request matches a mapping resource we set up.
613 	 */
614 	fc_lock_resource_list(rp);
615 	for (ip = rp->head; ip != NULL; ip = ip->next) {
616 		if (ip->type != RT_MAP)
617 			continue;
618 		if (ip->fc_map_virt != virt)
619 			continue;
620 		if (ip->fc_map_len == len)
621 			break;
622 	}
623 	fc_unlock_resource_list(rp);
624 
625 	if (ip == NULL)
626 		return (fc_priv_error(cp, "request doesn't match a "
627 		    "known mapping"));
628 
629 	pci_unmap_phys(&ip->fc_map_handle, ip->fc_regspec);
630 
631 	kmem_free(ip->fc_regspec, sizeof (pci_regspec_t));
632 
633 	/*
634 	 * remove the resource from the list and release it.
635 	 */
636 	fc_rem_resource(rp, ip);
637 	kmem_free(ip, sizeof (struct fc_resource));
638 
639 	cp->nresults = fc_int2cell(0);
640 	return (fc_success_op(ap, rp, cp));
641 }
642 
643 static int
644 pfc_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
645 {
646 	size_t len;
647 	caddr_t virt;
648 	int error;
649 	uint64_t x;
650 	uint32_t l;
651 	uint16_t w;
652 	uint8_t b;
653 	char *name = fc_cell2ptr(cp->svc_name);
654 	struct fc_resource *ip;
655 
656 	if (fc_cell2int(cp->nargs) != 1)
657 		return (fc_syntax_error(cp, "nargs must be 1"));
658 
659 	if (fc_cell2int(cp->nresults) < 1)
660 		return (fc_syntax_error(cp, "nresults must be >= 1"));
661 
662 	virt = fc_cell2ptr(fc_arg(cp, 0));
663 
664 	/*
665 	 * Determine the access width .. we can switch on the 2nd
666 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
667 	 */
668 	switch (*(name + 1)) {
669 	case 'x':	len = sizeof (x); break;
670 	case 'l':	len = sizeof (l); break;
671 	case 'w':	len = sizeof (w); break;
672 	case 'b':	len = sizeof (b); break;
673 	}
674 
675 	/*
676 	 * Check the alignment ...
677 	 */
678 	if (((intptr_t)virt & (len - 1)) != 0)
679 		return (fc_priv_error(cp, "unaligned access"));
680 
681 	/*
682 	 * Find if this virt is 'within' a request we know about
683 	 */
684 	fc_lock_resource_list(rp);
685 	for (ip = rp->head; ip != NULL; ip = ip->next) {
686 		if (ip->type != RT_MAP)
687 			continue;
688 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
689 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
690 			break;
691 	}
692 	fc_unlock_resource_list(rp);
693 
694 	if (ip == NULL)
695 		return (fc_priv_error(cp, "request not within a "
696 		    "known mapping"));
697 
698 	/*
699 	 * XXX: We need access handle versions of peek/poke to move
700 	 * beyond the prototype ... we assume that we have hardware
701 	 * byte swapping enabled for pci register access here which
702 	 * is a huge dependency on the current implementation.
703 	 */
704 	switch (len) {
705 	case sizeof (x):
706 		error = ddi_peek64(rp->child, (int64_t *)virt, (int64_t *)&x);
707 		break;
708 	case sizeof (l):
709 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&l);
710 		break;
711 	case sizeof (w):
712 		error = ddi_peek16(rp->child, (int16_t *)virt, (int16_t *)&w);
713 		break;
714 	case sizeof (b):
715 		error = ddi_peek8(rp->child, (int8_t *)virt, (int8_t *)&b);
716 		break;
717 	}
718 
719 	if (error) {
720 		return (fc_priv_error(cp, "access error"));
721 	}
722 
723 	cp->nresults = fc_int2cell(1);
724 	switch (len) {
725 	case sizeof (x): fc_result(cp, 0) = x; break;
726 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
727 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
728 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
729 	}
730 	return (fc_success_op(ap, rp, cp));
731 }
732 
733 static int
734 pfc_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
735 {
736 	size_t len;
737 	caddr_t virt;
738 	int error;
739 	uint64_t x;
740 	uint32_t l;
741 	uint16_t w;
742 	uint8_t b;
743 	char *name = fc_cell2ptr(cp->svc_name);
744 	struct fc_resource *ip;
745 
746 	if (fc_cell2int(cp->nargs) != 2)
747 		return (fc_syntax_error(cp, "nargs must be 2"));
748 
749 	virt = fc_cell2ptr(fc_arg(cp, 0));
750 
751 	/*
752 	 * Determine the access width .. we can switch on the 2nd
753 	 * character of the name which is "rx!", "rl!", "rw!" or "rb!"
754 	 */
755 	switch (*(name + 1)) {
756 	case 'x': len = sizeof (x); x = fc_arg(cp, 1); break;
757 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
758 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
759 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
760 	}
761 
762 	/*
763 	 * Check the alignment ...
764 	 */
765 	if (((intptr_t)virt & (len - 1)) != 0)
766 		return (fc_priv_error(cp, "unaligned access"));
767 
768 	/*
769 	 * Find if this virt is 'within' a request we know about
770 	 */
771 	fc_lock_resource_list(rp);
772 	for (ip = rp->head; ip != NULL; ip = ip->next) {
773 		if (ip->type != RT_MAP)
774 			continue;
775 		if ((virt >= (caddr_t)ip->fc_map_virt) && ((virt + len) <=
776 		    ((caddr_t)ip->fc_map_virt + ip->fc_map_len)))
777 			break;
778 	}
779 	fc_unlock_resource_list(rp);
780 
781 	if (ip == NULL)
782 		return (fc_priv_error(cp, "request not within a "
783 		    "known mapping"));
784 
785 	/*
786 	 * XXX: We need access handle versions of peek/poke to move
787 	 * beyond the prototype ... we assume that we have hardware
788 	 * byte swapping enabled for pci register access here which
789 	 * is a huge dependency on the current implementation.
790 	 */
791 	switch (len) {
792 	case sizeof (x):
793 		error = ddi_poke64(rp->child, (int64_t *)virt, x);
794 		break;
795 	case sizeof (l):
796 		error = ddi_poke32(rp->child, (int32_t *)virt, l);
797 		break;
798 	case sizeof (w):
799 		error = ddi_poke16(rp->child, (int16_t *)virt, w);
800 		break;
801 	case sizeof (b):
802 		error = ddi_poke8(rp->child, (int8_t *)virt, b);
803 		break;
804 	}
805 
806 	if (error) {
807 		return (fc_priv_error(cp, "access error"));
808 	}
809 
810 	cp->nresults = fc_int2cell(0);
811 	return (fc_success_op(ap, rp, cp));
812 }
813 
814 static int
815 pfc_config_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
816 {
817 	caddr_t virt, v;
818 	int error, reg, flags = 0;
819 	size_t len;
820 	uint32_t l, tmp;
821 	uint16_t w;
822 	uint8_t b;
823 	char *name = fc_cell2ptr(cp->svc_name);
824 	pci_regspec_t p;
825 	ddi_device_acc_attr_t acc;
826 	ddi_acc_handle_t h;
827 
828 	if (fc_cell2int(cp->nargs) != 1)
829 		return (fc_syntax_error(cp, "nargs must be 1"));
830 
831 	if (fc_cell2int(cp->nresults) < 1)
832 		return (fc_syntax_error(cp, "nresults must be >= 1"));
833 
834 	/*
835 	 * Construct a config address pci reg property from the args.
836 	 * arg[0] is the configuration address.
837 	 */
838 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
839 	p.pci_phys_mid = p.pci_phys_low = 0;
840 	p.pci_size_hi = p.pci_size_low = 0;
841 
842 	/*
843 	 * Verify that the address is a configuration space address:
844 	 * ss must be zero (config space) and the n, p, t bits must be zero.
845 	 */
846 	if (((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) ||
847 	    ((p.pci_phys_hi & PCI_NPT_bits) != 0)) {
848 		cmn_err(CE_CONT, "pfc_config_fetch: "
849 		    "invalid config addr: %x\n", p.pci_phys_hi);
850 		return (fc_priv_error(cp, "non-config addr"));
851 	}
852 
853 	/*
854 	 * Extract the register number from the config address and
855 	 * remove the register number from the physical address.
856 	 */
857 	reg = p.pci_phys_hi & PCI_REG_REG_M;
858 	p.pci_phys_hi &= ~PCI_REG_REG_M;
859 
860 	/*
861 	 * Determine the access width .. we can switch on the 8th
862 	 * character of the name which is "config-{l,w,b}@"
863 	 */
864 	switch (*(name + 7)) {
865 	case 'l':	len = sizeof (l); break;
866 	case 'w':	len = sizeof (w); break;
867 	case 'b':	len = sizeof (b); break;
868 	}
869 
870 	/*
871 	 * Verify that the access is properly aligned
872 	 */
873 	if ((reg & (len - 1)) != 0)
874 		return (fc_priv_error(cp, "unaligned access"));
875 
876 	/*
877 	 * Map in configuration space (temporarily)
878 	 */
879 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
880 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
881 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
882 
883 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
884 
885 	if (error)  {
886 		return (fc_priv_error(cp, "pci config map-in failed"));
887 	}
888 
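	/*
	 * Probe the first config word (vendor/device ID) before touching the
	 * requested register; a value of all-ones means no device responded.
	 * Indirectly mapped config space is read through the access handle,
	 * otherwise a protected ddi_peek32() is used.
	 */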
889 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
890 		flags |= PCICFG_CONF_INDIRECT_MAP;
891 
892 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
893 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
894 		error = DDI_SUCCESS;
895 	} else
896 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
897 
898 	if (error == DDI_SUCCESS)
899 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
900 			error = DDI_FAILURE;
901 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
902 		}
903 
904 	if (error != DDI_SUCCESS) {
905 		return (fc_priv_error(cp, "pci config fetch failed"));
906 	}
907 
908 
909 	/*
910 	 * XXX: We need access handle versions of peek/poke to move
911 	 * beyond the prototype ... we assume that we have hardware
912 	 * byte swapping enabled for pci register access here which
913 	 * is a huge dependency on the current implementation.
914 	 */
915 	v = virt + reg;
916 	switch (len) {
917 	case sizeof (l):
918 		l = (int32_t)ddi_get32(h, (uint32_t *)v);
919 		break;
920 	case sizeof (w):
921 		w = (int16_t)ddi_get16(h, (uint16_t *)v);
922 		break;
923 	case sizeof (b):
924 		b = (int8_t)ddi_get8(h, (uint8_t *)v);
925 		break;
926 	}
927 
928 	/*
929 	 * Remove the temporary config space mapping
930 	 */
931 	pci_unmap_phys(&h, &p);
932 
933 	if (error) {
934 		return (fc_priv_error(cp, "access error"));
935 	}
936 
937 	cp->nresults = fc_int2cell(1);
938 	switch (len) {
939 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
940 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
941 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
942 	}
943 
944 	return (fc_success_op(ap, rp, cp));
945 }
946 
947 static int
948 pfc_config_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
949 {
950 	caddr_t virt, v;
951 	int error, reg, flags = 0;
952 	size_t len;
953 	uint32_t l, tmp;
954 	uint16_t w;
955 	uint8_t b;
956 	char *name = fc_cell2ptr(cp->svc_name);
957 	pci_regspec_t p;
958 	ddi_device_acc_attr_t acc;
959 	ddi_acc_handle_t h;
960 
961 	if (fc_cell2int(cp->nargs) != 2)
962 		return (fc_syntax_error(cp, "nargs must be 2"));
963 
964 	/*
965 	 * Construct a config address pci reg property from the args.
966 	 * arg[0] is the configuration address. arg[1] is the data.
967 	 */
968 	p.pci_phys_hi = fc_cell2uint(fc_arg(cp, 0));
969 	p.pci_phys_mid = p.pci_phys_low = 0;
970 	p.pci_size_hi = p.pci_size_low = 0;
971 
972 	/*
973 	 * Verify that the address is a configuration space address:
974 	 * ss must be zero (config space) and the n, p, t bits must be zero.
975 	 */
976 	if (((p.pci_phys_hi & PCI_ADDR_MASK) != PCI_ADDR_CONFIG) ||
977 	    ((p.pci_phys_hi & PCI_NPT_bits) != 0)) {
978 		cmn_err(CE_CONT, "pfc_config_store: "
979 		    "invalid config addr: %x\n", p.pci_phys_hi);
980 		return (fc_priv_error(cp, "non-config addr"));
981 	}
982 
983 	/*
984 	 * Extract the register number from the config address and
985 	 * remove the register number from the physical address.
986 	 */
987 	reg = p.pci_phys_hi & PCI_REG_REG_M;
988 	p.pci_phys_hi &= ~PCI_REG_REG_M;
989 
990 	/*
991 	 * Determine the access width .. we can switch on the 8th
992 	 * character of the name which is "config-{l,w,b}!"
993 	 */
994 	switch (*(name + 7)) {
995 	case 'l': len = sizeof (l); l = fc_cell2uint32_t(fc_arg(cp, 1)); break;
996 	case 'w': len = sizeof (w); w = fc_cell2uint16_t(fc_arg(cp, 1)); break;
997 	case 'b': len = sizeof (b); b = fc_cell2uint8_t(fc_arg(cp, 1)); break;
998 	}
999 
1000 	/*
1001 	 * Verify that the access is properly aligned
1002 	 */
1003 	if ((reg & (len - 1)) != 0)
1004 		return (fc_priv_error(cp, "unaligned access"));
1005 
1006 	/*
1007 	 * Map in configuration space (temporarily)
1008 	 */
1009 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1010 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1011 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1012 
1013 	error = pci_map_phys(rp->child, &p, &virt, &acc, &h);
1014 
1015 	if (error)  {
1016 		return (fc_priv_error(cp, "pci config map-in failed"));
1017 	}
1018 
1019 	if (fcpci_indirect_map(rp->child) == DDI_SUCCESS)
1020 		flags |= PCICFG_CONF_INDIRECT_MAP;
1021 
1022 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1023 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1024 		error = DDI_SUCCESS;
1025 	} else
1026 		error = ddi_peek32(rp->child, (int32_t *)virt, (int32_t *)&tmp);
1027 
1028 	if (error == DDI_SUCCESS)
1029 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1030 			error = DDI_FAILURE;
1031 			cmn_err(CE_CONT, "fcpci: conf probe failed.l=%x", tmp);
1032 		}
1033 
1034 	if (error != DDI_SUCCESS) {
1035 		return (fc_priv_error(cp, "pci config store failed"));
1036 	}
1037 
1038 
1039 	/*
1040 	 * XXX: We need access handle versions of peek/poke to move
1041 	 * beyond the prototype ... we assume that we have hardware
1042 	 * byte swapping enabled for pci register access here which
1043 	 * is a huge dependency on the current implementation.
1044 	 */
1045 	v = virt + reg;
1046 	switch (len) {
1047 	case sizeof (l):
1048 		ddi_put32(h, (uint32_t *)v, (uint32_t)l);
1049 		break;
1050 	case sizeof (w):
1051 		ddi_put16(h, (uint16_t *)v, (uint16_t)w);
1052 		break;
1053 	case sizeof (b):
1054 		ddi_put8(h, (uint8_t *)v, (uint8_t)b);
1055 		break;
1056 	}
1057 
1058 	/*
1059 	 * Remove the temporary config space mapping
1060 	 */
1061 	pci_unmap_phys(&h, &p);
1062 
1063 	if (error) {
1064 		return (fc_priv_error(cp, "access error"));
1065 	}
1066 
1067 	cp->nresults = fc_int2cell(0);
1068 	return (fc_success_op(ap, rp, cp));
1069 }
1070 
1071 
1072 static int
1073 pfc_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1074 {
1075 	caddr_t name_virt, fcode_virt;
1076 	char *name, *fcode;
1077 	int fcode_len, status;
1078 
1079 	if (fc_cell2int(cp->nargs) != 3)
1080 		return (fc_syntax_error(cp, "nargs must be 3"));
1081 
1082 	if (fc_cell2int(cp->nresults) < 1)
1083 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1084 
1085 	name_virt = fc_cell2ptr(fc_arg(cp, 0));
1086 
1087 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1088 
1089 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1090 
1091 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1092 
1093 	if (copyinstr(fc_cell2ptr(name_virt), name,
1094 	    FC_SVC_NAME_LEN - 1, NULL))  {
1095 		status = 0;
1096 	} else {
1097 
1098 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1099 
1100 		if ((status = prom_get_fcode(name, fcode)) != 0) {
1101 
1102 			if (copyout((void *)fcode, (void *)fcode_virt,
1103 			    fcode_len)) {
1104 				cmn_err(CE_WARN, " pfc_get_fcode: Unable "
1105 				    "to copy out fcode image\n");
1106 				status = 0;
1107 			}
1108 		}
1109 
1110 		kmem_free(fcode, fcode_len);
1111 	}
1112 
1113 	kmem_free(name, FC_SVC_NAME_LEN);
1114 
1115 	cp->nresults = fc_int2cell(1);
1116 	fc_result(cp, 0) = status;
1117 
1118 	return (fc_success_op(ap, rp, cp));
1119 }
1120 
1121 static int
1122 pfc_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1123 {
1124 	caddr_t virt;
1125 	char *name;
1126 	int len;
1127 
1128 	if (fc_cell2int(cp->nargs) != 1)
1129 		return (fc_syntax_error(cp, "nargs must be 1"));
1130 
1131 	if (fc_cell2int(cp->nresults) < 1)
1132 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1133 
1134 	virt = fc_cell2ptr(fc_arg(cp, 0));
1135 
1136 	name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1137 
1138 	if (copyinstr(fc_cell2ptr(virt), name,
1139 	    FC_SVC_NAME_LEN - 1, NULL))  {
1140 		len = 0;
1141 	} else {
1142 		len = prom_get_fcode_size(name);
1143 	}
1144 
1145 	kmem_free(name, FC_SVC_NAME_LEN);
1146 
1147 	cp->nresults = fc_int2cell(1);
1148 	fc_result(cp, 0) = len;
1149 
1150 	return (fc_success_op(ap, rp, cp));
1151 }
1152 
1153 /*
1154  * Return the physical probe address: lo=0, mid=0, hi=config-addr
1155  */
1156 static int
1157 pfc_probe_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1158 {
1159 	if (fc_cell2int(cp->nargs) != 0)
1160 		return (fc_syntax_error(cp, "nargs must be 0"));
1161 
1162 	if (fc_cell2int(cp->nresults) < 2)
1163 		return (fc_syntax_error(cp, "nresults must be >= 2"));
1164 
1165 	cp->nresults = fc_int2cell(2);
1166 	fc_result(cp, 1) = fc_int2cell(0);	/* phys.lo */
1167 	fc_result(cp, 0) = fc_int2cell(0);	/* phys.mid */
1168 
1169 	return (fc_success_op(ap, rp, cp));
1170 }
1171 
1172 /*
1173  * Return the phys.hi component of the probe address.
1174  */
1175 static int
1176 pfc_probe_space(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1177 {
1178 	struct pci_ops_bus_args *ba = rp->bus_args;
1179 
1180 	ASSERT(ba);
1181 
1182 	if (fc_cell2int(cp->nargs) != 0)
1183 		return (fc_syntax_error(cp, "nargs must be 0"));
1184 
1185 	if (fc_cell2int(cp->nresults) < 1)
1186 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1187 
1188 	cp->nresults = fc_int2cell(1);
1189 	fc_result(cp, 0) = fc_uint32_t2cell(ba->config_address); /* phys.hi */
1190 
1191 	return (fc_success_op(ap, rp, cp));
1192 }
1193 
1194 static int
1195 pfc_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1196 {
1197 	fc_phandle_t h;
1198 
1199 	if (fc_cell2int(cp->nargs) != 0)
1200 		return (fc_syntax_error(cp, "nargs must be 0"));
1201 
1202 	if (fc_cell2int(cp->nresults) < 1)
1203 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1204 
1205 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1206 
1207 	cp->nresults = fc_int2cell(1);
1208 	fc_result(cp, 0) = fc_phandle2cell(h);
1209 
1210 	return (fc_success_op(ap, rp, cp));
1211 }
1212 
1213 int
1214 pci_alloc_mem_chunk(dev_info_t *dip, uint64_t mem_align, uint64_t *mem_size,
1215     uint64_t *mem_answer)
1216 {
1217 	ndi_ra_request_t req;
1218 	int rval;
1219 
1220 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1221 	req.ra_flags = NDI_RA_ALLOC_BOUNDED;
1222 	req.ra_boundbase = 0;
1223 	req.ra_boundlen = PCI_4GIG_LIMIT;
1224 	req.ra_len = *mem_size;
1225 	req.ra_align_mask = mem_align - 1;
1226 
1227 	rval = ndi_ra_alloc(dip, &req, mem_answer, mem_size,
1228 	    NDI_RA_TYPE_MEM, NDI_RA_PASS);
1229 
1230 	return (rval);
1231 }
1232 int
1233 pci_alloc_io_chunk(dev_info_t *dip, uint64_t io_align, uint64_t *io_size,
1234     uint64_t *io_answer)
1235 {
1236 	ndi_ra_request_t req;
1237 	int rval;
1238 
1239 	bzero((caddr_t)&req, sizeof (ndi_ra_request_t));
1240 	req.ra_flags = (NDI_RA_ALLOC_BOUNDED | NDI_RA_ALLOC_PARTIAL_OK);
1241 	req.ra_boundbase = 0;
1242 	req.ra_boundlen = PCI_4GIG_LIMIT;
1243 	req.ra_len = *io_size;
1244 	req.ra_align_mask = io_align - 1;
1245 
1246 	rval = ndi_ra_alloc(dip, &req, io_answer, io_size,
1247 	    NDI_RA_TYPE_IO, NDI_RA_PASS);
1248 
1249 	return (rval);
1250 }
1251 
1252 int
1253 pci_alloc_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1254 {
1255 	uint64_t answer;
1256 	uint64_t alen;
1257 	int offset, tmp;
1258 	pci_regspec_t config;
1259 	caddr_t virt, v;
1260 	ddi_device_acc_attr_t acc;
1261 	ddi_acc_handle_t h;
1262 	ndi_ra_request_t request;
1263 	pci_regspec_t *assigned;
1264 	int assigned_len, entries, i, l, flags = 0, error;
1265 
1266 	l = phys_spec.pci_size_low;
1267 
1268 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1269 	    DDI_PROP_DONTPASS, "assigned-addresses", (caddr_t)&assigned,
1270 	    &assigned_len) == DDI_PROP_SUCCESS) {
1271 
1272 		entries = assigned_len / (sizeof (pci_regspec_t));
1273 
1274 		/*
1275 		 * Walk through the assigned-addresses entries. If there is
1276 		 * a match, there is no need to allocate the resource.
1277 		 */
1278 		for (i = 0; i < entries; i++) {
1279 			if (assigned[i].pci_phys_hi == phys_spec.pci_phys_hi) {
1280 				if (assigned[i].pci_size_low >=
1281 				    phys_spec.pci_size_low) {
1282 					kmem_free(assigned, assigned_len);
1283 					return (0);
1284 				}
1285 				/*
1286 				 * Fcode wants to assign more than what
1287 				 * probe found.
1288 				 */
1289 				(void) pci_free_resource(dip, assigned[i]);
1290 				/*
1291 				 * Go on to allocate resources.
1292 				 */
1293 				break;
1294 			}
1295 			/*
1296 			 * Check if Fcode wants to map using different
1297 			 * NPT bits.
1298 			 */
1299 			if (PCI_REG_BDFR_G(assigned[i].pci_phys_hi) ==
1300 			    PCI_REG_BDFR_G(phys_spec.pci_phys_hi)) {
1301 				/*
1302 				 * It is an error to change SS bits
1303 				 */
1304 				if (PCI_REG_ADDR_G(assigned[i].pci_phys_hi) !=
1305 				    PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1306 
1307 					cmn_err(CE_WARN, "Fcode changing ss "
1308 					    "bits in reg %x -- %x",
1309 					    assigned[i].pci_phys_hi,
1310 					    phys_spec.pci_phys_hi);
1311 
1312 					kmem_free(assigned, assigned_len);
1313 					return (1);
1314 				}
1315 
1316 				/*
1317 				 * Allocate enough
1318 				 */
1319 				l = MAX(assigned[i].pci_size_low,
1320 				    phys_spec.pci_size_low);
1321 
1322 				(void) pci_free_resource(dip, assigned[i]);
1323 				/*
1324 				 * Go on to allocate resources.
1325 				 */
1326 				break;
1327 			}
1328 		}
1329 		kmem_free(assigned, assigned_len);
1330 	}
1331 
1332 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1333 
1334 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1335 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1336 	config.pci_phys_mid = config.pci_phys_low = 0;
1337 	config.pci_size_hi = config.pci_size_low = 0;
1338 
1339 	/*
1340 	 * Map in configuration space (temporarily)
1341 	 */
1342 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1343 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1344 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1345 
1346 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1347 		return (1);
1348 	}
1349 
1350 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1351 		flags |= PCICFG_CONF_INDIRECT_MAP;
1352 
1353 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1354 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1355 		error = DDI_SUCCESS;
1356 	} else
1357 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1358 
1359 	if (error == DDI_SUCCESS)
1360 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1361 			error = DDI_FAILURE;
1362 		}
1363 
1364 	if (error != DDI_SUCCESS) {
1365 		return (1);
1366 	}
1367 
1368 	request.ra_flags |= NDI_RA_ALIGN_SIZE;
1369 	request.ra_boundbase = 0;
1370 	request.ra_boundlen = PCI_4GIG_LIMIT;
1371 
1372 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1373 
1374 	v = virt + offset;
1375 
1376 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1377 		request.ra_len = l;
1378 		request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1379 
1380 		/* allocate memory space from the allocator */
1381 
1382 		if (ndi_ra_alloc(ddi_get_parent(dip),
1383 			&request, &answer, &alen,
1384 			NDI_RA_TYPE_MEM, NDI_RA_PASS)
1385 					!= NDI_SUCCESS) {
1386 			pci_unmap_phys(&h, &config);
1387 			return (1);
1388 		}
1389 		FC_DEBUG3(1, CE_CONT, "ROM addr = [0x%x.%x] len [0x%x]\n",
1390 			HIADDR(answer),
1391 			LOADDR(answer),
1392 			alen);
1393 
1394 		/* program the low word */
1395 
1396 		ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1397 
1398 		phys_spec.pci_phys_low = LOADDR(answer);
1399 		phys_spec.pci_phys_mid = HIADDR(answer);
1400 	} else {
1401 		request.ra_len = l;
1402 
1403 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1404 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1405 			request.ra_flags ^= NDI_RA_ALLOC_BOUNDED;
1406 
1407 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1408 				/*
1409 				 * If it is a non relocatable address,
1410 				 * then specify the address we want.
1411 				 */
1412 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1413 				request.ra_addr = (uint64_t)LADDR(
1414 				    phys_spec.pci_phys_low,
1415 				    phys_spec.pci_phys_mid);
1416 			}
1417 
1418 			/* allocate memory space from the allocator */
1419 
1420 			if (ndi_ra_alloc(ddi_get_parent(dip),
1421 				&request, &answer, &alen,
1422 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1423 						!= NDI_SUCCESS) {
1424 				pci_unmap_phys(&h, &config);
1425 				if (request.ra_flags ==
1426 				    NDI_RA_ALLOC_SPECIFIED)
1427 					cmn_err(CE_WARN, "Unable to allocate "
1428 					    "non relocatable address 0x%p\n",
1429 					    (void *) request.ra_addr);
1430 				return (1);
1431 			}
1432 			FC_DEBUG3(1, CE_CONT,
1433 			    "64 addr = [0x%x.%x] len [0x%x]\n",
1434 			    HIADDR(answer),
1435 			    LOADDR(answer),
1436 			    alen);
1437 
1438 			/* program the low word */
1439 
1440 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1441 
1442 			/* program the high word */
1443 			v += 4;
1444 			ddi_put32(h, (uint32_t *)v, HIADDR(answer));
1445 
1446 			phys_spec.pci_phys_low = LOADDR(answer);
1447 			phys_spec.pci_phys_mid = HIADDR(answer);
1448 
1449 			break;
1450 
1451 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1452 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1453 
1454 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1455 				/*
1456 				 * If it is a non relocatable address,
1457 				 * then specify the address we want.
1458 				 */
1459 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1460 				request.ra_addr = (uint64_t)
1461 				    phys_spec.pci_phys_low;
1462 			}
1463 
1464 			/* allocate memory space from the allocator */
1465 
1466 			if (ndi_ra_alloc(ddi_get_parent(dip),
1467 				&request, &answer, &alen,
1468 				NDI_RA_TYPE_MEM, NDI_RA_PASS)
1469 						!= NDI_SUCCESS) {
1470 				pci_unmap_phys(&h, &config);
1471 				if (request.ra_flags ==
1472 				    NDI_RA_ALLOC_SPECIFIED)
1473 					cmn_err(CE_WARN, "Unable to allocate "
1474 					    "non relocatable address 0x%p\n",
1475 					    (void *) request.ra_addr);
1476 				return (1);
1477 			}
1478 
1479 			FC_DEBUG3(1, CE_CONT,
1480 			    "32 addr = [0x%x.%x] len [0x%x]\n",
1481 			    HIADDR(answer),
1482 			    LOADDR(answer),
1483 			    alen);
1484 
1485 			/* program the low word */
1486 
1487 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1488 
1489 			phys_spec.pci_phys_low = LOADDR(answer);
1490 
1491 			break;
1492 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1493 			request.ra_flags |= NDI_RA_ALLOC_BOUNDED;
1494 
1495 			if (phys_spec.pci_phys_hi & PCI_REG_REL_M) {
1496 				/*
1497 				 * If it is a non relocatable address,
1498 				 * then specify the address we want.
1499 				 */
1500 				request.ra_flags = NDI_RA_ALLOC_SPECIFIED;
1501 				request.ra_addr = (uint64_t)
1502 				    phys_spec.pci_phys_low;
1503 			}
1504 
1505 			/* allocate I/O space from the allocator */
1506 
1507 			if (ndi_ra_alloc(ddi_get_parent(dip),
1508 				&request, &answer, &alen,
1509 				NDI_RA_TYPE_IO, NDI_RA_PASS)
1510 						!= NDI_SUCCESS) {
1511 				pci_unmap_phys(&h, &config);
1512 				if (request.ra_flags ==
1513 				    NDI_RA_ALLOC_SPECIFIED)
1514 					cmn_err(CE_WARN, "Unable to allocate "
1515 					    "non relocatable IO Space 0x%p\n",
1516 					    (void *) request.ra_addr);
1517 				return (1);
1518 			}
1519 			FC_DEBUG3(1, CE_CONT,
1520 			    "I/O addr = [0x%x.%x] len [0x%x]\n",
1521 			    HIADDR(answer),
1522 			    LOADDR(answer),
1523 			    alen);
1524 
1525 			ddi_put32(h, (uint32_t *)v, LOADDR(answer));
1526 
1527 			phys_spec.pci_phys_low = LOADDR(answer);
1528 
1529 			break;
1530 		default:
1531 			pci_unmap_phys(&h, &config);
1532 			return (1);
1533 		} /* switch */
1534 	}
1535 
1536 	/*
1537 	 * Now that memory locations are assigned,
1538 	 * update the assigned-addresses property.
1539 	 */
1540 	if (pfc_update_assigned_prop(dip, &phys_spec)) {
1541 		pci_unmap_phys(&h, &config);
1542 		return (1);
1543 	}
1544 
1545 	pci_unmap_phys(&h, &config);
1546 
1547 	return (0);
1548 }
1549 
1550 int
1551 pci_free_resource(dev_info_t *dip, pci_regspec_t phys_spec)
1552 {
1553 	int offset, tmp;
1554 	pci_regspec_t config;
1555 	caddr_t virt, v;
1556 	ddi_device_acc_attr_t acc;
1557 	ddi_acc_handle_t h;
1558 	ndi_ra_request_t request;
1559 	int l, error, flags = 0;
1560 
1561 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1562 
1563 	config.pci_phys_hi = PCI_CONF_ADDR_MASK & phys_spec.pci_phys_hi;
1564 	config.pci_phys_hi &= ~PCI_REG_REG_M;
1565 	config.pci_phys_mid = config.pci_phys_low = 0;
1566 	config.pci_size_hi = config.pci_size_low = 0;
1567 
1568 	/*
1569 	 * Map in configuration space (temporarily)
1570 	 */
1571 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1572 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1573 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1574 
1575 	if (error = pci_map_phys(dip, &config, &virt, &acc, &h)) {
1576 		return (1);
1577 	}
1578 	if (fcpci_indirect_map(dip) == DDI_SUCCESS)
1579 		flags |= PCICFG_CONF_INDIRECT_MAP;
1580 
1581 	if (flags & PCICFG_CONF_INDIRECT_MAP) {
1582 		tmp = (int32_t)ddi_get32(h, (uint32_t *)virt);
1583 		error = DDI_SUCCESS;
1584 	} else
1585 		error = ddi_peek32(dip, (int32_t *)virt, (int32_t *)&tmp);
1586 
1587 	if (error == DDI_SUCCESS)
1588 		if ((tmp == (int32_t)0xffffffff) || (tmp == -1)) {
1589 			error = DDI_FAILURE;
1590 		}
1591 	if (error != DDI_SUCCESS) {
1592 		return (1);
1593 	}
1594 
1595 
1596 	offset = PCI_REG_REG_G(phys_spec.pci_phys_hi);
1597 
1598 	v = virt + offset;
1599 
1600 	/*
1601 	 * Pick up the size to be freed. It may be different from
1602 	 * what probe finds.
1603 	 */
1604 	l = phys_spec.pci_size_low;
1605 
1606 	if (PCI_REG_REG_G(phys_spec.pci_phys_hi) == PCI_CONF_ROM) {
1607 		/* free memory back to the allocator */
1608 		if (ndi_ra_free(ddi_get_parent(dip), phys_spec.pci_phys_low,
1609 		    l, NDI_RA_TYPE_MEM,
1610 		    NDI_RA_PASS) != NDI_SUCCESS) {
1611 			pci_unmap_phys(&h, &config);
1612 			return (1);
1613 		}
1614 
1615 		/* Unmap the BAR by writing a zero */
1616 
1617 		ddi_put32(h, (uint32_t *)v, 0);
1618 	} else {
1619 		switch (PCI_REG_ADDR_G(phys_spec.pci_phys_hi)) {
1620 		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1621 			/* free memory back to the allocator */
1622 			if (ndi_ra_free(ddi_get_parent(dip),
1623 			    LADDR(phys_spec.pci_phys_low,
1624 			    phys_spec.pci_phys_mid),
1625 			    l, NDI_RA_TYPE_MEM,
1626 			    NDI_RA_PASS) != NDI_SUCCESS) {
1627 				pci_unmap_phys(&h, &config);
1628 				return (1);
1629 			}
1630 
1631 			break;
1632 
1633 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1634 			/* free memory back to the allocator */
1635 			if (ndi_ra_free(ddi_get_parent(dip),
1636 			    phys_spec.pci_phys_low,
1637 			    l, NDI_RA_TYPE_MEM,
1638 			    NDI_RA_PASS) != NDI_SUCCESS) {
1639 				pci_unmap_phys(&h, &config);
1640 				return (1);
1641 			}
1642 
1643 			break;
1644 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1645 			/* free I/O space back to the allocator */
1646 			if (ndi_ra_free(ddi_get_parent(dip),
1647 			    phys_spec.pci_phys_low,
1648 			    l, NDI_RA_TYPE_IO,
1649 			    NDI_RA_PASS) != NDI_SUCCESS) {
1650 				pci_unmap_phys(&h, &config);
1651 				return (1);
1652 			}
1653 			break;
1654 		default:
1655 			pci_unmap_phys(&h, &config);
1656 			return (1);
1657 		} /* switch */
1658 	}
1659 
1660 	/*
1661 	 * Now that the resource has been freed,
1662 	 * update the assigned-addresses property.
1663 	 */
1664 
1665 	FC_DEBUG1(1, CE_CONT, "updating assigned-addresses for %x\n",
1666 	    phys_spec.pci_phys_hi);
1667 
1668 	if (pfc_remove_assigned_prop(dip, &phys_spec)) {
1669 		pci_unmap_phys(&h, &config);
1670 		return (1);
1671 	}
1672 
1673 	pci_unmap_phys(&h, &config);
1674 
1675 	return (0);
1676 }
1677 
1678 
1679 int
1680 pci_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
1681 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1682 	ddi_acc_handle_t *handlep)
1683 {
1684 	ddi_map_req_t mr;
1685 	ddi_acc_hdl_t *hp;
1686 	int result;
1687 
1688 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1689 	hp = impl_acc_hdl_get(*handlep);
1690 	hp->ah_vers = VERS_ACCHDL;
1691 	hp->ah_dip = dip;
1692 	hp->ah_rnumber = 0;
1693 	hp->ah_offset = 0;
1694 	hp->ah_len = 0;
1695 	hp->ah_acc = *accattrp;
1696 
1697 	mr.map_op = DDI_MO_MAP_LOCKED;
1698 	mr.map_type = DDI_MT_REGSPEC;
1699 	mr.map_obj.rp = (struct regspec *)phys_spec;
1700 	mr.map_prot = PROT_READ | PROT_WRITE;
1701 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1702 	mr.map_handlep = hp;
1703 	mr.map_vers = DDI_MAP_VERSION;
1704 
1705 	result = ddi_map(dip, &mr, 0, 0, addrp);
1706 
1707 	if (result != DDI_SUCCESS) {
1708 		impl_acc_hdl_free(*handlep);
1709 		*handlep = (ddi_acc_handle_t)NULL;
1710 	} else {
1711 		hp->ah_addr = *addrp;
1712 	}
1713 
1714 	return (result);
1715 }
1716 
1717 void
1718 pci_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
1719 {
1720 	ddi_map_req_t mr;
1721 	ddi_acc_hdl_t *hp;
1722 
1723 	hp = impl_acc_hdl_get(*handlep);
1724 	ASSERT(hp);
1725 
1726 	mr.map_op = DDI_MO_UNMAP;
1727 	mr.map_type = DDI_MT_REGSPEC;
1728 	mr.map_obj.rp = (struct regspec *)ph;
1729 	mr.map_prot = PROT_READ | PROT_WRITE;
1730 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1731 	mr.map_handlep = hp;
1732 	mr.map_vers = DDI_MAP_VERSION;
1733 
1734 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1735 		hp->ah_len, &hp->ah_addr);
1736 
1737 	impl_acc_hdl_free(*handlep);
1738 
1739 
1740 	*handlep = (ddi_acc_handle_t)NULL;
1741 }
1742 
1743 int
1744 pfc_update_assigned_prop(dev_info_t *dip, pci_regspec_t *newone)
1745 {
1746 	int		alen;
1747 	pci_regspec_t	*assigned;
1748 	caddr_t		newreg;
1749 	uint_t		status;
1750 
1751 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1752 		"assigned-addresses", (caddr_t)&assigned, &alen);
1753 	switch (status) {
1754 		case DDI_PROP_SUCCESS:
1755 		break;
1756 		case DDI_PROP_NO_MEMORY:
1757 			return (1);
1758 		default:
1759 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1760 			"assigned-addresses", (int *)newone,
1761 				sizeof (*newone)/sizeof (int));
1762 			return (0);
1763 	}
1764 
1765 	/*
1766 	 * Allocate memory for the existing
1767 	 * assigned-addresses(s) plus one and then
1768 	 * build it.
1769 	 */
1770 
1771 	newreg = kmem_zalloc(alen+sizeof (*newone), KM_SLEEP);
1772 
1773 	bcopy(assigned, newreg, alen);
1774 	bcopy(newone, newreg + alen, sizeof (*newone));
1775 
1776 	/*
1777 	 * Write out the new "assigned-addresses" spec
1778 	 */
1779 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1780 		"assigned-addresses", (int *)newreg,
1781 		(alen + sizeof (*newone))/sizeof (int));
1782 
1783 	kmem_free((caddr_t)newreg, alen+sizeof (*newone));
1784 
1785 	return (0);
1786 }
1787 int
1788 pfc_remove_assigned_prop(dev_info_t *dip, pci_regspec_t *oldone)
1789 {
1790 	int		alen, new_len, num_entries, i;
1791 	pci_regspec_t	*assigned;
1792 	uint_t		status;
1793 
1794 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1795 		"assigned-addresses", (caddr_t)&assigned, &alen);
1796 	switch (status) {
1797 		case DDI_PROP_SUCCESS:
1798 		break;
1799 		case DDI_PROP_NO_MEMORY:
1800 			return (1);
1801 		default:
1802 			return (0);
1803 	}
1804 
1805 	num_entries = alen / sizeof (pci_regspec_t);
1806 	new_len = alen - sizeof (pci_regspec_t);
1807 
1808 	/*
1809 	 * Search for the memory being removed.
1810 	 */
1811 	for (i = 0; i < num_entries; i++) {
1812 		if (assigned[i].pci_phys_hi == oldone->pci_phys_hi) {
1813 			if (new_len == 0) {
1814 				(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1815 				    "assigned-addresses");
1816 				break;
1817 			}
1818 			if ((new_len - (i * sizeof (pci_regspec_t)))
1819 			    == 0) {
1820 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1821 				    "%x removed from property (last entry)\n",
1822 				    oldone->pci_phys_hi);
1823 			} else {
1824 				bcopy((void *)(assigned + i + 1),
1825 				    (void *)(assigned + i),
1826 				    (new_len - (i * sizeof (pci_regspec_t))));
1827 
1828 				FC_DEBUG1(1, CE_CONT, "assigned-address entry "
1829 				    "%x removed from property\n",
1830 				    oldone->pci_phys_hi);
1831 			}
1832 			(void) ndi_prop_update_int_array(DDI_DEV_T_NONE,
1833 			    dip, "assigned-addresses", (int *)assigned,
1834 			    (new_len/sizeof (int)));
1835 
1836 			break;
1837 		}
1838 	}
1839 
1840 	return (0);
1841 }
1842 /*
1843  * We recognize non-transparent bridge child nodes by the following
1844  * property. This is specific to this implementation only, and the
1845  * property applies to AP nodes only.
1846  */
1847 #define	PCICFG_DEV_CONF_MAP_PROP		"pci-parent-indirect"
1848 
1849 /*
1850  * If a non-transparent bridge drives a hotplug/hotswap bus, then
1851  * the following property must be defined for the node either by
1852  * the driver or the OBP.
1853  */
1854 #define	PCICFG_BUS_CONF_MAP_PROP		"pci-conf-indirect"
1855 
1856 /*
1857  * This function is called only on SPARC platforms, where we may have
1858  * a mix of directly and indirectly mapped configuration space.
1859  */
1860 /*ARGSUSED*/
1861 static int
1862 fcpci_indirect_map(dev_info_t *dip)
1863 {
1864 	int rc = DDI_FAILURE;
1865 
1866 	if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip), DDI_PROP_DONTPASS,
1867 			PCICFG_DEV_CONF_MAP_PROP, DDI_FAILURE) != DDI_FAILURE)
1868 		rc = DDI_SUCCESS;
1869 	else
1870 		if (ddi_getprop(DDI_DEV_T_ANY, ddi_get_parent(dip),
1871 				DDI_PROP_DONTPASS, PCICFG_BUS_CONF_MAP_PROP,
1872 				DDI_FAILURE) != DDI_FAILURE)
1873 			rc = DDI_SUCCESS;
1874 
1875 	return (rc);
1876 }
1877