xref: /titanic_51/usr/src/uts/common/io/busra.c (revision e18306b13ed357bd545696aa96b53617b64db4a3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright 2012 Milan Jurik. All rights reserved.
25  */
26 
27 #if defined(DEBUG)
28 #define	BUSRA_DEBUG
29 #endif
30 
31 /*
32  * This module provides a set of resource management interfaces
33  * to manage bus resources globally in the system.
34  *
35  * The bus nexus drivers are typically responsible to setup resource
36  * maps for the bus resources available for a bus instance. However
37  * this module also provides resource setup functions for PCI bus
38  * (used by both SPARC and X86 platforms) and ISA bus instances (used
39  * only for X86 platforms).
40  */
41 
42 #include <sys/types.h>
43 #include <sys/systm.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/ddi_impldefs.h>
48 #include <sys/ndi_impldefs.h>
49 #include <sys/kmem.h>
50 #include <sys/pctypes.h>
51 #include <sys/modctl.h>
52 #include <sys/debug.h>
53 #include <sys/spl.h>
54 #include <sys/pci.h>
55 #include <sys/autoconf.h>
56 
#if defined(BUSRA_DEBUG)
/* patchable flag: set nonzero (e.g. with a kernel debugger) for debug output */
int busra_debug = 0;
/*
 * Object-like macro: DEBUGPRT(...) expands to "if (busra_debug) cmn_err(...)".
 * NOTE(review): the bare `if` is a dangling-else hazard at call sites; all
 * current uses in this file are standalone statements, so it is benign here.
 */
#define	DEBUGPRT \
	if (busra_debug) cmn_err

#else
/* non-DEBUG build: compile the call away while still type-checking the args */
#define	DEBUGPRT \
	if (0) cmn_err
#endif
66 
67 
68 /*
69  * global mutex that protects the global list of resource maps.
70  */
71 kmutex_t ra_lock;
72 
73 /*
74  * basic resource element
75  */
76 struct ra_resource {
77 	struct ra_resource *ra_next;
78 	uint64_t	ra_base;
79 	uint64_t 	ra_len;
80 };
81 
82 /*
83  * link list element for the list of dips (and their resource ranges)
84  * for a particular resource type.
85  * ra_rangeset points to the list of resources available
86  * for this type and this dip.
87  */
88 struct ra_dip_type  {
89 	struct ra_dip_type *ra_next;
90 	struct ra_resource  *ra_rangeset;
91 	dev_info_t *ra_dip;
92 };
93 
94 
95 /*
96  * link list element for list of types resources. Each element
97  * has all resources for a particular type.
98  */
99 struct ra_type_map {
100 	struct ra_type_map *ra_next;
101 	struct ra_dip_type *ra_dip_list;
102 	char *type;
103 };
104 
105 
106 /*
107  * place holder to keep the head of the whole global list.
108  * the address of the first typemap would be stored in it.
109  */
110 static struct ra_type_map	*ra_map_list_head = NULL;
111 
112 
113 /*
114  * This is the loadable module wrapper.
115  * It is essentially boilerplate so isn't documented
116  */
117 extern struct mod_ops mod_miscops;
118 
119 #ifdef BUSRA_DEBUG
120 void ra_dump_all(char *, dev_info_t *);
121 #endif
122 
123 /* internal function prototypes */
124 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
125     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
126     uint32_t flag);
127 static int isnot_pow2(uint64_t value);
128 static int claim_pci_busnum(dev_info_t *dip, void *arg);
129 static int ra_map_exist(dev_info_t *dip, char *type);
130 
131 static int pci_get_available_prop(dev_info_t *dip, uint64_t base,
132     uint64_t len, char *busra_type);
133 static int pci_put_available_prop(dev_info_t *dip, uint64_t base,
134     uint64_t len, char *busra_type);
135 static uint32_t pci_type_ra2pci(char *type);
136 static boolean_t is_pcie_fabric(dev_info_t *dip);
137 
138 #define	PCI_ADDR_TYPE_MASK	(PCI_REG_ADDR_M | PCI_REG_PF_M)
139 #define	PCI_ADDR_TYPE_INVAL	0xffffffff
140 
/*
 * List helpers.  RA_INSERT links el in at the link slot prev points to;
 * RA_REMOVE unlinks el, which must be the node *prev currently references.
 * Wrapped in do { } while (0) and with parenthesized arguments so they
 * expand safely as single statements (e.g. in an unbraced if/else body).
 */
#define	RA_INSERT(prev, el) \
	do { \
		(el)->ra_next = *(prev); \
		*(prev) = (el); \
	} while (0)

#define	RA_REMOVE(prev, el) \
	do { \
		*(prev) = (el)->ra_next; \
	} while (0)
147 
148 
/* misc-module linkage: busra exports kernel interfaces only, no dev_ops */
static struct modlmisc modlmisc = {
	&mod_miscops,		/* Type of module. This one is a module */
	"Bus Resource Allocator (BUSRA)",	/* Name of the module. */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};
157 
158 int
159 _init()
160 {
161 	int	ret;
162 
163 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
164 	    (void *)(intptr_t)__ipltospl(SPL7 - 1));
165 	if ((ret = mod_install(&modlinkage)) != 0) {
166 		mutex_destroy(&ra_lock);
167 	}
168 	return (ret);
169 }
170 
171 int
172 _fini()
173 {
174 	int	ret;
175 
176 	mutex_enter(&ra_lock);
177 
178 	if (ra_map_list_head != NULL) {
179 		mutex_exit(&ra_lock);
180 		return (EBUSY);
181 	}
182 
183 	ret = mod_remove(&modlinkage);
184 
185 	mutex_exit(&ra_lock);
186 
187 	if (ret == 0)
188 		mutex_destroy(&ra_lock);
189 
190 	return (ret);
191 }
192 
/*
 * Module information entry point; reports via the modlinkage above.
 */
int
_info(struct modinfo *modinfop)

{
	return (mod_info(&modlinkage, modinfop));
}
199 
200 /*
201  * set up an empty resource map for a given type and dip
202  */
203 int
204 ndi_ra_map_setup(dev_info_t *dip, char *type)
205 {
206 	struct ra_type_map  *typemapp;
207 	struct ra_dip_type  *dipmap;
208 	struct ra_dip_type  **backdip;
209 	struct ra_type_map  **backtype;
210 
211 
212 	mutex_enter(&ra_lock);
213 
214 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
215 
216 	if (dipmap == NULL) {
217 		if (backtype == NULL) {
218 			typemapp = (struct ra_type_map *)
219 			    kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
220 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
221 			    KM_SLEEP);
222 			(void) strcpy(typemapp->type, type);
223 			RA_INSERT(&ra_map_list_head, typemapp);
224 		} else {
225 			typemapp = *backtype;
226 		}
227 		if (backdip == NULL) {
228 			/* allocate and insert in list of dips for this type */
229 			dipmap = (struct ra_dip_type *)
230 			    kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
231 			dipmap->ra_dip = dip;
232 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
233 		}
234 	}
235 
236 	mutex_exit(&ra_lock);
237 	return (NDI_SUCCESS);
238 }
239 
240 /*
241  * destroys a resource map for a given dip and type
242  */
243 int
244 ndi_ra_map_destroy(dev_info_t *dip, char *type)
245 {
246 	struct ra_dip_type	*dipmap;
247 	struct ra_dip_type	**backdip;
248 	struct ra_type_map  	**backtype, *typemap;
249 	struct ra_resource	*range;
250 
251 	mutex_enter(&ra_lock);
252 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
253 
254 	if (dipmap == NULL) {
255 		mutex_exit(&ra_lock);
256 		return (NDI_FAILURE);
257 	}
258 
259 	/*
260 	 * destroy all resources for this dip
261 	 * remove dip from type list
262 	 */
263 	ASSERT((backdip != NULL) && (backtype != NULL));
264 	while (dipmap->ra_rangeset != NULL) {
265 		range = dipmap->ra_rangeset;
266 		RA_REMOVE(&dipmap->ra_rangeset, range);
267 		kmem_free((caddr_t)range, sizeof (*range));
268 	}
269 	/* remove from dip list */
270 	RA_REMOVE(backdip, dipmap);
271 	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
272 	if ((*backtype)->ra_dip_list == NULL) {
273 		/*
274 		 * This was the last dip with this resource type.
275 		 * Remove the type from the global list.
276 		 */
277 		typemap = *backtype;
278 		RA_REMOVE(backtype, (*backtype));
279 		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
280 		kmem_free((caddr_t)typemap, sizeof (*typemap));
281 	}
282 
283 	mutex_exit(&ra_lock);
284 	return (NDI_SUCCESS);
285 }
286 
287 static int
288 ra_map_exist(dev_info_t *dip, char *type)
289 {
290 	struct ra_dip_type  **backdip;
291 	struct ra_type_map  **backtype;
292 
293 	mutex_enter(&ra_lock);
294 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
295 		mutex_exit(&ra_lock);
296 		return (NDI_FAILURE);
297 	}
298 
299 	mutex_exit(&ra_lock);
300 	return (NDI_SUCCESS);
301 }
302 /*
303  * Find a dip map for the specified type, if NDI_RA_PASS will go up on dev tree
304  * if found, backdip and backtype will be updated to point to the previous
305  * dip in the list and previous type for this dip in the list.
306  * If no such type at all in the resource list both backdip and backtype
307  * will be null. If the type found but no dip, back dip will be null.
308  */
309 
310 static struct ra_dip_type *
311 find_dip_map_resources(dev_info_t *dip, char *type,
312     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
313     uint32_t flag)
314 {
315 	struct ra_type_map **prevmap;
316 	struct ra_dip_type *dipmap, **prevdip;
317 
318 	ASSERT(mutex_owned(&ra_lock));
319 	prevdip = NULL;
320 	dipmap = NULL;
321 	prevmap = &ra_map_list_head;
322 
323 	while (*prevmap) {
324 		if (strcmp((*prevmap)->type, type) == 0)
325 			break;
326 		prevmap = &(*prevmap)->ra_next;
327 	}
328 
329 	if (*prevmap) {
330 		for (; dip != NULL; dip = ddi_get_parent(dip)) {
331 			prevdip = &(*prevmap)->ra_dip_list;
332 			dipmap = *prevdip;
333 
334 			while (dipmap) {
335 				if (dipmap->ra_dip == dip)
336 					break;
337 				prevdip =  &dipmap->ra_next;
338 				dipmap = dipmap->ra_next;
339 			}
340 
341 			if (dipmap != NULL) {
342 				/* found it */
343 				break;
344 			}
345 
346 			if (!(flag & NDI_RA_PASS)) {
347 				break;
348 			}
349 		}
350 	}
351 
352 	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
353 	*backdip = (dipmap == NULL) ?  NULL: prevdip;
354 
355 	return (dipmap);
356 }
357 
358 int
359 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
360     uint32_t flag)
361 {
362 	struct ra_dip_type *dipmap;
363 	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
364 	struct ra_resource  *mapp, **backp;
365 	uint64_t newend, mapend;
366 	struct ra_dip_type **backdip;
367 	struct ra_type_map **backtype;
368 
369 	if (len == 0) {
370 		return (NDI_SUCCESS);
371 	}
372 
373 	mutex_enter(&ra_lock);
374 
375 	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
376 	    flag)) == NULL) {
377 		mutex_exit(&ra_lock);
378 		return (NDI_FAILURE);
379 	}
380 
381 	mapp = dipmap->ra_rangeset;
382 	backp = &dipmap->ra_rangeset;
383 
384 	/* now find where range lies and fix things up */
385 	newend = base + len;
386 	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
387 		mapend = mapp->ra_base + mapp->ra_len;
388 
389 		/* check for overlap first */
390 		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
391 		    (base > mapp->ra_base && base < mapend)) {
392 			/* overlap with mapp */
393 			overlapmap = mapp;
394 			goto overlap;
395 		} else if ((base == mapend && mapp->ra_next) &&
396 		    (newend > mapp->ra_next->ra_base)) {
397 			/* overlap with mapp->ra_next */
398 			overlapmap = mapp->ra_next;
399 			goto overlap;
400 		}
401 
402 		if (newend == mapp->ra_base) {
403 			/* simple - on front */
404 			mapp->ra_base = base;
405 			mapp->ra_len += len;
406 			/*
407 			 * don't need to check if it merges with
408 			 * previous since that would match on on end
409 			 */
410 			break;
411 		} else if (base == mapend) {
412 			/* simple - on end */
413 			mapp->ra_len += len;
414 			if (mapp->ra_next &&
415 			    (newend == mapp->ra_next->ra_base)) {
416 				/* merge with next node */
417 				oldmap = mapp->ra_next;
418 				mapp->ra_len += oldmap->ra_len;
419 				RA_REMOVE(&mapp->ra_next, oldmap);
420 				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
421 			}
422 			break;
423 		} else if (base < mapp->ra_base) {
424 			/* somewhere in between so just an insert */
425 			newmap = (struct ra_resource *)
426 			    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
427 			newmap->ra_base = base;
428 			newmap->ra_len = len;
429 			RA_INSERT(backp, newmap);
430 			break;
431 		}
432 	}
433 	if (mapp == NULL) {
434 		/* stick on end */
435 		newmap = (struct ra_resource *)
436 		    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
437 		newmap->ra_base = base;
438 		newmap->ra_len = len;
439 		RA_INSERT(backp, newmap);
440 	}
441 
442 	mutex_exit(&ra_lock);
443 
444 	/*
445 	 * Update dip's "available" property, adding this piece of
446 	 * resource to the pool.
447 	 */
448 	(void) pci_put_available_prop(dipmap->ra_dip, base, len, type);
449 done:
450 	return (NDI_SUCCESS);
451 
452 overlap:
453 	/*
454 	 * Bad free may happen on some x86 platforms with BIOS exporting
455 	 * incorrect resource maps. The system is otherwise functioning
456 	 * normally. We send such messages to syslog only.
457 	 */
458 	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
459 	    (void *)dip, type);
460 	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
461 	    PRIX64 " overlaps with existing resource base 0x%" PRIx64
462 	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
463 	    overlapmap->ra_len);
464 
465 	mutex_exit(&ra_lock);
466 	return (NDI_FAILURE);
467 }
468 
/*
 * Check whether value is a power of two: return 0 if it is, nonzero
 * otherwise.
 *
 * The previous implementation compared ddi_ffs()/ddi_fls() on the two
 * 32-bit halves independently, which wrongly accepted any value with a
 * single bit set in EACH half (e.g. 0x100000001).  A power of two has
 * exactly one bit set, so (value & (value - 1)) must be zero.  As
 * before, value == 0 reports 0 (treated as a power of two) to preserve
 * existing caller behavior.
 */
static int
isnot_pow2(uint64_t value)
{
	return ((value & (value - 1)) != 0);
}
488 
489 static  void
490 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
491 	    uint64_t base, uint64_t len)
492 {
493 	struct ra_resource *newmap;
494 	uint64_t newlen;
495 
496 	if (base != mapp->ra_base) {
497 		/* in the middle or end */
498 		newlen = base - mapp->ra_base;
499 		if ((mapp->ra_len - newlen) == len) {
500 			/* on the end */
501 			mapp->ra_len = newlen;
502 		} else {
503 			/* in the middle */
504 			newmap = (struct ra_resource *)
505 			    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
506 			newmap->ra_base = base + len;
507 			newmap->ra_len = mapp->ra_len - (len + newlen);
508 			mapp->ra_len = newlen;
509 			RA_INSERT(&(mapp->ra_next), newmap);
510 		}
511 	} else {
512 		/* at the beginning */
513 		mapp->ra_base += len;
514 		mapp->ra_len -= len;
515 		if (mapp->ra_len == 0) {
516 			/* remove the whole node */
517 			RA_REMOVE(backp, mapp);
518 			kmem_free((caddr_t)mapp, sizeof (*mapp));
519 		}
520 	}
521 }
522 
523 int
524 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
525     uint64_t *retlenp, char *type, uint32_t flag)
526 {
527 	struct ra_dip_type *dipmap;
528 	struct ra_resource *mapp, **backp, **backlargestp;
529 	uint64_t mask = 0;
530 	uint64_t len, remlen, largestbase, largestlen;
531 	uint64_t base, oldbase, lower, upper;
532 	struct ra_dip_type  **backdip;
533 	struct ra_type_map  **backtype;
534 	int  rval = NDI_FAILURE;
535 
536 
537 	len = req->ra_len;
538 
539 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
540 		if (isnot_pow2(req->ra_len)) {
541 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
542 			    PRIx64, req->ra_len);
543 			*retbasep = 0;
544 			*retlenp = 0;
545 			return (NDI_FAILURE);
546 		}
547 	}
548 
549 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
550 	    req->ra_align_mask;
551 
552 
553 	mutex_enter(&ra_lock);
554 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
555 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
556 		mutex_exit(&ra_lock);
557 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
558 		return (NDI_FAILURE);
559 	}
560 
561 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
562 	    PRIx64 "\n", (void *)mapp, len, mask);
563 
564 	backp = &(dipmap->ra_rangeset);
565 	backlargestp = NULL;
566 	largestbase = 0;
567 	largestlen = 0;
568 
569 	lower = 0;
570 	upper = ~(uint64_t)0;
571 
572 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
573 		/* bounded so skip to first possible */
574 		lower = req->ra_boundbase;
575 		upper = req->ra_boundlen + lower;
576 		if ((upper == 0) || (upper < req->ra_boundlen))
577 			upper = ~(uint64_t)0;
578 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
579 		    PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
580 		    "\n", mapp->ra_len, len, mapp->ra_base, mask);
581 		for (; mapp != NULL && (mapp->ra_base + mapp->ra_len) < lower;
582 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
583 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
584 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
585 				/*
586 				 * This elements end goes beyond max uint64_t.
587 				 * potential candidate, check end against lower
588 				 * would not be precise.
589 				 */
590 				break;
591 
592 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
593 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
594 			}
595 
596 	}
597 
598 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
599 		/* first fit - not user specified */
600 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
601 		    "lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
602 		for (; mapp != NULL && mapp->ra_base <= upper;
603 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
604 
605 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
606 			    ", len = %" PRIx64 "", mapp->ra_len, len);
607 			base = mapp->ra_base;
608 			if (base < lower) {
609 				base = lower;
610 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
611 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
612 				    base, mapp->ra_base, mask);
613 			}
614 
615 			if ((base & mask) != 0) {
616 				oldbase = base;
617 				/*
618 				 * failed a critical constraint
619 				 * adjust and see if it still fits
620 				 */
621 				base = base & ~mask;
622 				base += (mask + 1);
623 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
624 				    base);
625 
626 				/*
627 				 * Check to see if the new base is past
628 				 * the end of the resource.
629 				 */
630 				if (base >= (oldbase + mapp->ra_len + 1)) {
631 					continue;
632 				}
633 			}
634 
635 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
636 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
637 					remlen = upper - base;
638 				else
639 					remlen = mapp->ra_len -
640 					    (base - mapp->ra_base);
641 
642 				if ((backlargestp == NULL) ||
643 				    (largestlen < remlen)) {
644 
645 					backlargestp = backp;
646 					largestbase = base;
647 					largestlen = remlen;
648 				}
649 			}
650 
651 			if (mapp->ra_len >= len) {
652 				/* a candidate -- apply constraints */
653 				if ((len > (mapp->ra_len -
654 				    (base - mapp->ra_base))) ||
655 				    ((len - 1 + base) > upper)) {
656 					continue;
657 				}
658 
659 				/* we have a fit */
660 
661 				DEBUGPRT(CE_CONT, "\thave a fit\n");
662 
663 				adjust_link(backp, mapp, base, len);
664 				rval = NDI_SUCCESS;
665 				break;
666 
667 			}
668 		}
669 	} else {
670 		/* want an exact value/fit */
671 		base = req->ra_addr;
672 		len = req->ra_len;
673 		for (; mapp != NULL && mapp->ra_base <= upper;
674 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
675 			if (base >= mapp->ra_base &&
676 			    ((base - mapp->ra_base) < mapp->ra_len)) {
677 				/*
678 				 * This is the node with he requested base in
679 				 * its range
680 				 */
681 				if ((len > mapp->ra_len) ||
682 				    (base - mapp->ra_base >
683 				    mapp->ra_len - len)) {
684 					/* length requirement not satisfied */
685 					if (req->ra_flags &
686 					    NDI_RA_ALLOC_PARTIAL_OK) {
687 						if ((upper - mapp->ra_base)
688 						    < mapp->ra_len)
689 							remlen = upper - base;
690 						else
691 							remlen =
692 							    mapp->ra_len -
693 							    (base -
694 							    mapp->ra_base);
695 					}
696 					backlargestp = backp;
697 					largestbase = base;
698 					largestlen = remlen;
699 					base = 0;
700 				} else {
701 					/* We have a match */
702 					adjust_link(backp, mapp, base, len);
703 					rval = NDI_SUCCESS;
704 				}
705 				break;
706 			}
707 		}
708 	}
709 
710 	if ((rval != NDI_SUCCESS) &&
711 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
712 	    (backlargestp != NULL)) {
713 		adjust_link(backlargestp, *backlargestp, largestbase,
714 		    largestlen);
715 
716 		base = largestbase;
717 		len = largestlen;
718 		rval = NDI_RA_PARTIAL_REQ;
719 	}
720 
721 	mutex_exit(&ra_lock);
722 
723 	if (rval == NDI_FAILURE) {
724 		*retbasep = 0;
725 		*retlenp = 0;
726 	} else {
727 		*retbasep = base;
728 		*retlenp = len;
729 	}
730 
731 	/*
732 	 * Update dip's "available" property, substract this piece of
733 	 * resource from the pool.
734 	 */
735 	if ((rval == NDI_SUCCESS) || (rval == NDI_RA_PARTIAL_REQ))
736 		(void) pci_get_available_prop(dipmap->ra_dip,
737 		    *retbasep, *retlenp, type);
738 
739 	return (rval);
740 }
741 
742 /*
743  * isa_resource_setup
744  *	check for /used-resources and initialize
745  *	based on info there.  If no /used-resources,
746  *	fail.
747  */
748 int
749 isa_resource_setup()
750 {
751 	dev_info_t *used, *usedpdip;
752 	/*
753 	 * note that at this time bootconf creates 32 bit properties for
754 	 * io-space and device-memory
755 	 */
756 	struct iorange {
757 		uint32_t	base;
758 		uint32_t	len;
759 	} *iorange;
760 	struct memrange {
761 		uint32_t	base;
762 		uint32_t	len;
763 	} *memrange;
764 	uint32_t *irq;
765 	int proplen;
766 	int i, len;
767 	int maxrange;
768 	ndi_ra_request_t req;
769 	uint64_t retbase;
770 	uint64_t retlen;
771 
772 	used = ddi_find_devinfo("used-resources", -1, 0);
773 	if (used == NULL) {
774 		DEBUGPRT(CE_CONT,
775 		    "isa_resource_setup: used-resources not found");
776 		return (NDI_FAILURE);
777 	}
778 
779 	/*
780 	 * initialize to all resources being present
781 	 * and then remove the ones in use.
782 	 */
783 
784 	usedpdip = ddi_root_node();
785 
786 	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
787 	    (void *)used, (void *)usedpdip);
788 
789 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
790 		return (NDI_FAILURE);
791 	}
792 
793 	/* initialize io space, highest end base is 0xffff */
794 	/* note that length is highest addr + 1 since starts from 0 */
795 
796 	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);
797 
798 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
799 	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
800 		maxrange = proplen / sizeof (struct iorange);
801 		/* remove the "used" I/O resources */
802 		for (i = 0; i < maxrange; i++) {
803 			bzero((caddr_t)&req, sizeof (req));
804 			req.ra_addr =  (uint64_t)iorange[i].base;
805 			req.ra_len = (uint64_t)iorange[i].len;
806 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
807 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
808 			    NDI_RA_TYPE_IO, 0);
809 		}
810 
811 		kmem_free((caddr_t)iorange, proplen);
812 	}
813 
814 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
815 		return (NDI_FAILURE);
816 	}
817 	/* initialize memory space where highest end base is 0xffffffff */
818 	/* note that length is highest addr + 1 since starts from 0 */
819 	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
820 	    NDI_RA_TYPE_MEM, 0);
821 
822 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
823 	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
824 		maxrange = proplen / sizeof (struct memrange);
825 		/* remove the "used" memory resources */
826 		for (i = 0; i < maxrange; i++) {
827 			bzero((caddr_t)&req, sizeof (req));
828 			req.ra_addr = (uint64_t)memrange[i].base;
829 			req.ra_len = (uint64_t)memrange[i].len;
830 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
831 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
832 			    NDI_RA_TYPE_MEM, 0);
833 		}
834 
835 		kmem_free((caddr_t)memrange, proplen);
836 	}
837 
838 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
839 		return (NDI_FAILURE);
840 	}
841 
842 	/* initialize the interrupt space */
843 	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);
844 
845 #if defined(__i386) || defined(__amd64)
846 	bzero(&req, sizeof (req));
847 	req.ra_addr = 2;	/* 2 == 9 so never allow */
848 	req.ra_len = 1;
849 	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
850 	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
851 	    NDI_RA_TYPE_INTR, 0);
852 #endif
853 
854 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
855 	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
856 		/* Initialize available interrupts by negating the used */
857 		len = (proplen / sizeof (uint32_t));
858 		for (i = 0; i < len; i++) {
859 			bzero((caddr_t)&req, sizeof (req));
860 			req.ra_addr = (uint64_t)irq[i];
861 			req.ra_len = 1;
862 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
863 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
864 			    NDI_RA_TYPE_INTR, 0);
865 		}
866 		kmem_free((caddr_t)irq, proplen);
867 	}
868 
869 #ifdef BUSRA_DEBUG
870 	if (busra_debug) {
871 		(void) ra_dump_all(NULL, usedpdip);
872 	}
873 #endif
874 	return (NDI_SUCCESS);
875 
876 }
877 
878 #ifdef BUSRA_DEBUG
879 void
880 ra_dump_all(char *type, dev_info_t *dip)
881 {
882 
883 	struct ra_type_map *typemap;
884 	struct ra_dip_type *dipmap;
885 	struct ra_resource *res;
886 
887 	typemap =  (struct ra_type_map *)ra_map_list_head;
888 
889 	for (; typemap != NULL; typemap = typemap->ra_next) {
890 		if (type != NULL) {
891 			if (strcmp(typemap->type, type) != 0)
892 				continue;
893 		}
894 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
895 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
896 		    dipmap = dipmap->ra_next) {
897 			if (dip != NULL) {
898 				if ((dipmap->ra_dip) != dip)
899 					continue;
900 			}
901 			cmn_err(CE_CONT, "  dip is %p\n",
902 			    (void *)dipmap->ra_dip);
903 			for (res = dipmap->ra_rangeset; res != NULL;
904 			    res = res->ra_next) {
905 				cmn_err(CE_CONT, "\t  range is %" PRIx64
906 				    " %" PRIx64 "\n", res->ra_base,
907 				    res->ra_len);
908 			}
909 			if (dip != NULL)
910 				break;
911 		}
912 		if (type != NULL)
913 			break;
914 	}
915 }
916 #endif
917 
/*
 * NOTE(review): pci_bus_range is a file-scope, non-static scratch
 * variable used when reading "bus-range"/"available-bus-range"
 * properties -- presumably only touched single-threaded during
 * resource setup; confirm before relying on it elsewhere.
 */
struct bus_range {	/* 1275 "bus-range" property definition */
	uint32_t lo;	/* lowest bus number */
	uint32_t hi;	/* highest bus number */
} pci_bus_range;

/* context handed to the bus-number walker (see claim_pci_busnum()) */
struct busnum_ctrl {
	int	rv;		/* walk result / error code */
	dev_info_t *dip;	/* dip whose busnum map is being updated */
	struct	bus_range *range;	/* bus range being claimed */
};
928 
929 
930 /*
931  * Setup resource map for the pci bus node based on the "available"
932  * property and "bus-range" property.
933  */
934 int
935 pci_resource_setup(dev_info_t *dip)
936 {
937 	pci_regspec_t *regs;
938 	int rlen, rcount, i;
939 	char bus_type[16] = "(unknown)";
940 	int len;
941 	struct busnum_ctrl ctrl;
942 	int circular_count;
943 	int rval = NDI_SUCCESS;
944 
945 	/*
946 	 * If this is a pci bus node then look for "available" property
947 	 * to find the available resources on this bus.
948 	 */
949 	len = sizeof (bus_type);
950 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
951 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
952 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
953 		return (NDI_FAILURE);
954 
955 	/* it is not a pci/pci-ex bus type */
956 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
957 		return (NDI_FAILURE);
958 
959 	/*
960 	 * The pci-hotplug project addresses adding the call
961 	 * to pci_resource_setup from pci nexus driver.
962 	 * However that project would initially be only for x86,
963 	 * so for sparc pcmcia-pci support we still need to call
964 	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
965 	 * are updated to call pci_resource_setup this portion of the
966 	 * code would really become an assert to make sure this
967 	 * function is not called for the same dip twice.
968 	 */
969 	/*
970 	 * Another user for the check below is hotplug PCI/PCIe bridges.
971 	 *
972 	 * For PCI/PCIE devices under a PCIE hierarchy, ndi_ra_alloc/free
973 	 * will update the devinfo node's "available" property, to reflect
974 	 * the fact that a piece of resource has been removed/added to
975 	 * a devinfo node.
976 	 * During probe of a new PCI bridge in the hotplug case, PCI
977 	 * configurator firstly allocates maximum MEM/IO from its parent,
978 	 * then calls ndi_ra_free() to use these resources to setup busra
979 	 * pool for the new bridge, as well as adding these resources to
980 	 * the "available" property of the new devinfo node. Then configu-
981 	 * rator will attach driver for the bridge before probing its
982 	 * children, and the bridge driver will then initialize its hotplug
983 	 * contollers (if it supports hotplug) and HPC driver will call
984 	 * this function to setup the busra pool, but the resource pool
985 	 * has already been setup at the first of pcicfg_probe_bridge(),
986 	 * thus we need the check below to return directly in this case.
987 	 * Otherwise the ndi_ra_free() below will see overlapping resources.
988 	 */
989 	{
990 		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
991 			return (NDI_FAILURE);
992 		}
993 	}
994 
995 
996 	/*
997 	 * Create empty resource maps first.
998 	 *
999 	 * NOTE: If all the allocated resources are already assigned to
1000 	 * device(s) in the hot plug slot then "available" property may not
1001 	 * be present. But, subsequent hot plug operation may unconfigure
1002 	 * the device in the slot and try to free up it's resources. So,
1003 	 * at the minimum we should create empty maps here.
1004 	 */
1005 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
1006 		return (NDI_FAILURE);
1007 	}
1008 
1009 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
1010 		return (NDI_FAILURE);
1011 	}
1012 
1013 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
1014 		return (NDI_FAILURE);
1015 	}
1016 
1017 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
1018 	    NDI_FAILURE) {
1019 		return (NDI_FAILURE);
1020 	}
1021 
1022 	/* read the "available" property if it is available */
1023 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1024 	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
1025 		/*
1026 		 * Remove "available" property as the entries will be
1027 		 * re-created in ndi_ra_free() below, note prom based
1028 		 * property will not be removed. But in ndi_ra_free()
1029 		 * we'll be creating non prom based property entries.
1030 		 */
1031 		(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1032 		/*
1033 		 * create the available resource list for both memory and
1034 		 * io space
1035 		 */
1036 		rcount = rlen / sizeof (pci_regspec_t);
1037 		for (i = 0; i < rcount; i++) {
1038 			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
1039 			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1040 				(void) ndi_ra_free(dip,
1041 				    (uint64_t)regs[i].pci_phys_low,
1042 				    (uint64_t)regs[i].pci_size_low,
1043 				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1044 				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
1045 				    NDI_RA_TYPE_MEM,
1046 				    0);
1047 				break;
1048 			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1049 				(void) ndi_ra_free(dip,
1050 				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1051 				    ((uint64_t)(regs[i].pci_phys_low)),
1052 				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
1053 				    ((uint64_t)(regs[i].pci_size_low)),
1054 				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1055 				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
1056 				    NDI_RA_TYPE_MEM,
1057 				    0);
1058 				break;
1059 			case PCI_REG_ADDR_G(PCI_ADDR_IO):
1060 				(void) ndi_ra_free(dip,
1061 				    (uint64_t)regs[i].pci_phys_low,
1062 				    (uint64_t)regs[i].pci_size_low,
1063 				    NDI_RA_TYPE_IO,
1064 				    0);
1065 				break;
1066 			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
1067 				break;
1068 			default:
1069 				cmn_err(CE_WARN,
1070 				    "pci_resource_setup: bad addr type: %x\n",
1071 				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
1072 				break;
1073 			}
1074 		}
1075 		kmem_free(regs, rlen);
1076 	}
1077 
1078 	/*
1079 	 * update resource map for available bus numbers if the node
1080 	 * has available-bus-range or bus-range property.
1081 	 */
1082 	len = sizeof (struct bus_range);
1083 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1084 	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
1085 	    DDI_SUCCESS) {
1086 		/*
1087 		 * Add bus numbers in the range to the free list.
1088 		 */
1089 		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
1090 		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
1091 		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
1092 	} else {
1093 		/*
1094 		 * We don't have an available-bus-range property. If, instead,
1095 		 * we have a bus-range property we add all the bus numbers
1096 		 * in that range to the free list but we must then scan
1097 		 * for pci-pci bridges on this bus to find out the if there
1098 		 * are any of those bus numbers already in use. If so, we can
1099 		 * reclaim them.
1100 		 */
1101 		len = sizeof (struct bus_range);
1102 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
1103 		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
1104 		    &len) == DDI_SUCCESS) {
1105 			if (pci_bus_range.lo != pci_bus_range.hi) {
1106 				/*
1107 				 * Add bus numbers other than the secondary
1108 				 * bus number to the free list.
1109 				 */
1110 				(void) ndi_ra_free(dip,
1111 				    (uint64_t)pci_bus_range.lo + 1,
1112 				    (uint64_t)pci_bus_range.hi -
1113 				    (uint64_t)pci_bus_range.lo,
1114 				    NDI_RA_TYPE_PCI_BUSNUM, 0);
1115 
1116 				/* scan for pci-pci bridges */
1117 				ctrl.rv = DDI_SUCCESS;
1118 				ctrl.dip = dip;
1119 				ctrl.range = &pci_bus_range;
1120 				ndi_devi_enter(dip, &circular_count);
1121 				ddi_walk_devs(ddi_get_child(dip),
1122 				    claim_pci_busnum, (void *)&ctrl);
1123 				ndi_devi_exit(dip, circular_count);
1124 				if (ctrl.rv != DDI_SUCCESS) {
1125 					/* failed to create the map */
1126 					(void) ndi_ra_map_destroy(dip,
1127 					    NDI_RA_TYPE_PCI_BUSNUM);
1128 					rval = NDI_FAILURE;
1129 				}
1130 			}
1131 		}
1132 	}
1133 
1134 #ifdef BUSRA_DEBUG
1135 	if (busra_debug) {
1136 		(void) ra_dump_all(NULL, dip);
1137 	}
1138 #endif
1139 
1140 	return (rval);
1141 }
1142 
1143 /*
1144  * If the device is a PCI bus device (i.e bus-range property exists) then
1145  * claim the bus numbers used by the device from the specified bus
1146  * resource map.
1147  */
1148 static int
1149 claim_pci_busnum(dev_info_t *dip, void *arg)
1150 {
1151 	struct bus_range pci_bus_range;
1152 	struct busnum_ctrl *ctrl;
1153 	ndi_ra_request_t req;
1154 	char bus_type[16] = "(unknown)";
1155 	int len;
1156 	uint64_t base;
1157 	uint64_t retlen;
1158 
1159 	ctrl = (struct busnum_ctrl *)arg;
1160 
1161 	/* check if this is a PCI bus node */
1162 	len = sizeof (bus_type);
1163 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1164 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1165 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1166 		return (DDI_WALK_PRUNECHILD);
1167 
1168 	/* it is not a pci/pci-ex bus type */
1169 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1170 		return (DDI_WALK_PRUNECHILD);
1171 
1172 	/* look for the bus-range property */
1173 	len = sizeof (struct bus_range);
1174 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1175 	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1176 		if ((pci_bus_range.lo >= ctrl->range->lo) &&
1177 		    (pci_bus_range.hi <= ctrl->range->hi)) {
1178 
1179 			/* claim the bus range from the bus resource map */
1180 			bzero((caddr_t)&req, sizeof (req));
1181 			req.ra_addr = (uint64_t)pci_bus_range.lo;
1182 			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1183 			req.ra_len = (uint64_t)pci_bus_range.hi -
1184 			    (uint64_t)pci_bus_range.lo + 1;
1185 			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1186 			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1187 				return (DDI_WALK_PRUNECHILD);
1188 		}
1189 	}
1190 
1191 	/*
1192 	 * Error return.
1193 	 */
1194 	ctrl->rv = DDI_FAILURE;
1195 	return (DDI_WALK_TERMINATE);
1196 }
1197 
1198 void
1199 pci_resource_destroy(dev_info_t *dip)
1200 {
1201 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1202 
1203 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1204 
1205 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1206 
1207 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1208 }
1209 
1210 
1211 int
1212 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1213 {
1214 	int i;
1215 
1216 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1217 		return (NDI_FAILURE);
1218 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1219 		return (NDI_FAILURE);
1220 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1221 		return (NDI_FAILURE);
1222 
1223 	/* for each entry in the PCI "available" property */
1224 	for (i = 0; i < entries; i++, avail_p++) {
1225 		if (avail_p->pci_phys_hi == -1u)
1226 			goto err;
1227 
1228 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1229 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1230 			(void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1231 			    (uint64_t)avail_p->pci_size_low,
1232 			    (avail_p->pci_phys_hi & PCI_REG_PF_M) ?
1233 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
1234 			    0);
1235 			}
1236 			break;
1237 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1238 			(void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1239 			    (uint64_t)avail_p->pci_size_low, NDI_RA_TYPE_IO, 0);
1240 			break;
1241 		default:
1242 			goto err;
1243 		}
1244 	}
1245 #ifdef BUSRA_DEBUG
1246 	if (busra_debug) {
1247 		(void) ra_dump_all(NULL, dip);
1248 	}
1249 #endif
1250 	return (NDI_SUCCESS);
1251 
1252 err:
1253 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1254 	    i, avail_p->pci_phys_hi);
1255 	return (NDI_FAILURE);
1256 }
1257 
1258 /*
1259  * Return true if the devinfo node resides on PCI or PCI Express bus,
1260  * sitting in a PCI Express hierarchy.
1261  */
1262 static boolean_t
1263 is_pcie_fabric(dev_info_t *dip)
1264 {
1265 	dev_info_t *root = ddi_root_node();
1266 	dev_info_t *pdip;
1267 	boolean_t found = B_FALSE;
1268 	char *bus;
1269 
1270 	/*
1271 	 * Is this pci/pcie ?
1272 	 */
1273 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1274 	    DDI_PROP_DONTPASS, "device_type", &bus) !=
1275 	    DDI_PROP_SUCCESS) {
1276 		DEBUGPRT(CE_WARN, "is_pcie_fabric: cannot find "
1277 		    "\"device_type\" property for dip %p\n", (void *)dip);
1278 		return (B_FALSE);
1279 	}
1280 
1281 	if (strcmp(bus, "pciex") == 0) {
1282 		/* pcie bus, done */
1283 		ddi_prop_free(bus);
1284 		return (B_TRUE);
1285 	} else if (strcmp(bus, "pci") == 0) {
1286 		/*
1287 		 * pci bus, fall through to check if it resides in
1288 		 * a pcie hierarchy.
1289 		 */
1290 		ddi_prop_free(bus);
1291 	} else {
1292 		/* other bus, return failure */
1293 		ddi_prop_free(bus);
1294 		return (B_FALSE);
1295 	}
1296 
1297 	/*
1298 	 * Does this device reside in a pcie fabric ?
1299 	 */
1300 	for (pdip = ddi_get_parent(dip); pdip && (pdip != root) &&
1301 	    !found; pdip = ddi_get_parent(pdip)) {
1302 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
1303 		    DDI_PROP_DONTPASS, "device_type", &bus) !=
1304 		    DDI_PROP_SUCCESS)
1305 			break;
1306 
1307 		if (strcmp(bus, "pciex") == 0)
1308 			found = B_TRUE;
1309 
1310 		ddi_prop_free(bus);
1311 	}
1312 
1313 	return (found);
1314 }
1315 
1316 /*
1317  * Remove a piece of IO/MEM resource from "available" property of 'dip'.
1318  */
1319 static int
1320 pci_get_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
1321     char *busra_type)
1322 {
1323 	pci_regspec_t	*regs, *newregs;
1324 	uint_t		status;
1325 	int		rlen, rcount;
1326 	int		i, j, k;
1327 	uint64_t	dlen;
1328 	boolean_t	found = B_FALSE;
1329 	uint32_t	type;
1330 
1331 	/* check if we're manipulating MEM/IO resource */
1332 	if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
1333 		return (DDI_SUCCESS);
1334 
1335 	/* check if dip is a pci/pcie device resides in a pcie fabric */
1336 	if (!is_pcie_fabric(dip))
1337 		return (DDI_SUCCESS);
1338 
1339 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
1340 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1341 	    "available", (caddr_t)&regs, &rlen);
1342 
1343 	ASSERT(status == DDI_SUCCESS);
1344 	if (status != DDI_SUCCESS)
1345 		return (status);
1346 
1347 	/*
1348 	 * The updated "available" property will at most have one more entry
1349 	 * than existing one (when the requested range is in the middle of
1350 	 * the matched property entry)
1351 	 */
1352 	newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);
1353 
1354 	rcount = rlen / sizeof (pci_regspec_t);
1355 	for (i = 0, j = 0; i < rcount; i++) {
1356 		if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
1357 			uint64_t range_base, range_len;
1358 
1359 			range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1360 			    ((uint64_t)(regs[i].pci_phys_low));
1361 			range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
1362 			    ((uint64_t)(regs[i].pci_size_low));
1363 
1364 			if ((base < range_base) ||
1365 			    (base + len > range_base + range_len)) {
1366 				/*
1367 				 * not a match, copy the entry
1368 				 */
1369 				goto copy_entry;
1370 			}
1371 
1372 			/*
1373 			 * range_base	base	base+len	range_base
1374 			 *					+range_len
1375 			 *   +------------+-----------+----------+
1376 			 *   |		  |///////////|		 |
1377 			 *   +------------+-----------+----------+
1378 			 */
1379 			/*
1380 			 * Found a match, remove the range out of this entry.
1381 			 */
1382 			found = B_TRUE;
1383 
1384 			dlen = base - range_base;
1385 			if (dlen != 0) {
1386 				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1387 				newregs[j].pci_phys_mid =
1388 				    (uint32_t)(range_base >> 32);
1389 				newregs[j].pci_phys_low =
1390 				    (uint32_t)(range_base);
1391 				newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1392 				newregs[j].pci_size_low = (uint32_t)dlen;
1393 				j++;
1394 			}
1395 
1396 			dlen = (range_base + range_len) - (base + len);
1397 			if (dlen != 0) {
1398 				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1399 				newregs[j].pci_phys_mid =
1400 				    (uint32_t)((base + len)>> 32);
1401 				newregs[j].pci_phys_low =
1402 				    (uint32_t)(base + len);
1403 				newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1404 				newregs[j].pci_size_low = (uint32_t)dlen;
1405 				j++;
1406 			}
1407 
1408 			/*
1409 			 * We've allocated the resource from the matched
1410 			 * entry, almost finished but still need to copy
1411 			 * the rest entries from the original property
1412 			 * array.
1413 			 */
1414 			for (k = i + 1; k < rcount; k++) {
1415 				newregs[j] = regs[k];
1416 				j++;
1417 			}
1418 
1419 			goto done;
1420 
1421 		} else {
1422 copy_entry:
1423 			newregs[j] = regs[i];
1424 			j++;
1425 		}
1426 	}
1427 
1428 done:
1429 	/*
1430 	 * This should not fail so assert it. For non-debug kernel we don't
1431 	 * want to panic thus only logging a warning message.
1432 	 */
1433 	ASSERT(found == B_TRUE);
1434 	if (!found) {
1435 		cmn_err(CE_WARN, "pci_get_available_prop: failed to remove "
1436 		    "resource from dip %p : base 0x%" PRIx64 ", len 0x%" PRIX64
1437 		    ", type 0x%x\n", (void *)dip, base, len, type);
1438 		kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1439 		kmem_free(regs, rlen);
1440 
1441 		return (DDI_FAILURE);
1442 	}
1443 
1444 	/*
1445 	 * Found the resources from parent, update the "available"
1446 	 * property.
1447 	 */
1448 	if (j == 0) {
1449 		/* all the resources are consumed, remove the property */
1450 		(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1451 	} else {
1452 		/*
1453 		 * There are still resource available in the parent dip,
1454 		 * update with the remaining resources.
1455 		 */
1456 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1457 		    "available", (int *)newregs,
1458 		    (j * sizeof (pci_regspec_t)) / sizeof (int));
1459 	}
1460 
1461 	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1462 	kmem_free(regs, rlen);
1463 
1464 	return (DDI_SUCCESS);
1465 }
1466 
1467 /*
1468  * Add a piece of IO/MEM resource to "available" property of 'dip'.
1469  */
static int
pci_put_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
    char *busra_type)
{
	pci_regspec_t	*regs, *newregs;
	uint_t		status;
	int		rlen, rcount;
	int		i, j, k;
	int		matched = 0;	/* # of adjacent entries merged so far */
	uint64_t	orig_base = base;	/* preserved for diagnostics */
	uint64_t	orig_len = len;		/* (base/len mutate on merge) */
	uint32_t	type;

	/* check if we're manipulating MEM/IO resource */
	if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
		return (DDI_SUCCESS);

	/* check if dip is a pci/pcie device resides in a pcie fabric */
	if (!is_pcie_fabric(dip))
		return (DDI_SUCCESS);

	/* only the software-created (non-PROM) "available" is manipulated */
	status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "available", (caddr_t)&regs, &rlen);

	switch (status) {
		case DDI_PROP_NOT_FOUND:
			goto not_found;

		case DDI_PROP_SUCCESS:
			break;

		default:
			return (status);
	}

	/*
	 * The "available" property exists on the node, try to put this
	 * resource back, merge if there are adjacent resources.
	 *
	 * The updated "available" property will at most have one more entry
	 * than existing one (when there is no adjacent entries thus the new
	 * resource is appended at the end)
	 */
	newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);

	rcount = rlen / sizeof (pci_regspec_t);
	for (i = 0, j = 0; i < rcount; i++) {
		if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
			uint64_t range_base, range_len;

			/* 64-bit address/size are split across two words */
			range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
			    ((uint64_t)(regs[i].pci_phys_low));
			range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
			    ((uint64_t)(regs[i].pci_size_low));

			if ((base + len < range_base) ||
			    (base > range_base + range_len)) {
				/*
				 * Not adjacent, copy the entry and continue
				 */
				goto copy_entry;
			}

			/*
			 * Adjacent or overlap?
			 *
			 * Should not have overlapping resources so assert it.
			 * For non-debug kernel we don't want to panic thus
			 * only logging a warning message.
			 */
			/*
			 * NOTE(review): the assertion below is compiled out
			 * with #if 0; the runtime check that follows logs a
			 * warning and fails instead of panicking.
			 */
#if 0
			ASSERT((base + len == range_base) ||
			    (base == range_base + range_len));
#endif
			if ((base + len != range_base) &&
			    (base != range_base + range_len)) {
				cmn_err(CE_WARN, "pci_put_available_prop: "
				    "failed to add resource to dip %p : "
				    "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
				    "overlaps with existing resource "
				    "base 0x%" PRIx64 ", len 0x%" PRIx64 "\n",
				    (void *)dip, orig_base, orig_len,
				    range_base, range_len);

				goto failure;
			}

			/*
			 * On the left:
			 *
			 * base		range_base
			 *   +-------------+-------------+
			 *   |/////////////|		 |
			 *   +-------------+-------------+
			 *	len		range_len
			 *
			 * On the right:
			 *
			 * range_base	 base
			 *   +-------------+-------------+
			 *   |		   |/////////////|
			 *   +-------------+-------------+
			 *	range_len	len
			 */
			/*
			 * There are at most two pieces of resource adjacent
			 * with this resource, assert it.
			 */
			ASSERT(matched < 2);

			if (!(matched < 2)) {
				cmn_err(CE_WARN, "pci_put_available_prop: "
				    "failed to add resource to dip %p : "
				    "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
				    "found overlaps in existing resources\n",
				    (void *)dip, orig_base, orig_len);

				goto failure;
			}

			/* setup base & len to refer to the merged range */
			len += range_len;
			if (base == range_base + range_len)
				base = range_base;

			if (matched == 0) {
				/*
				 * One adjacent entry, add this resource in
				 */
				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
				newregs[j].pci_phys_mid =
				    (uint32_t)(base >> 32);
				newregs[j].pci_phys_low = (uint32_t)(base);
				newregs[j].pci_size_hi = (uint32_t)(len >> 32);
				newregs[j].pci_size_low = (uint32_t)len;

				matched = 1;
				/* remember slot j for a possible 2nd merge */
				k = j;
				j++;
			} else { /* matched == 1 */
				/*
				 * Two adjacent entries, merge them together
				 * (rewrite the slot recorded in k above).
				 */
				newregs[k].pci_phys_hi = regs[i].pci_phys_hi;
				newregs[k].pci_phys_mid =
				    (uint32_t)(base >> 32);
				newregs[k].pci_phys_low = (uint32_t)(base);
				newregs[k].pci_size_hi = (uint32_t)(len >> 32);
				newregs[k].pci_size_low = (uint32_t)len;

				matched = 2;
			}
		} else {
copy_entry:
			newregs[j] = regs[i];
			j++;
		}
	}

	if (matched == 0) {
		/* No adjacent entries, append at end */
		ASSERT(j == rcount);

		/*
		 * According to page 15 of 1275 spec, bit "n" of "available"
		 * should be set to 1.
		 */
		newregs[j].pci_phys_hi = type;
		newregs[j].pci_phys_hi |= PCI_REG_REL_M;

		newregs[j].pci_phys_mid = (uint32_t)(base >> 32);
		newregs[j].pci_phys_low = (uint32_t)base;
		newregs[j].pci_size_hi = (uint32_t)(len >> 32);
		newregs[j].pci_size_low = (uint32_t)len;

		j++;
	}

	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "available", (int *)newregs,
	    (j * sizeof (pci_regspec_t)) / sizeof (int));

	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
	kmem_free(regs, rlen);
	return (DDI_SUCCESS);

not_found:
	/*
	 * There is no "available" property on the parent node, create it.
	 */
	newregs = kmem_alloc(sizeof (pci_regspec_t), KM_SLEEP);

	/*
	 * According to page 15 of 1275 spec, bit "n" of "available" should
	 * be set to 1.
	 */
	newregs[0].pci_phys_hi = type;
	newregs[0].pci_phys_hi |= PCI_REG_REL_M;

	newregs[0].pci_phys_mid = (uint32_t)(base >> 32);
	newregs[0].pci_phys_low = (uint32_t)base;
	newregs[0].pci_size_hi = (uint32_t)(len >> 32);
	newregs[0].pci_size_low = (uint32_t)len;

	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "available", (int *)newregs,
	    sizeof (pci_regspec_t) / sizeof (int));
	kmem_free(newregs, sizeof (pci_regspec_t));
	return (DDI_SUCCESS);

failure:
	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
	kmem_free(regs, rlen);
	return (DDI_FAILURE);
}
1686 
1687 static uint32_t
1688 pci_type_ra2pci(char *type)
1689 {
1690 	uint32_t	pci_type = PCI_ADDR_TYPE_INVAL;
1691 
1692 	/*
1693 	 * No 64 bit mem support for now
1694 	 */
1695 	if (strcmp(type, NDI_RA_TYPE_IO) == 0) {
1696 		pci_type = PCI_ADDR_IO;
1697 
1698 	} else if (strcmp(type, NDI_RA_TYPE_MEM) == 0) {
1699 		pci_type = PCI_ADDR_MEM32;
1700 
1701 	} else if (strcmp(type, NDI_RA_TYPE_PCI_PREFETCH_MEM)  == 0) {
1702 		pci_type = PCI_ADDR_MEM32;
1703 		pci_type |= PCI_REG_PF_M;
1704 	}
1705 
1706 	return (pci_type);
1707 }
1708