xref: /titanic_51/usr/src/uts/common/io/busra.c (revision 7aec1d6e253b21f9e9b7ef68b4d81ab9859b51fe)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #if defined(DEBUG)
30 #define	BUSRA_DEBUG
31 #endif
32 
33 /*
34  * This module provides a set of resource management interfaces
35  * to manage bus resources globally in the system.
36  *
37  * The bus nexus drivers are typically responsible for setting up the
38  * resource maps for the bus resources available on a bus instance. However,
39  * this module also provides resource setup functions for PCI bus
40  * (used by both SPARC and X86 platforms) and ISA bus instances (used
41  * only on X86 platforms).
42  */
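
/*
 * A minimal usage sketch (illustrative only; the base, length, and type
 * values below are hypothetical and not taken from any particular driver):
 *
 *	ndi_ra_request_t req;
 *	uint64_t base, len;
 *
 *	// create an empty map, then seed it with the bus's free range
 *	(void) ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM);
 *	(void) ndi_ra_free(dip, 0xf0000000, 0x10000000, NDI_RA_TYPE_MEM, 0);
 *
 *	// later, carve a naturally size-aligned 1MB chunk out of the map
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x100000;
 *	req.ra_flags = NDI_RA_ALIGN_SIZE;
 *	if (ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_MEM, 0) ==
 *	    NDI_SUCCESS) {
 *		// use [base, base + len); ndi_ra_free() it when done
 *	}
 */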
43 
44 #include <sys/types.h>
45 #include <sys/systm.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ndi_impldefs.h>
51 #include <sys/kmem.h>
52 #include <sys/pctypes.h>
53 #include <sys/modctl.h>
54 #include <sys/debug.h>
55 #include <sys/spl.h>
56 #include <sys/pci.h>
57 #include <sys/autoconf.h>
58 
59 #if defined(BUSRA_DEBUG)
60 int busra_debug = 0;
61 #define	DEBUGPRT \
62 	if (busra_debug) cmn_err
63 
64 #else
65 #define	DEBUGPRT \
66 	if (0) cmn_err
67 #endif
68 
69 
70 /*
71  * global mutex that protects the global list of resource maps.
72  */
73 kmutex_t ra_lock;
74 
75 /*
76  * basic resource element
77  */
78 struct ra_resource {
79 	struct ra_resource *ra_next;
80 	uint64_t	ra_base;
81 	uint64_t 	ra_len;
82 };
83 
84 /*
85  * linked-list element for the list of dips (and their resource ranges)
86  * for a particular resource type.
87  * ra_rangeset points to the list of resources available
88  * for this type and this dip.
89  */
90 struct ra_dip_type  {
91 	struct ra_dip_type *ra_next;
92 	struct ra_resource  *ra_rangeset;
93 	dev_info_t *ra_dip;
94 };
95 
96 
97 /*
98  * linked-list element for the list of resource types. Each element
99  * holds all resources for a particular type.
100  */
101 struct ra_type_map {
102 	struct ra_type_map *ra_next;
103 	struct ra_dip_type *ra_dip_list;
104 	char *type;
105 };
106 
107 
108 /*
109  * placeholder that keeps the head of the whole global list;
110  * the address of the first typemap is stored in it.
111  */
112 static struct ra_type_map	*ra_map_list_head = NULL;
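
/*
 * The global map is organized as three levels of singly linked lists
 * (see the structures above): one ra_type_map per resource type, each
 * holding a list of ra_dip_type entries (one per dip), each of which
 * holds the list of free ra_resource ranges (kept sorted by base address
 * by ndi_ra_free()):
 *
 *	ra_map_list_head --> ra_type_map --> ra_type_map --> ...
 *	                         |
 *	                     ra_dip_type --> ra_dip_type --> ...
 *	                         |
 *	                     ra_resource --> ra_resource --> ...
 */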
113 
114 
115 /*
116  * This is the loadable module wrapper.
117  * It is essentially boilerplate, so it isn't documented further.
118  */
119 extern struct mod_ops mod_miscops;
120 
121 #ifdef BUSRA_DEBUG
122 void ra_dump_all();
123 #endif
124 
125 /* internal function prototypes */
126 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
127     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
128     uint32_t flag);
129 static int isnot_pow2(uint64_t value);
130 static int claim_pci_busnum(dev_info_t *dip, void *arg);
131 static int ra_map_exist(dev_info_t *dip, char *type);
132 
133 
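/*
 * RA_INSERT and RA_REMOVE operate on the address of the link that points
 * to an element ("prev"), so they work uniformly on the list head and on
 * interior links.  Note that RA_INSERT expands to two statements, so it
 * must not be used as the body of an unbraced conditional.
 */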
134 #define	RA_INSERT(prev, el) \
135 	el->ra_next = *prev; \
136 	*prev = el;
137 
138 #define	RA_REMOVE(prev, el) \
139 	*prev = el->ra_next;
140 
141 
142 static struct modlmisc modlmisc = {
143 	&mod_miscops,		/* Type of module. This one is a module */
144 	"Bus Resource Allocator (BUSRA) 1.36",	/* Name of the module. */
145 };
146 
147 static struct modlinkage modlinkage = {
148 	MODREV_1, (void *)&modlmisc, NULL
149 };
150 
151 int
152 _init()
153 {
154 	int	ret;
155 
156 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
157 		(void *)(intptr_t)__ipltospl(SPL7 - 1));
158 	if ((ret = mod_install(&modlinkage)) != 0) {
159 		mutex_destroy(&ra_lock);
160 	}
161 	return (ret);
162 }
163 
164 int
165 _fini()
166 {
167 	int	ret;
168 
169 	mutex_enter(&ra_lock);
170 
171 	if (ra_map_list_head != NULL) {
172 		mutex_exit(&ra_lock);
173 		return (EBUSY);
174 	}
175 
176 	ret = mod_remove(&modlinkage);
177 
178 	mutex_exit(&ra_lock);
179 
180 	if (ret == 0)
181 		mutex_destroy(&ra_lock);
182 
183 	return (ret);
184 }
185 
186 int
187 _info(struct modinfo *modinfop)
188 
189 {
190 	return (mod_info(&modlinkage, modinfop));
191 }
192 
193 /*
194  * set up an empty resource map for a given type and dip
195  */
196 int
197 ndi_ra_map_setup(dev_info_t *dip, char *type)
198 {
199 	struct ra_type_map  *typemapp;
200 	struct ra_dip_type  *dipmap;
201 	struct ra_dip_type  **backdip;
202 	struct ra_type_map  **backtype;
203 
204 
205 	mutex_enter(&ra_lock);
206 
207 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
208 
209 	if (dipmap == NULL) {
210 		if (backtype == NULL) {
211 			typemapp = (struct ra_type_map *)
212 			kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
213 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
214 				KM_SLEEP);
215 			(void) strcpy(typemapp->type, type);
216 			RA_INSERT(&ra_map_list_head, typemapp);
217 		} else {
218 			typemapp = *backtype;
219 		}
220 		if (backdip == NULL) {
221 			/* allocate and insert in list of dips for this type */
222 			dipmap = (struct ra_dip_type *)
223 			kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
224 			dipmap->ra_dip = dip;
225 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
226 		}
227 	}
228 
229 	mutex_exit(&ra_lock);
230 	return (NDI_SUCCESS);
231 }
232 
233 /*
234  * destroys a resource map for a given dip and type
235  */
236 int
237 ndi_ra_map_destroy(dev_info_t *dip, char *type)
238 {
239 	struct ra_dip_type	*dipmap;
240 	struct ra_dip_type	**backdip;
241 	struct ra_type_map  	**backtype, *typemap;
242 	struct ra_resource	*range;
243 
244 	mutex_enter(&ra_lock);
245 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
246 
247 	if (dipmap == NULL) {
248 		mutex_exit(&ra_lock);
249 		return (NDI_FAILURE);
250 	}
251 
252 	/*
253 	 * destroy all resources for this dip
254 	 * remove dip from type list
255 	 */
256 	ASSERT((backdip != NULL) && (backtype != NULL));
257 	while (dipmap->ra_rangeset != NULL) {
258 		range = dipmap->ra_rangeset;
259 		RA_REMOVE(&dipmap->ra_rangeset, range);
260 		kmem_free((caddr_t)range, sizeof (*range));
261 	}
262 	/* remove from dip list */
263 	RA_REMOVE(backdip, dipmap);
264 	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
265 	if ((*backtype)->ra_dip_list == NULL) {
266 		/*
267 		 * This was the last dip with this resource type.
268 		 * Remove the type from the global list.
269 		 */
270 		typemap = *backtype;
271 		RA_REMOVE(backtype, (*backtype));
272 		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
273 		kmem_free((caddr_t)typemap, sizeof (*typemap));
274 	}
275 
276 	mutex_exit(&ra_lock);
277 	return (NDI_SUCCESS);
278 }
279 
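/*
 * Check whether a resource map already exists for the given dip and type.
 */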
280 static int
281 ra_map_exist(dev_info_t *dip, char *type)
282 {
283 	struct ra_dip_type  **backdip;
284 	struct ra_type_map  **backtype;
285 
286 	mutex_enter(&ra_lock);
287 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
288 		mutex_exit(&ra_lock);
289 		return (NDI_FAILURE);
290 	}
291 
292 	mutex_exit(&ra_lock);
293 	return (NDI_SUCCESS);
294 }
295 /*
296  * Find the dip map for the specified type; if NDI_RA_PASS is set, also walk
297  * up the device tree. If found, backdip and backtype are updated to point to
298  * the previous dip in the list and the previous type for this dip in the
299  * list. If the type is not in the resource list at all, both backdip and
300  * backtype are NULL; if the type is found but the dip is not, backdip is NULL.
301  */
302 
303 static struct ra_dip_type *
304 find_dip_map_resources(dev_info_t *dip, char *type,
305     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
306     uint32_t flag)
307 {
308 	struct ra_type_map **prevmap;
309 	struct ra_dip_type *dipmap, **prevdip;
310 
311 	ASSERT(mutex_owned(&ra_lock));
312 	prevdip = NULL;
313 	dipmap = NULL;
314 	prevmap = &ra_map_list_head;
315 
316 	while (*prevmap) {
317 		if (strcmp((*prevmap)->type, type) == 0)
318 			break;
319 		prevmap = &(*prevmap)->ra_next;
320 	}
321 
322 	if (*prevmap) {
323 		for (; dip != NULL; dip = ddi_get_parent(dip)) {
324 			prevdip = &(*prevmap)->ra_dip_list;
325 			dipmap = *prevdip;
326 
327 			while (dipmap) {
328 				if (dipmap->ra_dip == dip)
329 					break;
330 				prevdip =  &dipmap->ra_next;
331 				dipmap = dipmap->ra_next;
332 			}
333 
334 			if (dipmap != NULL) {
335 				/* found it */
336 				break;
337 			}
338 
339 			if (!(flag & NDI_RA_PASS)) {
340 				break;
341 			}
342 		}
343 	}
344 
345 	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
346 	*backdip = (dipmap == NULL) ?  NULL: prevdip;
347 
348 	return (dipmap);
349 }
350 
351 int
352 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
353     uint32_t flag)
354 {
355 	struct ra_dip_type *dipmap;
356 	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
357 	struct ra_resource  *mapp, **backp;
358 	uint64_t newend, mapend;
359 	struct ra_dip_type **backdip;
360 	struct ra_type_map **backtype;
361 
362 	if (len == 0) {
363 		return (NDI_SUCCESS);
364 	}
365 
366 	mutex_enter(&ra_lock);
367 
368 	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
369 	    flag)) == NULL) {
370 		mutex_exit(&ra_lock);
371 		return (NDI_FAILURE);
372 	}
373 
374 	mapp = dipmap->ra_rangeset;
375 	backp = &dipmap->ra_rangeset;
376 
377 	/* now find where range lies and fix things up */
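	/*
	 * For example (hypothetical values): if the map holds the free ranges
	 * [0x1000, 0x2000) and [0x3000, 0x4000), then freeing base 0x2000,
	 * len 0x1000 matches the "on end" case below, extends the first node
	 * and then merges it with the second, leaving one node covering
	 * [0x1000, 0x4000).
	 */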
378 	newend = base + len;
379 	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
380 		mapend = mapp->ra_base + mapp->ra_len;
381 
382 		/* check for overlap first */
383 		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
384 		    (base > mapp->ra_base && base < mapend)) {
385 			/* overlap with mapp */
386 			overlapmap = mapp;
387 			goto overlap;
388 		} else if ((base == mapend && mapp->ra_next) &&
389 		    (newend > mapp->ra_next->ra_base)) {
390 			/* overlap with mapp->ra_next */
391 			overlapmap = mapp->ra_next;
392 			goto overlap;
393 		}
394 
395 		if (newend == mapp->ra_base) {
396 			/* simple - on front */
397 			mapp->ra_base = base;
398 			mapp->ra_len += len;
399 			/*
400 			 * no need to check whether it also merges with the
401 			 * previous node; that would have matched "on end"
402 			 */
403 			break;
404 		} else if (base == mapend) {
405 			/* simple - on end */
406 			mapp->ra_len += len;
407 			if (mapp->ra_next &&
408 			    (newend == mapp->ra_next->ra_base)) {
409 				/* merge with next node */
410 				oldmap = mapp->ra_next;
411 				mapp->ra_len += oldmap->ra_len;
412 				RA_REMOVE(&mapp->ra_next, oldmap);
413 				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
414 			}
415 			break;
416 		} else if (base < mapp->ra_base) {
417 			/* somewhere in between so just an insert */
418 			newmap = (struct ra_resource *)
419 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
420 			newmap->ra_base = base;
421 			newmap->ra_len = len;
422 			RA_INSERT(backp, newmap);
423 			break;
424 		}
425 	}
426 	if (mapp == NULL) {
427 		/* stick on end */
428 		newmap = (struct ra_resource *)
429 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
430 		newmap->ra_base = base;
431 		newmap->ra_len = len;
432 		RA_INSERT(backp, newmap);
433 	}
434 
435 	mutex_exit(&ra_lock);
436 	return (NDI_SUCCESS);
437 
438 overlap:
439 	/*
440 	 * Bad free may happen on some x86 platforms with BIOS exporting
441 	 * incorrect resource maps. The system is otherwise functioning
442 	 * normally. We send such messages to syslog only.
443 	 */
444 	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
445 	    (void *)dip, type);
446 	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
447 	    PRIx64 " overlaps with existing resource base 0x%" PRIx64
448 	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
449 	    overlapmap->ra_len);
450 
451 	mutex_exit(&ra_lock);
452 	return (NDI_FAILURE);
453 }
454 
455 /* check to see if value is power of 2 or not. */
456 static int
457 isnot_pow2(uint64_t value)
458 {
459 	uint32_t low;
460 	uint32_t hi;
461 
462 	low = value & 0xffffffff;
463 	hi = value >> 32;
464 
465 	/*
466 	 * ddi_ffs and ddi_fls take long arguments, so in a 32-bit environment
467 	 * they would not work correctly on 64-bit values; check each half.
468 	 */
469 	if ((ddi_ffs(low) == ddi_fls(low)) &&
470 	    (ddi_ffs(hi) == ddi_fls(hi)))
471 		return (0);
472 	return (1);
473 }
474 
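/*
 * Carve the allocated range [base, base + len) out of the free element
 * *mapp: trim it at the front or back, split it into two elements if the
 * range falls in the middle, or unlink and free it entirely if nothing is
 * left.  backp is the address of the link that points to mapp.
 */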
475 static  void
476 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
477 	    uint64_t base, uint64_t len)
478 {
479 	struct ra_resource *newmap;
480 	uint64_t newlen;
481 
482 	if (base != mapp->ra_base) {
483 		/* in the middle or end */
484 		newlen = base - mapp->ra_base;
485 		if ((mapp->ra_len - newlen) == len) {
486 			/* on the end */
487 			mapp->ra_len = newlen;
488 		} else {
489 			/* in the middle */
490 			newmap = (struct ra_resource *)
491 					kmem_zalloc(sizeof (*newmap), KM_SLEEP);
492 			newmap->ra_base = base + len;
493 			newmap->ra_len = mapp->ra_len -
494 				(len + newlen);
495 			mapp->ra_len = newlen;
496 			RA_INSERT(&(mapp->ra_next), newmap);
497 		}
498 	} else {
499 		/* at the beginning */
500 		mapp->ra_base += len;
501 		mapp->ra_len -= len;
502 		if (mapp->ra_len == 0) {
503 			/* remove the whole node */
504 			RA_REMOVE(backp, mapp);
505 			kmem_free((caddr_t)mapp, sizeof (*mapp));
506 		}
507 	}
508 }
509 
510 int
511 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
512     uint64_t *retlenp, char *type, uint32_t flag)
513 {
514 	struct ra_dip_type *dipmap;
515 	struct ra_resource *mapp, **backp, **backlargestp;
516 	uint64_t mask = 0;
517 	uint64_t len, remlen, largestbase, largestlen;
518 	uint64_t base, oldbase, lower, upper;
519 	struct ra_dip_type  **backdip;
520 	struct ra_type_map  **backtype;
521 	int  rval = NDI_FAILURE;
522 
523 
524 	len = req->ra_len;
525 
526 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
527 		if (isnot_pow2(req->ra_len)) {
528 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
529 				PRIx64, req->ra_len);
530 			*retbasep = 0;
531 			*retlenp = 0;
532 			return (NDI_FAILURE);
533 		}
534 	}
535 
536 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
537 	    req->ra_align_mask;
538 
539 
540 	mutex_enter(&ra_lock);
541 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
542 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
543 		mutex_exit(&ra_lock);
544 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
545 		return (NDI_FAILURE);
546 	}
547 
548 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
549 			PRIx64 "\n", (void *)mapp, len, mask);
550 
551 	backp = &(dipmap->ra_rangeset);
552 	backlargestp = NULL;
553 	largestbase = 0;
554 	largestlen = 0;
555 
556 	lower = 0;
557 	upper = ~(uint64_t)0;
558 
559 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
560 		/* bounded so skip to first possible */
561 		lower = req->ra_boundbase;
562 		upper = req->ra_boundlen + lower;
563 		if ((upper == 0) || (upper < req->ra_boundlen))
564 			upper = ~(uint64_t)0;
565 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
566 				PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
567 				"\n", mapp->ra_len, len, mapp->ra_base, mask);
568 		for (; mapp != NULL &&
569 			(mapp->ra_base + mapp->ra_len) < lower;
570 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
571 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
572 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
573 				/*
574 				 * This element's end wraps past UINT64_MAX, so
575 				 * it is a potential candidate; comparing its
576 				 * end against lower would not be precise.
577 				 */
578 				break;
579 
580 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
581 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
582 			}
583 
584 	}
585 
586 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
587 		/* first fit - not user specified */
588 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
589 			"lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
590 		for (; mapp != NULL && mapp->ra_base <= upper;
591 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
592 
593 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
594 			    ", len = %" PRIx64 "", mapp->ra_len, len);
595 			base = mapp->ra_base;
596 			if (base < lower) {
597 				base = lower;
598 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
599 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
600 				    base, mapp->ra_base, mask);
601 			}
602 
603 			if ((base & mask) != 0) {
604 				oldbase = base;
605 				/*
606 				 * failed the alignment constraint;
607 				 * adjust and see if it still fits
608 				 */
609 				base = base & ~mask;
610 				base += (mask + 1);
611 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
612 					base);
613 
614 				/*
615 				 * Check to see if the new base is past
616 				 * the end of the resource.
617 				 */
618 				if (base >= (oldbase + mapp->ra_len + 1)) {
619 					continue;
620 				}
621 			}
622 
623 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
624 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
625 					remlen = upper - base;
626 				else
627 					remlen = mapp->ra_len -
628 						(base - mapp->ra_base);
629 
630 				if ((backlargestp == NULL) ||
631 				    (largestlen < remlen)) {
632 
633 					backlargestp = backp;
634 					largestbase = base;
635 					largestlen = remlen;
636 				}
637 			}
638 
639 			if (mapp->ra_len >= len) {
640 				/* a candidate -- apply constraints */
641 				if ((len > (mapp->ra_len -
642 				    (base - mapp->ra_base))) ||
643 				    ((len - 1 + base) > upper)) {
644 					continue;
645 				}
646 
647 				/* we have a fit */
648 
649 				DEBUGPRT(CE_CONT, "\thave a fit\n");
650 
651 				adjust_link(backp, mapp, base, len);
652 				rval = NDI_SUCCESS;
653 				break;
654 
655 			}
656 		}
657 	} else {
658 		/* want an exact value/fit */
659 		base = req->ra_addr;
660 		len = req->ra_len;
661 		for (; mapp != NULL && mapp->ra_base <= upper;
662 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
663 			if (base >= mapp->ra_base &&
664 			    ((base - mapp->ra_base) < mapp->ra_len)) {
665 				/*
666 				 * This is the node with the requested base in
667 				 * its range.
668 				 */
669 				if ((len > mapp->ra_len) ||
670 				    (base - mapp->ra_base >
671 				    mapp->ra_len - len)) {
672 					/* length requirement not satisfied */
673 					if (req->ra_flags &
674 					    NDI_RA_ALLOC_PARTIAL_OK) {
675 						if ((upper - mapp->ra_base)
676 						    < mapp->ra_len)
677 							remlen = upper - base;
678 						else
679 							remlen =
680 							    mapp->ra_len -
681 							    (base -
682 							    mapp->ra_base);
683 					}
684 					backlargestp = backp;
685 					largestbase = base;
686 					largestlen = remlen;
687 					base = 0;
688 				} else {
689 					/* We have a match */
690 					adjust_link(backp, mapp, base, len);
691 					rval = NDI_SUCCESS;
692 				}
693 				break;
694 			}
695 		}
696 	}
697 
698 	if ((rval != NDI_SUCCESS) &&
699 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
700 	    (backlargestp != NULL)) {
701 		adjust_link(backlargestp, *backlargestp, largestbase,
702 			largestlen);
703 
704 		base = largestbase;
705 		len = largestlen;
706 		rval = NDI_RA_PARTIAL_REQ;
707 	}
708 
709 	mutex_exit(&ra_lock);
710 
711 	if (rval == NDI_FAILURE) {
712 		*retbasep = 0;
713 		*retlenp = 0;
714 	} else {
715 		*retbasep = base;
716 		*retlenp = len;
717 	}
718 	return (rval);
719 }
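
/*
 * As an illustration (hypothetical values), a caller needing a 0x100-byte,
 * 16-byte-aligned chunk of I/O space within [0, 0x1000), and willing to
 * search the maps of ancestor nodes, could build the request like this:
 *
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x100;
 *	req.ra_align_mask = 0xf;
 *	req.ra_flags = NDI_RA_ALLOC_BOUNDED;
 *	req.ra_boundbase = 0;
 *	req.ra_boundlen = 0x1000;
 *	rval = ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_IO,
 *	    NDI_RA_PASS);
 */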
720 
721 /*
722  * isa_resource_setup
723  *	check for /used-resources and initialize
724  *	based on info there.  If no /used-resources,
725  *	fail.
726  */
727 int
728 isa_resource_setup()
729 {
730 	dev_info_t *used, *usedpdip;
731 	/*
732 	 * note that at this time bootconf creates 32 bit properties for
733 	 * io-space and device-memory
734 	 */
735 	struct iorange {
736 		uint32_t	base;
737 		uint32_t	len;
738 	} *iorange;
739 	struct memrange {
740 		uint32_t	base;
741 		uint32_t	len;
742 	} *memrange;
743 	uint32_t *irq;
744 	int proplen;
745 	int i, len;
746 	int maxrange;
747 	ndi_ra_request_t req;
748 	uint64_t retbase;
749 	uint64_t retlen;
750 
751 	used = ddi_find_devinfo("used-resources", -1, 0);
752 	if (used == NULL) {
753 		DEBUGPRT(CE_CONT,
754 			"isa_resource_setup: used-resources not found");
755 		return (NDI_FAILURE);
756 	}
757 
758 	/*
759 	 * initialize to all resources being present
760 	 * and then remove the ones in use.
761 	 */
762 
763 	usedpdip = ddi_root_node();
764 
765 	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
766 	    (void *)used, (void *)usedpdip);
767 
768 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
769 		return (NDI_FAILURE);
770 	}
771 
772 	/* initialize io space, highest end base is 0xffff */
773 	/* note that length is highest addr + 1 since starts from 0 */
774 
775 	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);
776 
777 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
778 	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
779 		maxrange = proplen / sizeof (struct iorange);
780 		/* remove the "used" I/O resources */
781 		for (i = 0; i < maxrange; i++) {
782 			bzero((caddr_t)&req, sizeof (req));
783 			req.ra_addr =  (uint64_t)iorange[i].base;
784 			req.ra_len = (uint64_t)iorange[i].len;
785 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
786 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
787 			    NDI_RA_TYPE_IO, 0);
788 		}
789 
790 		kmem_free((caddr_t)iorange, proplen);
791 	}
792 
793 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
794 		return (NDI_FAILURE);
795 	}
796 	/* initialize memory space where highest end base is 0xffffffff */
797 	/* note that length is highest addr + 1 since starts from 0 */
798 	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
799 	    NDI_RA_TYPE_MEM, 0);
800 
801 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
802 	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
803 		maxrange = proplen / sizeof (struct memrange);
804 		/* remove the "used" memory resources */
805 		for (i = 0; i < maxrange; i++) {
806 			bzero((caddr_t)&req, sizeof (req));
807 			req.ra_addr = (uint64_t)memrange[i].base;
808 			req.ra_len = (uint64_t)memrange[i].len;
809 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
810 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
811 			    NDI_RA_TYPE_MEM, 0);
812 		}
813 
814 		kmem_free((caddr_t)memrange, proplen);
815 	}
816 
817 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
818 		return (NDI_FAILURE);
819 	}
820 
821 	/* initialize the interrupt space */
822 	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);
823 
824 #if defined(__i386) || defined(__amd64)
825 	bzero(&req, sizeof (req));
826 	req.ra_addr = 2;	/* IRQ 2 is the PIC cascade (routed to IRQ 9), so never allow it */
827 	req.ra_len = 1;
828 	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
829 	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
830 	    NDI_RA_TYPE_INTR, 0);
831 #endif
832 
833 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
834 	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
835 		/* Remove the interrupts listed as used from the available pool */
836 		len = (proplen / sizeof (uint32_t));
837 		for (i = 0; i < len; i++) {
838 			bzero((caddr_t)&req, sizeof (req));
839 			req.ra_addr = (uint64_t)irq[i];
840 			req.ra_len = 1;
841 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
842 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
843 			    NDI_RA_TYPE_INTR, 0);
844 		}
845 		kmem_free((caddr_t)irq, proplen);
846 	}
847 
848 #ifdef BUSRA_DEBUG
849 	if (busra_debug) {
850 		(void) ra_dump_all(NULL, usedpdip);
851 	}
852 #endif
853 	return (NDI_SUCCESS);
854 
855 }
856 
857 #ifdef BUSRA_DEBUG
858 void
859 ra_dump_all(char *type, dev_info_t *dip)
860 {
861 
862 	struct ra_type_map *typemap;
863 	struct ra_dip_type *dipmap;
864 	struct ra_resource *res;
865 
866 	typemap =  (struct ra_type_map *)ra_map_list_head;
867 
868 	for (; typemap != NULL; typemap = typemap->ra_next) {
869 		if (type != NULL) {
870 			if (strcmp(typemap->type, type) != 0)
871 				continue;
872 		}
873 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
874 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
875 			dipmap = dipmap->ra_next) {
876 			if (dip != NULL) {
877 				if ((dipmap->ra_dip) != dip)
878 					continue;
879 			}
880 			cmn_err(CE_CONT, "  dip is %p\n",
881 			    (void *)dipmap->ra_dip);
882 			for (res = dipmap->ra_rangeset; res != NULL;
883 				res = res->ra_next) {
884 				cmn_err(CE_CONT, "\t  range is %" PRIx64
885 				    " %" PRIx64 "\n", res->ra_base,
886 				    res->ra_len);
887 			}
888 			if (dip != NULL)
889 				break;
890 		}
891 		if (type != NULL)
892 			break;
893 	}
894 }
895 #endif
896 
897 struct bus_range {	/* 1275 "bus-range" property definition */
898 	uint32_t lo;
899 	uint32_t hi;
900 } pci_bus_range;
901 
902 struct busnum_ctrl {
903 	int	rv;
904 	dev_info_t *dip;
905 	struct	bus_range *range;
906 };
907 
908 
909 /*
910  * Setup resource map for the pci bus node based on the "available"
911  * property and "bus-range" property.
912  */
913 int
914 pci_resource_setup(dev_info_t *dip)
915 {
916 	pci_regspec_t *regs;
917 	int rlen, rcount, i;
918 	char bus_type[16] = "(unknown)";
919 	int len;
920 	struct busnum_ctrl ctrl;
921 	int circular_count;
922 	int rval = NDI_SUCCESS;
923 
924 	/*
925 	 * If this is a pci bus node then look for "available" property
926 	 * to find the available resources on this bus.
927 	 */
928 	len = sizeof (bus_type);
929 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
930 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
931 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
932 		return (NDI_FAILURE);
933 
934 	/* it is not a pci/pci-ex bus type */
935 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
936 		return (NDI_FAILURE);
937 
938 	/*
939 	 * The pci-hotplug project addresses adding the call
940 	 * to pci_resource_setup from the pci nexus driver.
941 	 * However, that project will initially cover only x86,
942 	 * so for SPARC pcmcia-pci support we still need to call
943 	 * pci_resource_setup in the pcic driver. Once all pci nexus
944 	 * drivers are updated to call pci_resource_setup, this portion
945 	 * of the code should really become an assertion that this
946 	 * function is not called twice for the same dip.
947 	 */
948 	{
949 		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
950 			return (NDI_FAILURE);
951 		}
952 	}
953 
954 
955 	/*
956 	 * Create empty resource maps first.
957 	 *
958 	 * NOTE: If all the allocated resources are already assigned to
959 	 * device(s) in the hot plug slot, then the "available" property may not
960 	 * be present. But a subsequent hot plug operation may unconfigure
961 	 * the device in the slot and try to free up its resources. So,
962 	 * at the minimum we should create empty maps here.
963 	 */
964 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
965 		return (NDI_FAILURE);
966 	}
967 
968 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
969 		return (NDI_FAILURE);
970 	}
971 
972 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
973 		return (NDI_FAILURE);
974 	}
975 
976 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
977 	    NDI_FAILURE) {
978 		return (NDI_FAILURE);
979 	}
980 
981 	/* read the "available" property if it is available */
982 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
983 	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
984 		/*
985 		 * create the available resource list for both memory and
986 		 * io space
987 		 */
988 		rcount = rlen / sizeof (pci_regspec_t);
989 		for (i = 0; i < rcount; i++) {
990 		    switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
991 		    case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
992 			(void) ndi_ra_free(dip,
993 			    (uint64_t)regs[i].pci_phys_low,
994 			    (uint64_t)regs[i].pci_size_low,
995 			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
996 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
997 			    0);
998 			break;
999 		    case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1000 			(void) ndi_ra_free(dip,
1001 			    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1002 			    ((uint64_t)(regs[i].pci_phys_low)),
1003 			    ((uint64_t)(regs[i].pci_size_hi) << 32) |
1004 			    ((uint64_t)(regs[i].pci_size_low)),
1005 			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1006 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
1007 			    0);
1008 			break;
1009 		    case PCI_REG_ADDR_G(PCI_ADDR_IO):
1010 			(void) ndi_ra_free(dip,
1011 			    (uint64_t)regs[i].pci_phys_low,
1012 			    (uint64_t)regs[i].pci_size_low,
1013 			    NDI_RA_TYPE_IO,
1014 			    0);
1015 			break;
1016 		    case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
1017 			break;
1018 		    default:
1019 			cmn_err(CE_WARN,
1020 			    "pci_resource_setup: bad addr type: %x\n",
1021 			    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
1022 			break;
1023 		    }
1024 		}
1025 		kmem_free((caddr_t)regs, rlen);
1026 	}
1027 
1028 	/*
1029 	 * update resource map for available bus numbers if the node
1030 	 * has available-bus-range or bus-range property.
1031 	 */
1032 	len = sizeof (struct bus_range);
1033 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1034 	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
1035 	    DDI_SUCCESS) {
1036 		/*
1037 		 * Add bus numbers in the range to the free list.
1038 		 */
1039 		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
1040 		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
1041 		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
1042 	} else {
1043 		/*
1044 		 * We don't have an available-bus-range property. If, instead,
1045 		 * we have a bus-range property, we add all the bus numbers
1046 		 * in that range to the free list, but we must then scan
1047 		 * for pci-pci bridges on this bus to find out if any of
1048 		 * those bus numbers are already in use. If so, we can
1049 		 * reclaim them.
1050 		 */
1051 		len = sizeof (struct bus_range);
1052 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
1053 		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
1054 		    &len) == DDI_SUCCESS) {
1055 			if (pci_bus_range.lo != pci_bus_range.hi) {
1056 				/*
1057 				 * Add bus numbers other than the secondary
1058 				 * bus number to the free list.
1059 				 */
1060 				(void) ndi_ra_free(dip,
1061 				    (uint64_t)pci_bus_range.lo + 1,
1062 				    (uint64_t)pci_bus_range.hi -
1063 				    (uint64_t)pci_bus_range.lo,
1064 				    NDI_RA_TYPE_PCI_BUSNUM, 0);
1065 
1066 				/* scan for pci-pci bridges */
1067 				ctrl.rv = DDI_SUCCESS;
1068 				ctrl.dip = dip;
1069 				ctrl.range = &pci_bus_range;
1070 				ndi_devi_enter(dip, &circular_count);
1071 				ddi_walk_devs(ddi_get_child(dip),
1072 				    claim_pci_busnum, (void *)&ctrl);
1073 				ndi_devi_exit(dip, circular_count);
1074 				if (ctrl.rv != DDI_SUCCESS) {
1075 					/* failed to reclaim in-use bus numbers */
1076 					(void) ndi_ra_map_destroy(dip,
1077 					    NDI_RA_TYPE_PCI_BUSNUM);
1078 					rval = NDI_FAILURE;
1079 				}
1080 			}
1081 		}
1082 	}
1083 
1084 #ifdef BUSRA_DEBUG
1085 	if (busra_debug) {
1086 		(void) ra_dump_all(NULL, dip);
1087 	}
1088 #endif
1089 
1090 	return (rval);
1091 }
1092 
1093 /*
1094  * If the device is a PCI bus device (i.e., a bus-range property exists), then
1095  * claim the bus numbers used by the device from the specified bus
1096  * resource map.
1097  */
1098 static int
1099 claim_pci_busnum(dev_info_t *dip, void *arg)
1100 {
1101 	struct bus_range pci_bus_range;
1102 	struct busnum_ctrl *ctrl;
1103 	ndi_ra_request_t req;
1104 	char bus_type[16] = "(unknown)";
1105 	int len;
1106 	uint64_t base;
1107 	uint64_t retlen;
1108 
1109 	ctrl = (struct busnum_ctrl *)arg;
1110 
1111 	/* check if this is a PCI bus node */
1112 	len = sizeof (bus_type);
1113 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1114 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1115 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1116 		return (DDI_WALK_PRUNECHILD);
1117 
1118 	/* it is not a pci/pci-ex bus type */
1119 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1120 		return (DDI_WALK_PRUNECHILD);
1121 
1122 	/* look for the bus-range property */
1123 	len = sizeof (struct bus_range);
1124 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1125 	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1126 		if ((pci_bus_range.lo >= ctrl->range->lo) &&
1127 		    (pci_bus_range.hi <= ctrl->range->hi)) {
1128 
1129 			/* claim the bus range from the bus resource map */
1130 			bzero((caddr_t)&req, sizeof (req));
1131 			req.ra_addr = (uint64_t)pci_bus_range.lo;
1132 			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1133 			req.ra_len = (uint64_t)pci_bus_range.hi -
1134 			    (uint64_t)pci_bus_range.lo + 1;
1135 			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1136 			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1137 				return (DDI_WALK_PRUNECHILD);
1138 		}
1139 	}
1140 
1141 	/*
1142 	 * Error return.
1143 	 */
1144 	ctrl->rv = DDI_FAILURE;
1145 	return (DDI_WALK_TERMINATE);
1146 }
1147 
1148 void
1149 pci_resource_destroy(dev_info_t *dip)
1150 {
1151 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1152 
1153 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1154 
1155 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1156 
1157 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1158 }
1159 
1160 
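/*
 * Set up the resource maps for a PCI bus node from a caller-supplied array
 * of "available"-style entries (avail_p, entries), rather than reading the
 * "available" property off the node as pci_resource_setup() does.
 */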
1161 int
1162 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1163 {
1164 	int i;
1165 
1166 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1167 		return (NDI_FAILURE);
1168 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1169 		return (NDI_FAILURE);
1170 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1171 		return (NDI_FAILURE);
1172 
1173 	/* for each entry in the PCI "available" property */
1174 	for (i = 0; i < entries; i++, avail_p++) {
1175 		if (avail_p->pci_phys_hi == -1u)
1176 			goto err;
1177 
1178 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1179 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1180 			(void) ndi_ra_free(dip,
1181 				(uint64_t)avail_p->pci_phys_low,
1182 				(uint64_t)avail_p->pci_size_low,
1183 				(avail_p->pci_phys_hi &
1184 					PCI_REG_PF_M) ?
1185 					NDI_RA_TYPE_PCI_PREFETCH_MEM :
1186 					NDI_RA_TYPE_MEM,
1187 				0);
1188 			}
1189 			break;
1190 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1191 			(void) ndi_ra_free(dip,
1192 				(uint64_t)avail_p->pci_phys_low,
1193 				(uint64_t)avail_p->pci_size_low,
1194 				NDI_RA_TYPE_IO,
1195 				0);
1196 			break;
1197 		default:
1198 			goto err;
1199 		}
1200 	}
1201 #ifdef BUSRA_DEBUG
1202 	if (busra_debug) {
1203 		(void) ra_dump_all(NULL, dip);
1204 	}
1205 #endif
1206 	return (NDI_SUCCESS);
1207 
1208 err:
1209 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1210 		i, avail_p->pci_phys_hi);
1211 	return (NDI_FAILURE);
1212 }
1213