1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #if defined(DEBUG)
29 #define	BUSRA_DEBUG
30 #endif
31 
32 /*
33  * This module provides a set of resource management interfaces
34  * to manage bus resources globally in the system.
35  *
36  * The bus nexus drivers are typically responsible for setting up the
37  * resource maps for the bus resources available on a bus instance.
38  * However, this module also provides resource setup functions for PCI
39  * buses (used by both SPARC and X86 platforms) and ISA bus instances
40  * (used only on X86 platforms).
41  */
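/*
 * Illustrative sketch (not part of this module): a bus nexus driver
 * would typically seed and consume a resource map roughly as follows;
 * the base and length values are made up for illustration.
 *
 *	ndi_ra_request_t req;
 *	uint64_t base, len;
 *
 *	(void) ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM);
 *	(void) ndi_ra_free(dip, 0x100000, 0x100000, NDI_RA_TYPE_MEM, 0);
 *
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x1000;
 *	req.ra_flags = NDI_RA_ALIGN_SIZE;
 *	if (ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_MEM, 0) ==
 *	    NDI_SUCCESS) {
 *		... base and len now describe the carved-out range ...
 *	}
 */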
42 
43 #include <sys/types.h>
44 #include <sys/systm.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/ndi_impldefs.h>
50 #include <sys/kmem.h>
51 #include <sys/pctypes.h>
52 #include <sys/modctl.h>
53 #include <sys/debug.h>
54 #include <sys/spl.h>
55 #include <sys/pci.h>
56 #include <sys/autoconf.h>
57 
58 #if defined(BUSRA_DEBUG)
59 int busra_debug = 0;
60 #define	DEBUGPRT \
61 	if (busra_debug) cmn_err
62 
63 #else
64 #define	DEBUGPRT \
65 	if (0) cmn_err
66 #endif
67 
68 
69 /*
70  * global mutex that protects the global list of resource maps.
71  */
72 kmutex_t ra_lock;
73 
74 /*
75  * basic resource element
76  */
77 struct ra_resource {
78 	struct ra_resource *ra_next;
79 	uint64_t	ra_base;
80 	uint64_t 	ra_len;
81 };
82 
83 /*
84  * linked list element for the list of dips (and their resource ranges)
85  * for a particular resource type.
86  * ra_rangeset points to the list of resources available
87  * for this type and this dip.
88  */
89 struct ra_dip_type  {
90 	struct ra_dip_type *ra_next;
91 	struct ra_resource  *ra_rangeset;
92 	dev_info_t *ra_dip;
93 };
94 
95 
96 /*
97  * linked list element for the list of resource types. Each element
98  * holds all the resources for a particular type.
99  */
100 struct ra_type_map {
101 	struct ra_type_map *ra_next;
102 	struct ra_dip_type *ra_dip_list;
103 	char *type;
104 };
105 
106 
107 /*
108  * placeholder for the head of the whole global list;
109  * it holds the address of the first typemap.
110  */
111 static struct ra_type_map	*ra_map_list_head = NULL;
112 
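/*
 * Overall layout: a list of per-type maps, each holding a list of dips,
 * each of which holds a sorted list of free ranges, e.g.:
 *
 *	ra_map_list_head --> [type A] --> [type B] --> ...
 *	                         |
 *	                         +--> [dip 1] --> [dip 2] --> ...
 *	                                  |
 *	                                  +--> [base,len] --> [base,len] --> ...
 */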
113 
114 /*
115  * This is the loadable module wrapper.
116  * It is essentially boilerplate, so it isn't documented.
117  */
118 extern struct mod_ops mod_miscops;
119 
120 #ifdef BUSRA_DEBUG
121 void ra_dump_all();
122 #endif
123 
124 /* internal function prototypes */
125 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
126     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
127     uint32_t flag);
128 static int isnot_pow2(uint64_t value);
129 static int claim_pci_busnum(dev_info_t *dip, void *arg);
130 static int ra_map_exist(dev_info_t *dip, char *type);
131 
132 
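/*
 * Both macros take 'prev' as a pointer to the link that points at 'el',
 * so insertions and removals work the same way at the head of a list and
 * in its middle.  They expand to bare, unguarded statements, so they must
 * be used as complete statements (as they are throughout this file).
 */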
133 #define	RA_INSERT(prev, el) \
134 	el->ra_next = *prev; \
135 	*prev = el;
136 
137 #define	RA_REMOVE(prev, el) \
138 	*prev = el->ra_next;
139 
140 
141 static struct modlmisc modlmisc = {
142 	&mod_miscops,		/* Type of module. This one is a misc module */
143 	"Bus Resource Allocator (BUSRA) %I%",	/* Name of the module. */
144 };
145 
146 static struct modlinkage modlinkage = {
147 	MODREV_1, (void *)&modlmisc, NULL
148 };
149 
150 int
151 _init()
152 {
153 	int	ret;
154 
155 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
156 		(void *)(intptr_t)__ipltospl(SPL7 - 1));
157 	if ((ret = mod_install(&modlinkage)) != 0) {
158 		mutex_destroy(&ra_lock);
159 	}
160 	return (ret);
161 }
162 
163 int
164 _fini()
165 {
166 	int	ret;
167 
168 	mutex_enter(&ra_lock);
169 
170 	if (ra_map_list_head != NULL) {
171 		mutex_exit(&ra_lock);
172 		return (EBUSY);
173 	}
174 
175 	ret = mod_remove(&modlinkage);
176 
177 	mutex_exit(&ra_lock);
178 
179 	if (ret == 0)
180 		mutex_destroy(&ra_lock);
181 
182 	return (ret);
183 }
184 
185 int
186 _info(struct modinfo *modinfop)
187 
188 {
189 	return (mod_info(&modlinkage, modinfop));
190 }
191 
192 /*
193  * set up an empty resource map for a given type and dip
194  */
195 int
196 ndi_ra_map_setup(dev_info_t *dip, char *type)
197 {
198 	struct ra_type_map  *typemapp;
199 	struct ra_dip_type  *dipmap;
200 	struct ra_dip_type  **backdip;
201 	struct ra_type_map  **backtype;
202 
203 
204 	mutex_enter(&ra_lock);
205 
206 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
207 
208 	if (dipmap == NULL) {
209 		if (backtype == NULL) {
210 			typemapp = (struct ra_type_map *)
211 			kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
212 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
213 				KM_SLEEP);
214 			(void) strcpy(typemapp->type, type);
215 			RA_INSERT(&ra_map_list_head, typemapp);
216 		} else {
217 			typemapp = *backtype;
218 		}
219 		if (backdip == NULL) {
220 			/* allocate and insert in list of dips for this type */
221 			dipmap = (struct ra_dip_type *)
222 			kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
223 			dipmap->ra_dip = dip;
224 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
225 		}
226 	}
227 
228 	mutex_exit(&ra_lock);
229 	return (NDI_SUCCESS);
230 }
231 
232 /*
233  * destroys a resource map for a given dip and type
234  */
235 int
236 ndi_ra_map_destroy(dev_info_t *dip, char *type)
237 {
238 	struct ra_dip_type	*dipmap;
239 	struct ra_dip_type	**backdip;
240 	struct ra_type_map  	**backtype, *typemap;
241 	struct ra_resource	*range;
242 
243 	mutex_enter(&ra_lock);
244 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
245 
246 	if (dipmap == NULL) {
247 		mutex_exit(&ra_lock);
248 		return (NDI_FAILURE);
249 	}
250 
251 	/*
252 	 * destroy all resources for this dip
253 	 * remove dip from type list
254 	 */
255 	ASSERT((backdip != NULL) && (backtype != NULL));
256 	while (dipmap->ra_rangeset != NULL) {
257 		range = dipmap->ra_rangeset;
258 		RA_REMOVE(&dipmap->ra_rangeset, range);
259 		kmem_free((caddr_t)range, sizeof (*range));
260 	}
261 	/* remove from dip list */
262 	RA_REMOVE(backdip, dipmap);
263 	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
264 	if ((*backtype)->ra_dip_list == NULL) {
265 		/*
266 		 * This was the last dip with this resource type.
267 		 * Remove the type from the global list.
268 		 */
269 		typemap = *backtype;
270 		RA_REMOVE(backtype, (*backtype));
271 		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
272 		kmem_free((caddr_t)typemap, sizeof (*typemap));
273 	}
274 
275 	mutex_exit(&ra_lock);
276 	return (NDI_SUCCESS);
277 }
278 
279 static int
280 ra_map_exist(dev_info_t *dip, char *type)
281 {
282 	struct ra_dip_type  **backdip;
283 	struct ra_type_map  **backtype;
284 
285 	mutex_enter(&ra_lock);
286 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
287 		mutex_exit(&ra_lock);
288 		return (NDI_FAILURE);
289 	}
290 
291 	mutex_exit(&ra_lock);
292 	return (NDI_SUCCESS);
293 }
294 /*
295  * Find the dip map for the specified type; if NDI_RA_PASS is set, walk up
296  * the device tree. If found, backdip and backtype are updated to point to
297  * the previous dip in the list and the previous type for this dip in the
298  * list. If the type is not in the resource list at all, both backdip and
299  * backtype are NULL; if the type is found but not the dip, backdip is NULL.
300  */
301 
302 static struct ra_dip_type *
303 find_dip_map_resources(dev_info_t *dip, char *type,
304     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
305     uint32_t flag)
306 {
307 	struct ra_type_map **prevmap;
308 	struct ra_dip_type *dipmap, **prevdip;
309 
310 	ASSERT(mutex_owned(&ra_lock));
311 	prevdip = NULL;
312 	dipmap = NULL;
313 	prevmap = &ra_map_list_head;
314 
315 	while (*prevmap) {
316 		if (strcmp((*prevmap)->type, type) == 0)
317 			break;
318 		prevmap = &(*prevmap)->ra_next;
319 	}
320 
321 	if (*prevmap) {
322 		for (; dip != NULL; dip = ddi_get_parent(dip)) {
323 			prevdip = &(*prevmap)->ra_dip_list;
324 			dipmap = *prevdip;
325 
326 			while (dipmap) {
327 				if (dipmap->ra_dip == dip)
328 					break;
329 				prevdip =  &dipmap->ra_next;
330 				dipmap = dipmap->ra_next;
331 			}
332 
333 			if (dipmap != NULL) {
334 				/* found it */
335 				break;
336 			}
337 
338 			if (!(flag & NDI_RA_PASS)) {
339 				break;
340 			}
341 		}
342 	}
343 
344 	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
345 	*backdip = (dipmap == NULL) ?  NULL: prevdip;
346 
347 	return (dipmap);
348 }
349 
350 int
351 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
352     uint32_t flag)
353 {
354 	struct ra_dip_type *dipmap;
355 	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
356 	struct ra_resource  *mapp, **backp;
357 	uint64_t newend, mapend;
358 	struct ra_dip_type **backdip;
359 	struct ra_type_map **backtype;
360 
361 	if (len == 0) {
362 		return (NDI_SUCCESS);
363 	}
364 
365 	mutex_enter(&ra_lock);
366 
367 	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
368 	    flag)) == NULL) {
369 		mutex_exit(&ra_lock);
370 		return (NDI_FAILURE);
371 	}
372 
373 	mapp = dipmap->ra_rangeset;
374 	backp = &dipmap->ra_rangeset;
375 
376 	/* now find where range lies and fix things up */
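	/*
	 * The free list is kept sorted by base and coalesced: a freed
	 * range that abuts an existing element is merged into it
	 * (possibly collapsing two elements into one), a disjoint range
	 * gets a new element, and any overlap with an existing element
	 * is reported as a bad free.
	 */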
377 	newend = base + len;
378 	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
379 		mapend = mapp->ra_base + mapp->ra_len;
380 
381 		/* check for overlap first */
382 		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
383 		    (base > mapp->ra_base && base < mapend)) {
384 			/* overlap with mapp */
385 			overlapmap = mapp;
386 			goto overlap;
387 		} else if ((base == mapend && mapp->ra_next) &&
388 		    (newend > mapp->ra_next->ra_base)) {
389 			/* overlap with mapp->ra_next */
390 			overlapmap = mapp->ra_next;
391 			goto overlap;
392 		}
393 
394 		if (newend == mapp->ra_base) {
395 			/* simple - on front */
396 			mapp->ra_base = base;
397 			mapp->ra_len += len;
398 			/*
399 			 * no need to check for a merge with the previous
400 			 * element; that case would have matched on its end
401 			 */
402 			break;
403 		} else if (base == mapend) {
404 			/* simple - on end */
405 			mapp->ra_len += len;
406 			if (mapp->ra_next &&
407 			    (newend == mapp->ra_next->ra_base)) {
408 				/* merge with next node */
409 				oldmap = mapp->ra_next;
410 				mapp->ra_len += oldmap->ra_len;
411 				RA_REMOVE(&mapp->ra_next, oldmap);
412 				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
413 			}
414 			break;
415 		} else if (base < mapp->ra_base) {
416 			/* somewhere in between so just an insert */
417 			newmap = (struct ra_resource *)
418 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
419 			newmap->ra_base = base;
420 			newmap->ra_len = len;
421 			RA_INSERT(backp, newmap);
422 			break;
423 		}
424 	}
425 	if (mapp == NULL) {
426 		/* stick on end */
427 		newmap = (struct ra_resource *)
428 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
429 		newmap->ra_base = base;
430 		newmap->ra_len = len;
431 		RA_INSERT(backp, newmap);
432 	}
433 
434 	mutex_exit(&ra_lock);
435 	return (NDI_SUCCESS);
436 
437 overlap:
438 	/*
439 	 * Bad free may happen on some x86 platforms with BIOS exporting
440 	 * incorrect resource maps. The system is otherwise functioning
441 	 * normally. We send such messages to syslog only.
442 	 */
443 	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
444 	    (void *)dip, type);
445 	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
446 	    PRIX64 " overlaps with existing resource base 0x%" PRIx64
447 	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
448 	    overlapmap->ra_len);
449 
450 	mutex_exit(&ra_lock);
451 	return (NDI_FAILURE);
452 }
453 
454 /* check to see if value is power of 2 or not. */
455 static int
456 isnot_pow2(uint64_t value)
457 {
458 	uint32_t low;
459 	uint32_t hi;
460 
461 	low = value & 0xffffffff;
462 	hi = value >> 32;
463 
464 	/*
465 	 * ddi_ffs and ddi_fls take long arguments, so in a 32-bit environment
466 	 * they won't work correctly on 64-bit values; check each half instead.
467 	 */
468 	if ((ddi_ffs(low) == ddi_fls(low)) &&
469 	    (ddi_ffs(hi) == ddi_fls(hi)))
470 		return (0);
471 	return (1);
472 }
473 
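/*
 * Carve [base, base + len) out of the free element *mapp.  The carved
 * range may sit at the front of the element (advance its base, dropping
 * the element entirely if it becomes empty), at its end (just shorten
 * it), or in its middle (split the element in two).
 */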
474 static  void
475 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
476 	    uint64_t base, uint64_t len)
477 {
478 	struct ra_resource *newmap;
479 	uint64_t newlen;
480 
481 	if (base != mapp->ra_base) {
482 		/* in the middle or end */
483 		newlen = base - mapp->ra_base;
484 		if ((mapp->ra_len - newlen) == len) {
485 			/* on the end */
486 			mapp->ra_len = newlen;
487 		} else {
488 			/* in the middle */
489 			newmap = (struct ra_resource *)
490 					kmem_zalloc(sizeof (*newmap), KM_SLEEP);
491 			newmap->ra_base = base + len;
492 			newmap->ra_len = mapp->ra_len -
493 				(len + newlen);
494 			mapp->ra_len = newlen;
495 			RA_INSERT(&(mapp->ra_next), newmap);
496 		}
497 	} else {
498 		/* at the beginning */
499 		mapp->ra_base += len;
500 		mapp->ra_len -= len;
501 		if (mapp->ra_len == 0) {
502 			/* remove the whole node */
503 			RA_REMOVE(backp, mapp);
504 			kmem_free((caddr_t)mapp, sizeof (*mapp));
505 		}
506 	}
507 }
508 
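/*
 * Allocate a range of the given type for 'dip'.  Unless
 * NDI_RA_ALLOC_SPECIFIED is set this is a first-fit search, optionally
 * constrained by an alignment mask (NDI_RA_ALIGN_SIZE implies a mask of
 * ra_len - 1, e.g. a 0x1000-byte request must start on a 0x1000 boundary)
 * and, with NDI_RA_ALLOC_BOUNDED, by the window starting at ra_boundbase
 * and spanning ra_boundlen.  With NDI_RA_ALLOC_PARTIAL_OK, the largest
 * available piece is returned (NDI_RA_PARTIAL_REQ) when the full length
 * cannot be satisfied.
 */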
509 int
510 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
511     uint64_t *retlenp, char *type, uint32_t flag)
512 {
513 	struct ra_dip_type *dipmap;
514 	struct ra_resource *mapp, **backp, **backlargestp;
515 	uint64_t mask = 0;
516 	uint64_t len, remlen, largestbase, largestlen;
517 	uint64_t base, oldbase, lower, upper;
518 	struct ra_dip_type  **backdip;
519 	struct ra_type_map  **backtype;
520 	int  rval = NDI_FAILURE;
521 
522 
523 	len = req->ra_len;
524 
525 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
526 		if (isnot_pow2(req->ra_len)) {
527 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
528 				PRIx64, req->ra_len);
529 			*retbasep = 0;
530 			*retlenp = 0;
531 			return (NDI_FAILURE);
532 		}
533 	}
534 
535 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
536 	    req->ra_align_mask;
537 
538 
539 	mutex_enter(&ra_lock);
540 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
541 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
542 		mutex_exit(&ra_lock);
543 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
544 		return (NDI_FAILURE);
545 	}
546 
547 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
548 			PRIx64 "\n", (void *)mapp, len, mask);
549 
550 	backp = &(dipmap->ra_rangeset);
551 	backlargestp = NULL;
552 	largestbase = 0;
553 	largestlen = 0;
554 
555 	lower = 0;
556 	upper = ~(uint64_t)0;
557 
558 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
559 		/* bounded so skip to first possible */
560 		lower = req->ra_boundbase;
561 		upper = req->ra_boundlen + lower;
562 		if ((upper == 0) || (upper < req->ra_boundlen))
563 			upper = ~(uint64_t)0;
564 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
565 				PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
566 				"\n", mapp->ra_len, len, mapp->ra_base, mask);
567 		for (; mapp != NULL &&
568 			(mapp->ra_base + mapp->ra_len) < lower;
569 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
570 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
571 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
572 				/*
573 				 * This element's end wraps past the maximum
574 				 * uint64_t, so it is a potential candidate;
575 				 * comparing its end against lower is not exact.
576 				 */
577 				break;
578 
579 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
580 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
581 			}
582 
583 	}
584 
585 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
586 		/* first fit - not user specified */
587 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
588 			"lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
589 		for (; mapp != NULL && mapp->ra_base <= upper;
590 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
591 
592 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
593 			    ", len = %" PRIx64 "", mapp->ra_len, len);
594 			base = mapp->ra_base;
595 			if (base < lower) {
596 				base = lower;
597 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
598 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
599 				    base, mapp->ra_base, mask);
600 			}
601 
602 			if ((base & mask) != 0) {
603 				oldbase = base;
604 				/*
605 				 * failed a critical constraint
606 				 * adjust and see if it still fits
607 				 */
608 				base = base & ~mask;
609 				base += (mask + 1);
610 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
611 					base);
612 
613 				/*
614 				 * Check to see if the new base is past
615 				 * the end of the resource.
616 				 */
617 				if (base >= (oldbase + mapp->ra_len + 1)) {
618 					continue;
619 				}
620 			}
621 
622 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
623 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
624 					remlen = upper - base;
625 				else
626 					remlen = mapp->ra_len -
627 						(base - mapp->ra_base);
628 
629 				if ((backlargestp == NULL) ||
630 				    (largestlen < remlen)) {
631 
632 					backlargestp = backp;
633 					largestbase = base;
634 					largestlen = remlen;
635 				}
636 			}
637 
638 			if (mapp->ra_len >= len) {
639 				/* a candidate -- apply constraints */
640 				if ((len > (mapp->ra_len -
641 				    (base - mapp->ra_base))) ||
642 				    ((len - 1 + base) > upper)) {
643 					continue;
644 				}
645 
646 				/* we have a fit */
647 
648 				DEBUGPRT(CE_CONT, "\thave a fit\n");
649 
650 				adjust_link(backp, mapp, base, len);
651 				rval = NDI_SUCCESS;
652 				break;
653 
654 			}
655 		}
656 	} else {
657 		/* want an exact value/fit */
658 		base = req->ra_addr;
659 		len = req->ra_len;
660 		for (; mapp != NULL && mapp->ra_base <= upper;
661 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
662 			if (base >= mapp->ra_base &&
663 			    ((base - mapp->ra_base) < mapp->ra_len)) {
664 				/*
665 				 * This is the node with the requested base in
666 				 * its range.
667 				 */
668 				if ((len > mapp->ra_len) ||
669 				    (base - mapp->ra_base >
670 				    mapp->ra_len - len)) {
671 					/* length requirement not satisfied */
672 					if (req->ra_flags &
673 					    NDI_RA_ALLOC_PARTIAL_OK) {
674 						if ((upper - mapp->ra_base)
675 						    < mapp->ra_len)
676 							remlen = upper - base;
677 						else
678 							remlen =
679 							    mapp->ra_len -
680 							    (base -
681 							    mapp->ra_base);
682 					}
683 					backlargestp = backp;
684 					largestbase = base;
685 					largestlen = remlen;
686 					base = 0;
687 				} else {
688 					/* We have a match */
689 					adjust_link(backp, mapp, base, len);
690 					rval = NDI_SUCCESS;
691 				}
692 				break;
693 			}
694 		}
695 	}
696 
697 	if ((rval != NDI_SUCCESS) &&
698 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
699 	    (backlargestp != NULL)) {
700 		adjust_link(backlargestp, *backlargestp, largestbase,
701 			largestlen);
702 
703 		base = largestbase;
704 		len = largestlen;
705 		rval = NDI_RA_PARTIAL_REQ;
706 	}
707 
708 	mutex_exit(&ra_lock);
709 
710 	if (rval == NDI_FAILURE) {
711 		*retbasep = 0;
712 		*retlenp = 0;
713 	} else {
714 		*retbasep = base;
715 		*retlenp = len;
716 	}
717 	return (rval);
718 }
719 
720 /*
721  * isa_resource_setup
722  *	check for /used-resources and initialize
723  *	based on info there.  If no /used-resources,
724  *	fail.
725  */
726 int
727 isa_resource_setup()
728 {
729 	dev_info_t *used, *usedpdip;
730 	/*
731 	 * note that at this time bootconf creates 32 bit properties for
732 	 * io-space and device-memory
733 	 */
734 	struct iorange {
735 		uint32_t	base;
736 		uint32_t	len;
737 	} *iorange;
738 	struct memrange {
739 		uint32_t	base;
740 		uint32_t	len;
741 	} *memrange;
742 	uint32_t *irq;
743 	int proplen;
744 	int i, len;
745 	int maxrange;
746 	ndi_ra_request_t req;
747 	uint64_t retbase;
748 	uint64_t retlen;
749 
750 	used = ddi_find_devinfo("used-resources", -1, 0);
751 	if (used == NULL) {
752 		DEBUGPRT(CE_CONT,
753 			"isa_resource_setup: used-resources not found");
754 		return (NDI_FAILURE);
755 	}
756 
757 	/*
758 	 * initialize to all resources being present
759 	 * and then remove the ones in use.
760 	 */
761 
762 	usedpdip = ddi_root_node();
763 
764 	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
765 	    (void *)used, (void *)usedpdip);
766 
767 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
768 		return (NDI_FAILURE);
769 	}
770 
771 	/* initialize io space, highest end base is 0xffff */
772 	/* note that length is highest addr + 1 since starts from 0 */
773 
774 	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);
775 
776 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
777 	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
778 		maxrange = proplen / sizeof (struct iorange);
779 		/* remove the "used" I/O resources */
780 		for (i = 0; i < maxrange; i++) {
781 			bzero((caddr_t)&req, sizeof (req));
782 			req.ra_addr =  (uint64_t)iorange[i].base;
783 			req.ra_len = (uint64_t)iorange[i].len;
784 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
785 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
786 			    NDI_RA_TYPE_IO, 0);
787 		}
788 
789 		kmem_free((caddr_t)iorange, proplen);
790 	}
791 
792 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
793 		return (NDI_FAILURE);
794 	}
795 	/* initialize memory space where highest end base is 0xffffffff */
796 	/* note that length is highest addr + 1 since starts from 0 */
797 	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
798 	    NDI_RA_TYPE_MEM, 0);
799 
800 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
801 	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
802 		maxrange = proplen / sizeof (struct memrange);
803 		/* remove the "used" memory resources */
804 		for (i = 0; i < maxrange; i++) {
805 			bzero((caddr_t)&req, sizeof (req));
806 			req.ra_addr = (uint64_t)memrange[i].base;
807 			req.ra_len = (uint64_t)memrange[i].len;
808 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
809 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
810 			    NDI_RA_TYPE_MEM, 0);
811 		}
812 
813 		kmem_free((caddr_t)memrange, proplen);
814 	}
815 
816 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
817 		return (NDI_FAILURE);
818 	}
819 
820 	/* initialize the interrupt space */
821 	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);
822 
823 #if defined(__i386) || defined(__amd64)
824 	bzero(&req, sizeof (req));
825 	req.ra_addr = 2;	/* IRQ 2 is aliased to IRQ 9 (PIC cascade), never allow */
826 	req.ra_len = 1;
827 	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
828 	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
829 	    NDI_RA_TYPE_INTR, 0);
830 #endif
831 
832 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
833 	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
834 		/* Initialize available interrupts by negating the used */
835 		len = (proplen / sizeof (uint32_t));
836 		for (i = 0; i < len; i++) {
837 			bzero((caddr_t)&req, sizeof (req));
838 			req.ra_addr = (uint64_t)irq[i];
839 			req.ra_len = 1;
840 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
841 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
842 			    NDI_RA_TYPE_INTR, 0);
843 		}
844 		kmem_free((caddr_t)irq, proplen);
845 	}
846 
847 #ifdef BUSRA_DEBUG
848 	if (busra_debug) {
849 		(void) ra_dump_all(NULL, usedpdip);
850 	}
851 #endif
852 	return (NDI_SUCCESS);
853 
854 }
855 
856 #ifdef BUSRA_DEBUG
857 void
858 ra_dump_all(char *type, dev_info_t *dip)
859 {
860 
861 	struct ra_type_map *typemap;
862 	struct ra_dip_type *dipmap;
863 	struct ra_resource *res;
864 
865 	typemap =  (struct ra_type_map *)ra_map_list_head;
866 
867 	for (; typemap != NULL; typemap = typemap->ra_next) {
868 		if (type != NULL) {
869 			if (strcmp(typemap->type, type) != 0)
870 				continue;
871 		}
872 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
873 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
874 			dipmap = dipmap->ra_next) {
875 			if (dip != NULL) {
876 				if ((dipmap->ra_dip) != dip)
877 					continue;
878 			}
879 			cmn_err(CE_CONT, "  dip is %p\n",
880 			    (void *)dipmap->ra_dip);
881 			for (res = dipmap->ra_rangeset; res != NULL;
882 				res = res->ra_next) {
883 				cmn_err(CE_CONT, "\t  range is %" PRIx64
884 				    " %" PRIx64 "\n", res->ra_base,
885 				    res->ra_len);
886 			}
887 			if (dip != NULL)
888 				break;
889 		}
890 		if (type != NULL)
891 			break;
892 	}
893 }
894 #endif
895 
896 struct bus_range {	/* 1275 "bus-range" property definition */
897 	uint32_t lo;
898 	uint32_t hi;
899 } pci_bus_range;
900 
901 struct busnum_ctrl {
902 	int	rv;
903 	dev_info_t *dip;
904 	struct	bus_range *range;
905 };
906 
907 
908 /*
909  * Setup resource map for the pci bus node based on the "available"
910  * property and "bus-range" property.
911  */
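/*
 * For reference: each "available" entry parsed below is a pci_regspec_t.
 * The PCI_REG_ADDR_G() bits of pci_phys_hi select the address space (I/O,
 * 32-bit or 64-bit memory), PCI_REG_PF_M marks prefetchable memory, and
 * the phys/size words supply the base and length passed to ndi_ra_free().
 */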
912 int
913 pci_resource_setup(dev_info_t *dip)
914 {
915 	pci_regspec_t *regs;
916 	int rlen, rcount, i;
917 	char bus_type[16] = "(unknown)";
918 	int len;
919 	struct busnum_ctrl ctrl;
920 	int circular_count;
921 	int rval = NDI_SUCCESS;
922 
923 	/*
924 	 * If this is a pci bus node then look for "available" property
925 	 * to find the available resources on this bus.
926 	 */
927 	len = sizeof (bus_type);
928 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
929 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
930 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
931 		return (NDI_FAILURE);
932 
933 	/* it is not a pci/pci-ex bus type */
934 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
935 		return (NDI_FAILURE);
936 
937 	/*
938 	 * The pci-hotplug project addresses adding the call to
939 	 * pci_resource_setup from the pci nexus driver. However, that
940 	 * project initially covers only x86, so for sparc pcmcia-pci
941 	 * support we still need to call pci_resource_setup in the pcic
942 	 * driver. Once all pci nexus drivers are updated to call
943 	 * pci_resource_setup, this portion of the code can become an
944 	 * assert to make sure this function is not called for the same
945 	 * dip twice.
946 	 */
947 	{
948 		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
949 			return (NDI_FAILURE);
950 		}
951 	}
952 
953 
954 	/*
955 	 * Create empty resource maps first.
956 	 *
957 	 * NOTE: If all the allocated resources are already assigned to
958 	 * device(s) in the hot plug slot then "available" property may not
959 	 * be present. But, subsequent hot plug operation may unconfigure
960 	 * the device in the slot and try to free up its resources. So,
961 	 * at the minimum we should create empty maps here.
962 	 */
963 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
964 		return (NDI_FAILURE);
965 	}
966 
967 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
968 		return (NDI_FAILURE);
969 	}
970 
971 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
972 		return (NDI_FAILURE);
973 	}
974 
975 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
976 	    NDI_FAILURE) {
977 		return (NDI_FAILURE);
978 	}
979 
980 	/* read the "available" property if it is available */
981 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
982 	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
983 		/*
984 		 * create the available resource list for both memory and
985 		 * io space
986 		 */
987 		rcount = rlen / sizeof (pci_regspec_t);
988 		for (i = 0; i < rcount; i++) {
989 		    switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
990 		    case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
991 			(void) ndi_ra_free(dip,
992 			    (uint64_t)regs[i].pci_phys_low,
993 			    (uint64_t)regs[i].pci_size_low,
994 			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
995 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
996 			    0);
997 			break;
998 		    case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
999 			(void) ndi_ra_free(dip,
1000 			    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1001 			    ((uint64_t)(regs[i].pci_phys_low)),
1002 			    ((uint64_t)(regs[i].pci_size_hi) << 32) |
1003 			    ((uint64_t)(regs[i].pci_size_low)),
1004 			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1005 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
1006 			    0);
1007 			break;
1008 		    case PCI_REG_ADDR_G(PCI_ADDR_IO):
1009 			(void) ndi_ra_free(dip,
1010 			    (uint64_t)regs[i].pci_phys_low,
1011 			    (uint64_t)regs[i].pci_size_low,
1012 			    NDI_RA_TYPE_IO,
1013 			    0);
1014 			break;
1015 		    case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
1016 			break;
1017 		    default:
1018 			cmn_err(CE_WARN,
1019 			    "pci_resource_setup: bad addr type: %x\n",
1020 			    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
1021 			break;
1022 		    }
1023 		}
1024 		kmem_free(regs, rlen);
1025 	}
1026 
1027 	/*
1028 	 * update resource map for available bus numbers if the node
1029 	 * has available-bus-range or bus-range property.
1030 	 */
1031 	len = sizeof (struct bus_range);
1032 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1033 	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
1034 	    DDI_SUCCESS) {
1035 		/*
1036 		 * Add bus numbers in the range to the free list.
1037 		 */
1038 		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
1039 		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
1040 		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
1041 	} else {
1042 		/*
1043 		 * We don't have an available-bus-range property. If, instead,
1044 		 * we have a bus-range property we add all the bus numbers
1045 		 * in that range to the free list but we must then scan
1046 		 * for pci-pci bridges on this bus to find out whether any of
1047 		 * those bus numbers are already in use. If so, we claim them
1048 		 * back out of the free list.
1049 		 */
1050 		len = sizeof (struct bus_range);
1051 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
1052 		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
1053 		    &len) == DDI_SUCCESS) {
1054 			if (pci_bus_range.lo != pci_bus_range.hi) {
1055 				/*
1056 				 * Add bus numbers other than the secondary
1057 				 * bus number to the free list.
1058 				 */
1059 				(void) ndi_ra_free(dip,
1060 				    (uint64_t)pci_bus_range.lo + 1,
1061 				    (uint64_t)pci_bus_range.hi -
1062 				    (uint64_t)pci_bus_range.lo,
1063 				    NDI_RA_TYPE_PCI_BUSNUM, 0);
1064 
1065 				/* scan for pci-pci bridges */
1066 				ctrl.rv = DDI_SUCCESS;
1067 				ctrl.dip = dip;
1068 				ctrl.range = &pci_bus_range;
1069 				ndi_devi_enter(dip, &circular_count);
1070 				ddi_walk_devs(ddi_get_child(dip),
1071 				    claim_pci_busnum, (void *)&ctrl);
1072 				ndi_devi_exit(dip, circular_count);
1073 				if (ctrl.rv != DDI_SUCCESS) {
1074 					/* failed to create the map */
1075 					(void) ndi_ra_map_destroy(dip,
1076 					    NDI_RA_TYPE_PCI_BUSNUM);
1077 					rval = NDI_FAILURE;
1078 				}
1079 			}
1080 		}
1081 	}
1082 
1083 #ifdef BUSRA_DEBUG
1084 	if (busra_debug) {
1085 		(void) ra_dump_all(NULL, dip);
1086 	}
1087 #endif
1088 
1089 	return (rval);
1090 }
1091 
1092 /*
1093  * If the device is a PCI bus device (i.e. a bus-range property exists), then
1094  * claim the bus numbers used by the device from the specified bus
1095  * resource map.
1096  */
1097 static int
1098 claim_pci_busnum(dev_info_t *dip, void *arg)
1099 {
1100 	struct bus_range pci_bus_range;
1101 	struct busnum_ctrl *ctrl;
1102 	ndi_ra_request_t req;
1103 	char bus_type[16] = "(unknown)";
1104 	int len;
1105 	uint64_t base;
1106 	uint64_t retlen;
1107 
1108 	ctrl = (struct busnum_ctrl *)arg;
1109 
1110 	/* check if this is a PCI bus node */
1111 	len = sizeof (bus_type);
1112 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1113 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1114 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1115 		return (DDI_WALK_PRUNECHILD);
1116 
1117 	/* it is not a pci/pci-ex bus type */
1118 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1119 		return (DDI_WALK_PRUNECHILD);
1120 
1121 	/* look for the bus-range property */
1122 	len = sizeof (struct bus_range);
1123 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1124 	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1125 		if ((pci_bus_range.lo >= ctrl->range->lo) &&
1126 		    (pci_bus_range.hi <= ctrl->range->hi)) {
1127 
1128 			/* claim the bus range from the bus resource map */
1129 			bzero((caddr_t)&req, sizeof (req));
1130 			req.ra_addr = (uint64_t)pci_bus_range.lo;
1131 			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1132 			req.ra_len = (uint64_t)pci_bus_range.hi -
1133 			    (uint64_t)pci_bus_range.lo + 1;
1134 			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1135 			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1136 				return (DDI_WALK_PRUNECHILD);
1137 		}
1138 	}
1139 
1140 	/*
1141 	 * Error return.
1142 	 */
1143 	ctrl->rv = DDI_FAILURE;
1144 	return (DDI_WALK_TERMINATE);
1145 }
1146 
1147 void
1148 pci_resource_destroy(dev_info_t *dip)
1149 {
1150 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1151 
1152 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1153 
1154 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1155 
1156 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1157 }
1158 
1159 
1160 int
1161 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1162 {
1163 	int i;
1164 
1165 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1166 		return (NDI_FAILURE);
1167 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1168 		return (NDI_FAILURE);
1169 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1170 		return (NDI_FAILURE);
1171 
1172 	/* for each entry in the PCI "available" property */
1173 	for (i = 0; i < entries; i++, avail_p++) {
1174 		if (avail_p->pci_phys_hi == -1u)
1175 			goto err;
1176 
1177 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1178 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1179 			(void) ndi_ra_free(dip,
1180 				(uint64_t)avail_p->pci_phys_low,
1181 				(uint64_t)avail_p->pci_size_low,
1182 				(avail_p->pci_phys_hi &
1183 					PCI_REG_PF_M) ?
1184 					NDI_RA_TYPE_PCI_PREFETCH_MEM :
1185 					NDI_RA_TYPE_MEM,
1186 				0);
1187 			}
1188 			break;
1189 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1190 			(void) ndi_ra_free(dip,
1191 				(uint64_t)avail_p->pci_phys_low,
1192 				(uint64_t)avail_p->pci_size_low,
1193 				NDI_RA_TYPE_IO,
1194 				0);
1195 			break;
1196 		default:
1197 			goto err;
1198 		}
1199 	}
1200 #ifdef BUSRA_DEBUG
1201 	if (busra_debug) {
1202 		(void) ra_dump_all(NULL, dip);
1203 	}
1204 #endif
1205 	return (NDI_SUCCESS);
1206 
1207 err:
1208 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1209 		i, avail_p->pci_phys_hi);
1210 	return (NDI_FAILURE);
1211 }
1212