xref: /titanic_52/usr/src/uts/common/io/busra.c (revision 93c20f2609342fd05f6625f16dfcb9348e7977f2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #if defined(DEBUG)
27 #define	BUSRA_DEBUG
28 #endif
29 
30 /*
31  * This module provides a set of resource management interfaces
32  * to manage bus resources globally in the system.
33  *
34  * The bus nexus drivers are typically responsible to setup resource
35  * maps for the bus resources available for a bus instance. However
36  * this module also provides resource setup functions for PCI bus
37  * (used by both SPARC and X86 platforms) and ISA bus instances (used
38  * only for X86 platforms).
39  */
40 
41 #include <sys/types.h>
42 #include <sys/systm.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/sunndi.h>
46 #include <sys/ddi_impldefs.h>
47 #include <sys/ndi_impldefs.h>
48 #include <sys/kmem.h>
49 #include <sys/pctypes.h>
50 #include <sys/modctl.h>
51 #include <sys/debug.h>
52 #include <sys/spl.h>
53 #include <sys/pci.h>
54 #include <sys/autoconf.h>
55 
56 #if defined(BUSRA_DEBUG)
57 int busra_debug = 0;
58 #define	DEBUGPRT \
59 	if (busra_debug) cmn_err
60 
61 #else
62 #define	DEBUGPRT \
63 	if (0) cmn_err
64 #endif
65 
66 
67 /*
68  * global mutex that protects the global list of resource maps.
69  */
70 kmutex_t ra_lock;
71 
/*
 * basic resource element: one free range [ra_base, ra_base + ra_len)
 * in a singly linked list kept sorted by base address.
 */
struct ra_resource {
	struct ra_resource *ra_next;
	uint64_t	ra_base;
	uint64_t 	ra_len;
};

/*
 * link list element for the list of dips (and their resource ranges)
 * for a particular resource type.
 * ra_rangeset points to the list of resources available
 * for this type and this dip.
 */
struct ra_dip_type  {
	struct ra_dip_type *ra_next;
	struct ra_resource  *ra_rangeset;	/* free ranges for this dip */
	dev_info_t *ra_dip;
};


/*
 * link list element for list of types resources. Each element
 * has all resources for a particular type.
 */
struct ra_type_map {
	struct ra_type_map *ra_next;
	struct ra_dip_type *ra_dip_list;	/* dips with maps of this type */
	char *type;				/* resource type name (owned) */
};
103 
104 
105 /*
106  * place holder to keep the head of the whole global list.
107  * the address of the first typemap would be stored in it.
108  */
109 static struct ra_type_map	*ra_map_list_head = NULL;
110 
111 
112 /*
113  * This is the loadable module wrapper.
114  * It is essentially boilerplate so isn't documented
115  */
116 extern struct mod_ops mod_miscops;
117 
118 #ifdef BUSRA_DEBUG
119 void ra_dump_all();
120 #endif
121 
122 /* internal function prototypes */
123 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
124     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
125     uint32_t flag);
126 static int isnot_pow2(uint64_t value);
127 static int claim_pci_busnum(dev_info_t *dip, void *arg);
128 static int ra_map_exist(dev_info_t *dip, char *type);
129 
130 
/*
 * Linked-list helpers used at all three list levels (type, dip,
 * resource).  Wrapped in do { } while (0) so each expands to a single
 * statement and stays safe in unbraced if/else bodies (CERT PRE10-C).
 * Both macros evaluate their arguments more than once; do not pass
 * expressions with side effects.
 */
#define	RA_INSERT(prev, el) \
	do { \
		(el)->ra_next = *(prev); \
		*(prev) = (el); \
	} while (0)

#define	RA_REMOVE(prev, el) \
	do { \
		*(prev) = (el)->ra_next; \
	} while (0)
138 
139 static struct modlmisc modlmisc = {
140 	&mod_miscops,		/* Type of module. This one is a module */
141 	"Bus Resource Allocator (BUSRA)",	/* Name of the module. */
142 };
143 
144 static struct modlinkage modlinkage = {
145 	MODREV_1, (void *)&modlmisc, NULL
146 };
147 
148 int
149 _init()
150 {
151 	int	ret;
152 
153 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
154 		(void *)(intptr_t)__ipltospl(SPL7 - 1));
155 	if ((ret = mod_install(&modlinkage)) != 0) {
156 		mutex_destroy(&ra_lock);
157 	}
158 	return (ret);
159 }
160 
161 int
162 _fini()
163 {
164 	int	ret;
165 
166 	mutex_enter(&ra_lock);
167 
168 	if (ra_map_list_head != NULL) {
169 		mutex_exit(&ra_lock);
170 		return (EBUSY);
171 	}
172 
173 	ret = mod_remove(&modlinkage);
174 
175 	mutex_exit(&ra_lock);
176 
177 	if (ret == 0)
178 		mutex_destroy(&ra_lock);
179 
180 	return (ret);
181 }
182 
183 int
184 _info(struct modinfo *modinfop)
185 
186 {
187 	return (mod_info(&modlinkage, modinfop));
188 }
189 
190 /*
191  * set up an empty resource map for a given type and dip
192  */
193 int
194 ndi_ra_map_setup(dev_info_t *dip, char *type)
195 {
196 	struct ra_type_map  *typemapp;
197 	struct ra_dip_type  *dipmap;
198 	struct ra_dip_type  **backdip;
199 	struct ra_type_map  **backtype;
200 
201 
202 	mutex_enter(&ra_lock);
203 
204 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
205 
206 	if (dipmap == NULL) {
207 		if (backtype == NULL) {
208 			typemapp = (struct ra_type_map *)
209 			kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
210 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
211 				KM_SLEEP);
212 			(void) strcpy(typemapp->type, type);
213 			RA_INSERT(&ra_map_list_head, typemapp);
214 		} else {
215 			typemapp = *backtype;
216 		}
217 		if (backdip == NULL) {
218 			/* allocate and insert in list of dips for this type */
219 			dipmap = (struct ra_dip_type *)
220 			kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
221 			dipmap->ra_dip = dip;
222 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
223 		}
224 	}
225 
226 	mutex_exit(&ra_lock);
227 	return (NDI_SUCCESS);
228 }
229 
/*
 * destroys a resource map for a given dip and type
 *
 * Frees every resource range recorded for (dip, type), unlinks the dip
 * from the type's dip list and, if that dip was the last one of this
 * type, removes the type entry from the global list as well.
 *
 * Returns NDI_SUCCESS, or NDI_FAILURE when no map exists for
 * (dip, type).
 */
int
ndi_ra_map_destroy(dev_info_t *dip, char *type)
{
	struct ra_dip_type	*dipmap;
	struct ra_dip_type	**backdip;
	struct ra_type_map  	**backtype, *typemap;
	struct ra_resource	*range;

	mutex_enter(&ra_lock);
	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);

	if (dipmap == NULL) {
		mutex_exit(&ra_lock);
		return (NDI_FAILURE);
	}

	/*
	 * destroy all resources for this dip
	 * remove dip from type list
	 */
	ASSERT((backdip != NULL) && (backtype != NULL));
	while (dipmap->ra_rangeset != NULL) {
		/* pop and free each free-range element */
		range = dipmap->ra_rangeset;
		RA_REMOVE(&dipmap->ra_rangeset, range);
		kmem_free((caddr_t)range, sizeof (*range));
	}
	/* remove from dip list (backdip is the link slot pointing at it) */
	RA_REMOVE(backdip, dipmap);
	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
	if ((*backtype)->ra_dip_list == NULL) {
		/*
		 * This was the last dip with this resource type.
		 * Remove the type from the global list.
		 */
		typemap = *backtype;
		RA_REMOVE(backtype, (*backtype));
		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
		kmem_free((caddr_t)typemap, sizeof (*typemap));
	}

	mutex_exit(&ra_lock);
	return (NDI_SUCCESS);
}
276 
277 static int
278 ra_map_exist(dev_info_t *dip, char *type)
279 {
280 	struct ra_dip_type  **backdip;
281 	struct ra_type_map  **backtype;
282 
283 	mutex_enter(&ra_lock);
284 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
285 		mutex_exit(&ra_lock);
286 		return (NDI_FAILURE);
287 	}
288 
289 	mutex_exit(&ra_lock);
290 	return (NDI_SUCCESS);
291 }
/*
 * Find a dip map for the specified type, if NDI_RA_PASS will go up on dev tree
 * if found, backdip and backtype will be updated to point to the previous
 * dip in the list and previous type for this dip in the list.
 * If no such type at all in the resource list both backdip and backtype
 * will be null. If the type found but no dip, back dip will be null.
 *
 * Note the outputs are "link slots" (pointers to the ra_next field of
 * the predecessor, or to the list head), so callers can unlink the
 * found element directly with RA_REMOVE.
 */
static struct ra_dip_type *
find_dip_map_resources(dev_info_t *dip, char *type,
    struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
    uint32_t flag)
{
	struct ra_type_map **prevmap;
	struct ra_dip_type *dipmap, **prevdip;

	ASSERT(mutex_owned(&ra_lock));
	prevdip = NULL;
	dipmap = NULL;
	prevmap = &ra_map_list_head;

	/* locate the type map; prevmap ends up at its link slot */
	while (*prevmap) {
		if (strcmp((*prevmap)->type, type) == 0)
			break;
		prevmap = &(*prevmap)->ra_next;
	}

	if (*prevmap) {
		/*
		 * Search the type's dip list for this dip.  With
		 * NDI_RA_PASS, retry at each ancestor, walking up the
		 * devinfo tree until a map is found.
		 */
		for (; dip != NULL; dip = ddi_get_parent(dip)) {
			prevdip = &(*prevmap)->ra_dip_list;
			dipmap = *prevdip;

			while (dipmap) {
				if (dipmap->ra_dip == dip)
					break;
				prevdip =  &dipmap->ra_next;
				dipmap = dipmap->ra_next;
			}

			if (dipmap != NULL) {
				/* found it */
				break;
			}

			if (!(flag & NDI_RA_PASS)) {
				break;
			}
		}
	}

	/* report link slots, or NULL where the type/dip was not found */
	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
	*backdip = (dipmap == NULL) ?  NULL: prevdip;

	return (dipmap);
}
347 
348 int
349 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
350     uint32_t flag)
351 {
352 	struct ra_dip_type *dipmap;
353 	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
354 	struct ra_resource  *mapp, **backp;
355 	uint64_t newend, mapend;
356 	struct ra_dip_type **backdip;
357 	struct ra_type_map **backtype;
358 
359 	if (len == 0) {
360 		return (NDI_SUCCESS);
361 	}
362 
363 	mutex_enter(&ra_lock);
364 
365 	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
366 	    flag)) == NULL) {
367 		mutex_exit(&ra_lock);
368 		return (NDI_FAILURE);
369 	}
370 
371 	mapp = dipmap->ra_rangeset;
372 	backp = &dipmap->ra_rangeset;
373 
374 	/* now find where range lies and fix things up */
375 	newend = base + len;
376 	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
377 		mapend = mapp->ra_base + mapp->ra_len;
378 
379 		/* check for overlap first */
380 		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
381 		    (base > mapp->ra_base && base < mapend)) {
382 			/* overlap with mapp */
383 			overlapmap = mapp;
384 			goto overlap;
385 		} else if ((base == mapend && mapp->ra_next) &&
386 		    (newend > mapp->ra_next->ra_base)) {
387 			/* overlap with mapp->ra_next */
388 			overlapmap = mapp->ra_next;
389 			goto overlap;
390 		}
391 
392 		if (newend == mapp->ra_base) {
393 			/* simple - on front */
394 			mapp->ra_base = base;
395 			mapp->ra_len += len;
396 			/*
397 			 * don't need to check if it merges with
398 			 * previous since that would match on on end
399 			 */
400 			break;
401 		} else if (base == mapend) {
402 			/* simple - on end */
403 			mapp->ra_len += len;
404 			if (mapp->ra_next &&
405 			    (newend == mapp->ra_next->ra_base)) {
406 				/* merge with next node */
407 				oldmap = mapp->ra_next;
408 				mapp->ra_len += oldmap->ra_len;
409 				RA_REMOVE(&mapp->ra_next, oldmap);
410 				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
411 			}
412 			break;
413 		} else if (base < mapp->ra_base) {
414 			/* somewhere in between so just an insert */
415 			newmap = (struct ra_resource *)
416 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
417 			newmap->ra_base = base;
418 			newmap->ra_len = len;
419 			RA_INSERT(backp, newmap);
420 			break;
421 		}
422 	}
423 	if (mapp == NULL) {
424 		/* stick on end */
425 		newmap = (struct ra_resource *)
426 				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
427 		newmap->ra_base = base;
428 		newmap->ra_len = len;
429 		RA_INSERT(backp, newmap);
430 	}
431 
432 	mutex_exit(&ra_lock);
433 	return (NDI_SUCCESS);
434 
435 overlap:
436 	/*
437 	 * Bad free may happen on some x86 platforms with BIOS exporting
438 	 * incorrect resource maps. The system is otherwise functioning
439 	 * normally. We send such messages to syslog only.
440 	 */
441 	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
442 	    (void *)dip, type);
443 	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
444 	    PRIX64 " overlaps with existing resource base 0x%" PRIx64
445 	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
446 	    overlapmap->ra_len);
447 
448 	mutex_exit(&ra_lock);
449 	return (NDI_FAILURE);
450 }
451 
/*
 * check to see if value is power of 2 or not.
 * Returns 0 when value is a power of two, non-zero otherwise.
 *
 * Fix: the previous implementation compared ddi_ffs()/ddi_fls() on
 * each 32-bit half independently, which wrongly classified values with
 * a single bit set in *each* half (e.g. 0x100000001) as powers of two.
 * The standard bit trick below is correct for all 64-bit values and,
 * like the old code, treats 0 as a power of two.
 */
static int
isnot_pow2(uint64_t value)
{
	return ((value & (value - 1)) != 0);
}
471 
/*
 * Carve the allocated range [base, base + len) out of the free element
 * *mapp, which the caller has verified fully contains it.  Depending
 * on where the range sits, the element is trimmed at the front or the
 * back, split into two pieces, or removed entirely.  backp is the link
 * slot pointing at mapp, needed for removal.
 */
static  void
adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
	    uint64_t base, uint64_t len)
{
	struct ra_resource *newmap;
	uint64_t newlen;

	if (base != mapp->ra_base) {
		/* in the middle or end */
		newlen = base - mapp->ra_base;	/* free space left in front */
		if ((mapp->ra_len - newlen) == len) {
			/* on the end: just shorten the element */
			mapp->ra_len = newlen;
		} else {
			/* in the middle: split into two free pieces */
			newmap = (struct ra_resource *)
					kmem_zalloc(sizeof (*newmap), KM_SLEEP);
			newmap->ra_base = base + len;
			newmap->ra_len = mapp->ra_len -
				(len + newlen);
			mapp->ra_len = newlen;
			RA_INSERT(&(mapp->ra_next), newmap);
		}
	} else {
		/* at the beginning: advance the base */
		mapp->ra_base += len;
		mapp->ra_len -= len;
		if (mapp->ra_len == 0) {
			/* remove the whole node */
			RA_REMOVE(backp, mapp);
			kmem_free((caddr_t)mapp, sizeof (*mapp));
		}
	}
}
506 
507 int
508 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
509     uint64_t *retlenp, char *type, uint32_t flag)
510 {
511 	struct ra_dip_type *dipmap;
512 	struct ra_resource *mapp, **backp, **backlargestp;
513 	uint64_t mask = 0;
514 	uint64_t len, remlen, largestbase, largestlen;
515 	uint64_t base, oldbase, lower, upper;
516 	struct ra_dip_type  **backdip;
517 	struct ra_type_map  **backtype;
518 	int  rval = NDI_FAILURE;
519 
520 
521 	len = req->ra_len;
522 
523 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
524 		if (isnot_pow2(req->ra_len)) {
525 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
526 				PRIx64, req->ra_len);
527 			*retbasep = 0;
528 			*retlenp = 0;
529 			return (NDI_FAILURE);
530 		}
531 	}
532 
533 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
534 	    req->ra_align_mask;
535 
536 
537 	mutex_enter(&ra_lock);
538 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
539 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
540 		mutex_exit(&ra_lock);
541 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
542 		return (NDI_FAILURE);
543 	}
544 
545 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
546 			PRIx64 "\n", (void *)mapp, len, mask);
547 
548 	backp = &(dipmap->ra_rangeset);
549 	backlargestp = NULL;
550 	largestbase = 0;
551 	largestlen = 0;
552 
553 	lower = 0;
554 	upper = ~(uint64_t)0;
555 
556 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
557 		/* bounded so skip to first possible */
558 		lower = req->ra_boundbase;
559 		upper = req->ra_boundlen + lower;
560 		if ((upper == 0) || (upper < req->ra_boundlen))
561 			upper = ~(uint64_t)0;
562 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
563 				PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
564 				"\n", mapp->ra_len, len, mapp->ra_base, mask);
565 		for (; mapp != NULL &&
566 			(mapp->ra_base + mapp->ra_len) < lower;
567 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
568 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
569 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
570 				/*
571 				 * This elements end goes beyond max uint64_t.
572 				 * potential candidate, check end against lower
573 				 * would not be precise.
574 				 */
575 				break;
576 
577 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
578 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
579 			}
580 
581 	}
582 
583 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
584 		/* first fit - not user specified */
585 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
586 			"lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
587 		for (; mapp != NULL && mapp->ra_base <= upper;
588 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
589 
590 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
591 			    ", len = %" PRIx64 "", mapp->ra_len, len);
592 			base = mapp->ra_base;
593 			if (base < lower) {
594 				base = lower;
595 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
596 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
597 				    base, mapp->ra_base, mask);
598 			}
599 
600 			if ((base & mask) != 0) {
601 				oldbase = base;
602 				/*
603 				 * failed a critical constraint
604 				 * adjust and see if it still fits
605 				 */
606 				base = base & ~mask;
607 				base += (mask + 1);
608 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
609 					base);
610 
611 				/*
612 				 * Check to see if the new base is past
613 				 * the end of the resource.
614 				 */
615 				if (base >= (oldbase + mapp->ra_len + 1)) {
616 					continue;
617 				}
618 			}
619 
620 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
621 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
622 					remlen = upper - base;
623 				else
624 					remlen = mapp->ra_len -
625 						(base - mapp->ra_base);
626 
627 				if ((backlargestp == NULL) ||
628 				    (largestlen < remlen)) {
629 
630 					backlargestp = backp;
631 					largestbase = base;
632 					largestlen = remlen;
633 				}
634 			}
635 
636 			if (mapp->ra_len >= len) {
637 				/* a candidate -- apply constraints */
638 				if ((len > (mapp->ra_len -
639 				    (base - mapp->ra_base))) ||
640 				    ((len - 1 + base) > upper)) {
641 					continue;
642 				}
643 
644 				/* we have a fit */
645 
646 				DEBUGPRT(CE_CONT, "\thave a fit\n");
647 
648 				adjust_link(backp, mapp, base, len);
649 				rval = NDI_SUCCESS;
650 				break;
651 
652 			}
653 		}
654 	} else {
655 		/* want an exact value/fit */
656 		base = req->ra_addr;
657 		len = req->ra_len;
658 		for (; mapp != NULL && mapp->ra_base <= upper;
659 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
660 			if (base >= mapp->ra_base &&
661 			    ((base - mapp->ra_base) < mapp->ra_len)) {
662 				/*
663 				 * This is the node with he requested base in
664 				 * its range
665 				 */
666 				if ((len > mapp->ra_len) ||
667 				    (base - mapp->ra_base >
668 				    mapp->ra_len - len)) {
669 					/* length requirement not satisfied */
670 					if (req->ra_flags &
671 					    NDI_RA_ALLOC_PARTIAL_OK) {
672 						if ((upper - mapp->ra_base)
673 						    < mapp->ra_len)
674 							remlen = upper - base;
675 						else
676 							remlen =
677 							    mapp->ra_len -
678 							    (base -
679 							    mapp->ra_base);
680 					}
681 					backlargestp = backp;
682 					largestbase = base;
683 					largestlen = remlen;
684 					base = 0;
685 				} else {
686 					/* We have a match */
687 					adjust_link(backp, mapp, base, len);
688 					rval = NDI_SUCCESS;
689 				}
690 				break;
691 			}
692 		}
693 	}
694 
695 	if ((rval != NDI_SUCCESS) &&
696 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
697 	    (backlargestp != NULL)) {
698 		adjust_link(backlargestp, *backlargestp, largestbase,
699 			largestlen);
700 
701 		base = largestbase;
702 		len = largestlen;
703 		rval = NDI_RA_PARTIAL_REQ;
704 	}
705 
706 	mutex_exit(&ra_lock);
707 
708 	if (rval == NDI_FAILURE) {
709 		*retbasep = 0;
710 		*retlenp = 0;
711 	} else {
712 		*retbasep = base;
713 		*retlenp = len;
714 	}
715 	return (rval);
716 }
717 
/*
 * isa_resource_setup
 *	check for /used-resources and initialize
 *	based on info there.  If no /used-resources,
 *	fail.
 *
 * Builds IO, MEM and INTR maps on the root node: first marks the whole
 * space free, then claims back every range listed in the
 * /used-resources node so only genuinely free resources remain.
 */
int
isa_resource_setup()
{
	dev_info_t *used, *usedpdip;
	/*
	 * note that at this time bootconf creates 32 bit properties for
	 * io-space and device-memory
	 */
	struct iorange {
		uint32_t	base;
		uint32_t	len;
	} *iorange;
	struct memrange {
		uint32_t	base;
		uint32_t	len;
	} *memrange;
	uint32_t *irq;
	int proplen;
	int i, len;
	int maxrange;
	ndi_ra_request_t req;
	uint64_t retbase;
	uint64_t retlen;

	used = ddi_find_devinfo("used-resources", -1, 0);
	if (used == NULL) {
		DEBUGPRT(CE_CONT,
		    "isa_resource_setup: used-resources not found");
		return (NDI_FAILURE);
	}

	/*
	 * initialize to all resources being present
	 * and then remove the ones in use.
	 */

	usedpdip = ddi_root_node();

	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
	    (void *)used, (void *)usedpdip);

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize io space, highest end base is 0xffff */
	/* note that length is highest addr + 1 since starts from 0 */

	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct iorange);
		/* remove the "used" I/O resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr =  (uint64_t)iorange[i].base;
			req.ra_len = (uint64_t)iorange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_IO, 0);
		}

		kmem_free((caddr_t)iorange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}
	/* initialize memory space where highest end base is 0xffffffff */
	/* note that length is highest addr + 1 since starts from 0 */
	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
	    NDI_RA_TYPE_MEM, 0);

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct memrange);
		/* remove the "used" memory resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)memrange[i].base;
			req.ra_len = (uint64_t)memrange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_MEM, 0);
		}

		kmem_free((caddr_t)memrange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize the interrupt space (ISA IRQs 0-15) */
	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);

#if defined(__i386) || defined(__amd64)
	bzero(&req, sizeof (req));
	/* IRQ 2 is the 8259 cascade (routed as IRQ 9); never hand it out */
	req.ra_addr = 2;	/* 2 == 9 so never allow */
	req.ra_len = 1;
	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
	    NDI_RA_TYPE_INTR, 0);
#endif

	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
		/* Initialize available interrupts by negating the used */
		len = (proplen / sizeof (uint32_t));
		for (i = 0; i < len; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)irq[i];
			req.ra_len = 1;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_INTR, 0);
		}
		kmem_free((caddr_t)irq, proplen);
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, usedpdip);
	}
#endif
	return (NDI_SUCCESS);

}
853 
854 #ifdef BUSRA_DEBUG
855 void
856 ra_dump_all(char *type, dev_info_t *dip)
857 {
858 
859 	struct ra_type_map *typemap;
860 	struct ra_dip_type *dipmap;
861 	struct ra_resource *res;
862 
863 	typemap =  (struct ra_type_map *)ra_map_list_head;
864 
865 	for (; typemap != NULL; typemap = typemap->ra_next) {
866 		if (type != NULL) {
867 			if (strcmp(typemap->type, type) != 0)
868 				continue;
869 		}
870 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
871 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
872 			dipmap = dipmap->ra_next) {
873 			if (dip != NULL) {
874 				if ((dipmap->ra_dip) != dip)
875 					continue;
876 			}
877 			cmn_err(CE_CONT, "  dip is %p\n",
878 			    (void *)dipmap->ra_dip);
879 			for (res = dipmap->ra_rangeset; res != NULL;
880 				res = res->ra_next) {
881 				cmn_err(CE_CONT, "\t  range is %" PRIx64
882 				    " %" PRIx64 "\n", res->ra_base,
883 				    res->ra_len);
884 			}
885 			if (dip != NULL)
886 				break;
887 		}
888 		if (type != NULL)
889 			break;
890 	}
891 }
892 #endif
893 
struct bus_range {	/* 1275 "bus-range" property definition */
	uint32_t lo;	/* lowest bus number */
	uint32_t hi;	/* highest bus number */
} pci_bus_range;	/* file-scope scratch buffer for property reads */

/* control block handed to the claim_pci_busnum() tree-walk callback */
struct busnum_ctrl {
	int	rv;		/* DDI_SUCCESS until a claim fails */
	dev_info_t *dip;	/* node whose busnum map is being updated */
	struct	bus_range *range;	/* parent's bus-range bounds */
};
904 
905 
/*
 * Setup resource map for the pci bus node based on the "available"
 * property and "bus-range" property.
 *
 * Creates empty MEM/IO/BUSNUM/PREFETCH maps, seeds MEM/IO/prefetch
 * from the node's "available" property, and seeds the bus-number map
 * from "available-bus-range" or, failing that, from "bus-range" minus
 * the ranges already consumed by child pci-pci bridges.
 */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct busnum_ctrl ctrl;
	int circular_count;
	int rval = NDI_SUCCESS;

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci/pci-ex bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from pci nexus driver.
	 * However that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
	 * are updated to call pci_resource_setup this portion of the
	 * code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		/* an existing MEM map means setup already ran for this dip */
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}


	/*
	 * Create empty resource maps first.
	 *
	 * NOTE: If all the allocated resources are already assigned to
	 * device(s) in the hot plug slot then "available" property may not
	 * be present. But, subsequent hot plug operation may unconfigure
	 * the device in the slot and try to free up it's resources. So,
	 * at the minimum we should create empty maps here.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
		/*
		 * create the available resource list for both memory and
		 * io space
		 */
		rcount = rlen / sizeof (pci_regspec_t);
		for (i = 0; i < rcount; i++) {
		    switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
		    case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
			(void) ndi_ra_free(dip,
			    (uint64_t)regs[i].pci_phys_low,
			    (uint64_t)regs[i].pci_size_low,
			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
			    0);
			break;
		    case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
			/* 64-bit ranges: recombine the split hi/lo words */
			(void) ndi_ra_free(dip,
			    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
			    ((uint64_t)(regs[i].pci_phys_low)),
			    ((uint64_t)(regs[i].pci_size_hi) << 32) |
			    ((uint64_t)(regs[i].pci_size_low)),
			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
			    0);
			break;
		    case PCI_REG_ADDR_G(PCI_ADDR_IO):
			(void) ndi_ra_free(dip,
			    (uint64_t)regs[i].pci_phys_low,
			    (uint64_t)regs[i].pci_size_low,
			    NDI_RA_TYPE_IO,
			    0);
			break;
		    case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
			/* config space is not managed here */
			break;
		    default:
			cmn_err(CE_WARN,
			    "pci_resource_setup: bad addr type: %x\n",
			    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
			break;
		    }
		}
		kmem_free(regs, rlen);
	}

	/*
	 * update resource map for available bus numbers if the node
	 * has available-bus-range or bus-range property.
	 *
	 * NOTE(review): reads into the file-scope pci_bus_range buffer;
	 * assumes callers of pci_resource_setup are serialized -- confirm.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property. If, instead,
		 * we have a bus-range property we add all the bus numbers
		 * in that range to the free list but we must then scan
		 * for pci-pci bridges on this bus to find out the if there
		 * are any of those bus numbers already in use. If so, we can
		 * reclaim them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
		    &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
1089 
1090 /*
1091  * If the device is a PCI bus device (i.e bus-range property exists) then
1092  * claim the bus numbers used by the device from the specified bus
1093  * resource map.
1094  */
1095 static int
1096 claim_pci_busnum(dev_info_t *dip, void *arg)
1097 {
1098 	struct bus_range pci_bus_range;
1099 	struct busnum_ctrl *ctrl;
1100 	ndi_ra_request_t req;
1101 	char bus_type[16] = "(unknown)";
1102 	int len;
1103 	uint64_t base;
1104 	uint64_t retlen;
1105 
1106 	ctrl = (struct busnum_ctrl *)arg;
1107 
1108 	/* check if this is a PCI bus node */
1109 	len = sizeof (bus_type);
1110 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1111 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1112 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1113 		return (DDI_WALK_PRUNECHILD);
1114 
1115 	/* it is not a pci/pci-ex bus type */
1116 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1117 		return (DDI_WALK_PRUNECHILD);
1118 
1119 	/* look for the bus-range property */
1120 	len = sizeof (struct bus_range);
1121 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1122 	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1123 		if ((pci_bus_range.lo >= ctrl->range->lo) &&
1124 		    (pci_bus_range.hi <= ctrl->range->hi)) {
1125 
1126 			/* claim the bus range from the bus resource map */
1127 			bzero((caddr_t)&req, sizeof (req));
1128 			req.ra_addr = (uint64_t)pci_bus_range.lo;
1129 			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1130 			req.ra_len = (uint64_t)pci_bus_range.hi -
1131 			    (uint64_t)pci_bus_range.lo + 1;
1132 			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1133 			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1134 				return (DDI_WALK_PRUNECHILD);
1135 		}
1136 	}
1137 
1138 	/*
1139 	 * Error return.
1140 	 */
1141 	ctrl->rv = DDI_FAILURE;
1142 	return (DDI_WALK_TERMINATE);
1143 }
1144 
1145 void
1146 pci_resource_destroy(dev_info_t *dip)
1147 {
1148 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1149 
1150 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1151 
1152 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1153 
1154 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1155 }
1156 
1157 
1158 int
1159 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1160 {
1161 	int i;
1162 
1163 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1164 		return (NDI_FAILURE);
1165 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1166 		return (NDI_FAILURE);
1167 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1168 		return (NDI_FAILURE);
1169 
1170 	/* for each entry in the PCI "available" property */
1171 	for (i = 0; i < entries; i++, avail_p++) {
1172 		if (avail_p->pci_phys_hi == -1u)
1173 			goto err;
1174 
1175 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1176 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1177 			(void) ndi_ra_free(dip,
1178 				(uint64_t)avail_p->pci_phys_low,
1179 				(uint64_t)avail_p->pci_size_low,
1180 				(avail_p->pci_phys_hi &
1181 					PCI_REG_PF_M) ?
1182 					NDI_RA_TYPE_PCI_PREFETCH_MEM :
1183 					NDI_RA_TYPE_MEM,
1184 				0);
1185 			}
1186 			break;
1187 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1188 			(void) ndi_ra_free(dip,
1189 				(uint64_t)avail_p->pci_phys_low,
1190 				(uint64_t)avail_p->pci_size_low,
1191 				NDI_RA_TYPE_IO,
1192 				0);
1193 			break;
1194 		default:
1195 			goto err;
1196 		}
1197 	}
1198 #ifdef BUSRA_DEBUG
1199 	if (busra_debug) {
1200 		(void) ra_dump_all(NULL, dip);
1201 	}
1202 #endif
1203 	return (NDI_SUCCESS);
1204 
1205 err:
1206 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1207 		i, avail_p->pci_phys_hi);
1208 	return (NDI_FAILURE);
1209 }
1210