xref: /titanic_51/usr/src/uts/common/io/busra.c (revision 1a7c1b724419d3cb5fa6eea75123c6b2060ba31b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #if defined(DEBUG)
30 #define	BUSRA_DEBUG
31 #endif
32 
33 /*
34  * This module provides a set of resource management interfaces
35  * to manage bus resources globally in the system.
36  *
37  * The bus nexus drivers are typically responsible to setup resource
38  * maps for the bus resources available for a bus instance. However
39  * this module also provides resource setup functions for PCI bus
40  * (used by both SPARC and X86 platforms) and ISA bus instances (used
41  * only for X86 platforms).
42  */
43 
44 #include <sys/types.h>
45 #include <sys/systm.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ndi_impldefs.h>
51 #include <sys/kmem.h>
52 #include <sys/pctypes.h>
53 #include <sys/modctl.h>
54 #include <sys/debug.h>
55 #include <sys/spl.h>
56 #include <sys/pci.h>
57 #include <sys/autoconf.h>
58 
59 #if defined(BUSRA_DEBUG)
60 int busra_debug = 0;
61 #define	DEBUGPRT \
62 	if (busra_debug) cmn_err
63 
64 #else
65 #define	DEBUGPRT \
66 	if (0) cmn_err
67 #endif
68 
69 
70 /*
71  * global mutex that protects the global list of resource maps.
72  */
73 kmutex_t ra_lock;
74 
75 /*
76  * basic resource element
77  */
struct ra_resource {
	struct ra_resource *ra_next;	/* next free range, ascending base */
	uint64_t	ra_base;	/* start of the free range */
	uint64_t 	ra_len;		/* length of the free range */
};
83 
84 /*
85  * link list element for the list of dips (and their resource ranges)
86  * for a particular resource type.
87  * ra_rangeset points to the list of resources available
88  * for this type and this dip.
89  */
struct ra_dip_type  {
	struct ra_dip_type *ra_next;	/* next dip registered for this type */
	struct ra_resource  *ra_rangeset;	/* free ranges for this dip */
	dev_info_t *ra_dip;		/* devinfo node owning the ranges */
};
95 
96 
97 /*
98  * link list element for list of types resources. Each element
99  * has all resources for a particular type.
100  */
struct ra_type_map {
	struct ra_type_map *ra_next;	/* next type on the global list */
	struct ra_dip_type *ra_dip_list;	/* dips registered for type */
	char *type;		/* type name (kmem-allocated copy) */
};
106 
107 
108 /*
109  * place holder to keep the head of the whole global list.
110  * the address of the first typemap would be stored in it.
111  */
112 static struct ra_type_map	*ra_map_list_head = NULL;
113 
114 
115 /*
116  * This is the loadable module wrapper.
117  * It is essentially boilerplate so isn't documented
118  */
119 extern struct mod_ops mod_miscops;
120 
121 #ifdef BUSRA_DEBUG
122 void ra_dump_all();
123 #endif
124 
125 /* internal function prototypes */
126 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
127     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
128     uint32_t flag);
129 static int isnot_pow2(uint64_t value);
130 static int claim_pci_busnum(dev_info_t *dip, void *arg);
131 static int ra_map_exist(dev_info_t *dip, char *type);
132 
133 
/*
 * Singly-linked list helpers for the resource/dip/type lists.
 * 'prev' is the address of the link that points at the insertion or
 * removal point; 'el' is the element to insert or unlink.
 *
 * Wrapped in do { } while (0) so each invocation behaves as a single
 * statement (the original two-statement RA_INSERT silently broke when
 * used in an unbraced if/else), and arguments are parenthesized to
 * guard against surprising expansions.
 */
#define	RA_INSERT(prev, el) do { \
	(el)->ra_next = *(prev); \
	*(prev) = (el); \
} while (0)

#define	RA_REMOVE(prev, el) do { \
	*(prev) = (el)->ra_next; \
} while (0)
140 
141 
142 static struct modlmisc modlmisc = {
143 	&mod_miscops,		/* Type of module. This one is a module */
144 	"Bus Resource Allocator (BUSRA) %I%",	/* Name of the module. */
145 };
146 
147 static struct modlinkage modlinkage = {
148 	MODREV_1, (void *)&modlmisc, NULL
149 };
150 
151 int
152 _init()
153 {
154 	int	ret;
155 
156 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
157 		(void *)(intptr_t)__ipltospl(SPL7 - 1));
158 	if ((ret = mod_install(&modlinkage)) != 0) {
159 		mutex_destroy(&ra_lock);
160 	}
161 	return (ret);
162 }
163 
164 int
165 _fini()
166 {
167 	int	ret;
168 
169 	mutex_enter(&ra_lock);
170 
171 	if (ra_map_list_head != NULL) {
172 		mutex_exit(&ra_lock);
173 		return (EBUSY);
174 	}
175 
176 	ret = mod_remove(&modlinkage);
177 
178 	mutex_exit(&ra_lock);
179 
180 	if (ret == 0)
181 		mutex_destroy(&ra_lock);
182 
183 	return (ret);
184 }
185 
/*
 * Module information entry point; reports this module's linkage data.
 */
int
_info(struct modinfo *modinfop)

{
	return (mod_info(&modlinkage, modinfop));
}
192 
193 /*
194  * set up an empty resource map for a given type and dip
195  */
196 int
197 ndi_ra_map_setup(dev_info_t *dip, char *type)
198 {
199 	struct ra_type_map  *typemapp;
200 	struct ra_dip_type  *dipmap;
201 	struct ra_dip_type  **backdip;
202 	struct ra_type_map  **backtype;
203 
204 
205 	mutex_enter(&ra_lock);
206 
207 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
208 
209 	if (dipmap == NULL) {
210 		if (backtype == NULL) {
211 			typemapp = (struct ra_type_map *)
212 			kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
213 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
214 				KM_SLEEP);
215 			(void) strcpy(typemapp->type, type);
216 			RA_INSERT(&ra_map_list_head, typemapp);
217 		} else {
218 			typemapp = *backtype;
219 		}
220 		if (backdip == NULL) {
221 			/* allocate and insert in list of dips for this type */
222 			dipmap = (struct ra_dip_type *)
223 			kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
224 			dipmap->ra_dip = dip;
225 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
226 		}
227 	}
228 
229 	mutex_exit(&ra_lock);
230 	return (NDI_SUCCESS);
231 }
232 
/*
 * destroys a resource map for a given dip and type
 *
 * Frees every ra_resource element on the dip's rangeset, unlinks and
 * frees the ra_dip_type node and, if this was the last dip registered
 * for the type, unlinks and frees the ra_type_map node as well.
 * Returns NDI_FAILURE if no map exists for this (dip, type) pair.
 */
int
ndi_ra_map_destroy(dev_info_t *dip, char *type)
{
	struct ra_dip_type	*dipmap;
	struct ra_dip_type	**backdip;
	struct ra_type_map  	**backtype, *typemap;
	struct ra_resource	*range;

	mutex_enter(&ra_lock);
	/* flag 0: exact dip match only, no walk up the device tree */
	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);

	if (dipmap == NULL) {
		mutex_exit(&ra_lock);
		return (NDI_FAILURE);
	}

	/*
	 * destroy all resources for this dip
	 * remove dip from type list
	 */
	ASSERT((backdip != NULL) && (backtype != NULL));
	while (dipmap->ra_rangeset != NULL) {
		range = dipmap->ra_rangeset;
		RA_REMOVE(&dipmap->ra_rangeset, range);
		kmem_free((caddr_t)range, sizeof (*range));
	}
	/* remove from dip list */
	RA_REMOVE(backdip, dipmap);
	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
	if ((*backtype)->ra_dip_list == NULL) {
		/*
		 * This was the last dip with this resource type.
		 * Remove the type from the global list.
		 */
		typemap = *backtype;
		RA_REMOVE(backtype, (*backtype));
		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
		kmem_free((caddr_t)typemap, sizeof (*typemap));
	}

	mutex_exit(&ra_lock);
	return (NDI_SUCCESS);
}
279 
280 static int
281 ra_map_exist(dev_info_t *dip, char *type)
282 {
283 	struct ra_dip_type  **backdip;
284 	struct ra_type_map  **backtype;
285 
286 	mutex_enter(&ra_lock);
287 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
288 		mutex_exit(&ra_lock);
289 		return (NDI_FAILURE);
290 	}
291 
292 	mutex_exit(&ra_lock);
293 	return (NDI_SUCCESS);
294 }
295 /*
296  * Find a dip map for the specified type, if NDI_RA_PASS will go up on dev tree
297  * if found, backdip and backtype will be updated to point to the previous
298  * dip in the list and previous type for this dip in the list.
299  * If no such type at all in the resource list both backdip and backtype
300  * will be null. If the type found but no dip, back dip will be null.
301  */
302 
static struct ra_dip_type *
find_dip_map_resources(dev_info_t *dip, char *type,
    struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
    uint32_t flag)
{
	struct ra_type_map **prevmap;
	struct ra_dip_type *dipmap, **prevdip;

	ASSERT(mutex_owned(&ra_lock));
	prevdip = NULL;
	dipmap = NULL;
	prevmap = &ra_map_list_head;

	/* locate the type map for 'type' on the global list */
	while (*prevmap) {
		if (strcmp((*prevmap)->type, type) == 0)
			break;
		prevmap = &(*prevmap)->ra_next;
	}

	if (*prevmap) {
		/*
		 * Search the type's dip list for this dip.  With
		 * NDI_RA_PASS set, retry with each ancestor of dip in
		 * turn until a match is found or the tree is exhausted.
		 */
		for (; dip != NULL; dip = ddi_get_parent(dip)) {
			prevdip = &(*prevmap)->ra_dip_list;
			dipmap = *prevdip;

			while (dipmap) {
				if (dipmap->ra_dip == dip)
					break;
				prevdip =  &dipmap->ra_next;
				dipmap = dipmap->ra_next;
			}

			if (dipmap != NULL) {
				/* found it */
				break;
			}

			if (!(flag & NDI_RA_PASS)) {
				break;
			}
		}
	}

	/*
	 * On success *backdip/*backtype point at the link pointers that
	 * reference the found nodes (suitable for RA_REMOVE); each is
	 * NULL when the corresponding node was not found.
	 */
	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
	*backdip = (dipmap == NULL) ?  NULL: prevdip;

	return (dipmap);
}
350 
/*
 * ndi_ra_free: return the range [base, base + len) to the resource map
 * of the given type for dip, merging it with adjacent free ranges.
 * The free list is kept sorted by ascending base address.  A free that
 * overlaps an existing free range is rejected with NDI_FAILURE (logged
 * to syslog only); freeing a zero-length range is a no-op.
 */
int
ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
    uint32_t flag)
{
	struct ra_dip_type *dipmap;
	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
	struct ra_resource  *mapp, **backp;
	uint64_t newend, mapend;
	struct ra_dip_type **backdip;
	struct ra_type_map **backtype;

	if (len == 0) {
		return (NDI_SUCCESS);
	}

	mutex_enter(&ra_lock);

	/* flag may include NDI_RA_PASS to search ancestors of dip */
	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
	    flag)) == NULL) {
		mutex_exit(&ra_lock);
		return (NDI_FAILURE);
	}

	mapp = dipmap->ra_rangeset;
	backp = &dipmap->ra_rangeset;

	/* now find where range lies and fix things up */
	newend = base + len;
	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
		mapend = mapp->ra_base + mapp->ra_len;

		/* check for overlap first */
		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
		    (base > mapp->ra_base && base < mapend)) {
			/* overlap with mapp */
			overlapmap = mapp;
			goto overlap;
		} else if ((base == mapend && mapp->ra_next) &&
		    (newend > mapp->ra_next->ra_base)) {
			/* overlap with mapp->ra_next */
			overlapmap = mapp->ra_next;
			goto overlap;
		}

		if (newend == mapp->ra_base) {
			/* simple - on front */
			mapp->ra_base = base;
			mapp->ra_len += len;
			/*
			 * don't need to check if it merges with
			 * previous since that would match on on end
			 */
			break;
		} else if (base == mapend) {
			/* simple - on end */
			mapp->ra_len += len;
			if (mapp->ra_next &&
			    (newend == mapp->ra_next->ra_base)) {
				/* merge with next node */
				oldmap = mapp->ra_next;
				mapp->ra_len += oldmap->ra_len;
				RA_REMOVE(&mapp->ra_next, oldmap);
				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
			}
			break;
		} else if (base < mapp->ra_base) {
			/* somewhere in between so just an insert */
			newmap = (struct ra_resource *)
				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
			newmap->ra_base = base;
			newmap->ra_len = len;
			RA_INSERT(backp, newmap);
			break;
		}
	}
	if (mapp == NULL) {
		/* stick on end */
		newmap = (struct ra_resource *)
				kmem_zalloc(sizeof (*newmap), KM_SLEEP);
		newmap->ra_base = base;
		newmap->ra_len = len;
		RA_INSERT(backp, newmap);
	}

	mutex_exit(&ra_lock);
	return (NDI_SUCCESS);

overlap:
	/*
	 * Bad free may happen on some x86 platforms with BIOS exporting
	 * incorrect resource maps. The system is otherwise functioning
	 * normally. We send such messages to syslog only.
	 */
	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
	    (void *)dip, type);
	/*
	 * NOTE(review): PRIX64 below prints the freed length in uppercase
	 * hex, unlike the other PRIx64 uses in this message — confirm
	 * whether that is intentional.
	 */
	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
	    PRIX64 " overlaps with existing resource base 0x%" PRIx64
	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
	    overlapmap->ra_len);

	mutex_exit(&ra_lock);
	return (NDI_FAILURE);
}
454 
/*
 * Check whether value is a power of 2.  Returns 0 if it is, 1 if not.
 *
 * The previous implementation compared ddi_ffs()/ddi_fls() on each
 * 32-bit half independently, which wrongly reported values with one
 * bit set in each half (e.g. 2^32 + 1) — and also 0 — as powers of
 * two, and was itself documented as unreliable for 64-bit values in a
 * 32-bit environment.  The standard value & (value - 1) test is exact
 * over the full 64-bit range and needs no DDI helpers.  Treating 0 as
 * "not a power of 2" also keeps NDI_RA_ALIGN_SIZE requests from
 * deriving an all-ones alignment mask from a zero length.
 */
static int
isnot_pow2(uint64_t value)
{
	if (value == 0)
		return (1);

	/* a power of two has exactly one bit set */
	return ((value & (value - 1)) != 0);
}
474 
475 static  void
476 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
477 	    uint64_t base, uint64_t len)
478 {
479 	struct ra_resource *newmap;
480 	uint64_t newlen;
481 
482 	if (base != mapp->ra_base) {
483 		/* in the middle or end */
484 		newlen = base - mapp->ra_base;
485 		if ((mapp->ra_len - newlen) == len) {
486 			/* on the end */
487 			mapp->ra_len = newlen;
488 		} else {
489 			/* in the middle */
490 			newmap = (struct ra_resource *)
491 					kmem_zalloc(sizeof (*newmap), KM_SLEEP);
492 			newmap->ra_base = base + len;
493 			newmap->ra_len = mapp->ra_len -
494 				(len + newlen);
495 			mapp->ra_len = newlen;
496 			RA_INSERT(&(mapp->ra_next), newmap);
497 		}
498 	} else {
499 		/* at the beginning */
500 		mapp->ra_base += len;
501 		mapp->ra_len -= len;
502 		if (mapp->ra_len == 0) {
503 			/* remove the whole node */
504 			RA_REMOVE(backp, mapp);
505 			kmem_free((caddr_t)mapp, sizeof (*mapp));
506 		}
507 	}
508 }
509 
510 int
511 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
512     uint64_t *retlenp, char *type, uint32_t flag)
513 {
514 	struct ra_dip_type *dipmap;
515 	struct ra_resource *mapp, **backp, **backlargestp;
516 	uint64_t mask = 0;
517 	uint64_t len, remlen, largestbase, largestlen;
518 	uint64_t base, oldbase, lower, upper;
519 	struct ra_dip_type  **backdip;
520 	struct ra_type_map  **backtype;
521 	int  rval = NDI_FAILURE;
522 
523 
524 	len = req->ra_len;
525 
526 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
527 		if (isnot_pow2(req->ra_len)) {
528 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
529 				PRIx64, req->ra_len);
530 			*retbasep = 0;
531 			*retlenp = 0;
532 			return (NDI_FAILURE);
533 		}
534 	}
535 
536 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
537 	    req->ra_align_mask;
538 
539 
540 	mutex_enter(&ra_lock);
541 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
542 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
543 		mutex_exit(&ra_lock);
544 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
545 		return (NDI_FAILURE);
546 	}
547 
548 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
549 			PRIx64 "\n", (void *)mapp, len, mask);
550 
551 	backp = &(dipmap->ra_rangeset);
552 	backlargestp = NULL;
553 	largestbase = 0;
554 	largestlen = 0;
555 
556 	lower = 0;
557 	upper = ~(uint64_t)0;
558 
559 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
560 		/* bounded so skip to first possible */
561 		lower = req->ra_boundbase;
562 		upper = req->ra_boundlen + lower;
563 		if ((upper == 0) || (upper < req->ra_boundlen))
564 			upper = ~(uint64_t)0;
565 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
566 				PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
567 				"\n", mapp->ra_len, len, mapp->ra_base, mask);
568 		for (; mapp != NULL &&
569 			(mapp->ra_base + mapp->ra_len) < lower;
570 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
571 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
572 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
573 				/*
574 				 * This elements end goes beyond max uint64_t.
575 				 * potential candidate, check end against lower
576 				 * would not be precise.
577 				 */
578 				break;
579 
580 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
581 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
582 			}
583 
584 	}
585 
586 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
587 		/* first fit - not user specified */
588 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
589 			"lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
590 		for (; mapp != NULL && mapp->ra_base <= upper;
591 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
592 
593 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
594 			    ", len = %" PRIx64 "", mapp->ra_len, len);
595 			base = mapp->ra_base;
596 			if (base < lower) {
597 				base = lower;
598 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
599 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
600 				    base, mapp->ra_base, mask);
601 			}
602 
603 			if ((base & mask) != 0) {
604 				oldbase = base;
605 				/*
606 				 * failed a critical constraint
607 				 * adjust and see if it still fits
608 				 */
609 				base = base & ~mask;
610 				base += (mask + 1);
611 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
612 					base);
613 
614 				/*
615 				 * Check to see if the new base is past
616 				 * the end of the resource.
617 				 */
618 				if (base >= (oldbase + mapp->ra_len + 1)) {
619 					continue;
620 				}
621 			}
622 
623 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
624 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
625 					remlen = upper - base;
626 				else
627 					remlen = mapp->ra_len -
628 						(base - mapp->ra_base);
629 
630 				if ((backlargestp == NULL) ||
631 				    (largestlen < remlen)) {
632 
633 					backlargestp = backp;
634 					largestbase = base;
635 					largestlen = remlen;
636 				}
637 			}
638 
639 			if (mapp->ra_len >= len) {
640 				/* a candidate -- apply constraints */
641 				if ((len > (mapp->ra_len -
642 				    (base - mapp->ra_base))) ||
643 				    ((len - 1 + base) > upper)) {
644 					continue;
645 				}
646 
647 				/* we have a fit */
648 
649 				DEBUGPRT(CE_CONT, "\thave a fit\n");
650 
651 				adjust_link(backp, mapp, base, len);
652 				rval = NDI_SUCCESS;
653 				break;
654 
655 			}
656 		}
657 	} else {
658 		/* want an exact value/fit */
659 		base = req->ra_addr;
660 		len = req->ra_len;
661 		for (; mapp != NULL && mapp->ra_base <= upper;
662 			backp = &(mapp->ra_next), mapp = mapp->ra_next) {
663 			if (base >= mapp->ra_base &&
664 			    ((base - mapp->ra_base) < mapp->ra_len)) {
665 				/*
666 				 * This is the node with he requested base in
667 				 * its range
668 				 */
669 				if ((len > mapp->ra_len) ||
670 				    (base - mapp->ra_base >
671 				    mapp->ra_len - len)) {
672 					/* length requirement not satisfied */
673 					if (req->ra_flags &
674 					    NDI_RA_ALLOC_PARTIAL_OK) {
675 						if ((upper - mapp->ra_base)
676 						    < mapp->ra_len)
677 							remlen = upper - base;
678 						else
679 							remlen =
680 							    mapp->ra_len -
681 							    (base -
682 							    mapp->ra_base);
683 					}
684 					backlargestp = backp;
685 					largestbase = base;
686 					largestlen = remlen;
687 					base = 0;
688 				} else {
689 					/* We have a match */
690 					adjust_link(backp, mapp, base, len);
691 					rval = NDI_SUCCESS;
692 				}
693 				break;
694 			}
695 		}
696 	}
697 
698 	if ((rval != NDI_SUCCESS) &&
699 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
700 	    (backlargestp != NULL)) {
701 		adjust_link(backlargestp, *backlargestp, largestbase,
702 			largestlen);
703 
704 		base = largestbase;
705 		len = largestlen;
706 		rval = NDI_RA_PARTIAL_REQ;
707 	}
708 
709 	mutex_exit(&ra_lock);
710 
711 	if (rval == NDI_FAILURE) {
712 		*retbasep = 0;
713 		*retlenp = 0;
714 	} else {
715 		*retbasep = base;
716 		*retlenp = len;
717 	}
718 	return (rval);
719 }
720 
721 /*
722  * isa_resource_setup
723  *	check for /used-resources and initialize
724  *	based on info there.  If no /used-resources,
725  *	fail.
726  */
int
isa_resource_setup()
{
	dev_info_t *used, *usedpdip;
	/*
	 * note that at this time bootconf creates 32 bit properties for
	 * io-space and device-memory
	 */
	struct iorange {
		uint32_t	base;
		uint32_t	len;
	} *iorange;
	struct memrange {
		uint32_t	base;
		uint32_t	len;
	} *memrange;
	uint32_t *irq;
	int proplen;
	int i, len;
	int maxrange;
	ndi_ra_request_t req;
	uint64_t retbase;
	uint64_t retlen;

	/* bail out if the firmware did not publish /used-resources */
	used = ddi_find_devinfo("used-resources", -1, 0);
	if (used == NULL) {
		DEBUGPRT(CE_CONT,
			"isa_resource_setup: used-resources not found");
		return (NDI_FAILURE);
	}

	/*
	 * initialize to all resources being present
	 * and then remove the ones in use.
	 */

	/* all maps are attached to the root node */
	usedpdip = ddi_root_node();

	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
	    (void *)used, (void *)usedpdip);

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize io space, highest end base is 0xffff */
	/* note that length is highest addr + 1 since starts from 0 */

	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);

	if (ddi_getlongprop(DDI_DEV_T_NONE, used, DDI_PROP_DONTPASS,
	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct iorange);
		/* remove the "used" I/O resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr =  (uint64_t)iorange[i].base;
			req.ra_len = (uint64_t)iorange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_IO, 0);
		}

		kmem_free((caddr_t)iorange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}
	/* initialize memory space where highest end base is 0xffffffff */
	/* note that length is highest addr + 1 since starts from 0 */
	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
	    NDI_RA_TYPE_MEM, 0);

	if (ddi_getlongprop(DDI_DEV_T_NONE, used, DDI_PROP_DONTPASS,
	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
		maxrange = proplen / sizeof (struct memrange);
		/* remove the "used" memory resources */
		for (i = 0; i < maxrange; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)memrange[i].base;
			req.ra_len = (uint64_t)memrange[i].len;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_MEM, 0);
		}

		kmem_free((caddr_t)memrange, proplen);
	}

	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* initialize the interrupt space */
	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);

#if defined(__i386) || defined(__amd64)
	/* on x86, IRQ 2 is the PIC cascade; reserve it permanently */
	bzero(&req, sizeof (req));
	req.ra_addr = 2;	/* 2 == 9 so never allow */
	req.ra_len = 1;
	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
	    NDI_RA_TYPE_INTR, 0);
#endif

	if (ddi_getlongprop(DDI_DEV_T_NONE, used, DDI_PROP_DONTPASS,
	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
		/* Initialize available interrupts by negating the used */
		len = (proplen / sizeof (uint32_t));
		for (i = 0; i < len; i++) {
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)irq[i];
			req.ra_len = 1;
			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
			    NDI_RA_TYPE_INTR, 0);
		}
		kmem_free((caddr_t)irq, proplen);
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, usedpdip);
	}
#endif
	return (NDI_SUCCESS);

}
856 
857 #ifdef BUSRA_DEBUG
858 void
859 ra_dump_all(char *type, dev_info_t *dip)
860 {
861 
862 	struct ra_type_map *typemap;
863 	struct ra_dip_type *dipmap;
864 	struct ra_resource *res;
865 
866 	typemap =  (struct ra_type_map *)ra_map_list_head;
867 
868 	for (; typemap != NULL; typemap = typemap->ra_next) {
869 		if (type != NULL) {
870 			if (strcmp(typemap->type, type) != 0)
871 				continue;
872 		}
873 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
874 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
875 			dipmap = dipmap->ra_next) {
876 			if (dip != NULL) {
877 				if ((dipmap->ra_dip) != dip)
878 					continue;
879 			}
880 			cmn_err(CE_CONT, "  dip is %p\n",
881 			    (void *)dipmap->ra_dip);
882 			for (res = dipmap->ra_rangeset; res != NULL;
883 				res = res->ra_next) {
884 				cmn_err(CE_CONT, "\t  range is %" PRIx64
885 				    " %" PRIx64 "\n", res->ra_base,
886 				    res->ra_len);
887 			}
888 			if (dip != NULL)
889 				break;
890 		}
891 		if (type != NULL)
892 			break;
893 	}
894 }
895 #endif
896 
struct bus_range {	/* 1275 "bus-range" property definition */
	uint32_t lo;	/* lowest bus number in the range */
	uint32_t hi;	/* highest bus number in the range */
} pci_bus_range;
/*
 * NOTE(review): pci_bus_range is a file-scope scratch buffer used by
 * pci_resource_setup(); it is neither static nor protected by ra_lock,
 * and is shadowed by a local of the same name in claim_pci_busnum() —
 * confirm single-threaded use before relying on it.
 */

/* context passed to the claim_pci_busnum() tree-walk callback */
struct busnum_ctrl {
	int	rv;	/* walk result: DDI_SUCCESS or DDI_FAILURE */
	dev_info_t *dip;	/* dip owning the PCI_BUSNUM map */
	struct	bus_range *range;	/* parent bus-range being claimed */
};
907 
908 
909 /*
910  * Setup resource map for the pci bus node based on the "available"
911  * property and "bus-range" property.
912  */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct busnum_ctrl ctrl;
	int circular_count;
	int rval = NDI_SUCCESS;

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) != DDI_SUCCESS)
		return (NDI_FAILURE);


	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from pci nexus driver.
	 * However that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
	 * are updated to call pci_resource_setup this portion of the
	 * code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		/* a MEM map already existing means we were called before */
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}


	/*
	 * NOTE(review): 'regs' is not freed on the early returns below.
	 * ndi_ra_map_setup() as written always succeeds, so these paths
	 * look unreachable — confirm before relying on that.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}


	/* create the available resource list for both memory and io space */
	rcount = rlen / sizeof (pci_regspec_t);
	for (i = 0; i < rcount; i++) {
		switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
		case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
			(void) ndi_ra_free(dip,
			    (uint64_t)regs[i].pci_phys_low,
			    (uint64_t)regs[i].pci_size_low,
			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
			    0);
			break;
		case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
			/* 64-bit address/size are split across two words */
			(void) ndi_ra_free(dip,
			    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
			    ((uint64_t)(regs[i].pci_phys_low)),
			    ((uint64_t)(regs[i].pci_size_hi) << 32) |
			    ((uint64_t)(regs[i].pci_size_low)),
			    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
			    0);
			break;
		case PCI_REG_ADDR_G(PCI_ADDR_IO):
			(void) ndi_ra_free(dip,
			    (uint64_t)regs[i].pci_phys_low,
			    (uint64_t)regs[i].pci_size_low,
			    NDI_RA_TYPE_IO,
			    0);
			break;
		case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
			/* config space is not managed by this allocator */
			break;
		default:
			cmn_err(CE_WARN,
			    "pci_resource_setup: bad addr type: %x\n",
			    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
			break;
		}
	}

	kmem_free((caddr_t)regs, rlen);

	/*
	 * Create resource map for available bus numbers if the node
	 * has available-bus-range or bus-range property.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property. If, instead,
		 * we have a bus-range property we add all the bus numbers
		 * in that range to the free list but we must then scan
		 * for pci-pci bridges on this bus to find out the if there
		 * are any of those bus numbers already in use. If so, we can
		 * reclaim them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_NONE, dip,
		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
		    &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
1084 
1085 /*
1086  * If the device is a PCI bus device (i.e bus-range property exists) then
1087  * claim the bus numbers used by the device from the specified bus
1088  * resource map.
1089  */
/*
 * ddi_walk_devs() callback: if dip is a PCI bus node whose "bus-range"
 * lies within the parent range being scanned (ctrl->range), allocate
 * (claim) those bus numbers from ctrl->dip's PCI_BUSNUM map so they
 * are not handed out again.  Children of a handled node are pruned
 * from the walk; any failure to claim terminates the walk with
 * ctrl->rv set to DDI_FAILURE.
 */
static int
claim_pci_busnum(dev_info_t *dip, void *arg)
{
	struct bus_range pci_bus_range;	/* shadows the file-scope buffer */
	struct busnum_ctrl *ctrl;
	ndi_ra_request_t req;
	char bus_type[16] = "(unknown)";
	int len;
	uint64_t base;
	uint64_t retlen;

	ctrl = (struct busnum_ctrl *)arg;

	/* check if this is a PCI bus node */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (DDI_WALK_PRUNECHILD);

	/* it is not a pci bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (DDI_WALK_PRUNECHILD);

	/* look for the bus-range property */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
		if ((pci_bus_range.lo >= ctrl->range->lo) &&
		    (pci_bus_range.hi <= ctrl->range->hi)) {

			/* claim the bus range from the bus resource map */
			bzero((caddr_t)&req, sizeof (req));
			req.ra_addr = (uint64_t)pci_bus_range.lo;
			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
			req.ra_len = (uint64_t)pci_bus_range.hi -
			    (uint64_t)pci_bus_range.lo + 1;
			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
				return (DDI_WALK_PRUNECHILD);
		}
	}

	/*
	 * Error return.
	 */
	ctrl->rv = DDI_FAILURE;
	return (DDI_WALK_TERMINATE);
}
1139 
1140 void
1141 pci_resource_destroy(dev_info_t *dip)
1142 {
1143 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1144 
1145 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1146 
1147 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1148 
1149 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1150 }
1151 
1152 
1153 int
1154 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1155 {
1156 	int i;
1157 
1158 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1159 		return (NDI_FAILURE);
1160 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1161 		return (NDI_FAILURE);
1162 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1163 		return (NDI_FAILURE);
1164 
1165 	/* for each entry in the PCI "available" property */
1166 	for (i = 0; i < entries; i++, avail_p++) {
1167 		if (avail_p->pci_phys_hi == -1u)
1168 			goto err;
1169 
1170 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1171 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1172 			(void) ndi_ra_free(dip,
1173 				(uint64_t)avail_p->pci_phys_low,
1174 				(uint64_t)avail_p->pci_size_low,
1175 				(avail_p->pci_phys_hi &
1176 					PCI_REG_PF_M) ?
1177 					NDI_RA_TYPE_PCI_PREFETCH_MEM :
1178 					NDI_RA_TYPE_MEM,
1179 				0);
1180 			}
1181 			break;
1182 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1183 			(void) ndi_ra_free(dip,
1184 				(uint64_t)avail_p->pci_phys_low,
1185 				(uint64_t)avail_p->pci_size_low,
1186 				NDI_RA_TYPE_IO,
1187 				0);
1188 			break;
1189 		default:
1190 			goto err;
1191 		}
1192 	}
1193 #ifdef BUSRA_DEBUG
1194 	if (busra_debug) {
1195 		(void) ra_dump_all(NULL, dip);
1196 	}
1197 #endif
1198 	return (NDI_SUCCESS);
1199 
1200 err:
1201 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1202 		i, avail_p->pci_phys_hi);
1203 	return (NDI_FAILURE);
1204 }
1205