xref: /illumos-gate/usr/src/uts/common/io/busra.c (revision 09ded43e87ee0122d9819045291371874138f121)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright 2012 Milan Jurik. All rights reserved.
25  * Copyright (c) 2016 by Delphix. All rights reserved.
26  * Copyright 2023 Oxide Computer Company
27  */
28 
29 #if defined(DEBUG)
30 #define	BUSRA_DEBUG
31 #endif
32 
33 /*
34  * This module provides a set of resource management interfaces
35  * to manage bus resources globally in the system.
36  *
37  * Bus nexus drivers are typically responsible for setting up the resource
38  * maps for the bus resources available for a bus instance. However,
39  * this module also provides resource setup functions for PCI bus
40  * (used by both SPARC and X86 platforms) and ISA bus instances (used
41  * only for X86 platforms).
42  */
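
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a nexus driver typically creates an empty map for a resource type and then
 * "frees" the full range it owns into that map, after which children can
 * allocate from it.  The dip, base and length below are hypothetical.
 *
 *	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
 *		return (DDI_FAILURE);
 *
 *	(void) ndi_ra_free(dip, 0xc0000000ULL, 0x10000000ULL,
 *	    NDI_RA_TYPE_MEM, 0);
 */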
43 
44 #include <sys/types.h>
45 #include <sys/systm.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ndi_impldefs.h>
51 #include <sys/kmem.h>
52 #include <sys/pctypes.h>
53 #include <sys/modctl.h>
54 #include <sys/debug.h>
55 #include <sys/spl.h>
56 #include <sys/pci.h>
57 #include <sys/autoconf.h>
58 
59 #if defined(BUSRA_DEBUG)
60 int busra_debug = 0;
61 #define	DEBUGPRT \
62 	if (busra_debug) cmn_err
63 
64 #else
65 #define	DEBUGPRT \
66 	if (0) cmn_err
67 #endif
68 
69 
70 /*
71  * global mutex that protects the global list of resource maps.
72  */
73 kmutex_t ra_lock;
74 
75 /*
76  * basic resource element
77  */
78 struct ra_resource {
79 	struct ra_resource *ra_next;
80 	uint64_t	ra_base;
81 	uint64_t	ra_len;
82 };
83 
84 /*
85  * linked list element for the list of dips (and their resource ranges)
86  * for a particular resource type.
87  * ra_rangeset points to the list of resources available
88  * for this type and this dip.
89  */
90 struct ra_dip_type  {
91 	struct ra_dip_type *ra_next;
92 	struct ra_resource  *ra_rangeset;
93 	dev_info_t *ra_dip;
94 };
95 
96 
97 /*
98  * linked list element for the list of resource types. Each element
99  * has all resources for a particular type.
100  */
101 struct ra_type_map {
102 	struct ra_type_map *ra_next;
103 	struct ra_dip_type *ra_dip_list;
104 	char *type;
105 };
106 
107 
108 /*
109  * placeholder that keeps the head of the whole global list.
110  * the address of the first typemap is stored in it.
111  */
112 static struct ra_type_map	*ra_map_list_head = NULL;
113 
114 
115 /*
116  * This is the loadable module wrapper.
117  * It is essentially boilerplate so isn't documented
118  */
119 extern struct mod_ops mod_miscops;
120 
121 #ifdef BUSRA_DEBUG
122 void ra_dump_all(char *, dev_info_t *);
123 #endif
124 
125 /* internal function prototypes */
126 static struct ra_dip_type *find_dip_map_resources(dev_info_t *dip, char *type,
127     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
128     uint32_t flag);
129 static int isnot_pow2(uint64_t value);
130 static int claim_pci_busnum(dev_info_t *dip, void *arg);
131 static int ra_map_exist(dev_info_t *dip, char *type);
132 
133 static int pci_get_available_prop(dev_info_t *dip, uint64_t base,
134     uint64_t len, char *busra_type);
135 static int pci_put_available_prop(dev_info_t *dip, uint64_t base,
136     uint64_t len, char *busra_type);
137 static uint32_t pci_type_ra2pci(char *type);
138 static boolean_t is_pcie_fabric(dev_info_t *dip);
139 
140 #define	PCI_ADDR_TYPE_MASK	(PCI_REG_ADDR_M | PCI_REG_PF_M)
141 #define	PCI_ADDR_TYPE_INVAL	0xffffffff
142 
143 #define	RA_INSERT(prev, el) \
144 	el->ra_next = *prev; \
145 	*prev = el;
146 
147 #define	RA_REMOVE(prev, el) \
148 	*prev = el->ra_next;
149 
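/*
 * Illustrative note (editorial addition): RA_INSERT and RA_REMOVE operate on
 * the singly linked lists above through "prev", the address of the pointer
 * that currently refers to the list position.  A minimal hypothetical
 * example:
 *
 *	struct ra_resource *head = NULL;
 *	struct ra_resource *el;
 *
 *	el = kmem_zalloc(sizeof (*el), KM_SLEEP);
 *	RA_INSERT(&head, el);		head now points to el
 *	RA_REMOVE(&head, el);		head is NULL again; el is not freed
 */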
150 
151 static struct modlmisc modlmisc = {
152 	&mod_miscops,		/* Type of module. This one is a module */
153 	"Bus Resource Allocator (BUSRA)",	/* Name of the module. */
154 };
155 
156 static struct modlinkage modlinkage = {
157 	MODREV_1, (void *)&modlmisc, NULL
158 };
159 
160 int
161 _init()
162 {
163 	int	ret;
164 
165 	mutex_init(&ra_lock, NULL, MUTEX_DRIVER,
166 	    (void *)(intptr_t)__ipltospl(SPL7 - 1));
167 	if ((ret = mod_install(&modlinkage)) != 0) {
168 		mutex_destroy(&ra_lock);
169 	}
170 	return (ret);
171 }
172 
173 int
174 _fini()
175 {
176 	int	ret;
177 
178 	mutex_enter(&ra_lock);
179 
180 	if (ra_map_list_head != NULL) {
181 		mutex_exit(&ra_lock);
182 		return (EBUSY);
183 	}
184 
185 	ret = mod_remove(&modlinkage);
186 
187 	mutex_exit(&ra_lock);
188 
189 	if (ret == 0)
190 		mutex_destroy(&ra_lock);
191 
192 	return (ret);
193 }
194 
195 int
196 _info(struct modinfo *modinfop)
197 {
198 	return (mod_info(&modlinkage, modinfop));
199 }
200 
201 /*
202  * set up an empty resource map for a given type and dip
203  */
204 int
205 ndi_ra_map_setup(dev_info_t *dip, char *type)
206 {
207 	struct ra_type_map  *typemapp;
208 	struct ra_dip_type  *dipmap;
209 	struct ra_dip_type  **backdip;
210 	struct ra_type_map  **backtype;
211 
212 
213 	mutex_enter(&ra_lock);
214 
215 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
216 
217 	if (dipmap == NULL) {
218 		if (backtype == NULL) {
219 			typemapp = (struct ra_type_map *)
220 			    kmem_zalloc(sizeof (*typemapp), KM_SLEEP);
221 			typemapp->type = (char *)kmem_zalloc(strlen(type) + 1,
222 			    KM_SLEEP);
223 			(void) strcpy(typemapp->type, type);
224 			RA_INSERT(&ra_map_list_head, typemapp);
225 		} else {
226 			typemapp = *backtype;
227 		}
228 		if (backdip == NULL) {
229 			/* allocate and insert in list of dips for this type */
230 			dipmap = (struct ra_dip_type *)
231 			    kmem_zalloc(sizeof (*dipmap), KM_SLEEP);
232 			dipmap->ra_dip = dip;
233 			RA_INSERT(&typemapp->ra_dip_list, dipmap);
234 		}
235 	}
236 
237 	mutex_exit(&ra_lock);
238 	return (NDI_SUCCESS);
239 }
240 
241 /*
242  * destroys a resource map for a given dip and type
243  */
244 int
245 ndi_ra_map_destroy(dev_info_t *dip, char *type)
246 {
247 	struct ra_dip_type	*dipmap;
248 	struct ra_dip_type	**backdip;
249 	struct ra_type_map	**backtype, *typemap;
250 	struct ra_resource	*range;
251 
252 	mutex_enter(&ra_lock);
253 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, 0);
254 
255 	if (dipmap == NULL) {
256 		mutex_exit(&ra_lock);
257 		return (NDI_FAILURE);
258 	}
259 
260 	/*
261 	 * destroy all resources for this dip
262 	 * remove dip from type list
263 	 */
264 	ASSERT((backdip != NULL) && (backtype != NULL));
265 	while (dipmap->ra_rangeset != NULL) {
266 		range = dipmap->ra_rangeset;
267 		RA_REMOVE(&dipmap->ra_rangeset, range);
268 		kmem_free((caddr_t)range, sizeof (*range));
269 	}
270 	/* remove from dip list */
271 	RA_REMOVE(backdip, dipmap);
272 	kmem_free((caddr_t)dipmap, sizeof (*dipmap));
273 	if ((*backtype)->ra_dip_list == NULL) {
274 		/*
275 		 * This was the last dip with this resource type.
276 		 * Remove the type from the global list.
277 		 */
278 		typemap = *backtype;
279 		RA_REMOVE(backtype, (*backtype));
280 		kmem_free((caddr_t)typemap->type, strlen(typemap->type) + 1);
281 		kmem_free((caddr_t)typemap, sizeof (*typemap));
282 	}
283 
284 	mutex_exit(&ra_lock);
285 	return (NDI_SUCCESS);
286 }
287 
288 static int
289 ra_map_exist(dev_info_t *dip, char *type)
290 {
291 	struct ra_dip_type  **backdip;
292 	struct ra_type_map  **backtype;
293 
294 	mutex_enter(&ra_lock);
295 	if (find_dip_map_resources(dip, type, &backdip, &backtype, 0) == NULL) {
296 		mutex_exit(&ra_lock);
297 		return (NDI_FAILURE);
298 	}
299 
300 	mutex_exit(&ra_lock);
301 	return (NDI_SUCCESS);
302 }
303 /*
304  * Find a dip map for the specified type; if NDI_RA_PASS is set, walk up the
305  * device tree. If found, backdip and backtype are updated to point to the
306  * previous dip in the list and the previous type for this dip in the list.
307  * If the type is not in the resource list at all, both backdip and backtype
308  * will be null. If the type is found but not the dip, backdip will be null.
309  */
310 
311 static struct ra_dip_type *
312 find_dip_map_resources(dev_info_t *dip, char *type,
313     struct ra_dip_type ***backdip, struct ra_type_map ***backtype,
314     uint32_t flag)
315 {
316 	struct ra_type_map **prevmap;
317 	struct ra_dip_type *dipmap, **prevdip;
318 
319 	ASSERT(mutex_owned(&ra_lock));
320 	prevdip = NULL;
321 	dipmap = NULL;
322 	prevmap = &ra_map_list_head;
323 
324 	while (*prevmap) {
325 		if (strcmp((*prevmap)->type, type) == 0)
326 			break;
327 		prevmap = &(*prevmap)->ra_next;
328 	}
329 
330 	if (*prevmap) {
331 		for (; dip != NULL; dip = ddi_get_parent(dip)) {
332 			prevdip = &(*prevmap)->ra_dip_list;
333 			dipmap = *prevdip;
334 
335 			while (dipmap) {
336 				if (dipmap->ra_dip == dip)
337 					break;
338 				prevdip =  &dipmap->ra_next;
339 				dipmap = dipmap->ra_next;
340 			}
341 
342 			if (dipmap != NULL) {
343 				/* found it */
344 				break;
345 			}
346 
347 			if (!(flag & NDI_RA_PASS)) {
348 				break;
349 			}
350 		}
351 	}
352 
353 	*backtype = (*prevmap == NULL) ?  NULL: prevmap;
354 	*backdip = (dipmap == NULL) ?  NULL: prevdip;
355 
356 	return (dipmap);
357 }
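
/*
 * Illustrative note (editorial addition): with NDI_RA_PASS set in the flags,
 * a lookup that does not find a map on the given dip keeps walking up the
 * device tree, so a request against a leaf dip can be satisfied from the
 * closest ancestor that owns a map of that type.  A hypothetical call:
 *
 *	(void) ndi_ra_alloc(child_dip, &req, &base, &len,
 *	    NDI_RA_TYPE_MEM, NDI_RA_PASS);
 */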
358 
359 int
360 ndi_ra_free(dev_info_t *dip, uint64_t base, uint64_t len, char *type,
361     uint32_t flag)
362 {
363 	struct ra_dip_type *dipmap;
364 	struct ra_resource *newmap, *overlapmap, *oldmap = NULL;
365 	struct ra_resource  *mapp, **backp;
366 	uint64_t newend, mapend;
367 	struct ra_dip_type **backdip;
368 	struct ra_type_map **backtype;
369 
370 	if (len == 0) {
371 		return (NDI_SUCCESS);
372 	}
373 
374 	mutex_enter(&ra_lock);
375 
376 	if ((dipmap = find_dip_map_resources(dip, type, &backdip, &backtype,
377 	    flag)) == NULL) {
378 		mutex_exit(&ra_lock);
379 		return (NDI_FAILURE);
380 	}
381 
382 	mapp = dipmap->ra_rangeset;
383 	backp = &dipmap->ra_rangeset;
384 
385 	/* now find where range lies and fix things up */
386 	newend = base + len;
387 	for (; mapp != NULL; backp = &(mapp->ra_next), mapp = mapp->ra_next) {
388 		mapend = mapp->ra_base + mapp->ra_len;
389 
390 		/* check for overlap first */
391 		if ((base <= mapp->ra_base && newend > mapp->ra_base) ||
392 		    (base > mapp->ra_base && base < mapend)) {
393 			/* overlap with mapp */
394 			overlapmap = mapp;
395 			goto overlap;
396 		} else if ((base == mapend && mapp->ra_next) &&
397 		    (newend > mapp->ra_next->ra_base)) {
398 			/* overlap with mapp->ra_next */
399 			overlapmap = mapp->ra_next;
400 			goto overlap;
401 		}
402 
403 		if (newend == mapp->ra_base) {
404 			/* simple - on front */
405 			mapp->ra_base = base;
406 			mapp->ra_len += len;
407 			/*
408 			 * don't need to check if it merges with
409 			 * previous since that would have matched the "on end" case
410 			 */
411 			break;
412 		} else if (base == mapend) {
413 			/* simple - on end */
414 			mapp->ra_len += len;
415 			if (mapp->ra_next &&
416 			    (newend == mapp->ra_next->ra_base)) {
417 				/* merge with next node */
418 				oldmap = mapp->ra_next;
419 				mapp->ra_len += oldmap->ra_len;
420 				RA_REMOVE(&mapp->ra_next, oldmap);
421 				kmem_free((caddr_t)oldmap, sizeof (*oldmap));
422 			}
423 			break;
424 		} else if (base < mapp->ra_base) {
425 			/* somewhere in between so just an insert */
426 			newmap = (struct ra_resource *)
427 			    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
428 			newmap->ra_base = base;
429 			newmap->ra_len = len;
430 			RA_INSERT(backp, newmap);
431 			break;
432 		}
433 	}
434 	if (mapp == NULL) {
435 		/* stick on end */
436 		newmap = (struct ra_resource *)
437 		    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
438 		newmap->ra_base = base;
439 		newmap->ra_len = len;
440 		RA_INSERT(backp, newmap);
441 	}
442 
443 	mutex_exit(&ra_lock);
444 
445 	/*
446 	 * Update dip's "available" property, adding this piece of
447 	 * resource to the pool.
448 	 */
449 	(void) pci_put_available_prop(dipmap->ra_dip, base, len, type);
450 done:
451 	return (NDI_SUCCESS);
452 
453 overlap:
454 	/*
455 	 * Bad free may happen on some x86 platforms with BIOS exporting
456 	 * incorrect resource maps. The system is otherwise functioning
457 	 * normally. We send such messages to syslog only.
458 	 */
459 	cmn_err(CE_NOTE, "!ndi_ra_free: bad free, dip %p, resource type %s \n",
460 	    (void *)dip, type);
461 	cmn_err(CE_NOTE, "!ndi_ra_free: freeing base 0x%" PRIx64 ", len 0x%"
462 	    PRIX64 " overlaps with existing resource base 0x%" PRIx64
463 	    ", len 0x%" PRIx64 "\n", base, len, overlapmap->ra_base,
464 	    overlapmap->ra_len);
465 
466 	mutex_exit(&ra_lock);
467 	return (NDI_FAILURE);
468 }
469 
470 /* check to see if value is power of 2 or not. */
471 static int
472 isnot_pow2(uint64_t value)
473 {
474 	uint32_t low;
475 	uint32_t hi;
476 
477 	low = value & 0xffffffff;
478 	hi = value >> 32;
479 
480 	/*
481 	 * ddi_ffs and ddi_fls take long values, so in a 32-bit environment
482 	 * they won't work correctly for 64-bit values
483 	 */
484 	if ((ddi_ffs(low) == ddi_fls(low)) &&
485 	    (ddi_ffs(hi) == ddi_fls(hi)))
486 		return (0);
487 	return (1);
488 }
489 
490 static  void
491 adjust_link(struct ra_resource **backp, struct ra_resource *mapp,
492     uint64_t base, uint64_t len)
493 {
494 	struct ra_resource *newmap;
495 	uint64_t newlen;
496 
497 	if (base != mapp->ra_base) {
498 		/* in the middle or end */
499 		newlen = base - mapp->ra_base;
500 		if ((mapp->ra_len - newlen) == len) {
501 			/* on the end */
502 			mapp->ra_len = newlen;
503 		} else {
504 			/* in the middle */
505 			newmap = (struct ra_resource *)
506 			    kmem_zalloc(sizeof (*newmap), KM_SLEEP);
507 			newmap->ra_base = base + len;
508 			newmap->ra_len = mapp->ra_len - (len + newlen);
509 			mapp->ra_len = newlen;
510 			RA_INSERT(&(mapp->ra_next), newmap);
511 		}
512 	} else {
513 		/* at the beginning */
514 		mapp->ra_base += len;
515 		mapp->ra_len -= len;
516 		if (mapp->ra_len == 0) {
517 			/* remove the whole node */
518 			RA_REMOVE(backp, mapp);
519 			kmem_free((caddr_t)mapp, sizeof (*mapp));
520 		}
521 	}
522 }
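
/*
 * Worked example (editorial addition): if a free range covers
 * [0x1000, 0x2000) and adjust_link() carves out base 0x1400, len 0x200,
 * the node is split in the middle: the existing node shrinks to
 * [0x1000, 0x1400) and a new node is inserted for [0x1600, 0x2000).
 * Carving from the very front or the very end only shrinks the existing
 * node, and a node whose length drops to zero is unlinked and freed.
 */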
523 
524 int
525 ndi_ra_alloc(dev_info_t *dip, ndi_ra_request_t *req, uint64_t *retbasep,
526     uint64_t *retlenp, char *type, uint32_t flag)
527 {
528 	struct ra_dip_type *dipmap;
529 	struct ra_resource *mapp, **backp, **backlargestp;
530 	uint64_t mask = 0;
531 	uint64_t len, remlen, largestbase, largestlen;
532 	uint64_t base, oldbase, lower, upper;
533 	struct ra_dip_type  **backdip;
534 	struct ra_type_map  **backtype;
535 	int  rval = NDI_FAILURE;
536 
537 
538 	len = req->ra_len;
539 
540 	if (req->ra_flags & NDI_RA_ALIGN_SIZE) {
541 		if (isnot_pow2(req->ra_len)) {
542 			DEBUGPRT(CE_WARN, "ndi_ra_alloc: bad length(pow2) 0x%"
543 			    PRIx64, req->ra_len);
544 			*retbasep = 0;
545 			*retlenp = 0;
546 			return (NDI_FAILURE);
547 		}
548 	}
549 
550 	mask = (req->ra_flags & NDI_RA_ALIGN_SIZE) ? (len - 1) :
551 	    req->ra_align_mask;
552 
553 
554 	mutex_enter(&ra_lock);
555 	dipmap = find_dip_map_resources(dip, type, &backdip, &backtype, flag);
556 	if ((dipmap == NULL) || ((mapp = dipmap->ra_rangeset) == NULL)) {
557 		mutex_exit(&ra_lock);
558 		DEBUGPRT(CE_CONT, "ndi_ra_alloc no map found for this type\n");
559 		return (NDI_FAILURE);
560 	}
561 
562 	DEBUGPRT(CE_CONT, "ndi_ra_alloc: mapp = %p len=%" PRIx64 ", mask=%"
563 	    PRIx64 "\n", (void *)mapp, len, mask);
564 
565 	backp = &(dipmap->ra_rangeset);
566 	backlargestp = NULL;
567 	largestbase = 0;
568 	largestlen = 0;
569 
570 	lower = 0;
571 	upper = ~(uint64_t)0;
572 
573 	if (req->ra_flags & NDI_RA_ALLOC_BOUNDED) {
574 		/* bounded so skip to first possible */
575 		lower = req->ra_boundbase;
576 		upper = req->ra_boundlen + lower;
577 		if ((upper == 0) || (upper < req->ra_boundlen))
578 			upper = ~(uint64_t)0;
579 		DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64 ", len = %"
580 		    PRIx64 " ra_base=%" PRIx64 ", mask=%" PRIx64
581 		    "\n", mapp->ra_len, len, mapp->ra_base, mask);
582 		for (; mapp != NULL && (mapp->ra_base + mapp->ra_len) < lower;
583 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
584 			if (((mapp->ra_len + mapp->ra_base) == 0) ||
585 			    ((mapp->ra_len + mapp->ra_base) < mapp->ra_len))
586 				/*
587 				 * This element's end goes beyond max uint64_t;
588 				 * it is a potential candidate, and checking its
589 				 * end against lower would not be precise.
590 				 */
591 				break;
592 
593 			DEBUGPRT(CE_CONT, " ra_len = %" PRIx64 ", ra_base=%"
594 			    PRIx64 "\n", mapp->ra_len, mapp->ra_base);
595 			}
596 
597 	}
598 
599 	if (!(req->ra_flags & NDI_RA_ALLOC_SPECIFIED)) {
600 		/* first fit - not user specified */
601 		DEBUGPRT(CE_CONT, "ndi_ra_alloc(unspecified request)"
602 		    "lower=%" PRIx64 ", upper=%" PRIx64 "\n", lower, upper);
603 		for (; mapp != NULL && mapp->ra_base <= upper;
604 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
605 
606 			DEBUGPRT(CE_CONT, "ndi_ra_alloc: ra_len = %" PRIx64
607 			    ", len = %" PRIx64 "", mapp->ra_len, len);
608 			base = mapp->ra_base;
609 			if (base < lower) {
610 				base = lower;
611 				DEBUGPRT(CE_CONT, "\tbase=%" PRIx64
612 				    ", ra_base=%" PRIx64 ", mask=%" PRIx64,
613 				    base, mapp->ra_base, mask);
614 			}
615 
616 			if ((base & mask) != 0) {
617 				oldbase = base;
618 				/*
619 				 * failed a critical constraint
620 				 * adjust and see if it still fits
621 				 */
622 				base = base & ~mask;
623 				base += (mask + 1);
624 				DEBUGPRT(CE_CONT, "\tnew base=%" PRIx64 "\n",
625 				    base);
626 
627 				/*
628 				 * Check to see if the new base is past
629 				 * the end of the resource.
630 				 */
631 				if (base >= (oldbase + mapp->ra_len + 1)) {
632 					continue;
633 				}
634 			}
635 
636 			if (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) {
637 				if ((upper - mapp->ra_base)  <  mapp->ra_len)
638 					remlen = upper - base;
639 				else
640 					remlen = mapp->ra_len -
641 					    (base - mapp->ra_base);
642 
643 				if ((backlargestp == NULL) ||
644 				    (largestlen < remlen)) {
645 
646 					backlargestp = backp;
647 					largestbase = base;
648 					largestlen = remlen;
649 				}
650 			}
651 
652 			if (mapp->ra_len >= len) {
653 				/* a candidate -- apply constraints */
654 				if ((len > (mapp->ra_len -
655 				    (base - mapp->ra_base))) ||
656 				    ((len - 1 + base) > upper)) {
657 					continue;
658 				}
659 
660 				/* we have a fit */
661 
662 				DEBUGPRT(CE_CONT, "\thave a fit\n");
663 
664 				adjust_link(backp, mapp, base, len);
665 				rval = NDI_SUCCESS;
666 				break;
667 
668 			}
669 		}
670 	} else {
671 		/* want an exact value/fit */
672 		base = req->ra_addr;
673 		len = req->ra_len;
674 		for (; mapp != NULL && mapp->ra_base <= upper;
675 		    backp = &(mapp->ra_next), mapp = mapp->ra_next) {
676 			if (base >= mapp->ra_base &&
677 			    ((base - mapp->ra_base) < mapp->ra_len)) {
678 				/*
679 				 * This is the node with the requested base in
680 				 * its range
681 				 */
682 				if ((len > mapp->ra_len) ||
683 				    (base - mapp->ra_base >
684 				    mapp->ra_len - len)) {
685 					/* length requirement not satisfied */
686 					if (req->ra_flags &
687 					    NDI_RA_ALLOC_PARTIAL_OK) {
688 						if ((upper - mapp->ra_base)
689 						    < mapp->ra_len)
690 							remlen = upper - base;
691 						else
692 							remlen =
693 							    mapp->ra_len -
694 							    (base -
695 							    mapp->ra_base);
696 					}
697 					backlargestp = backp;
698 					largestbase = base;
699 					largestlen = remlen;
700 					base = 0;
701 				} else {
702 					/* We have a match */
703 					adjust_link(backp, mapp, base, len);
704 					rval = NDI_SUCCESS;
705 				}
706 				break;
707 			}
708 		}
709 	}
710 
711 	if ((rval != NDI_SUCCESS) &&
712 	    (req->ra_flags & NDI_RA_ALLOC_PARTIAL_OK) &&
713 	    (backlargestp != NULL)) {
714 		adjust_link(backlargestp, *backlargestp, largestbase,
715 		    largestlen);
716 
717 		base = largestbase;
718 		len = largestlen;
719 		rval = NDI_RA_PARTIAL_REQ;
720 	}
721 
722 	mutex_exit(&ra_lock);
723 
724 	if (rval == NDI_FAILURE) {
725 		*retbasep = 0;
726 		*retlenp = 0;
727 	} else {
728 		*retbasep = base;
729 		*retlenp = len;
730 	}
731 
732 	/*
733 	 * Update dip's "available" property, subtracting this piece of
734 	 * resource from the pool.
735 	 */
736 	if ((rval == NDI_SUCCESS) || (rval == NDI_RA_PARTIAL_REQ))
737 		(void) pci_get_available_prop(dipmap->ra_dip,
738 		    *retbasep, *retlenp, type);
739 
740 	return (rval);
741 }
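
/*
 * Illustrative sketch (editorial addition): a caller typically fills in an
 * ndi_ra_request_t and lets ndi_ra_alloc() choose a region.  The values
 * below are hypothetical; NDI_RA_ALIGN_SIZE requires ra_len to be a power
 * of two.
 *
 *	ndi_ra_request_t req;
 *	uint64_t base, len;
 *
 *	bzero(&req, sizeof (req));
 *	req.ra_len = 0x1000;
 *	req.ra_flags = NDI_RA_ALIGN_SIZE | NDI_RA_ALLOC_BOUNDED;
 *	req.ra_boundbase = 0;
 *	req.ra_boundlen = 0xffffffffULL;
 *
 *	if (ndi_ra_alloc(dip, &req, &base, &len, NDI_RA_TYPE_MEM,
 *	    NDI_RA_PASS) == NDI_SUCCESS) {
 *		base and len now describe the allocated range
 *	}
 */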
742 
743 /*
744  * isa_resource_setup
745  *	check for /used-resources and initialize
746  *	based on info there.  If no /used-resources,
747  *	fail.
748  */
749 int
750 isa_resource_setup()
751 {
752 	dev_info_t *used, *usedpdip;
753 	/*
754 	 * note that at this time bootconf creates 32 bit properties for
755 	 * io-space and device-memory
756 	 */
757 	struct iorange {
758 		uint32_t	base;
759 		uint32_t	len;
760 	} *iorange;
761 	struct memrange {
762 		uint32_t	base;
763 		uint32_t	len;
764 	} *memrange;
765 	uint32_t *irq;
766 	int proplen;
767 	int i, len;
768 	int maxrange;
769 	ndi_ra_request_t req;
770 	uint64_t retbase;
771 	uint64_t retlen;
772 
773 	used = ddi_find_devinfo("used-resources", -1, 0);
774 	if (used == NULL) {
775 		DEBUGPRT(CE_CONT,
776 		    "isa_resource_setup: used-resources not found");
777 		return (NDI_FAILURE);
778 	}
779 
780 	/*
781 	 * initialize to all resources being present
782 	 * and then remove the ones in use.
783 	 */
784 
785 	usedpdip = ddi_root_node();
786 
787 	DEBUGPRT(CE_CONT, "isa_resource_setup: used = %p usedpdip = %p\n",
788 	    (void *)used, (void *)usedpdip);
789 
790 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
791 		return (NDI_FAILURE);
792 	}
793 
794 	/* initialize io space, highest end base is 0xffff */
795 	/* note that length is highest addr + 1 since starts from 0 */
796 
797 	(void) ndi_ra_free(usedpdip, 0, 0xffff + 1,  NDI_RA_TYPE_IO, 0);
798 
799 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
800 	    "io-space", (caddr_t)&iorange, &proplen) == DDI_SUCCESS) {
801 		maxrange = proplen / sizeof (struct iorange);
802 		/* remove the "used" I/O resources */
803 		for (i = 0; i < maxrange; i++) {
804 			bzero((caddr_t)&req, sizeof (req));
805 			req.ra_addr =  (uint64_t)iorange[i].base;
806 			req.ra_len = (uint64_t)iorange[i].len;
807 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
808 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
809 			    NDI_RA_TYPE_IO, 0);
810 		}
811 
812 		kmem_free((caddr_t)iorange, proplen);
813 	}
814 
815 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
816 		return (NDI_FAILURE);
817 	}
818 	/* initialize memory space where highest end base is 0xffffffff */
819 	/* note that length is highest addr + 1 since starts from 0 */
820 	(void) ndi_ra_free(usedpdip, 0, ((uint64_t)((uint32_t)~0)) + 1,
821 	    NDI_RA_TYPE_MEM, 0);
822 
823 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
824 	    "device-memory", (caddr_t)&memrange, &proplen) == DDI_SUCCESS) {
825 		maxrange = proplen / sizeof (struct memrange);
826 		/* remove the "used" memory resources */
827 		for (i = 0; i < maxrange; i++) {
828 			bzero((caddr_t)&req, sizeof (req));
829 			req.ra_addr = (uint64_t)memrange[i].base;
830 			req.ra_len = (uint64_t)memrange[i].len;
831 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
832 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
833 			    NDI_RA_TYPE_MEM, 0);
834 		}
835 
836 		kmem_free((caddr_t)memrange, proplen);
837 	}
838 
839 	if (ndi_ra_map_setup(usedpdip, NDI_RA_TYPE_INTR) == NDI_FAILURE) {
840 		return (NDI_FAILURE);
841 	}
842 
843 	/* initialize the interrupt space */
844 	(void) ndi_ra_free(usedpdip, 0, 16, NDI_RA_TYPE_INTR, 0);
845 
846 	/*
847 	 * The PC/AT had two PICs cascaded together through IRQ 2 on the
848 	 * primary with firmware providing compatibility.  Effectively IRQ 2
849 	 * and 9 are the same.  Intel platforms have retained compatibility
850 	 * for that since.
851 	 *
852 	 * Mark IRQ 2 as consumed, so it can never be allocated.
853 	 */
854 #if defined(__x86)
855 	bzero(&req, sizeof (req));
856 	req.ra_addr = 2;
857 	req.ra_len = 1;
858 	req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
859 	(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
860 	    NDI_RA_TYPE_INTR, 0);
861 #endif
862 
863 	if (ddi_getlongprop(DDI_DEV_T_ANY, used, DDI_PROP_DONTPASS,
864 	    "interrupts", (caddr_t)&irq, &proplen) == DDI_SUCCESS) {
865 		/* Initialize available interrupts by removing the used ones */
866 		len = (proplen / sizeof (uint32_t));
867 		for (i = 0; i < len; i++) {
868 			bzero((caddr_t)&req, sizeof (req));
869 			req.ra_addr = (uint64_t)irq[i];
870 			req.ra_len = 1;
871 			req.ra_flags = NDI_RA_ALLOC_SPECIFIED;
872 			(void) ndi_ra_alloc(usedpdip, &req, &retbase, &retlen,
873 			    NDI_RA_TYPE_INTR, 0);
874 		}
875 		kmem_free((caddr_t)irq, proplen);
876 	}
877 
878 #ifdef BUSRA_DEBUG
879 	if (busra_debug) {
880 		(void) ra_dump_all(NULL, usedpdip);
881 	}
882 #endif
883 	return (NDI_SUCCESS);
884 
885 }
886 
887 #ifdef BUSRA_DEBUG
888 void
889 ra_dump_all(char *type, dev_info_t *dip)
890 {
891 
892 	struct ra_type_map *typemap;
893 	struct ra_dip_type *dipmap;
894 	struct ra_resource *res;
895 
896 	typemap =  (struct ra_type_map *)ra_map_list_head;
897 
898 	for (; typemap != NULL; typemap = typemap->ra_next) {
899 		if (type != NULL) {
900 			if (strcmp(typemap->type, type) != 0)
901 				continue;
902 		}
903 		cmn_err(CE_CONT, "type is %s\n", typemap->type);
904 		for (dipmap = typemap->ra_dip_list; dipmap != NULL;
905 		    dipmap = dipmap->ra_next) {
906 			if (dip != NULL) {
907 				if ((dipmap->ra_dip) != dip)
908 					continue;
909 			}
910 			cmn_err(CE_CONT, "  dip is %p\n",
911 			    (void *)dipmap->ra_dip);
912 			for (res = dipmap->ra_rangeset; res != NULL;
913 			    res = res->ra_next) {
914 				cmn_err(CE_CONT, "\t  range is %" PRIx64
915 				    " %" PRIx64 "\n", res->ra_base,
916 				    res->ra_len);
917 			}
918 			if (dip != NULL)
919 				break;
920 		}
921 		if (type != NULL)
922 			break;
923 	}
924 }
925 #endif
926 
927 struct bus_range {	/* 1275 "bus-range" property definition */
928 	uint32_t lo;
929 	uint32_t hi;
930 } pci_bus_range;
931 
932 struct busnum_ctrl {
933 	int	rv;
934 	dev_info_t *dip;
935 	struct	bus_range *range;
936 };
937 
938 
939 /*
940  * Setup resource map for the pci bus node based on the "available"
941  * property and "bus-range" property.
942  */
943 int
944 pci_resource_setup(dev_info_t *dip)
945 {
946 	pci_regspec_t *regs;
947 	int rlen, rcount, i;
948 	char bus_type[16] = "(unknown)";
949 	int len;
950 	struct busnum_ctrl ctrl;
951 	int rval = NDI_SUCCESS;
952 
953 	/*
954 	 * If this is a pci bus node then look for "available" property
955 	 * to find the available resources on this bus.
956 	 */
957 	len = sizeof (bus_type);
958 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
959 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
960 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
961 		return (NDI_FAILURE);
962 
963 	/* it is not a pci/pci-ex bus type */
964 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
965 		return (NDI_FAILURE);
966 
967 	/*
968 	 * The pci-hotplug project addresses adding the call
969 	 * to pci_resource_setup from pci nexus driver.
970 	 * However that project would initially be only for x86,
971 	 * so for sparc pcmcia-pci support we still need to call
972 	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
973 	 * are updated to call pci_resource_setup this portion of the
974 	 * code would really become an assert to make sure this
975 	 * function is not called for the same dip twice.
976 	 */
977 	/*
978 	 * Another user for the check below is hotplug PCI/PCIe bridges.
979 	 *
980 	 * For PCI/PCIE devices under a PCIE hierarchy, ndi_ra_alloc/free
981 	 * will update the devinfo node's "available" property, to reflect
982 	 * the fact that a piece of resource has been removed/added to
983 	 * a devinfo node.
984 	 * During probe of a new PCI bridge in the hotplug case, PCI
985 	 * configurator firstly allocates maximum MEM/IO from its parent,
986 	 * then calls ndi_ra_free() to use these resources to setup busra
987 	 * pool for the new bridge, as well as adding these resources to
988 	 * the "available" property of the new devinfo node. Then configu-
989 	 * rator will attach driver for the bridge before probing its
990 	 * children, and the bridge driver will then initialize its hotplug
991 	 * controllers (if it supports hotplug) and the HPC driver will call
992 	 * this function to setup the busra pool, but the resource pool
993 	 * has already been setup at the first of pcicfg_probe_bridge(),
994 	 * thus we need the check below to return directly in this case.
995 	 * Otherwise the ndi_ra_free() below will see overlapping resources.
996 	 */
997 	{
998 		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
999 			return (NDI_FAILURE);
1000 		}
1001 	}
1002 
1003 
1004 	/*
1005 	 * Create empty resource maps first.
1006 	 *
1007 	 * NOTE: If all the allocated resources are already assigned to
1008 	 * device(s) in the hot plug slot then "available" property may not
1009 	 * be present. But, subsequent hot plug operation may unconfigure
1010 	 * the device in the slot and try to free up its resources. So,
1011 	 * at the minimum we should create empty maps here.
1012 	 */
1013 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
1014 		return (NDI_FAILURE);
1015 	}
1016 
1017 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
1018 		return (NDI_FAILURE);
1019 	}
1020 
1021 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
1022 		return (NDI_FAILURE);
1023 	}
1024 
1025 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
1026 	    NDI_FAILURE) {
1027 		return (NDI_FAILURE);
1028 	}
1029 
1030 	/* read the "available" property if it is available */
1031 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1032 	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
1033 		/*
1034 		 * Remove "available" property as the entries will be
1035 		 * re-created in ndi_ra_free() below; note that a PROM-based
1036 		 * property will not be removed, but in ndi_ra_free() we'll be
1037 		 * creating non-PROM-based property entries.
1038 		 */
1039 		(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1040 		/*
1041 		 * create the available resource list for both memory and
1042 		 * io space
1043 		 */
1044 		rcount = rlen / sizeof (pci_regspec_t);
1045 		for (i = 0; i < rcount; i++) {
1046 			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
1047 			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
1048 				(void) ndi_ra_free(dip,
1049 				    (uint64_t)regs[i].pci_phys_low,
1050 				    (uint64_t)regs[i].pci_size_low,
1051 				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1052 				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
1053 				    NDI_RA_TYPE_MEM,
1054 				    0);
1055 				break;
1056 			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
1057 				(void) ndi_ra_free(dip,
1058 				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1059 				    ((uint64_t)(regs[i].pci_phys_low)),
1060 				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
1061 				    ((uint64_t)(regs[i].pci_size_low)),
1062 				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
1063 				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
1064 				    NDI_RA_TYPE_MEM,
1065 				    0);
1066 				break;
1067 			case PCI_REG_ADDR_G(PCI_ADDR_IO):
1068 				(void) ndi_ra_free(dip,
1069 				    (uint64_t)regs[i].pci_phys_low,
1070 				    (uint64_t)regs[i].pci_size_low,
1071 				    NDI_RA_TYPE_IO,
1072 				    0);
1073 				break;
1074 			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
1075 				break;
1076 			default:
1077 				cmn_err(CE_WARN,
1078 				    "pci_resource_setup: bad addr type: %x\n",
1079 				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
1080 				break;
1081 			}
1082 		}
1083 		kmem_free(regs, rlen);
1084 	}
1085 
1086 	/*
1087 	 * update resource map for available bus numbers if the node
1088 	 * has available-bus-range or bus-range property.
1089 	 */
1090 	len = sizeof (struct bus_range);
1091 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1092 	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
1093 	    DDI_SUCCESS) {
1094 		/*
1095 		 * Add bus numbers in the range to the free list.
1096 		 */
1097 		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
1098 		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
1099 		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
1100 	} else {
1101 		/*
1102 		 * We don't have an available-bus-range property. If, instead,
1103 		 * we have a bus-range property we add all the bus numbers
1104 		 * in that range to the free list but we must then scan
1105 		 * for pci-pci bridges on this bus to find out if there
1106 		 * are any of those bus numbers already in use. If so, we can
1107 		 * reclaim them.
1108 		 */
1109 		len = sizeof (struct bus_range);
1110 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
1111 		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
1112 		    &len) == DDI_SUCCESS) {
1113 			if (pci_bus_range.lo != pci_bus_range.hi) {
1114 				/*
1115 				 * Add bus numbers other than the secondary
1116 				 * bus number to the free list.
1117 				 */
1118 				(void) ndi_ra_free(dip,
1119 				    (uint64_t)pci_bus_range.lo + 1,
1120 				    (uint64_t)pci_bus_range.hi -
1121 				    (uint64_t)pci_bus_range.lo,
1122 				    NDI_RA_TYPE_PCI_BUSNUM, 0);
1123 
1124 				/* scan for pci-pci bridges */
1125 				ctrl.rv = DDI_SUCCESS;
1126 				ctrl.dip = dip;
1127 				ctrl.range = &pci_bus_range;
1128 				ndi_devi_enter(dip);
1129 				ddi_walk_devs(ddi_get_child(dip),
1130 				    claim_pci_busnum, (void *)&ctrl);
1131 				ndi_devi_exit(dip);
1132 				if (ctrl.rv != DDI_SUCCESS) {
1133 					/* failed to create the map */
1134 					(void) ndi_ra_map_destroy(dip,
1135 					    NDI_RA_TYPE_PCI_BUSNUM);
1136 					rval = NDI_FAILURE;
1137 				}
1138 			}
1139 		}
1140 	}
1141 
1142 #ifdef BUSRA_DEBUG
1143 	if (busra_debug) {
1144 		(void) ra_dump_all(NULL, dip);
1145 	}
1146 #endif
1147 
1148 	return (rval);
1149 }
1150 
1151 /*
1152  * If the device is a PCI bus device (i.e bus-range property exists) then
1153  * claim the bus numbers used by the device from the specified bus
1154  * resource map.
1155  */
1156 static int
1157 claim_pci_busnum(dev_info_t *dip, void *arg)
1158 {
1159 	struct bus_range pci_bus_range;
1160 	struct busnum_ctrl *ctrl;
1161 	ndi_ra_request_t req;
1162 	char bus_type[16] = "(unknown)";
1163 	int len;
1164 	uint64_t base;
1165 	uint64_t retlen;
1166 
1167 	ctrl = (struct busnum_ctrl *)arg;
1168 
1169 	/* check if this is a PCI bus node */
1170 	len = sizeof (bus_type);
1171 	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
1172 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
1173 	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
1174 		return (DDI_WALK_PRUNECHILD);
1175 
1176 	/* it is not a pci/pci-ex bus type */
1177 	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
1178 		return (DDI_WALK_PRUNECHILD);
1179 
1180 	/* look for the bus-range property */
1181 	len = sizeof (struct bus_range);
1182 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1183 	    "bus-range", (caddr_t)&pci_bus_range, &len) == DDI_SUCCESS) {
1184 		if ((pci_bus_range.lo >= ctrl->range->lo) &&
1185 		    (pci_bus_range.hi <= ctrl->range->hi)) {
1186 
1187 			/* claim the bus range from the bus resource map */
1188 			bzero((caddr_t)&req, sizeof (req));
1189 			req.ra_addr = (uint64_t)pci_bus_range.lo;
1190 			req.ra_flags |= NDI_RA_ALLOC_SPECIFIED;
1191 			req.ra_len = (uint64_t)pci_bus_range.hi -
1192 			    (uint64_t)pci_bus_range.lo + 1;
1193 			if (ndi_ra_alloc(ctrl->dip, &req, &base, &retlen,
1194 			    NDI_RA_TYPE_PCI_BUSNUM, 0) == NDI_SUCCESS)
1195 				return (DDI_WALK_PRUNECHILD);
1196 		}
1197 	}
1198 
1199 	/*
1200 	 * Error return.
1201 	 */
1202 	ctrl->rv = DDI_FAILURE;
1203 	return (DDI_WALK_TERMINATE);
1204 }
1205 
1206 void
1207 pci_resource_destroy(dev_info_t *dip)
1208 {
1209 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_IO);
1210 
1211 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_MEM);
1212 
1213 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_BUSNUM);
1214 
1215 	(void) ndi_ra_map_destroy(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM);
1216 }
1217 
1218 
1219 int
1220 pci_resource_setup_avail(dev_info_t *dip, pci_regspec_t *avail_p, int entries)
1221 {
1222 	int i;
1223 
1224 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE)
1225 		return (NDI_FAILURE);
1226 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE)
1227 		return (NDI_FAILURE);
1228 	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) == NDI_FAILURE)
1229 		return (NDI_FAILURE);
1230 
1231 	/* for each entry in the PCI "available" property */
1232 	for (i = 0; i < entries; i++, avail_p++) {
1233 		if (avail_p->pci_phys_hi == -1u)
1234 			goto err;
1235 
1236 		switch (PCI_REG_ADDR_G(avail_p->pci_phys_hi)) {
1237 		case PCI_REG_ADDR_G(PCI_ADDR_MEM32): {
1238 			(void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1239 			    (uint64_t)avail_p->pci_size_low,
1240 			    (avail_p->pci_phys_hi & PCI_REG_PF_M) ?
1241 			    NDI_RA_TYPE_PCI_PREFETCH_MEM : NDI_RA_TYPE_MEM,
1242 			    0);
1243 			}
1244 			break;
1245 		case PCI_REG_ADDR_G(PCI_ADDR_IO):
1246 			(void) ndi_ra_free(dip, (uint64_t)avail_p->pci_phys_low,
1247 			    (uint64_t)avail_p->pci_size_low, NDI_RA_TYPE_IO, 0);
1248 			break;
1249 		default:
1250 			goto err;
1251 		}
1252 	}
1253 #ifdef BUSRA_DEBUG
1254 	if (busra_debug) {
1255 		(void) ra_dump_all(NULL, dip);
1256 	}
1257 #endif
1258 	return (NDI_SUCCESS);
1259 
1260 err:
1261 	cmn_err(CE_WARN, "pci_resource_setup_avail: bad entry[%d]=%x\n",
1262 	    i, avail_p->pci_phys_hi);
1263 	return (NDI_FAILURE);
1264 }
1265 
1266 /*
1267  * Return true if the devinfo node resides on PCI or PCI Express bus,
1268  * sitting in a PCI Express hierarchy.
1269  */
1270 static boolean_t
1271 is_pcie_fabric(dev_info_t *dip)
1272 {
1273 	dev_info_t *root = ddi_root_node();
1274 	dev_info_t *pdip;
1275 	boolean_t found = B_FALSE;
1276 	char *bus;
1277 
1278 	/*
1279 	 * Is this pci/pcie ?
1280 	 */
1281 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1282 	    DDI_PROP_DONTPASS, "device_type", &bus) !=
1283 	    DDI_PROP_SUCCESS) {
1284 		DEBUGPRT(CE_WARN, "is_pcie_fabric: cannot find "
1285 		    "\"device_type\" property for dip %p\n", (void *)dip);
1286 		return (B_FALSE);
1287 	}
1288 
1289 	if (strcmp(bus, "pciex") == 0) {
1290 		/* pcie bus, done */
1291 		ddi_prop_free(bus);
1292 		return (B_TRUE);
1293 	} else if (strcmp(bus, "pci") == 0) {
1294 		/*
1295 		 * pci bus, fall through to check if it resides in
1296 		 * a pcie hierarchy.
1297 		 */
1298 		ddi_prop_free(bus);
1299 	} else {
1300 		/* other bus, return failure */
1301 		ddi_prop_free(bus);
1302 		return (B_FALSE);
1303 	}
1304 
1305 	/*
1306 	 * Does this device reside in a pcie fabric ?
1307 	 */
1308 	for (pdip = ddi_get_parent(dip); pdip && (pdip != root) &&
1309 	    !found; pdip = ddi_get_parent(pdip)) {
1310 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
1311 		    DDI_PROP_DONTPASS, "device_type", &bus) !=
1312 		    DDI_PROP_SUCCESS)
1313 			break;
1314 
1315 		if (strcmp(bus, "pciex") == 0)
1316 			found = B_TRUE;
1317 
1318 		ddi_prop_free(bus);
1319 	}
1320 
1321 	return (found);
1322 }
1323 
1324 /*
1325  * Remove a piece of IO/MEM resource from "available" property of 'dip'.
1326  */
1327 static int
1328 pci_get_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
1329     char *busra_type)
1330 {
1331 	pci_regspec_t	*regs, *newregs;
1332 	uint_t		status;
1333 	int		rlen, rcount;
1334 	int		i, j, k;
1335 	uint64_t	dlen;
1336 	boolean_t	found = B_FALSE;
1337 	uint32_t	type;
1338 
1339 	/* check if we're manipulating MEM/IO resource */
1340 	if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
1341 		return (DDI_SUCCESS);
1342 
1343 	/* check if dip is a pci/pcie device resides in a pcie fabric */
1344 	/* check if dip is a pci/pcie device residing in a pcie fabric */
1345 		return (DDI_SUCCESS);
1346 
1347 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
1348 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1349 	    "available", (caddr_t)&regs, &rlen);
1350 
1351 	ASSERT(status == DDI_SUCCESS);
1352 	if (status != DDI_SUCCESS)
1353 		return (status);
1354 
1355 	/*
1356 	 * The updated "available" property will at most have one more entry
1357 	 * than the existing one (when the requested range is in the middle of
1358 	 * the matched property entry)
1359 	 */
1360 	newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);
1361 
1362 	rcount = rlen / sizeof (pci_regspec_t);
1363 	for (i = 0, j = 0; i < rcount; i++) {
1364 		if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
1365 			uint64_t range_base, range_len;
1366 
1367 			range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1368 			    ((uint64_t)(regs[i].pci_phys_low));
1369 			range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
1370 			    ((uint64_t)(regs[i].pci_size_low));
1371 
1372 			if ((base < range_base) ||
1373 			    (base + len > range_base + range_len)) {
1374 				/*
1375 				 * not a match, copy the entry
1376 				 */
1377 				goto copy_entry;
1378 			}
1379 
1380 			/*
1381 			 * range_base	base	base+len	range_base
1382 			 *					+range_len
1383 			 *   +------------+-----------+----------+
1384 			 *   |		  |///////////|		 |
1385 			 *   +------------+-----------+----------+
1386 			 */
1387 			/*
1388 			 * Found a match, remove the range out of this entry.
1389 			 */
1390 			found = B_TRUE;
1391 
1392 			dlen = base - range_base;
1393 			if (dlen != 0) {
1394 				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1395 				newregs[j].pci_phys_mid =
1396 				    (uint32_t)(range_base >> 32);
1397 				newregs[j].pci_phys_low =
1398 				    (uint32_t)(range_base);
1399 				newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1400 				newregs[j].pci_size_low = (uint32_t)dlen;
1401 				j++;
1402 			}
1403 
1404 			dlen = (range_base + range_len) - (base + len);
1405 			if (dlen != 0) {
1406 				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1407 				newregs[j].pci_phys_mid =
1408 				    (uint32_t)((base + len)>> 32);
1409 				newregs[j].pci_phys_low =
1410 				    (uint32_t)(base + len);
1411 				newregs[j].pci_size_hi = (uint32_t)(dlen >> 32);
1412 				newregs[j].pci_size_low = (uint32_t)dlen;
1413 				j++;
1414 			}
1415 
1416 			/*
1417 			 * We've allocated the resource from the matched
1418 			 * entry, almost finished but still need to copy
1419 			 * the remaining entries from the original property
1420 			 * array.
1421 			 */
1422 			for (k = i + 1; k < rcount; k++) {
1423 				newregs[j] = regs[k];
1424 				j++;
1425 			}
1426 
1427 			goto done;
1428 
1429 		} else {
1430 copy_entry:
1431 			newregs[j] = regs[i];
1432 			j++;
1433 		}
1434 	}
1435 
1436 done:
1437 	/*
1438 	 * This should not fail, so assert it. For a non-debug kernel we don't
1439 	 * want to panic, thus we only log a warning message.
1440 	 */
1441 	ASSERT(found == B_TRUE);
1442 	if (!found) {
1443 		cmn_err(CE_WARN, "pci_get_available_prop: failed to remove "
1444 		    "resource from dip %p : base 0x%" PRIx64 ", len 0x%" PRIX64
1445 		    ", type 0x%x\n", (void *)dip, base, len, type);
1446 		kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1447 		kmem_free(regs, rlen);
1448 
1449 		return (DDI_FAILURE);
1450 	}
1451 
1452 	/*
1453 	 * Found the resources from parent, update the "available"
1454 	 * property.
1455 	 */
1456 	if (j == 0) {
1457 		/* all the resources are consumed, remove the property */
1458 		(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "available");
1459 	} else {
1460 		/*
1461 		 * There are still resources available in the parent dip;
1462 		 * update with the remaining resources.
1463 		 */
1464 		(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1465 		    "available", (int *)newregs,
1466 		    (j * sizeof (pci_regspec_t)) / sizeof (int));
1467 	}
1468 
1469 	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1470 	kmem_free(regs, rlen);
1471 
1472 	return (DDI_SUCCESS);
1473 }
1474 
1475 /*
1476  * Add a piece of IO/MEM resource to "available" property of 'dip'.
1477  */
1478 static int
1479 pci_put_available_prop(dev_info_t *dip, uint64_t base, uint64_t len,
1480     char *busra_type)
1481 {
1482 	pci_regspec_t	*regs, *newregs;
1483 	uint_t		status;
1484 	int		rlen, rcount;
1485 	int		i, j, k;
1486 	int		matched = 0;
1487 	uint64_t	orig_base = base;
1488 	uint64_t	orig_len = len;
1489 	uint32_t	type;
1490 
1491 	/* check if we're manipulating MEM/IO resource */
1492 	if ((type = pci_type_ra2pci(busra_type)) == PCI_ADDR_TYPE_INVAL)
1493 		return (DDI_SUCCESS);
1494 
1495 	/* check if dip is a pci/pcie device resides in a pcie fabric */
1496 	/* check if dip is a pci/pcie device residing in a pcie fabric */
1497 		return (DDI_SUCCESS);
1498 
1499 	status = ddi_getlongprop(DDI_DEV_T_ANY, dip,
1500 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1501 	    "available", (caddr_t)&regs, &rlen);
1502 
1503 	switch (status) {
1504 		case DDI_PROP_NOT_FOUND:
1505 			goto not_found;
1506 
1507 		case DDI_PROP_SUCCESS:
1508 			break;
1509 
1510 		default:
1511 			return (status);
1512 	}
1513 
1514 	/*
1515 	 * The "available" property exists on the node; try to put this
1516 	 * resource back, merge if there are adjacent resources.
1517 	 *
1518 	 * The updated "available" property will at most have one more entry
1519 	 * than the existing one (when there are no adjacent entries, so the new
1520 	 * resource is appended at the end)
1521 	 */
1522 	newregs = kmem_alloc(rlen + sizeof (pci_regspec_t), KM_SLEEP);
1523 
1524 	rcount = rlen / sizeof (pci_regspec_t);
1525 	for (i = 0, j = 0; i < rcount; i++) {
1526 		if (type == (regs[i].pci_phys_hi & PCI_ADDR_TYPE_MASK)) {
1527 			uint64_t range_base, range_len;
1528 
1529 			range_base = ((uint64_t)(regs[i].pci_phys_mid) << 32) |
1530 			    ((uint64_t)(regs[i].pci_phys_low));
1531 			range_len = ((uint64_t)(regs[i].pci_size_hi) << 32) |
1532 			    ((uint64_t)(regs[i].pci_size_low));
1533 
1534 			if ((base + len < range_base) ||
1535 			    (base > range_base + range_len)) {
1536 				/*
1537 				 * Not adjacent, copy the entry and continue
1538 				 */
1539 				goto copy_entry;
1540 			}
1541 
1542 			/*
1543 			 * Adjacent or overlap?
1544 			 *
1545 			 * Should not have overlapping resources so assert it.
1546 			 * For a non-debug kernel we don't want to panic, thus
1547 			 * we only log a warning message.
1548 			 */
1549 #if 0
1550 			ASSERT((base + len == range_base) ||
1551 			    (base == range_base + range_len));
1552 #endif
1553 			if ((base + len != range_base) &&
1554 			    (base != range_base + range_len)) {
1555 				cmn_err(CE_WARN, "pci_put_available_prop: "
1556 				    "failed to add resource to dip %p : "
1557 				    "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
1558 				    "overlaps with existing resource "
1559 				    "base 0x%" PRIx64 ", len 0x%" PRIx64 "\n",
1560 				    (void *)dip, orig_base, orig_len,
1561 				    range_base, range_len);
1562 
1563 				goto failure;
1564 			}
1565 
1566 			/*
1567 			 * On the left:
1568 			 *
1569 			 * base		range_base
1570 			 *   +-------------+-------------+
1571 			 *   |/////////////|		 |
1572 			 *   +-------------+-------------+
1573 			 *	len		range_len
1574 			 *
1575 			 * On the right:
1576 			 *
1577 			 * range_base	 base
1578 			 *   +-------------+-------------+
1579 			 *   |		   |/////////////|
1580 			 *   +-------------+-------------+
1581 			 *	range_len	len
1582 			 */
1583 			/*
1584 			 * There are at most two pieces of resource adjacent
1585 			 * to this resource, assert it.
1586 			 */
1587 			ASSERT(matched < 2);
1588 
1589 			if (!(matched < 2)) {
1590 				cmn_err(CE_WARN, "pci_put_available_prop: "
1591 				    "failed to add resource to dip %p : "
1592 				    "base 0x%" PRIx64 ", len 0x%" PRIx64 " "
1593 				    "found overlaps in existing resources\n",
1594 				    (void *)dip, orig_base, orig_len);
1595 
1596 				goto failure;
1597 			}
1598 
1599 			/* setup base & len to refer to the merged range */
1600 			len += range_len;
1601 			if (base == range_base + range_len)
1602 				base = range_base;
1603 
1604 			if (matched == 0) {
1605 				/*
1606 				 * One adjacent entry, add this resource in
1607 				 */
1608 				newregs[j].pci_phys_hi = regs[i].pci_phys_hi;
1609 				newregs[j].pci_phys_mid =
1610 				    (uint32_t)(base >> 32);
1611 				newregs[j].pci_phys_low = (uint32_t)(base);
1612 				newregs[j].pci_size_hi = (uint32_t)(len >> 32);
1613 				newregs[j].pci_size_low = (uint32_t)len;
1614 
1615 				matched = 1;
1616 				k = j;
1617 				j++;
1618 			} else { /* matched == 1 */
1619 				/*
1620 				 * Two adjacent entries, merge them together
1621 				 */
1622 				newregs[k].pci_phys_hi = regs[i].pci_phys_hi;
1623 				newregs[k].pci_phys_mid =
1624 				    (uint32_t)(base >> 32);
1625 				newregs[k].pci_phys_low = (uint32_t)(base);
1626 				newregs[k].pci_size_hi = (uint32_t)(len >> 32);
1627 				newregs[k].pci_size_low = (uint32_t)len;
1628 
1629 				matched = 2;
1630 			}
1631 		} else {
1632 copy_entry:
1633 			newregs[j] = regs[i];
1634 			j++;
1635 		}
1636 	}
1637 
1638 	if (matched == 0) {
1639 		/* No adjacent entries, append at end */
1640 		ASSERT(j == rcount);
1641 
1642 		/*
1643 		 * According to page 15 of 1275 spec, bit "n" of "available"
1644 		 * should be set to 1.
1645 		 */
1646 		newregs[j].pci_phys_hi = type;
1647 		newregs[j].pci_phys_hi |= PCI_REG_REL_M;
1648 
1649 		newregs[j].pci_phys_mid = (uint32_t)(base >> 32);
1650 		newregs[j].pci_phys_low = (uint32_t)base;
1651 		newregs[j].pci_size_hi = (uint32_t)(len >> 32);
1652 		newregs[j].pci_size_low = (uint32_t)len;
1653 
1654 		j++;
1655 	}
1656 
1657 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1658 	    "available", (int *)newregs,
1659 	    (j * sizeof (pci_regspec_t)) / sizeof (int));
1660 
1661 	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1662 	kmem_free(regs, rlen);
1663 	return (DDI_SUCCESS);
1664 
1665 not_found:
1666 	/*
1667 	 * There is no "available" property on the parent node, create it.
1668 	 */
1669 	newregs = kmem_alloc(sizeof (pci_regspec_t), KM_SLEEP);
1670 
1671 	/*
1672 	 * According to page 15 of 1275 spec, bit "n" of "available" should
1673 	 * be set to 1.
1674 	 */
1675 	newregs[0].pci_phys_hi = type;
1676 	newregs[0].pci_phys_hi |= PCI_REG_REL_M;
1677 
1678 	newregs[0].pci_phys_mid = (uint32_t)(base >> 32);
1679 	newregs[0].pci_phys_low = (uint32_t)base;
1680 	newregs[0].pci_size_hi = (uint32_t)(len >> 32);
1681 	newregs[0].pci_size_low = (uint32_t)len;
1682 
1683 	(void) ndi_prop_update_int_array(DDI_DEV_T_NONE, dip,
1684 	    "available", (int *)newregs,
1685 	    sizeof (pci_regspec_t) / sizeof (int));
1686 	kmem_free(newregs, sizeof (pci_regspec_t));
1687 	return (DDI_SUCCESS);
1688 
1689 failure:
1690 	kmem_free(newregs, rlen + sizeof (pci_regspec_t));
1691 	kmem_free(regs, rlen);
1692 	return (DDI_FAILURE);
1693 }
1694 
1695 static uint32_t
1696 pci_type_ra2pci(char *type)
1697 {
1698 	uint32_t	pci_type = PCI_ADDR_TYPE_INVAL;
1699 
1700 	/*
1701 	 * No 64 bit mem support for now
1702 	 */
1703 	if (strcmp(type, NDI_RA_TYPE_IO) == 0) {
1704 		pci_type = PCI_ADDR_IO;
1705 
1706 	} else if (strcmp(type, NDI_RA_TYPE_MEM) == 0) {
1707 		pci_type = PCI_ADDR_MEM32;
1708 
1709 	} else if (strcmp(type, NDI_RA_TYPE_PCI_PREFETCH_MEM)  == 0) {
1710 		pci_type = PCI_ADDR_MEM32;
1711 		pci_type |= PCI_REG_PF_M;
1712 	}
1713 
1714 	return (pci_type);
1715 }
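
/*
 * Illustrative note (editorial addition): the "available" property that
 * pci_get_available_prop() and pci_put_available_prop() manipulate is an
 * array of pci_regspec_t entries, with the 64-bit base split across
 * pci_phys_mid/pci_phys_low and the 64-bit length split across
 * pci_size_hi/pci_size_low.  A hypothetical 32-bit memory entry covering
 * [0xc0000000, 0xc0000000 + 0x100000) would be encoded as:
 *
 *	pci_regspec_t r;
 *
 *	r.pci_phys_hi = PCI_ADDR_MEM32 | PCI_REG_REL_M;
 *	r.pci_phys_mid = 0x0;
 *	r.pci_phys_low = 0xc0000000;
 *	r.pci_size_hi = 0x0;
 *	r.pci_size_low = 0x100000;
 */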
1716