/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 1999 Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/resource_ext.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So for reusing the resource entry
 * we need to remember the resource.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static struct resource *next_resource(struct resource *p, bool sibling_only)
{
	/* Caller wants to traverse through siblings only */
	if (sibling_only)
		return p->sibling;

	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	return (void *)next_resource(p, false);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}
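
/*
 * For illustration only (addresses are hypothetical): with a two-level
 * hierarchy under iomem_resource, r_show() produces /proc/iomem lines
 * such as
 *
 *	00100000-bfffffff : System RAM
 *	  01000000-017fffff : Kernel code
 *
 * The two spaces of indentation per level come from "depth * 2" above, and
 * the 8-digit (or 4-digit, for a small port-space root) field width comes
 * from the root resource's end address.
 */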

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}

static struct resource *alloc_resource(gfp_t flags)
{
	struct resource *res = NULL;

	spin_lock(&bootmem_resource_lock);
	if (bootmem_resource_free) {
		res = bootmem_resource_free;
		bootmem_resource_free = res->sibling;
	}
	spin_unlock(&bootmem_resource_lock);

	if (res)
		memset(res, 0, sizeof(struct resource));
	else
		res = kzalloc(sizeof(struct resource), flags);

	return res;
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, conflict resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);
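
/*
 * A minimal usage sketch (device name and addresses are hypothetical): a
 * driver that claims a fixed MMIO window directly under iomem_resource and
 * gives it back on teardown.
 *
 *	static struct resource foo_res = {
 *		.name  = "foo-ctrl",
 *		.start = 0xfe000000,
 *		.end   = 0xfe000fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;		// range already claimed
 *	...
 *	release_resource(&foo_res);
 */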

/*
 * Finds the lowest iomem resource existing within [res->start..res->end).
 * The caller must specify res->start, res->end, res->flags, and optionally
 * desc. If found, returns 0, res is overwritten, if not found, returns -1.
 * This function walks the whole tree and not just first level children until
 * and unless first_level_children_only is true.
 */
static int find_next_iomem_res(struct resource *res, unsigned long desc,
			       bool first_level_children_only)
{
	resource_size_t start, end;
	struct resource *p;
	bool sibling_only = false;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	if (first_level_children_only)
		sibling_only = true;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
		if ((p->flags & res->flags) != res->flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}

	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * Walks through iomem resources and calls func() with matching resource
 * ranges. This walks through whole tree and not just first level children.
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(u64, u64, void *))
{
	struct resource res;
	u64 orig_end;
	int ret = -1;

	res.start = start;
	res.end = end;
	res.flags = flags;
	orig_end = res.end;

	while ((res.start < res.end) &&
		(!find_next_iomem_res(&res, desc, false))) {

		ret = (*func)(res.start, res.end, arg);
		if (ret)
			break;

		res.start = res.end + 1;
		res.end = orig_end;
	}

	return ret;
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Now, this function is only for System RAM, it deals with full ranges and
 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
				int (*func)(u64, u64, void *))
{
	struct resource res;
	u64 orig_end;
	int ret = -1;

	res.start = start;
	res.end = end;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
		ret = (*func)(res.start, res.end, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
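
/*
 * A minimal usage sketch (callback name and totals are hypothetical):
 * summing up the bytes of busy System RAM in a range with
 * walk_system_ram_res().
 *
 *	static int add_ram_bytes(u64 start, u64 end, void *arg)
 *	{
 *		*(u64 *)arg += end - start + 1;
 *		return 0;	// 0 means: keep walking
 *	}
 *
 *	u64 bytes = 0;
 *	walk_system_ram_res(0, -1, &bytes, add_ram_bytes);
 */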

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if specified address is
 * registered as System RAM in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	resource_size_t end = start + size - 1;
	int type = 0; int other = 0;
	struct resource *p;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (start >= p->start && start <= p->end)
			is_type ? type++ : other++;
		if (end >= p->start && end <= p->end)
			is_type ? type++ : other++;
		if (p->start >= start && p->end <= end)
			is_type ? type++ : other++;
	}
	read_unlock(&resource_lock);

	if (other == 0)
		return type ? REGION_INTERSECTS : REGION_DISJOINT;

	if (type)
		return REGION_MIXED;

	return REGION_DISJOINT;
}
EXPORT_SYMBOL_GPL(region_intersects);
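
/*
 * A minimal usage sketch (hypothetical caller): refuse to hand out a mapping
 * for a range that overlaps System RAM, even partially.
 *
 *	if (region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;
 */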

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
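
/*
 * A minimal usage sketch (names and numbers are hypothetical): ask for a
 * 4 KiB, 4 KiB-aligned window anywhere in the first 4 GiB of iomem_resource.
 *
 *	static struct resource dev_window = {
 *		.name  = "dev-window",
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	err = allocate_resource(&iomem_resource, &dev_window, 0x1000,
 *				0, 0xffffffff, 0x1000, NULL, NULL);
 *
 * On success dev_window.start/.end describe the slot that was reserved; a
 * second call on the same (now parented) resource goes through
 * reallocate_resource() instead.
 */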

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, conflict resource if the resource can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
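
/*
 * A minimal usage sketch (hypothetical firmware/bus driver): publish a
 * region discovered from firmware. Unlike request_resource(), conflicting
 * resources that fit entirely inside the new range simply become its
 * children instead of causing a failure.
 *
 *	static struct resource fw_region = {
 *		.name  = "fw-region",
 *		.start = 0xfed00000,
 *		.end   = 0xfed03fff,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_region))
 *		pr_warn("fw-region could not be inserted\n");
 */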

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before. insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
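
/*
 * A minimal usage sketch (values are hypothetical): grow a previously
 * requested region in place. The call fails with -EBUSY if the new range
 * would collide with a sibling or no longer cover an existing child.
 *
 *	err = adjust_resource(res, res->start,
 *			      resource_size(res) + 0x1000);
 */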

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = alloc_resource(GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		free_resource(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
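
/*
 * A minimal usage sketch (base/size/name are hypothetical), using the
 * request_mem_region()/release_mem_region() wrappers from <linux/ioport.h>
 * that funnel into __request_region()/__release_region():
 *
 *	if (!request_mem_region(base, size, "my-driver"))
 *		return -EBUSY;		// someone else owns it
 *	regs = ioremap(base, size);
 *	...
 *	iounmap(regs);
 *	release_mem_region(base, size);
 */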

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete. The requested region
 * is released from a currently busy memory resource. The requested region
 * must either match exactly or fit into a single busy resource entry. In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity. Enhance this logic when necessary.
 */
int release_mem_region_adjustable(struct resource *parent,
				  resource_size_t start, resource_size_t size)
{
	struct resource **p;
	struct resource *res;
	struct resource *new_res;
	resource_size_t end;
	int ret = -EINVAL;

	end = start + size - 1;
	if ((start < parent->start) || (end > parent->end))
		return ret;

	/* The alloc_resource() result gets checked later */
	new_res = alloc_resource(GFP_KERNEL);

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if it does not fit into */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
			ret = 0;
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			ret = __adjust_resource(res, end + 1,
						res->end - end);
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			ret = __adjust_resource(res, res->start,
						start - res->start);
		} else {
			/* split into two entries */
			if (!new_res) {
				ret = -ENOMEM;
				break;
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			ret = __adjust_resource(res, res->start,
						start - res->start);
			if (ret)
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
	return ret;
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */
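
/*
 * A minimal usage sketch (hypothetical hot-remove path): give back part of a
 * busy "System RAM" entry. The range must lie entirely inside one busy
 * resource; the remaining piece(s) stay in the tree.
 *
 *	ret = release_mem_region_adjustable(&iomem_resource,
 *					    start, nr_pages << PAGE_SHIFT);
 *	if (ret == -EINVAL)
 *		pr_warn("range was not inside a single busy entry\n");
 */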

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);
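
/*
 * A minimal usage sketch (hypothetical probe routine), using the
 * devm_request_mem_region() wrapper from <linux/ioport.h> that funnels into
 * __devm_request_region(); the region is released automatically when the
 * device is unbound.
 *
 *	if (!devm_request_mem_region(&pdev->dev, base, size, "my-driver"))
 *		return -EBUSY;
 */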

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
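
/*
 * Example (illustrative only): booting with "reserve=0x300,8" marks I/O
 * ports 0x300-0x307 as busy in ioport_resource; a start address of 0x10000
 * or above is reserved in iomem_resource instead.
 */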

/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if ((p->flags & IORESOURCE_BUSY) == 0)
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);
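
/*
 * Example (illustrative only): booting with "iomem=relaxed" clears
 * strict_iomem_checks, so iomem_is_exclusive() always reports 0;
 * "iomem=strict" turns the checks back on.
 */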