1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/resource.c 4 * 5 * Copyright (C) 1999 Linus Torvalds 6 * Copyright (C) 1999 Martin Mares <mj@ucw.cz> 7 * 8 * Arbitrary resource management. 9 */ 10 11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 #include <linux/export.h> 14 #include <linux/errno.h> 15 #include <linux/ioport.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/spinlock.h> 19 #include <linux/fs.h> 20 #include <linux/proc_fs.h> 21 #include <linux/pseudo_fs.h> 22 #include <linux/sched.h> 23 #include <linux/seq_file.h> 24 #include <linux/device.h> 25 #include <linux/pfn.h> 26 #include <linux/mm.h> 27 #include <linux/mount.h> 28 #include <linux/resource_ext.h> 29 #include <uapi/linux/magic.h> 30 #include <asm/io.h> 31 32 33 struct resource ioport_resource = { 34 .name = "PCI IO", 35 .start = 0, 36 .end = IO_SPACE_LIMIT, 37 .flags = IORESOURCE_IO, 38 }; 39 EXPORT_SYMBOL(ioport_resource); 40 41 struct resource iomem_resource = { 42 .name = "PCI mem", 43 .start = 0, 44 .end = -1, 45 .flags = IORESOURCE_MEM, 46 }; 47 EXPORT_SYMBOL(iomem_resource); 48 49 /* constraints to be met while allocating resources */ 50 struct resource_constraint { 51 resource_size_t min, max, align; 52 resource_size_t (*alignf)(void *, const struct resource *, 53 resource_size_t, resource_size_t); 54 void *alignf_data; 55 }; 56 57 static DEFINE_RWLOCK(resource_lock); 58 59 static struct resource *next_resource(struct resource *p) 60 { 61 if (p->child) 62 return p->child; 63 while (!p->sibling && p->parent) 64 p = p->parent; 65 return p->sibling; 66 } 67 68 static struct resource *next_resource_skip_children(struct resource *p) 69 { 70 while (!p->sibling && p->parent) 71 p = p->parent; 72 return p->sibling; 73 } 74 75 #define for_each_resource(_root, _p, _skip_children) \ 76 for ((_p) = (_root)->child; (_p); \ 77 (_p) = (_skip_children) ? next_resource_skip_children(_p) : \ 78 next_resource(_p)) 79 80 static void *r_next(struct seq_file *m, void *v, loff_t *pos) 81 { 82 struct resource *p = v; 83 (*pos)++; 84 return (void *)next_resource(p); 85 } 86 87 #ifdef CONFIG_PROC_FS 88 89 enum { MAX_IORES_LEVEL = 5 }; 90 91 static void *r_start(struct seq_file *m, loff_t *pos) 92 __acquires(resource_lock) 93 { 94 struct resource *p = pde_data(file_inode(m->file)); 95 loff_t l = 0; 96 read_lock(&resource_lock); 97 for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) 98 ; 99 return p; 100 } 101 102 static void r_stop(struct seq_file *m, void *v) 103 __releases(resource_lock) 104 { 105 read_unlock(&resource_lock); 106 } 107 108 static int r_show(struct seq_file *m, void *v) 109 { 110 struct resource *root = pde_data(file_inode(m->file)); 111 struct resource *r = v, *p; 112 unsigned long long start, end; 113 int width = root->end < 0x10000 ? 4 : 8; 114 int depth; 115 116 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) 117 if (p->parent == root) 118 break; 119 120 if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) { 121 start = r->start; 122 end = r->end; 123 } else { 124 start = end = 0; 125 } 126 127 seq_printf(m, "%*s%0*llx-%0*llx : %s\n", 128 depth * 2, "", 129 width, start, 130 width, end, 131 r->name ? r->name : "<BAD>"); 132 return 0; 133 } 134 135 static const struct seq_operations resource_op = { 136 .start = r_start, 137 .next = r_next, 138 .stop = r_stop, 139 .show = r_show, 140 }; 141 142 static int __init ioresources_init(void) 143 { 144 proc_create_seq_data("ioports", 0, NULL, &resource_op, 145 &ioport_resource); 146 proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource); 147 return 0; 148 } 149 __initcall(ioresources_init); 150 151 #endif /* CONFIG_PROC_FS */ 152 153 static void free_resource(struct resource *res) 154 { 155 /** 156 * If the resource was allocated using memblock early during boot 157 * we'll leak it here: we can only return full pages back to the 158 * buddy and trying to be smart and reusing them eventually in 159 * alloc_resource() overcomplicates resource handling. 160 */ 161 if (res && PageSlab(virt_to_head_page(res))) 162 kfree(res); 163 } 164 165 static struct resource *alloc_resource(gfp_t flags) 166 { 167 return kzalloc(sizeof(struct resource), flags); 168 } 169 170 /* Return the conflict entry if you can't request it */ 171 static struct resource * __request_resource(struct resource *root, struct resource *new) 172 { 173 resource_size_t start = new->start; 174 resource_size_t end = new->end; 175 struct resource *tmp, **p; 176 177 if (end < start) 178 return root; 179 if (start < root->start) 180 return root; 181 if (end > root->end) 182 return root; 183 p = &root->child; 184 for (;;) { 185 tmp = *p; 186 if (!tmp || tmp->start > end) { 187 new->sibling = tmp; 188 *p = new; 189 new->parent = root; 190 return NULL; 191 } 192 p = &tmp->sibling; 193 if (tmp->end < start) 194 continue; 195 return tmp; 196 } 197 } 198 199 static int __release_resource(struct resource *old, bool release_child) 200 { 201 struct resource *tmp, **p, *chd; 202 203 p = &old->parent->child; 204 for (;;) { 205 tmp = *p; 206 if (!tmp) 207 break; 208 if (tmp == old) { 209 if (release_child || !(tmp->child)) { 210 *p = tmp->sibling; 211 } else { 212 for (chd = tmp->child;; chd = chd->sibling) { 213 chd->parent = tmp->parent; 214 if (!(chd->sibling)) 215 break; 216 } 217 *p = tmp->child; 218 chd->sibling = tmp->sibling; 219 } 220 old->parent = NULL; 221 return 0; 222 } 223 p = &tmp->sibling; 224 } 225 return -EINVAL; 226 } 227 228 static void __release_child_resources(struct resource *r) 229 { 230 struct resource *tmp, *p; 231 resource_size_t size; 232 233 p = r->child; 234 r->child = NULL; 235 while (p) { 236 tmp = p; 237 p = p->sibling; 238 239 tmp->parent = NULL; 240 tmp->sibling = NULL; 241 __release_child_resources(tmp); 242 243 printk(KERN_DEBUG "release child resource %pR\n", tmp); 244 /* need to restore size, and keep flags */ 245 size = resource_size(tmp); 246 tmp->start = 0; 247 tmp->end = size - 1; 248 } 249 } 250 251 void release_child_resources(struct resource *r) 252 { 253 write_lock(&resource_lock); 254 __release_child_resources(r); 255 write_unlock(&resource_lock); 256 } 257 258 /** 259 * request_resource_conflict - request and reserve an I/O or memory resource 260 * @root: root resource descriptor 261 * @new: resource descriptor desired by caller 262 * 263 * Returns 0 for success, conflict resource on error. 264 */ 265 struct resource *request_resource_conflict(struct resource *root, struct resource *new) 266 { 267 struct resource *conflict; 268 269 write_lock(&resource_lock); 270 conflict = __request_resource(root, new); 271 write_unlock(&resource_lock); 272 return conflict; 273 } 274 275 /** 276 * request_resource - request and reserve an I/O or memory resource 277 * @root: root resource descriptor 278 * @new: resource descriptor desired by caller 279 * 280 * Returns 0 for success, negative error code on error. 281 */ 282 int request_resource(struct resource *root, struct resource *new) 283 { 284 struct resource *conflict; 285 286 conflict = request_resource_conflict(root, new); 287 return conflict ? -EBUSY : 0; 288 } 289 290 EXPORT_SYMBOL(request_resource); 291 292 /** 293 * release_resource - release a previously reserved resource 294 * @old: resource pointer 295 */ 296 int release_resource(struct resource *old) 297 { 298 int retval; 299 300 write_lock(&resource_lock); 301 retval = __release_resource(old, true); 302 write_unlock(&resource_lock); 303 return retval; 304 } 305 306 EXPORT_SYMBOL(release_resource); 307 308 /** 309 * find_next_iomem_res - Finds the lowest iomem resource that covers part of 310 * [@start..@end]. 311 * 312 * If a resource is found, returns 0 and @*res is overwritten with the part 313 * of the resource that's within [@start..@end]; if none is found, returns 314 * -ENODEV. Returns -EINVAL for invalid parameters. 315 * 316 * @start: start address of the resource searched for 317 * @end: end address of same resource 318 * @flags: flags which the resource must have 319 * @desc: descriptor the resource must have 320 * @res: return ptr, if resource found 321 * 322 * The caller must specify @start, @end, @flags, and @desc 323 * (which may be IORES_DESC_NONE). 324 */ 325 static int find_next_iomem_res(resource_size_t start, resource_size_t end, 326 unsigned long flags, unsigned long desc, 327 struct resource *res) 328 { 329 struct resource *p; 330 331 if (!res) 332 return -EINVAL; 333 334 if (start >= end) 335 return -EINVAL; 336 337 read_lock(&resource_lock); 338 339 for (p = iomem_resource.child; p; p = next_resource(p)) { 340 /* If we passed the resource we are looking for, stop */ 341 if (p->start > end) { 342 p = NULL; 343 break; 344 } 345 346 /* Skip until we find a range that matches what we look for */ 347 if (p->end < start) 348 continue; 349 350 if ((p->flags & flags) != flags) 351 continue; 352 if ((desc != IORES_DESC_NONE) && (desc != p->desc)) 353 continue; 354 355 /* Found a match, break */ 356 break; 357 } 358 359 if (p) { 360 /* copy data */ 361 *res = (struct resource) { 362 .start = max(start, p->start), 363 .end = min(end, p->end), 364 .flags = p->flags, 365 .desc = p->desc, 366 .parent = p->parent, 367 }; 368 } 369 370 read_unlock(&resource_lock); 371 return p ? 0 : -ENODEV; 372 } 373 374 static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end, 375 unsigned long flags, unsigned long desc, 376 void *arg, 377 int (*func)(struct resource *, void *)) 378 { 379 struct resource res; 380 int ret = -EINVAL; 381 382 while (start < end && 383 !find_next_iomem_res(start, end, flags, desc, &res)) { 384 ret = (*func)(&res, arg); 385 if (ret) 386 break; 387 388 start = res.end + 1; 389 } 390 391 return ret; 392 } 393 394 /** 395 * walk_iomem_res_desc - Walks through iomem resources and calls func() 396 * with matching resource ranges. 397 * * 398 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check. 399 * @flags: I/O resource flags 400 * @start: start addr 401 * @end: end addr 402 * @arg: function argument for the callback @func 403 * @func: callback function that is called for each qualifying resource area 404 * 405 * All the memory ranges which overlap start,end and also match flags and 406 * desc are valid candidates. 407 * 408 * NOTE: For a new descriptor search, define a new IORES_DESC in 409 * <linux/ioport.h> and set it in 'desc' of a target resource entry. 410 */ 411 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, 412 u64 end, void *arg, int (*func)(struct resource *, void *)) 413 { 414 return __walk_iomem_res_desc(start, end, flags, desc, arg, func); 415 } 416 EXPORT_SYMBOL_GPL(walk_iomem_res_desc); 417 418 /* 419 * This function calls the @func callback against all memory ranges of type 420 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY. 421 * Now, this function is only for System RAM, it deals with full ranges and 422 * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate 423 * ranges. 424 */ 425 int walk_system_ram_res(u64 start, u64 end, void *arg, 426 int (*func)(struct resource *, void *)) 427 { 428 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 429 430 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, 431 func); 432 } 433 434 /* 435 * This function calls the @func callback against all memory ranges, which 436 * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY. 437 */ 438 int walk_mem_res(u64 start, u64 end, void *arg, 439 int (*func)(struct resource *, void *)) 440 { 441 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; 442 443 return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg, 444 func); 445 } 446 447 /* 448 * This function calls the @func callback against all memory ranges of type 449 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOUCE_BUSY. 450 * It is to be used only for System RAM. 451 */ 452 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 453 void *arg, int (*func)(unsigned long, unsigned long, void *)) 454 { 455 resource_size_t start, end; 456 unsigned long flags; 457 struct resource res; 458 unsigned long pfn, end_pfn; 459 int ret = -EINVAL; 460 461 start = (u64) start_pfn << PAGE_SHIFT; 462 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; 463 flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 464 while (start < end && 465 !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) { 466 pfn = PFN_UP(res.start); 467 end_pfn = PFN_DOWN(res.end + 1); 468 if (end_pfn > pfn) 469 ret = (*func)(pfn, end_pfn - pfn, arg); 470 if (ret) 471 break; 472 start = res.end + 1; 473 } 474 return ret; 475 } 476 477 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) 478 { 479 return 1; 480 } 481 482 /* 483 * This generic page_is_ram() returns true if specified address is 484 * registered as System RAM in iomem_resource list. 485 */ 486 int __weak page_is_ram(unsigned long pfn) 487 { 488 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1; 489 } 490 EXPORT_SYMBOL_GPL(page_is_ram); 491 492 static int __region_intersects(resource_size_t start, size_t size, 493 unsigned long flags, unsigned long desc) 494 { 495 struct resource res; 496 int type = 0; int other = 0; 497 struct resource *p; 498 499 res.start = start; 500 res.end = start + size - 1; 501 502 for (p = iomem_resource.child; p ; p = p->sibling) { 503 bool is_type = (((p->flags & flags) == flags) && 504 ((desc == IORES_DESC_NONE) || 505 (desc == p->desc))); 506 507 if (resource_overlaps(p, &res)) 508 is_type ? type++ : other++; 509 } 510 511 if (type == 0) 512 return REGION_DISJOINT; 513 514 if (other == 0) 515 return REGION_INTERSECTS; 516 517 return REGION_MIXED; 518 } 519 520 /** 521 * region_intersects() - determine intersection of region with known resources 522 * @start: region start address 523 * @size: size of region 524 * @flags: flags of resource (in iomem_resource) 525 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE 526 * 527 * Check if the specified region partially overlaps or fully eclipses a 528 * resource identified by @flags and @desc (optional with IORES_DESC_NONE). 529 * Return REGION_DISJOINT if the region does not overlap @flags/@desc, 530 * return REGION_MIXED if the region overlaps @flags/@desc and another 531 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc 532 * and no other defined resource. Note that REGION_INTERSECTS is also 533 * returned in the case when the specified region overlaps RAM and undefined 534 * memory holes. 535 * 536 * region_intersect() is used by memory remapping functions to ensure 537 * the user is not remapping RAM and is a vast speed up over walking 538 * through the resource table page by page. 539 */ 540 int region_intersects(resource_size_t start, size_t size, unsigned long flags, 541 unsigned long desc) 542 { 543 int ret; 544 545 read_lock(&resource_lock); 546 ret = __region_intersects(start, size, flags, desc); 547 read_unlock(&resource_lock); 548 549 return ret; 550 } 551 EXPORT_SYMBOL_GPL(region_intersects); 552 553 void __weak arch_remove_reservations(struct resource *avail) 554 { 555 } 556 557 static resource_size_t simple_align_resource(void *data, 558 const struct resource *avail, 559 resource_size_t size, 560 resource_size_t align) 561 { 562 return avail->start; 563 } 564 565 static void resource_clip(struct resource *res, resource_size_t min, 566 resource_size_t max) 567 { 568 if (res->start < min) 569 res->start = min; 570 if (res->end > max) 571 res->end = max; 572 } 573 574 /* 575 * Find empty slot in the resource tree with the given range and 576 * alignment constraints 577 */ 578 static int __find_resource(struct resource *root, struct resource *old, 579 struct resource *new, 580 resource_size_t size, 581 struct resource_constraint *constraint) 582 { 583 struct resource *this = root->child; 584 struct resource tmp = *new, avail, alloc; 585 586 tmp.start = root->start; 587 /* 588 * Skip past an allocated resource that starts at 0, since the assignment 589 * of this->start - 1 to tmp->end below would cause an underflow. 590 */ 591 if (this && this->start == root->start) { 592 tmp.start = (this == old) ? old->start : this->end + 1; 593 this = this->sibling; 594 } 595 for(;;) { 596 if (this) 597 tmp.end = (this == old) ? this->end : this->start - 1; 598 else 599 tmp.end = root->end; 600 601 if (tmp.end < tmp.start) 602 goto next; 603 604 resource_clip(&tmp, constraint->min, constraint->max); 605 arch_remove_reservations(&tmp); 606 607 /* Check for overflow after ALIGN() */ 608 avail.start = ALIGN(tmp.start, constraint->align); 609 avail.end = tmp.end; 610 avail.flags = new->flags & ~IORESOURCE_UNSET; 611 if (avail.start >= tmp.start) { 612 alloc.flags = avail.flags; 613 alloc.start = constraint->alignf(constraint->alignf_data, &avail, 614 size, constraint->align); 615 alloc.end = alloc.start + size - 1; 616 if (alloc.start <= alloc.end && 617 resource_contains(&avail, &alloc)) { 618 new->start = alloc.start; 619 new->end = alloc.end; 620 return 0; 621 } 622 } 623 624 next: if (!this || this->end == root->end) 625 break; 626 627 if (this != old) 628 tmp.start = this->end + 1; 629 this = this->sibling; 630 } 631 return -EBUSY; 632 } 633 634 /* 635 * Find empty slot in the resource tree given range and alignment. 636 */ 637 static int find_resource(struct resource *root, struct resource *new, 638 resource_size_t size, 639 struct resource_constraint *constraint) 640 { 641 return __find_resource(root, NULL, new, size, constraint); 642 } 643 644 /** 645 * reallocate_resource - allocate a slot in the resource tree given range & alignment. 646 * The resource will be relocated if the new size cannot be reallocated in the 647 * current location. 648 * 649 * @root: root resource descriptor 650 * @old: resource descriptor desired by caller 651 * @newsize: new size of the resource descriptor 652 * @constraint: the size and alignment constraints to be met. 653 */ 654 static int reallocate_resource(struct resource *root, struct resource *old, 655 resource_size_t newsize, 656 struct resource_constraint *constraint) 657 { 658 int err=0; 659 struct resource new = *old; 660 struct resource *conflict; 661 662 write_lock(&resource_lock); 663 664 if ((err = __find_resource(root, old, &new, newsize, constraint))) 665 goto out; 666 667 if (resource_contains(&new, old)) { 668 old->start = new.start; 669 old->end = new.end; 670 goto out; 671 } 672 673 if (old->child) { 674 err = -EBUSY; 675 goto out; 676 } 677 678 if (resource_contains(old, &new)) { 679 old->start = new.start; 680 old->end = new.end; 681 } else { 682 __release_resource(old, true); 683 *old = new; 684 conflict = __request_resource(root, old); 685 BUG_ON(conflict); 686 } 687 out: 688 write_unlock(&resource_lock); 689 return err; 690 } 691 692 693 /** 694 * allocate_resource - allocate empty slot in the resource tree given range & alignment. 695 * The resource will be reallocated with a new size if it was already allocated 696 * @root: root resource descriptor 697 * @new: resource descriptor desired by caller 698 * @size: requested resource region size 699 * @min: minimum boundary to allocate 700 * @max: maximum boundary to allocate 701 * @align: alignment requested, in bytes 702 * @alignf: alignment function, optional, called if not NULL 703 * @alignf_data: arbitrary data to pass to the @alignf function 704 */ 705 int allocate_resource(struct resource *root, struct resource *new, 706 resource_size_t size, resource_size_t min, 707 resource_size_t max, resource_size_t align, 708 resource_size_t (*alignf)(void *, 709 const struct resource *, 710 resource_size_t, 711 resource_size_t), 712 void *alignf_data) 713 { 714 int err; 715 struct resource_constraint constraint; 716 717 if (!alignf) 718 alignf = simple_align_resource; 719 720 constraint.min = min; 721 constraint.max = max; 722 constraint.align = align; 723 constraint.alignf = alignf; 724 constraint.alignf_data = alignf_data; 725 726 if ( new->parent ) { 727 /* resource is already allocated, try reallocating with 728 the new constraints */ 729 return reallocate_resource(root, new, size, &constraint); 730 } 731 732 write_lock(&resource_lock); 733 err = find_resource(root, new, size, &constraint); 734 if (err >= 0 && __request_resource(root, new)) 735 err = -EBUSY; 736 write_unlock(&resource_lock); 737 return err; 738 } 739 740 EXPORT_SYMBOL(allocate_resource); 741 742 /** 743 * lookup_resource - find an existing resource by a resource start address 744 * @root: root resource descriptor 745 * @start: resource start address 746 * 747 * Returns a pointer to the resource if found, NULL otherwise 748 */ 749 struct resource *lookup_resource(struct resource *root, resource_size_t start) 750 { 751 struct resource *res; 752 753 read_lock(&resource_lock); 754 for (res = root->child; res; res = res->sibling) { 755 if (res->start == start) 756 break; 757 } 758 read_unlock(&resource_lock); 759 760 return res; 761 } 762 763 /* 764 * Insert a resource into the resource tree. If successful, return NULL, 765 * otherwise return the conflicting resource (compare to __request_resource()) 766 */ 767 static struct resource * __insert_resource(struct resource *parent, struct resource *new) 768 { 769 struct resource *first, *next; 770 771 for (;; parent = first) { 772 first = __request_resource(parent, new); 773 if (!first) 774 return first; 775 776 if (first == parent) 777 return first; 778 if (WARN_ON(first == new)) /* duplicated insertion */ 779 return first; 780 781 if ((first->start > new->start) || (first->end < new->end)) 782 break; 783 if ((first->start == new->start) && (first->end == new->end)) 784 break; 785 } 786 787 for (next = first; ; next = next->sibling) { 788 /* Partial overlap? Bad, and unfixable */ 789 if (next->start < new->start || next->end > new->end) 790 return next; 791 if (!next->sibling) 792 break; 793 if (next->sibling->start > new->end) 794 break; 795 } 796 797 new->parent = parent; 798 new->sibling = next->sibling; 799 new->child = first; 800 801 next->sibling = NULL; 802 for (next = first; next; next = next->sibling) 803 next->parent = new; 804 805 if (parent->child == first) { 806 parent->child = new; 807 } else { 808 next = parent->child; 809 while (next->sibling != first) 810 next = next->sibling; 811 next->sibling = new; 812 } 813 return NULL; 814 } 815 816 /** 817 * insert_resource_conflict - Inserts resource in the resource tree 818 * @parent: parent of the new resource 819 * @new: new resource to insert 820 * 821 * Returns 0 on success, conflict resource if the resource can't be inserted. 822 * 823 * This function is equivalent to request_resource_conflict when no conflict 824 * happens. If a conflict happens, and the conflicting resources 825 * entirely fit within the range of the new resource, then the new 826 * resource is inserted and the conflicting resources become children of 827 * the new resource. 828 * 829 * This function is intended for producers of resources, such as FW modules 830 * and bus drivers. 831 */ 832 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) 833 { 834 struct resource *conflict; 835 836 write_lock(&resource_lock); 837 conflict = __insert_resource(parent, new); 838 write_unlock(&resource_lock); 839 return conflict; 840 } 841 842 /** 843 * insert_resource - Inserts a resource in the resource tree 844 * @parent: parent of the new resource 845 * @new: new resource to insert 846 * 847 * Returns 0 on success, -EBUSY if the resource can't be inserted. 848 * 849 * This function is intended for producers of resources, such as FW modules 850 * and bus drivers. 851 */ 852 int insert_resource(struct resource *parent, struct resource *new) 853 { 854 struct resource *conflict; 855 856 conflict = insert_resource_conflict(parent, new); 857 return conflict ? -EBUSY : 0; 858 } 859 EXPORT_SYMBOL_GPL(insert_resource); 860 861 /** 862 * insert_resource_expand_to_fit - Insert a resource into the resource tree 863 * @root: root resource descriptor 864 * @new: new resource to insert 865 * 866 * Insert a resource into the resource tree, possibly expanding it in order 867 * to make it encompass any conflicting resources. 868 */ 869 void insert_resource_expand_to_fit(struct resource *root, struct resource *new) 870 { 871 if (new->parent) 872 return; 873 874 write_lock(&resource_lock); 875 for (;;) { 876 struct resource *conflict; 877 878 conflict = __insert_resource(root, new); 879 if (!conflict) 880 break; 881 if (conflict == root) 882 break; 883 884 /* Ok, expand resource to cover the conflict, then try again .. */ 885 if (conflict->start < new->start) 886 new->start = conflict->start; 887 if (conflict->end > new->end) 888 new->end = conflict->end; 889 890 printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); 891 } 892 write_unlock(&resource_lock); 893 } 894 895 /** 896 * remove_resource - Remove a resource in the resource tree 897 * @old: resource to remove 898 * 899 * Returns 0 on success, -EINVAL if the resource is not valid. 900 * 901 * This function removes a resource previously inserted by insert_resource() 902 * or insert_resource_conflict(), and moves the children (if any) up to 903 * where they were before. insert_resource() and insert_resource_conflict() 904 * insert a new resource, and move any conflicting resources down to the 905 * children of the new resource. 906 * 907 * insert_resource(), insert_resource_conflict() and remove_resource() are 908 * intended for producers of resources, such as FW modules and bus drivers. 909 */ 910 int remove_resource(struct resource *old) 911 { 912 int retval; 913 914 write_lock(&resource_lock); 915 retval = __release_resource(old, false); 916 write_unlock(&resource_lock); 917 return retval; 918 } 919 EXPORT_SYMBOL_GPL(remove_resource); 920 921 static int __adjust_resource(struct resource *res, resource_size_t start, 922 resource_size_t size) 923 { 924 struct resource *tmp, *parent = res->parent; 925 resource_size_t end = start + size - 1; 926 int result = -EBUSY; 927 928 if (!parent) 929 goto skip; 930 931 if ((start < parent->start) || (end > parent->end)) 932 goto out; 933 934 if (res->sibling && (res->sibling->start <= end)) 935 goto out; 936 937 tmp = parent->child; 938 if (tmp != res) { 939 while (tmp->sibling != res) 940 tmp = tmp->sibling; 941 if (start <= tmp->end) 942 goto out; 943 } 944 945 skip: 946 for (tmp = res->child; tmp; tmp = tmp->sibling) 947 if ((tmp->start < start) || (tmp->end > end)) 948 goto out; 949 950 res->start = start; 951 res->end = end; 952 result = 0; 953 954 out: 955 return result; 956 } 957 958 /** 959 * adjust_resource - modify a resource's start and size 960 * @res: resource to modify 961 * @start: new start value 962 * @size: new size 963 * 964 * Given an existing resource, change its start and size to match the 965 * arguments. Returns 0 on success, -EBUSY if it can't fit. 966 * Existing children of the resource are assumed to be immutable. 967 */ 968 int adjust_resource(struct resource *res, resource_size_t start, 969 resource_size_t size) 970 { 971 int result; 972 973 write_lock(&resource_lock); 974 result = __adjust_resource(res, start, size); 975 write_unlock(&resource_lock); 976 return result; 977 } 978 EXPORT_SYMBOL(adjust_resource); 979 980 static void __init 981 __reserve_region_with_split(struct resource *root, resource_size_t start, 982 resource_size_t end, const char *name) 983 { 984 struct resource *parent = root; 985 struct resource *conflict; 986 struct resource *res = alloc_resource(GFP_ATOMIC); 987 struct resource *next_res = NULL; 988 int type = resource_type(root); 989 990 if (!res) 991 return; 992 993 res->name = name; 994 res->start = start; 995 res->end = end; 996 res->flags = type | IORESOURCE_BUSY; 997 res->desc = IORES_DESC_NONE; 998 999 while (1) { 1000 1001 conflict = __request_resource(parent, res); 1002 if (!conflict) { 1003 if (!next_res) 1004 break; 1005 res = next_res; 1006 next_res = NULL; 1007 continue; 1008 } 1009 1010 /* conflict covered whole area */ 1011 if (conflict->start <= res->start && 1012 conflict->end >= res->end) { 1013 free_resource(res); 1014 WARN_ON(next_res); 1015 break; 1016 } 1017 1018 /* failed, split and try again */ 1019 if (conflict->start > res->start) { 1020 end = res->end; 1021 res->end = conflict->start - 1; 1022 if (conflict->end < end) { 1023 next_res = alloc_resource(GFP_ATOMIC); 1024 if (!next_res) { 1025 free_resource(res); 1026 break; 1027 } 1028 next_res->name = name; 1029 next_res->start = conflict->end + 1; 1030 next_res->end = end; 1031 next_res->flags = type | IORESOURCE_BUSY; 1032 next_res->desc = IORES_DESC_NONE; 1033 } 1034 } else { 1035 res->start = conflict->end + 1; 1036 } 1037 } 1038 1039 } 1040 1041 void __init 1042 reserve_region_with_split(struct resource *root, resource_size_t start, 1043 resource_size_t end, const char *name) 1044 { 1045 int abort = 0; 1046 1047 write_lock(&resource_lock); 1048 if (root->start > start || root->end < end) { 1049 pr_err("requested range [0x%llx-0x%llx] not in root %pr\n", 1050 (unsigned long long)start, (unsigned long long)end, 1051 root); 1052 if (start > root->end || end < root->start) 1053 abort = 1; 1054 else { 1055 if (end > root->end) 1056 end = root->end; 1057 if (start < root->start) 1058 start = root->start; 1059 pr_err("fixing request to [0x%llx-0x%llx]\n", 1060 (unsigned long long)start, 1061 (unsigned long long)end); 1062 } 1063 dump_stack(); 1064 } 1065 if (!abort) 1066 __reserve_region_with_split(root, start, end, name); 1067 write_unlock(&resource_lock); 1068 } 1069 1070 /** 1071 * resource_alignment - calculate resource's alignment 1072 * @res: resource pointer 1073 * 1074 * Returns alignment on success, 0 (invalid alignment) on failure. 1075 */ 1076 resource_size_t resource_alignment(struct resource *res) 1077 { 1078 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { 1079 case IORESOURCE_SIZEALIGN: 1080 return resource_size(res); 1081 case IORESOURCE_STARTALIGN: 1082 return res->start; 1083 default: 1084 return 0; 1085 } 1086 } 1087 1088 /* 1089 * This is compatibility stuff for IO resources. 1090 * 1091 * Note how this, unlike the above, knows about 1092 * the IO flag meanings (busy etc). 1093 * 1094 * request_region creates a new busy region. 1095 * 1096 * release_region releases a matching busy region. 1097 */ 1098 1099 static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait); 1100 1101 static struct inode *iomem_inode; 1102 1103 #ifdef CONFIG_IO_STRICT_DEVMEM 1104 static void revoke_iomem(struct resource *res) 1105 { 1106 /* pairs with smp_store_release() in iomem_init_inode() */ 1107 struct inode *inode = smp_load_acquire(&iomem_inode); 1108 1109 /* 1110 * Check that the initialization has completed. Losing the race 1111 * is ok because it means drivers are claiming resources before 1112 * the fs_initcall level of init and prevent iomem_get_mapping users 1113 * from establishing mappings. 1114 */ 1115 if (!inode) 1116 return; 1117 1118 /* 1119 * The expectation is that the driver has successfully marked 1120 * the resource busy by this point, so devmem_is_allowed() 1121 * should start returning false, however for performance this 1122 * does not iterate the entire resource range. 1123 */ 1124 if (devmem_is_allowed(PHYS_PFN(res->start)) && 1125 devmem_is_allowed(PHYS_PFN(res->end))) { 1126 /* 1127 * *cringe* iomem=relaxed says "go ahead, what's the 1128 * worst that can happen?" 1129 */ 1130 return; 1131 } 1132 1133 unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1); 1134 } 1135 #else 1136 static void revoke_iomem(struct resource *res) {} 1137 #endif 1138 1139 struct address_space *iomem_get_mapping(void) 1140 { 1141 /* 1142 * This function is only called from file open paths, hence guaranteed 1143 * that fs_initcalls have completed and no need to check for NULL. But 1144 * since revoke_iomem can be called before the initcall we still need 1145 * the barrier to appease checkers. 1146 */ 1147 return smp_load_acquire(&iomem_inode)->i_mapping; 1148 } 1149 1150 static int __request_region_locked(struct resource *res, struct resource *parent, 1151 resource_size_t start, resource_size_t n, 1152 const char *name, int flags) 1153 { 1154 DECLARE_WAITQUEUE(wait, current); 1155 1156 res->name = name; 1157 res->start = start; 1158 res->end = start + n - 1; 1159 1160 for (;;) { 1161 struct resource *conflict; 1162 1163 res->flags = resource_type(parent) | resource_ext_type(parent); 1164 res->flags |= IORESOURCE_BUSY | flags; 1165 res->desc = parent->desc; 1166 1167 conflict = __request_resource(parent, res); 1168 if (!conflict) 1169 break; 1170 /* 1171 * mm/hmm.c reserves physical addresses which then 1172 * become unavailable to other users. Conflicts are 1173 * not expected. Warn to aid debugging if encountered. 1174 */ 1175 if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) { 1176 pr_warn("Unaddressable device %s %pR conflicts with %pR", 1177 conflict->name, conflict, res); 1178 } 1179 if (conflict != parent) { 1180 if (!(conflict->flags & IORESOURCE_BUSY)) { 1181 parent = conflict; 1182 continue; 1183 } 1184 } 1185 if (conflict->flags & flags & IORESOURCE_MUXED) { 1186 add_wait_queue(&muxed_resource_wait, &wait); 1187 write_unlock(&resource_lock); 1188 set_current_state(TASK_UNINTERRUPTIBLE); 1189 schedule(); 1190 remove_wait_queue(&muxed_resource_wait, &wait); 1191 write_lock(&resource_lock); 1192 continue; 1193 } 1194 /* Uhhuh, that didn't work out.. */ 1195 return -EBUSY; 1196 } 1197 1198 return 0; 1199 } 1200 1201 /** 1202 * __request_region - create a new busy resource region 1203 * @parent: parent resource descriptor 1204 * @start: resource start address 1205 * @n: resource region size 1206 * @name: reserving caller's ID string 1207 * @flags: IO resource flags 1208 */ 1209 struct resource *__request_region(struct resource *parent, 1210 resource_size_t start, resource_size_t n, 1211 const char *name, int flags) 1212 { 1213 struct resource *res = alloc_resource(GFP_KERNEL); 1214 int ret; 1215 1216 if (!res) 1217 return NULL; 1218 1219 write_lock(&resource_lock); 1220 ret = __request_region_locked(res, parent, start, n, name, flags); 1221 write_unlock(&resource_lock); 1222 1223 if (ret) { 1224 free_resource(res); 1225 return NULL; 1226 } 1227 1228 if (parent == &iomem_resource) 1229 revoke_iomem(res); 1230 1231 return res; 1232 } 1233 EXPORT_SYMBOL(__request_region); 1234 1235 /** 1236 * __release_region - release a previously reserved resource region 1237 * @parent: parent resource descriptor 1238 * @start: resource start address 1239 * @n: resource region size 1240 * 1241 * The described resource region must match a currently busy region. 1242 */ 1243 void __release_region(struct resource *parent, resource_size_t start, 1244 resource_size_t n) 1245 { 1246 struct resource **p; 1247 resource_size_t end; 1248 1249 p = &parent->child; 1250 end = start + n - 1; 1251 1252 write_lock(&resource_lock); 1253 1254 for (;;) { 1255 struct resource *res = *p; 1256 1257 if (!res) 1258 break; 1259 if (res->start <= start && res->end >= end) { 1260 if (!(res->flags & IORESOURCE_BUSY)) { 1261 p = &res->child; 1262 continue; 1263 } 1264 if (res->start != start || res->end != end) 1265 break; 1266 *p = res->sibling; 1267 write_unlock(&resource_lock); 1268 if (res->flags & IORESOURCE_MUXED) 1269 wake_up(&muxed_resource_wait); 1270 free_resource(res); 1271 return; 1272 } 1273 p = &res->sibling; 1274 } 1275 1276 write_unlock(&resource_lock); 1277 1278 printk(KERN_WARNING "Trying to free nonexistent resource " 1279 "<%016llx-%016llx>\n", (unsigned long long)start, 1280 (unsigned long long)end); 1281 } 1282 EXPORT_SYMBOL(__release_region); 1283 1284 #ifdef CONFIG_MEMORY_HOTREMOVE 1285 /** 1286 * release_mem_region_adjustable - release a previously reserved memory region 1287 * @start: resource start address 1288 * @size: resource region size 1289 * 1290 * This interface is intended for memory hot-delete. The requested region 1291 * is released from a currently busy memory resource. The requested region 1292 * must either match exactly or fit into a single busy resource entry. In 1293 * the latter case, the remaining resource is adjusted accordingly. 1294 * Existing children of the busy memory resource must be immutable in the 1295 * request. 1296 * 1297 * Note: 1298 * - Additional release conditions, such as overlapping region, can be 1299 * supported after they are confirmed as valid cases. 1300 * - When a busy memory resource gets split into two entries, the code 1301 * assumes that all children remain in the lower address entry for 1302 * simplicity. Enhance this logic when necessary. 1303 */ 1304 void release_mem_region_adjustable(resource_size_t start, resource_size_t size) 1305 { 1306 struct resource *parent = &iomem_resource; 1307 struct resource *new_res = NULL; 1308 bool alloc_nofail = false; 1309 struct resource **p; 1310 struct resource *res; 1311 resource_size_t end; 1312 1313 end = start + size - 1; 1314 if (WARN_ON_ONCE((start < parent->start) || (end > parent->end))) 1315 return; 1316 1317 /* 1318 * We free up quite a lot of memory on memory hotunplug (esp., memap), 1319 * just before releasing the region. This is highly unlikely to 1320 * fail - let's play save and make it never fail as the caller cannot 1321 * perform any error handling (e.g., trying to re-add memory will fail 1322 * similarly). 1323 */ 1324 retry: 1325 new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0)); 1326 1327 p = &parent->child; 1328 write_lock(&resource_lock); 1329 1330 while ((res = *p)) { 1331 if (res->start >= end) 1332 break; 1333 1334 /* look for the next resource if it does not fit into */ 1335 if (res->start > start || res->end < end) { 1336 p = &res->sibling; 1337 continue; 1338 } 1339 1340 /* 1341 * All memory regions added from memory-hotplug path have the 1342 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have 1343 * this flag, we know that we are dealing with a resource coming 1344 * from HMM/devm. HMM/devm use another mechanism to add/release 1345 * a resource. This goes via devm_request_mem_region and 1346 * devm_release_mem_region. 1347 * HMM/devm take care to release their resources when they want, 1348 * so if we are dealing with them, let us just back off here. 1349 */ 1350 if (!(res->flags & IORESOURCE_SYSRAM)) { 1351 break; 1352 } 1353 1354 if (!(res->flags & IORESOURCE_MEM)) 1355 break; 1356 1357 if (!(res->flags & IORESOURCE_BUSY)) { 1358 p = &res->child; 1359 continue; 1360 } 1361 1362 /* found the target resource; let's adjust accordingly */ 1363 if (res->start == start && res->end == end) { 1364 /* free the whole entry */ 1365 *p = res->sibling; 1366 free_resource(res); 1367 } else if (res->start == start && res->end != end) { 1368 /* adjust the start */ 1369 WARN_ON_ONCE(__adjust_resource(res, end + 1, 1370 res->end - end)); 1371 } else if (res->start != start && res->end == end) { 1372 /* adjust the end */ 1373 WARN_ON_ONCE(__adjust_resource(res, res->start, 1374 start - res->start)); 1375 } else { 1376 /* split into two entries - we need a new resource */ 1377 if (!new_res) { 1378 new_res = alloc_resource(GFP_ATOMIC); 1379 if (!new_res) { 1380 alloc_nofail = true; 1381 write_unlock(&resource_lock); 1382 goto retry; 1383 } 1384 } 1385 new_res->name = res->name; 1386 new_res->start = end + 1; 1387 new_res->end = res->end; 1388 new_res->flags = res->flags; 1389 new_res->desc = res->desc; 1390 new_res->parent = res->parent; 1391 new_res->sibling = res->sibling; 1392 new_res->child = NULL; 1393 1394 if (WARN_ON_ONCE(__adjust_resource(res, res->start, 1395 start - res->start))) 1396 break; 1397 res->sibling = new_res; 1398 new_res = NULL; 1399 } 1400 1401 break; 1402 } 1403 1404 write_unlock(&resource_lock); 1405 free_resource(new_res); 1406 } 1407 #endif /* CONFIG_MEMORY_HOTREMOVE */ 1408 1409 #ifdef CONFIG_MEMORY_HOTPLUG 1410 static bool system_ram_resources_mergeable(struct resource *r1, 1411 struct resource *r2) 1412 { 1413 /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */ 1414 return r1->flags == r2->flags && r1->end + 1 == r2->start && 1415 r1->name == r2->name && r1->desc == r2->desc && 1416 !r1->child && !r2->child; 1417 } 1418 1419 /** 1420 * merge_system_ram_resource - mark the System RAM resource mergeable and try to 1421 * merge it with adjacent, mergeable resources 1422 * @res: resource descriptor 1423 * 1424 * This interface is intended for memory hotplug, whereby lots of contiguous 1425 * system ram resources are added (e.g., via add_memory*()) by a driver, and 1426 * the actual resource boundaries are not of interest (e.g., it might be 1427 * relevant for DIMMs). Only resources that are marked mergeable, that have the 1428 * same parent, and that don't have any children are considered. All mergeable 1429 * resources must be immutable during the request. 1430 * 1431 * Note: 1432 * - The caller has to make sure that no pointers to resources that are 1433 * marked mergeable are used anymore after this call - the resource might 1434 * be freed and the pointer might be stale! 1435 * - release_mem_region_adjustable() will split on demand on memory hotunplug 1436 */ 1437 void merge_system_ram_resource(struct resource *res) 1438 { 1439 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; 1440 struct resource *cur; 1441 1442 if (WARN_ON_ONCE((res->flags & flags) != flags)) 1443 return; 1444 1445 write_lock(&resource_lock); 1446 res->flags |= IORESOURCE_SYSRAM_MERGEABLE; 1447 1448 /* Try to merge with next item in the list. */ 1449 cur = res->sibling; 1450 if (cur && system_ram_resources_mergeable(res, cur)) { 1451 res->end = cur->end; 1452 res->sibling = cur->sibling; 1453 free_resource(cur); 1454 } 1455 1456 /* Try to merge with previous item in the list. */ 1457 cur = res->parent->child; 1458 while (cur && cur->sibling != res) 1459 cur = cur->sibling; 1460 if (cur && system_ram_resources_mergeable(cur, res)) { 1461 cur->end = res->end; 1462 cur->sibling = res->sibling; 1463 free_resource(res); 1464 } 1465 write_unlock(&resource_lock); 1466 } 1467 #endif /* CONFIG_MEMORY_HOTPLUG */ 1468 1469 /* 1470 * Managed region resource 1471 */ 1472 static void devm_resource_release(struct device *dev, void *ptr) 1473 { 1474 struct resource **r = ptr; 1475 1476 release_resource(*r); 1477 } 1478 1479 /** 1480 * devm_request_resource() - request and reserve an I/O or memory resource 1481 * @dev: device for which to request the resource 1482 * @root: root of the resource tree from which to request the resource 1483 * @new: descriptor of the resource to request 1484 * 1485 * This is a device-managed version of request_resource(). There is usually 1486 * no need to release resources requested by this function explicitly since 1487 * that will be taken care of when the device is unbound from its driver. 1488 * If for some reason the resource needs to be released explicitly, because 1489 * of ordering issues for example, drivers must call devm_release_resource() 1490 * rather than the regular release_resource(). 1491 * 1492 * When a conflict is detected between any existing resources and the newly 1493 * requested resource, an error message will be printed. 1494 * 1495 * Returns 0 on success or a negative error code on failure. 1496 */ 1497 int devm_request_resource(struct device *dev, struct resource *root, 1498 struct resource *new) 1499 { 1500 struct resource *conflict, **ptr; 1501 1502 ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL); 1503 if (!ptr) 1504 return -ENOMEM; 1505 1506 *ptr = new; 1507 1508 conflict = request_resource_conflict(root, new); 1509 if (conflict) { 1510 dev_err(dev, "resource collision: %pR conflicts with %s %pR\n", 1511 new, conflict->name, conflict); 1512 devres_free(ptr); 1513 return -EBUSY; 1514 } 1515 1516 devres_add(dev, ptr); 1517 return 0; 1518 } 1519 EXPORT_SYMBOL(devm_request_resource); 1520 1521 static int devm_resource_match(struct device *dev, void *res, void *data) 1522 { 1523 struct resource **ptr = res; 1524 1525 return *ptr == data; 1526 } 1527 1528 /** 1529 * devm_release_resource() - release a previously requested resource 1530 * @dev: device for which to release the resource 1531 * @new: descriptor of the resource to release 1532 * 1533 * Releases a resource previously requested using devm_request_resource(). 1534 */ 1535 void devm_release_resource(struct device *dev, struct resource *new) 1536 { 1537 WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match, 1538 new)); 1539 } 1540 EXPORT_SYMBOL(devm_release_resource); 1541 1542 struct region_devres { 1543 struct resource *parent; 1544 resource_size_t start; 1545 resource_size_t n; 1546 }; 1547 1548 static void devm_region_release(struct device *dev, void *res) 1549 { 1550 struct region_devres *this = res; 1551 1552 __release_region(this->parent, this->start, this->n); 1553 } 1554 1555 static int devm_region_match(struct device *dev, void *res, void *match_data) 1556 { 1557 struct region_devres *this = res, *match = match_data; 1558 1559 return this->parent == match->parent && 1560 this->start == match->start && this->n == match->n; 1561 } 1562 1563 struct resource * 1564 __devm_request_region(struct device *dev, struct resource *parent, 1565 resource_size_t start, resource_size_t n, const char *name) 1566 { 1567 struct region_devres *dr = NULL; 1568 struct resource *res; 1569 1570 dr = devres_alloc(devm_region_release, sizeof(struct region_devres), 1571 GFP_KERNEL); 1572 if (!dr) 1573 return NULL; 1574 1575 dr->parent = parent; 1576 dr->start = start; 1577 dr->n = n; 1578 1579 res = __request_region(parent, start, n, name, 0); 1580 if (res) 1581 devres_add(dev, dr); 1582 else 1583 devres_free(dr); 1584 1585 return res; 1586 } 1587 EXPORT_SYMBOL(__devm_request_region); 1588 1589 void __devm_release_region(struct device *dev, struct resource *parent, 1590 resource_size_t start, resource_size_t n) 1591 { 1592 struct region_devres match_data = { parent, start, n }; 1593 1594 __release_region(parent, start, n); 1595 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, 1596 &match_data)); 1597 } 1598 EXPORT_SYMBOL(__devm_release_region); 1599 1600 /* 1601 * Reserve I/O ports or memory based on "reserve=" kernel parameter. 1602 */ 1603 #define MAXRESERVE 4 1604 static int __init reserve_setup(char *str) 1605 { 1606 static int reserved; 1607 static struct resource reserve[MAXRESERVE]; 1608 1609 for (;;) { 1610 unsigned int io_start, io_num; 1611 int x = reserved; 1612 struct resource *parent; 1613 1614 if (get_option(&str, &io_start) != 2) 1615 break; 1616 if (get_option(&str, &io_num) == 0) 1617 break; 1618 if (x < MAXRESERVE) { 1619 struct resource *res = reserve + x; 1620 1621 /* 1622 * If the region starts below 0x10000, we assume it's 1623 * I/O port space; otherwise assume it's memory. 1624 */ 1625 if (io_start < 0x10000) { 1626 res->flags = IORESOURCE_IO; 1627 parent = &ioport_resource; 1628 } else { 1629 res->flags = IORESOURCE_MEM; 1630 parent = &iomem_resource; 1631 } 1632 res->name = "reserved"; 1633 res->start = io_start; 1634 res->end = io_start + io_num - 1; 1635 res->flags |= IORESOURCE_BUSY; 1636 res->desc = IORES_DESC_NONE; 1637 res->child = NULL; 1638 if (request_resource(parent, res) == 0) 1639 reserved = x+1; 1640 } 1641 } 1642 return 1; 1643 } 1644 __setup("reserve=", reserve_setup); 1645 1646 /* 1647 * Check if the requested addr and size spans more than any slot in the 1648 * iomem resource tree. 1649 */ 1650 int iomem_map_sanity_check(resource_size_t addr, unsigned long size) 1651 { 1652 struct resource *p = &iomem_resource; 1653 int err = 0; 1654 loff_t l; 1655 1656 read_lock(&resource_lock); 1657 for (p = p->child; p ; p = r_next(NULL, p, &l)) { 1658 /* 1659 * We can probably skip the resources without 1660 * IORESOURCE_IO attribute? 1661 */ 1662 if (p->start >= addr + size) 1663 continue; 1664 if (p->end < addr) 1665 continue; 1666 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && 1667 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) 1668 continue; 1669 /* 1670 * if a resource is "BUSY", it's not a hardware resource 1671 * but a driver mapping of such a resource; we don't want 1672 * to warn for those; some drivers legitimately map only 1673 * partial hardware resources. (example: vesafb) 1674 */ 1675 if (p->flags & IORESOURCE_BUSY) 1676 continue; 1677 1678 printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n", 1679 (unsigned long long)addr, 1680 (unsigned long long)(addr + size - 1), 1681 p->name, p); 1682 err = -1; 1683 break; 1684 } 1685 read_unlock(&resource_lock); 1686 1687 return err; 1688 } 1689 1690 #ifdef CONFIG_STRICT_DEVMEM 1691 static int strict_iomem_checks = 1; 1692 #else 1693 static int strict_iomem_checks; 1694 #endif 1695 1696 /* 1697 * Check if an address is exclusive to the kernel and must not be mapped to 1698 * user space, for example, via /dev/mem. 1699 * 1700 * Returns true if exclusive to the kernel, otherwise returns false. 1701 */ 1702 bool iomem_is_exclusive(u64 addr) 1703 { 1704 const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM | 1705 IORESOURCE_EXCLUSIVE; 1706 bool skip_children = false, err = false; 1707 int size = PAGE_SIZE; 1708 struct resource *p; 1709 1710 addr = addr & PAGE_MASK; 1711 1712 read_lock(&resource_lock); 1713 for_each_resource(&iomem_resource, p, skip_children) { 1714 if (p->start >= addr + size) 1715 break; 1716 if (p->end < addr) { 1717 skip_children = true; 1718 continue; 1719 } 1720 skip_children = false; 1721 1722 /* 1723 * IORESOURCE_SYSTEM_RAM resources are exclusive if 1724 * IORESOURCE_EXCLUSIVE is set, even if they 1725 * are not busy and even if "iomem=relaxed" is set. The 1726 * responsible driver dynamically adds/removes system RAM within 1727 * such an area and uncontrolled access is dangerous. 1728 */ 1729 if ((p->flags & exclusive_system_ram) == exclusive_system_ram) { 1730 err = true; 1731 break; 1732 } 1733 1734 /* 1735 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set 1736 * or CONFIG_IO_STRICT_DEVMEM is enabled and the 1737 * resource is busy. 1738 */ 1739 if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY)) 1740 continue; 1741 if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) 1742 || p->flags & IORESOURCE_EXCLUSIVE) { 1743 err = true; 1744 break; 1745 } 1746 } 1747 read_unlock(&resource_lock); 1748 1749 return err; 1750 } 1751 1752 struct resource_entry *resource_list_create_entry(struct resource *res, 1753 size_t extra_size) 1754 { 1755 struct resource_entry *entry; 1756 1757 entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); 1758 if (entry) { 1759 INIT_LIST_HEAD(&entry->node); 1760 entry->res = res ? res : &entry->__res; 1761 } 1762 1763 return entry; 1764 } 1765 EXPORT_SYMBOL(resource_list_create_entry); 1766 1767 void resource_list_free(struct list_head *head) 1768 { 1769 struct resource_entry *entry, *tmp; 1770 1771 list_for_each_entry_safe(entry, tmp, head, node) 1772 resource_list_destroy_entry(entry); 1773 } 1774 EXPORT_SYMBOL(resource_list_free); 1775 1776 #ifdef CONFIG_DEVICE_PRIVATE 1777 static struct resource *__request_free_mem_region(struct device *dev, 1778 struct resource *base, unsigned long size, const char *name) 1779 { 1780 resource_size_t end, addr; 1781 struct resource *res; 1782 struct region_devres *dr = NULL; 1783 1784 size = ALIGN(size, 1UL << PA_SECTION_SHIFT); 1785 end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1); 1786 addr = end - size + 1UL; 1787 1788 res = alloc_resource(GFP_KERNEL); 1789 if (!res) 1790 return ERR_PTR(-ENOMEM); 1791 1792 if (dev) { 1793 dr = devres_alloc(devm_region_release, 1794 sizeof(struct region_devres), GFP_KERNEL); 1795 if (!dr) { 1796 free_resource(res); 1797 return ERR_PTR(-ENOMEM); 1798 } 1799 } 1800 1801 write_lock(&resource_lock); 1802 for (; addr > size && addr >= base->start; addr -= size) { 1803 if (__region_intersects(addr, size, 0, IORES_DESC_NONE) != 1804 REGION_DISJOINT) 1805 continue; 1806 1807 if (__request_region_locked(res, &iomem_resource, addr, size, 1808 name, 0)) 1809 break; 1810 1811 if (dev) { 1812 dr->parent = &iomem_resource; 1813 dr->start = addr; 1814 dr->n = size; 1815 devres_add(dev, dr); 1816 } 1817 1818 res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; 1819 write_unlock(&resource_lock); 1820 1821 /* 1822 * A driver is claiming this region so revoke any mappings. 1823 */ 1824 revoke_iomem(res); 1825 return res; 1826 } 1827 write_unlock(&resource_lock); 1828 1829 free_resource(res); 1830 if (dr) 1831 devres_free(dr); 1832 1833 return ERR_PTR(-ERANGE); 1834 } 1835 1836 /** 1837 * devm_request_free_mem_region - find free region for device private memory 1838 * 1839 * @dev: device struct to bind the resource to 1840 * @size: size in bytes of the device memory to add 1841 * @base: resource tree to look in 1842 * 1843 * This function tries to find an empty range of physical address big enough to 1844 * contain the new resource, so that it can later be hotplugged as ZONE_DEVICE 1845 * memory, which in turn allocates struct pages. 1846 */ 1847 struct resource *devm_request_free_mem_region(struct device *dev, 1848 struct resource *base, unsigned long size) 1849 { 1850 return __request_free_mem_region(dev, base, size, dev_name(dev)); 1851 } 1852 EXPORT_SYMBOL_GPL(devm_request_free_mem_region); 1853 1854 struct resource *request_free_mem_region(struct resource *base, 1855 unsigned long size, const char *name) 1856 { 1857 return __request_free_mem_region(NULL, base, size, name); 1858 } 1859 EXPORT_SYMBOL_GPL(request_free_mem_region); 1860 1861 #endif /* CONFIG_DEVICE_PRIVATE */ 1862 1863 static int __init strict_iomem(char *str) 1864 { 1865 if (strstr(str, "relaxed")) 1866 strict_iomem_checks = 0; 1867 if (strstr(str, "strict")) 1868 strict_iomem_checks = 1; 1869 return 1; 1870 } 1871 1872 static int iomem_fs_init_fs_context(struct fs_context *fc) 1873 { 1874 return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM; 1875 } 1876 1877 static struct file_system_type iomem_fs_type = { 1878 .name = "iomem", 1879 .owner = THIS_MODULE, 1880 .init_fs_context = iomem_fs_init_fs_context, 1881 .kill_sb = kill_anon_super, 1882 }; 1883 1884 static int __init iomem_init_inode(void) 1885 { 1886 static struct vfsmount *iomem_vfs_mount; 1887 static int iomem_fs_cnt; 1888 struct inode *inode; 1889 int rc; 1890 1891 rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt); 1892 if (rc < 0) { 1893 pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc); 1894 return rc; 1895 } 1896 1897 inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb); 1898 if (IS_ERR(inode)) { 1899 rc = PTR_ERR(inode); 1900 pr_err("Cannot allocate inode for iomem: %d\n", rc); 1901 simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt); 1902 return rc; 1903 } 1904 1905 /* 1906 * Publish iomem revocation inode initialized. 1907 * Pairs with smp_load_acquire() in revoke_iomem(). 1908 */ 1909 smp_store_release(&iomem_inode, inode); 1910 1911 return 0; 1912 } 1913 1914 fs_initcall(iomem_init_inode); 1915 1916 __setup("iomem=", strict_iomem); 1917